From 11119333fabe16a6e1f10a8a264013776ba1e97c Mon Sep 17 00:00:00 2001
From: Adriana Draghici
Date: Fri, 27 Nov 2009 18:56:44 +0000
Subject: [PATCH] Tribler: version without .svn dirs.

---
 tribler-mod/Tribler/Category/Category.py | 386 ++
 tribler-mod/Tribler/Category/Category.py.bak | 385 ++
 tribler-mod/Tribler/Category/FamilyFilter.py | 112 +
 .../Tribler/Category/FamilyFilter.py.bak | 111 +
 tribler-mod/Tribler/Category/TestCategory.py | 149 +
 .../Tribler/Category/TestCategory.py.bak | 148 +
 tribler-mod/Tribler/Category/__init__.py | 3 +
 tribler-mod/Tribler/Category/__init__.py.bak | 2 +
 tribler-mod/Tribler/Category/category.conf | 62 +
 .../Tribler/Category/filter_terms.filter | 3903 +++++++++++++++++
 tribler-mod/Tribler/Category/init_category.py | 58 +
 .../Tribler/Category/init_category.py.bak | 57 +
 tribler-mod/Tribler/Category/porncat.txt | 1046 +++++
 tribler-mod/Tribler/Core/API.py | 72 +
 tribler-mod/Tribler/Core/API.py.bak | 71 +
 .../Core/APIImplementation/DownloadImpl.py | 562 +++
 .../APIImplementation/DownloadImpl.py.bak | 561 +++
 .../DownloadRuntimeConfig.py | 518 +++
 .../DownloadRuntimeConfig.py.bak | 517 +++
 .../Core/APIImplementation/LaunchManyCore.py | 806 ++++
 .../APIImplementation/LaunchManyCore.py.bak | 805 ++++
 .../APIImplementation/SessionRuntimeConfig.py | 866 ++++
 .../SessionRuntimeConfig.py.bak | 865 ++++
 .../Core/APIImplementation/SingleDownload.py | 303 ++
 .../APIImplementation/SingleDownload.py.bak | 294 ++
 .../Core/APIImplementation/ThreadPool.py | 181 +
 .../Core/APIImplementation/ThreadPool.py.bak | 180 +
 .../APIImplementation/UserCallbackHandler.py | 130 +
 .../UserCallbackHandler.py.bak | 129 +
 .../Core/APIImplementation/__init__.py | 3 +
 .../Core/APIImplementation/__init__.py.bak | 2 +
 .../Core/APIImplementation/maketorrent.py | 581 +++
 .../Core/APIImplementation/maketorrent.py.bak | 580 +++
 .../Core/APIImplementation/miscutils.py | 43 +
 .../Core/APIImplementation/miscutils.py.bak | 42 +
 tribler-mod/Tribler/Core/Base.py | 31 +
 tribler-mod/Tribler/Core/Base.py.bak | 30 +
 .../Tribler/Core/BitTornado/BT1/Choker.py | 248 ++
 .../Tribler/Core/BitTornado/BT1/Choker.py.bak | 247 ++
 .../Tribler/Core/BitTornado/BT1/Connecter.py | 1251 ++++++
 .../Core/BitTornado/BT1/Connecter.py.bak | 1251 ++++++
 .../Tribler/Core/BitTornado/BT1/Downloader.py | 957 ++++
 .../Core/BitTornado/BT1/Downloader.py.bak | 956 ++++
 .../Core/BitTornado/BT1/DownloaderFeedback.py | 196 +
 .../BitTornado/BT1/DownloaderFeedback.py.bak | 195 +
 .../Tribler/Core/BitTornado/BT1/Encrypter.py | 685 +++
 .../Core/BitTornado/BT1/Encrypter.py.bak | 684 +++
 .../Core/BitTornado/BT1/FileSelector.py | 244 ++
 .../Core/BitTornado/BT1/FileSelector.py.bak | 243 +
 .../Tribler/Core/BitTornado/BT1/Filter.py | 16 +
 .../Tribler/Core/BitTornado/BT1/Filter.py.bak | 15 +
 .../Core/BitTornado/BT1/HTTPDownloader.py | 291 ++
 .../Core/BitTornado/BT1/HTTPDownloader.py.bak | 290 ++
 .../Tribler/Core/BitTornado/BT1/MessageID.py | 236 +
 .../Core/BitTornado/BT1/MessageID.py.bak | 235 +
 .../Tribler/Core/BitTornado/BT1/NatCheck.py | 95 +
 .../Core/BitTornado/BT1/NatCheck.py.bak | 94 +
 .../Core/BitTornado/BT1/PiecePicker.py | 576 +++
 .../Core/BitTornado/BT1/PiecePicker.py.bak | 575 +++
 .../Core/BitTornado/BT1/Rerequester.py | 538 +++
 .../Core/BitTornado/BT1/Rerequester.py.bak | 537 +++
 .../Tribler/Core/BitTornado/BT1/Statistics.py | 180 +
 .../Core/BitTornado/BT1/Statistics.py.bak | 179 +
 .../Tribler/Core/BitTornado/BT1/Storage.py | 595 +++
 .../Core/BitTornado/BT1/Storage.py.bak | 594 +++
 .../Core/BitTornado/BT1/StorageWrapper.py | 1269 ++++++
.../Core/BitTornado/BT1/StorageWrapper.py.bak | 1268 ++++++ .../Tribler/Core/BitTornado/BT1/T2T.py | 192 + .../Tribler/Core/BitTornado/BT1/T2T.py.bak | 191 + .../Tribler/Core/BitTornado/BT1/Uploader.py | 160 + .../Core/BitTornado/BT1/Uploader.py.bak | 159 + .../Tribler/Core/BitTornado/BT1/__init__.py | 3 + .../Core/BitTornado/BT1/__init__.py.bak | 2 + .../Tribler/Core/BitTornado/BT1/btformats.py | 131 + .../Core/BitTornado/BT1/btformats.py.bak | 130 + .../Tribler/Core/BitTornado/BT1/convert.py | 13 + .../Core/BitTornado/BT1/convert.py.bak | 12 + .../Tribler/Core/BitTornado/BT1/fakeopen.py | 88 + .../Core/BitTornado/BT1/fakeopen.py.bak | 87 + .../Tribler/Core/BitTornado/BT1/track.py | 1030 +++++ .../Tribler/Core/BitTornado/BT1/track.py.bak | 1029 +++++ .../Core/BitTornado/CurrentRateMeasure.py | 39 + .../Core/BitTornado/CurrentRateMeasure.py.bak | 38 + .../Tribler/Core/BitTornado/HTTPHandler.py | 195 + .../Core/BitTornado/HTTPHandler.py.bak | 194 + tribler-mod/Tribler/Core/BitTornado/PSYCO.py | 9 + .../Tribler/Core/BitTornado/PSYCO.py.bak | 8 + .../Tribler/Core/BitTornado/RateLimiter.py | 169 + .../Core/BitTornado/RateLimiter.py.bak | 168 + .../Tribler/Core/BitTornado/RateMeasure.py | 71 + .../Core/BitTornado/RateMeasure.py.bak | 70 + .../Tribler/Core/BitTornado/RawServer.py | 263 ++ .../Tribler/Core/BitTornado/RawServer.py.bak | 262 ++ .../Core/BitTornado/ServerPortHandler.py | 239 + .../Core/BitTornado/ServerPortHandler.py.bak | 238 + .../Tribler/Core/BitTornado/SocketHandler.py | 551 +++ .../Core/BitTornado/SocketHandler.py.bak | 550 +++ .../Tribler/Core/BitTornado/__init__.py | 99 + .../Tribler/Core/BitTornado/__init__.py.bak | 98 + .../Tribler/Core/BitTornado/bencode.py | 345 ++ .../Tribler/Core/BitTornado/bencode.py.bak | 344 ++ .../Tribler/Core/BitTornado/bitfield.py | 172 + .../Tribler/Core/BitTornado/bitfield.py.bak | 171 + tribler-mod/Tribler/Core/BitTornado/clock.py | 31 + .../Tribler/Core/BitTornado/clock.py.bak | 30 + .../Tribler/Core/BitTornado/download_bt1.py | 761 ++++ .../Core/BitTornado/download_bt1.py.bak | 760 ++++ .../Tribler/Core/BitTornado/inifile.py | 170 + .../Tribler/Core/BitTornado/inifile.py.bak | 169 + .../Tribler/Core/BitTornado/iprangeparse.py | 195 + .../Core/BitTornado/iprangeparse.py.bak | 194 + .../Tribler/Core/BitTornado/natpunch.py | 382 ++ .../Tribler/Core/BitTornado/natpunch.py.bak | 381 ++ .../Tribler/Core/BitTornado/parseargs.py | 141 + .../Tribler/Core/BitTornado/parseargs.py.bak | 140 + .../Tribler/Core/BitTornado/parsedir.py | 151 + .../Tribler/Core/BitTornado/parsedir.py.bak | 150 + .../Tribler/Core/BitTornado/piecebuffer.py | 87 + .../Core/BitTornado/piecebuffer.py.bak | 86 + .../Tribler/Core/BitTornado/selectpoll.py | 129 + .../Tribler/Core/BitTornado/selectpoll.py.bak | 128 + .../Tribler/Core/BitTornado/subnetparse.py | 219 + .../Core/BitTornado/subnetparse.py.bak | 218 + .../Core/BitTornado/torrentlistparse.py | 39 + .../Core/BitTornado/torrentlistparse.py.bak | 38 + .../Tribler/Core/BitTornado/zurllib.py | 101 + .../Tribler/Core/BitTornado/zurllib.py.bak | 100 + .../Core/BuddyCast/TorrentCollecting.py | 27 + .../Core/BuddyCast/TorrentCollecting.py.bak | 26 + .../Tribler/Core/BuddyCast/__init__.py | 3 + .../Tribler/Core/BuddyCast/__init__.py.bak | 2 + .../Tribler/Core/BuddyCast/bartercast.py | 344 ++ .../Tribler/Core/BuddyCast/bartercast.py.bak | 343 ++ .../Tribler/Core/BuddyCast/buddycast.py | 2482 +++++++++++ .../Tribler/Core/BuddyCast/buddycast.py.bak | 2481 +++++++++++ .../Tribler/Core/BuddyCast/moderationcast.py | 456 ++ 
.../Core/BuddyCast/moderationcast.py.bak | 455 ++ .../Core/BuddyCast/moderationcast_util.py | 388 ++ .../Core/BuddyCast/moderationcast_util.py.bak | 387 ++ .../Tribler/Core/BuddyCast/similarity.py | 101 + .../Tribler/Core/BuddyCast/similarity.py.bak | 100 + .../Tribler/Core/BuddyCast/votecast.py | 204 + .../Tribler/Core/BuddyCast/votecast.py.bak | 203 + .../Tribler/Core/CacheDB/BsdCacheDBHandler.py | 1257 ++++++ .../Core/CacheDB/BsdCacheDBHandler.py.bak | 1256 ++++++ .../Tribler/Core/CacheDB/CacheDBHandler.py | 5 + .../Core/CacheDB/CacheDBHandler.py.bak | 4 + tribler-mod/Tribler/Core/CacheDB/EditDist.py | 55 + .../Tribler/Core/CacheDB/EditDist.py.bak | 54 + tribler-mod/Tribler/Core/CacheDB/Notifier.py | 83 + .../Tribler/Core/CacheDB/Notifier.py.bak | 82 + .../Core/CacheDB/SqliteCacheDBHandler.py | 3444 +++++++++++++++ .../Core/CacheDB/SqliteCacheDBHandler.py.bak | 3443 +++++++++++++++ .../CacheDB/SqliteFriendshipStatsCacheDB.py | 201 + .../SqliteFriendshipStatsCacheDB.py.bak | 200 + .../Core/CacheDB/SqliteSeedingStatsCacheDB.py | 203 + .../CacheDB/SqliteSeedingStatsCacheDB.py.bak | 202 + .../SqliteVideoPlaybackStatsCacheDB.py | 177 + .../SqliteVideoPlaybackStatsCacheDB.py.bak | 176 + tribler-mod/Tribler/Core/CacheDB/__init__.py | 3 + .../Tribler/Core/CacheDB/__init__.py.bak | 2 + .../Tribler/Core/CacheDB/bsdcachedb.py | 1137 +++++ .../Tribler/Core/CacheDB/bsdcachedb.py.bak | 1136 +++++ .../Tribler/Core/CacheDB/bsddb2sqlite.py | 622 +++ .../Tribler/Core/CacheDB/bsddb2sqlite.py.bak | 621 +++ tribler-mod/Tribler/Core/CacheDB/cachedb.py | 8 + .../Tribler/Core/CacheDB/cachedb.py.bak | 7 + tribler-mod/Tribler/Core/CacheDB/friends.py | 143 + .../Tribler/Core/CacheDB/friends.py.bak | 142 + tribler-mod/Tribler/Core/CacheDB/maxflow.py | 163 + .../Tribler/Core/CacheDB/maxflow.py.bak | 162 + tribler-mod/Tribler/Core/CacheDB/read_db.py | 201 + .../Tribler/Core/CacheDB/read_db.py.bak | 200 + .../Tribler/Core/CacheDB/sqlitecachedb.py | 1008 +++++ .../Tribler/Core/CacheDB/sqlitecachedb.py.bak | 1007 +++++ tribler-mod/Tribler/Core/CacheDB/unicode.py | 86 + .../Tribler/Core/CacheDB/unicode.py.bak | 85 + .../Tribler/Core/CoopDownload/Coordinator.py | 274 ++ .../Core/CoopDownload/Coordinator.py.bak | 273 ++ .../CoopDownload/CoordinatorMessageHandler.py | 59 + .../CoordinatorMessageHandler.py.bak | 58 + .../Tribler/Core/CoopDownload/Helper.py | 309 ++ .../Tribler/Core/CoopDownload/Helper.py.bak | 308 ++ .../Core/CoopDownload/HelperMessageHandler.py | 198 + .../CoopDownload/HelperMessageHandler.py.bak | 197 + .../Tribler/Core/CoopDownload/Logger.py | 8 + .../Tribler/Core/CoopDownload/Logger.py.bak | 7 + .../Core/CoopDownload/RatePredictor.py | 61 + .../Core/CoopDownload/RatePredictor.py.bak | 60 + .../Tribler/Core/CoopDownload/__init__.py | 3 + .../Tribler/Core/CoopDownload/__init__.py.bak | 2 + .../Core/DecentralizedTracking/__init__.py | 3 + .../DecentralizedTracking/__init__.py.bak | 2 + .../Core/DecentralizedTracking/mainlineDHT.py | 35 + .../DecentralizedTracking/mainlineDHT.py.bak | 34 + .../mainlineDHTChecker.py | 52 + .../mainlineDHTChecker.py.bak | 51 + .../Core/DecentralizedTracking/rsconvert.py | 43 + .../DecentralizedTracking/rsconvert.py.bak | 42 + .../Core/DecentralizedTracking/ut_pex.py | 140 + .../Core/DecentralizedTracking/ut_pex.py.bak | 139 + tribler-mod/Tribler/Core/Download.py | 155 + tribler-mod/Tribler/Core/Download.py.bak | 154 + tribler-mod/Tribler/Core/DownloadConfig.py | 828 ++++ .../Tribler/Core/DownloadConfig.py.bak | 827 ++++ tribler-mod/Tribler/Core/DownloadState.py | 339 ++ 
tribler-mod/Tribler/Core/DownloadState.py.bak | 338 ++ .../Tribler/Core/LiveSourceAuthConfig.py | 61 + .../Tribler/Core/LiveSourceAuthConfig.py.bak | 60 + tribler-mod/Tribler/Core/Merkle/__init__.py | 3 + .../Tribler/Core/Merkle/__init__.py.bak | 2 + tribler-mod/Tribler/Core/Merkle/merkle.py | 269 ++ tribler-mod/Tribler/Core/Merkle/merkle.py.bak | 268 ++ .../Core/NATFirewall/ConnectionCheck.py | 154 + .../Core/NATFirewall/ConnectionCheck.py.bak | 153 + .../Core/NATFirewall/DialbackMsgHandler.py | 468 ++ .../NATFirewall/DialbackMsgHandler.py.bak | 467 ++ .../Tribler/Core/NATFirewall/NatCheck.py | 209 + .../Tribler/Core/NATFirewall/NatCheck.py.bak | 208 + .../Core/NATFirewall/NatCheckMsgHandler.py | 406 ++ .../NATFirewall/NatCheckMsgHandler.py.bak | 405 ++ .../Tribler/Core/NATFirewall/NatTraversal.py | 179 + .../Core/NATFirewall/NatTraversal.py.bak | 178 + .../Core/NATFirewall/ReturnConnHandler.py | 604 +++ .../Core/NATFirewall/ReturnConnHandler.py.bak | 603 +++ .../Tribler/Core/NATFirewall/TimeoutCheck.py | 94 + .../Core/NATFirewall/TimeoutCheck.py.bak | 93 + .../Tribler/Core/NATFirewall/UPnPThread.py | 104 + .../Core/NATFirewall/UPnPThread.py.bak | 103 + .../Tribler/Core/NATFirewall/__init__.py | 3 + .../Tribler/Core/NATFirewall/__init__.py.bak | 2 + .../Tribler/Core/NATFirewall/guessip.py | 162 + .../Tribler/Core/NATFirewall/guessip.py.bak | 161 + tribler-mod/Tribler/Core/NATFirewall/upnp.py | 301 ++ .../Tribler/Core/NATFirewall/upnp.py.bak | 300 ++ .../Tribler/Core/Overlay/MetadataHandler.py | 565 +++ .../Core/Overlay/MetadataHandler.py.bak | 564 +++ .../Tribler/Core/Overlay/OverlayApps.py | 306 ++ .../Tribler/Core/Overlay/OverlayApps.py.bak | 305 ++ .../Core/Overlay/OverlayThreadingBridge.py | 217 + .../Overlay/OverlayThreadingBridge.py.bak | 216 + .../Tribler/Core/Overlay/SecureOverlay.py | 929 ++++ .../Tribler/Core/Overlay/SecureOverlay.py.bak | 928 ++++ tribler-mod/Tribler/Core/Overlay/__init__.py | 3 + .../Tribler/Core/Overlay/__init__.py.bak | 2 + tribler-mod/Tribler/Core/Overlay/permid.py | 408 ++ .../Tribler/Core/Overlay/permid.py.bak | 407 ++ tribler-mod/Tribler/Core/RequestPolicy.py | 139 + tribler-mod/Tribler/Core/RequestPolicy.py.bak | 138 + .../Tribler/Core/Search/KeywordSearch.py | 108 + .../Tribler/Core/Search/KeywordSearch.py.bak | 107 + tribler-mod/Tribler/Core/Search/Reranking.py | 98 + .../Tribler/Core/Search/Reranking.py.bak | 97 + .../Tribler/Core/Search/SearchManager.py | 39 + .../Tribler/Core/Search/SearchManager.py.bak | 38 + tribler-mod/Tribler/Core/Search/__init__.py | 3 + .../Tribler/Core/Search/__init__.py.bak | 2 + tribler-mod/Tribler/Core/Session.py | 894 ++++ tribler-mod/Tribler/Core/Session.py.bak | 893 ++++ tribler-mod/Tribler/Core/SessionConfig.py | 1240 ++++++ tribler-mod/Tribler/Core/SessionConfig.py.bak | 1239 ++++++ .../SocialNetwork/FriendshipMsgHandler.py | 875 ++++ .../SocialNetwork/FriendshipMsgHandler.py.bak | 874 ++++ .../Core/SocialNetwork/OverlapMsgHandler.py | 274 ++ .../SocialNetwork/OverlapMsgHandler.py.bak | 273 ++ .../SocialNetwork/RemoteQueryMsgHandler.py | 442 ++ .../RemoteQueryMsgHandler.py.bak | 441 ++ .../SocialNetwork/RemoteTorrentHandler.py | 73 + .../SocialNetwork/RemoteTorrentHandler.py.bak | 72 + .../SocialNetwork/SocialNetworkMsgHandler.py | 80 + .../SocialNetworkMsgHandler.py.bak | 79 + .../Tribler/Core/SocialNetwork/__init__.py | 4 + .../Core/SocialNetwork/__init__.py.bak | 3 + .../Tribler/Core/Statistics/Crawler.py | 559 +++ .../Tribler/Core/Statistics/Crawler.py.bak | 558 +++ .../Core/Statistics/DatabaseCrawler.py | 103 + 
.../Core/Statistics/DatabaseCrawler.py.bak | 102 + .../Core/Statistics/FriendshipCrawler.py | 122 + .../Core/Statistics/FriendshipCrawler.py.bak | 121 + tribler-mod/Tribler/Core/Statistics/Logger.py | 222 + .../Tribler/Core/Statistics/Logger.py.bak | 221 + .../Core/Statistics/SeedingStatsCrawler.py | 179 + .../Statistics/SeedingStatsCrawler.py.bak | 178 + .../Core/Statistics/VideoPlaybackCrawler.py | 210 + .../Statistics/VideoPlaybackCrawler.py.bak | 209 + .../Tribler/Core/Statistics/__init__.py | 3 + .../Tribler/Core/Statistics/__init__.py.bak | 2 + .../Tribler/Core/Statistics/crawler.txt | 31 + .../tribler_friendship_stats_sdb.sql | 38 + .../Statistics/tribler_seedingstats_sdb.sql | 41 + .../tribler_videoplayback_stats.sql | 49 + tribler-mod/Tribler/Core/TorrentDef.py | 738 ++++ tribler-mod/Tribler/Core/TorrentDef.py.bak | 737 ++++ .../Tribler/Core/Utilities/__init__.py | 3 + .../Tribler/Core/Utilities/__init__.py.bak | 2 + .../Tribler/Core/Utilities/timeouturlopen.py | 43 + .../Core/Utilities/timeouturlopen.py.bak | 42 + tribler-mod/Tribler/Core/Utilities/unicode.py | 79 + .../Tribler/Core/Utilities/unicode.py.bak | 78 + .../Tribler/Core/Utilities/utilities.py | 541 +++ .../Tribler/Core/Utilities/utilities.py.bak | 540 +++ .../Tribler/Core/Utilities/win32regchecker.py | 114 + .../Core/Utilities/win32regchecker.py.bak | 113 + .../Tribler/Core/Video/LiveSourceAuth.py | 315 ++ .../Tribler/Core/Video/LiveSourceAuth.py.bak | 314 ++ .../Tribler/Core/Video/MovieTransport.py | 67 + .../Tribler/Core/Video/MovieTransport.py.bak | 66 + .../Core/Video/PiecePickerStreaming.py | 649 +++ .../Core/Video/PiecePickerStreaming.py.bak | 648 +++ .../Tribler/Core/Video/VideoOnDemand.py | 1573 +++++++ .../Tribler/Core/Video/VideoOnDemand.py.bak | 1572 +++++++ tribler-mod/Tribler/Core/Video/VideoSource.py | 248 ++ .../Tribler/Core/Video/VideoSource.py.bak | 247 ++ tribler-mod/Tribler/Core/Video/VideoStatus.py | 343 ++ .../Tribler/Core/Video/VideoStatus.py.bak | 342 ++ tribler-mod/Tribler/Core/Video/__init__.py | 3 + .../Tribler/Core/Video/__init__.py.bak | 2 + tribler-mod/Tribler/Core/__init__.py | 3 + tribler-mod/Tribler/Core/__init__.py.bak | 2 + tribler-mod/Tribler/Core/defaults.py | 227 + tribler-mod/Tribler/Core/defaults.py.bak | 226 + tribler-mod/Tribler/Core/exceptions.py | 83 + tribler-mod/Tribler/Core/exceptions.py.bak | 82 + tribler-mod/Tribler/Core/osutils.py | 169 + tribler-mod/Tribler/Core/osutils.py.bak | 168 + tribler-mod/Tribler/Core/simpledefs.py | 140 + tribler-mod/Tribler/Core/simpledefs.py.bak | 139 + tribler-mod/Tribler/Core/superpeer.txt | 9 + tribler-mod/Tribler/Debug/__init__.py | 0 tribler-mod/Tribler/Debug/__init__.py.bak | 0 tribler-mod/Tribler/Debug/console.py | 35 + tribler-mod/Tribler/Debug/console.py.bak | 34 + .../Tribler/Images/SwarmPlayerIcon.ico | Bin 0 -> 13094 bytes .../Tribler/Images/SwarmPlayerLogo.png | Bin 0 -> 6944 bytes .../Tribler/Images/SwarmPluginIcon.ico | Bin 0 -> 13094 bytes .../Tribler/Images/SwarmPluginLogo.png | Bin 0 -> 6944 bytes tribler-mod/Tribler/Images/background.png | Bin 0 -> 164 bytes tribler-mod/Tribler/Images/fullScreen.png | Bin 0 -> 280 bytes .../Tribler/Images/fullScreen_hover.png | Bin 0 -> 721 bytes tribler-mod/Tribler/Images/logoTribler.png | Bin 0 -> 3854 bytes .../Tribler/Images/logoTribler_small.png | Bin 0 -> 1621 bytes tribler-mod/Tribler/Images/mute.png | Bin 0 -> 698 bytes tribler-mod/Tribler/Images/mute_hover.png | Bin 0 -> 935 bytes tribler-mod/Tribler/Images/pause.png | Bin 0 -> 191 bytes tribler-mod/Tribler/Images/pause_hover.png | 
Bin 0 -> 464 bytes tribler-mod/Tribler/Images/play.png | Bin 0 -> 454 bytes tribler-mod/Tribler/Images/play_hover.png | Bin 0 -> 591 bytes tribler-mod/Tribler/Images/save.png | Bin 0 -> 356 bytes tribler-mod/Tribler/Images/saveDisabled.png | Bin 0 -> 370 bytes .../Tribler/Images/saveDisabled_hover.png | Bin 0 -> 370 bytes tribler-mod/Tribler/Images/save_hover.png | Bin 0 -> 483 bytes tribler-mod/Tribler/Images/sliderDot.png | Bin 0 -> 352 bytes tribler-mod/Tribler/Images/sliderVolume.png | Bin 0 -> 292 bytes tribler-mod/Tribler/Images/splash.jpg | Bin 0 -> 16047 bytes tribler-mod/Tribler/Images/torrenticon.ico | Bin 0 -> 24190 bytes tribler-mod/Tribler/Images/tribler.ico | Bin 0 -> 24190 bytes tribler-mod/Tribler/Images/volume.png | Bin 0 -> 517 bytes tribler-mod/Tribler/Images/volume_hover.png | Bin 0 -> 709 bytes tribler-mod/Tribler/LICENSE.txt | 630 +++ tribler-mod/Tribler/Lang/__init__.py | 4 + tribler-mod/Tribler/Lang/__init__.py.bak | 3 + tribler-mod/Tribler/Lang/english.lang | 1423 ++++++ tribler-mod/Tribler/Lang/lang.py | 210 + tribler-mod/Tribler/Lang/lang.py.bak | 209 + tribler-mod/Tribler/Main/Build/Mac/Info.plist | 57 + tribler-mod/Tribler/Main/Build/Mac/Makefile | 115 + .../Tribler/Main/Build/Mac/SLAResources.rsrc | Bin 0 -> 106972 bytes .../Tribler/Main/Build/Mac/TriblerDoc.icns | Bin 0 -> 36969 bytes .../Tribler/Main/Build/Mac/VolumeIcon.icns | Bin 0 -> 37447 bytes .../Tribler/Main/Build/Mac/background.png | Bin 0 -> 7512 bytes .../Main/Build/Mac/icon_sources/appicon.png | Bin 0 -> 4728 bytes .../Main/Build/Mac/icon_sources/appicon.psd | Bin 0 -> 70493 bytes .../Mac/icon_sources/default_document.png | Bin 0 -> 5282 bytes .../Mac/icon_sources/default_volumeicon.png | Bin 0 -> 5744 bytes .../Main/Build/Mac/icon_sources/dmgicon.png | Bin 0 -> 9878 bytes .../Main/Build/Mac/icon_sources/dmgicon.psd | Bin 0 -> 79031 bytes .../Main/Build/Mac/icon_sources/docicon.png | Bin 0 -> 7970 bytes .../Main/Build/Mac/icon_sources/docicon.psd | Bin 0 -> 84428 bytes .../Tribler/Main/Build/Mac/mkinstalldirs | 111 + .../Tribler/Main/Build/Mac/process_libs | 32 + .../Tribler/Main/Build/Mac/setuptriblermac.py | 148 + .../Main/Build/Mac/setuptriblermac.py.bak | 147 + .../Tribler/Main/Build/Mac/smart_lipo_merge | 46 + .../Tribler/Main/Build/Mac/smart_lipo_thin | 19 + .../Tribler/Main/Build/Mac/tribler.icns | Bin 0 -> 39546 bytes .../Tribler/Main/Build/Mac/triblermac.command | 28 + .../Main/Build/Mac/vlc-macosx-compile.patch | 509 +++ .../Tribler/Main/Build/Ubuntu/changelog | 128 + tribler-mod/Tribler/Main/Build/Ubuntu/compat | 1 + tribler-mod/Tribler/Main/Build/Ubuntu/control | 30 + .../Tribler/Main/Build/Ubuntu/copyright | 630 +++ tribler-mod/Tribler/Main/Build/Ubuntu/files | 1 + tribler-mod/Tribler/Main/Build/Ubuntu/prerm | 47 + .../Tribler/Main/Build/Ubuntu/pycompat | 1 + tribler-mod/Tribler/Main/Build/Ubuntu/rules | 84 + .../Tribler/Main/Build/Ubuntu/tribler.1 | 36 + .../Tribler/Main/Build/Ubuntu/tribler.desktop | 8 + .../Main/Build/Ubuntu/tribler.manpages | 1 + .../Tribler/Main/Build/Ubuntu/tribler.menu | 4 + .../Build/Ubuntu/tribler.postinst.debhelper | 14 + .../Build/Ubuntu/tribler.postrm.debhelper | 3 + .../Main/Build/Ubuntu/tribler.prerm.debhelper | 5 + .../Tribler/Main/Build/Ubuntu/tribler.sh | 63 + .../Main/Build/Ubuntu/tribler.substvars | 1 + .../Tribler/Main/Build/Ubuntu/tribler.xpm | 173 + .../Tribler/Main/Build/Ubuntu/tribler_big.xpm | 310 ++ .../Tribler/Main/Build/Win32/heading.bmp | Bin 0 -> 25818 bytes .../Tribler/Main/Build/Win32/setuptribler.py | 58 + 
.../Main/Build/Win32/setuptribler.py.bak | 57 + .../Main/Build/Win32/tribler.exe.manifest | 23 + .../Tribler/Main/Build/Win32/tribler.nsi | 254 ++ .../Tribler/Main/Dialogs/BandwidthSelector.py | 66 + .../Main/Dialogs/BandwidthSelector.py.bak | 65 + .../Tribler/Main/Dialogs/GUITaskQueue.py | 34 + .../Tribler/Main/Dialogs/GUITaskQueue.py.bak | 33 + .../Tribler/Main/Dialogs/TorrentMaker.py | 1029 +++++ .../Tribler/Main/Dialogs/TorrentMaker.py.bak | 1028 +++++ tribler-mod/Tribler/Main/Dialogs/__init__.py | 0 .../Tribler/Main/Dialogs/__init__.py.bak | 0 tribler-mod/Tribler/Main/Dialogs/abcoption.py | 1657 +++++++ .../Tribler/Main/Dialogs/abcoption.py.bak | 1656 +++++++ tribler-mod/Tribler/Main/Dialogs/aboutme.py | 243 + .../Tribler/Main/Dialogs/aboutme.py.bak | 242 + tribler-mod/Tribler/Main/Dialogs/common.py | 245 ++ .../Tribler/Main/Dialogs/common.py.bak | 244 ++ .../Tribler/Main/Dialogs/dlhelperframe.py | 379 ++ .../Tribler/Main/Dialogs/dlhelperframe.py.bak | 378 ++ .../Tribler/Main/Dialogs/makefriends.py | 269 ++ .../Tribler/Main/Dialogs/makefriends.py.bak | 268 ++ tribler-mod/Tribler/Main/Dialogs/regdialog.py | 69 + .../Tribler/Main/Dialogs/regdialog.py.bak | 68 + .../Tribler/Main/Dialogs/socnetmyinfo.py | 271 ++ .../Tribler/Main/Dialogs/socnetmyinfo.py.bak | 270 ++ tribler-mod/Tribler/Main/Dialogs/systray.py | 73 + .../Tribler/Main/Dialogs/systray.py.bak | 72 + tribler-mod/Tribler/Main/Utility/__init__.py | 4 + .../Tribler/Main/Utility/__init__.py.bak | 3 + tribler-mod/Tribler/Main/Utility/compat.py | 307 ++ .../Tribler/Main/Utility/compat.py.bak | 306 ++ tribler-mod/Tribler/Main/Utility/constants.py | 194 + .../Tribler/Main/Utility/constants.py.bak | 193 + .../Tribler/Main/Utility/getscrapedata.py | 151 + .../Tribler/Main/Utility/getscrapedata.py.bak | 150 + tribler-mod/Tribler/Main/Utility/helpers.py | 202 + .../Tribler/Main/Utility/helpers.py.bak | 201 + .../Tribler/Main/Utility/regchecker.py | 167 + .../Tribler/Main/Utility/regchecker.py.bak | 166 + tribler-mod/Tribler/Main/Utility/utility.py | 843 ++++ .../Tribler/Main/Utility/utility.py.bak | 842 ++++ tribler-mod/Tribler/Main/__init__.py | 3 + tribler-mod/Tribler/Main/__init__.py.bak | 2 + tribler-mod/Tribler/Main/crawler.py | 90 + tribler-mod/Tribler/Main/crawler.py.bak | 89 + tribler-mod/Tribler/Main/globals.py | 65 + tribler-mod/Tribler/Main/globals.py.bak | 64 + tribler-mod/Tribler/Main/metadata-injector.py | 131 + .../Tribler/Main/metadata-injector.py.bak | 130 + tribler-mod/Tribler/Main/notification.py | 78 + tribler-mod/Tribler/Main/notification.py.bak | 77 + tribler-mod/Tribler/Main/tribler.py | 1203 +++++ tribler-mod/Tribler/Main/tribler.py.bak | 1202 +++++ .../Tribler/Main/vwxGUI/ColumnHeader.py | 290 ++ .../Tribler/Main/vwxGUI/ColumnHeader.py.bak | 289 ++ .../Main/vwxGUI/FilesItemDetailsSummary.py | 310 ++ .../vwxGUI/FilesItemDetailsSummary.py.bak | 309 ++ .../Tribler/Main/vwxGUI/FriendsItemPanel.py | 482 ++ .../Main/vwxGUI/FriendsItemPanel.py.bak | 481 ++ .../Tribler/Main/vwxGUI/FriendshipManager.py | 112 + .../Main/vwxGUI/FriendshipManager.py.bak | 111 + tribler-mod/Tribler/Main/vwxGUI/GridState.py | 32 + .../Tribler/Main/vwxGUI/GridState.py.bak | 31 + tribler-mod/Tribler/Main/vwxGUI/GuiUtility.py | 1307 ++++++ .../Tribler/Main/vwxGUI/GuiUtility.py.bak | 1306 ++++++ .../Tribler/Main/vwxGUI/IconsManager.py | 208 + .../Tribler/Main/vwxGUI/IconsManager.py.bak | 207 + .../Tribler/Main/vwxGUI/LibraryItemPanel.py | 857 ++++ .../Main/vwxGUI/LibraryItemPanel.py.bak | 856 ++++ .../Tribler/Main/vwxGUI/LoadingDetails.py | 47 + 
.../Tribler/Main/vwxGUI/LoadingDetails.py.bak | 46 + tribler-mod/Tribler/Main/vwxGUI/MainFrame.py | 713 +++ .../Tribler/Main/vwxGUI/MainFrame.py.bak | 712 +++ tribler-mod/Tribler/Main/vwxGUI/MyFrame.xrc | 154 + tribler-mod/Tribler/Main/vwxGUI/MyPlayer.xrc | 168 + tribler-mod/Tribler/Main/vwxGUI/MyText.py | 70 + tribler-mod/Tribler/Main/vwxGUI/MyText.py.bak | 69 + .../Tribler/Main/vwxGUI/NewStaticText.py | 53 + .../Tribler/Main/vwxGUI/NewStaticText.py.bak | 52 + .../Main/vwxGUI/PersonsItemDetailsSummary.py | 116 + .../vwxGUI/PersonsItemDetailsSummary.py.bak | 115 + .../Tribler/Main/vwxGUI/PersonsItemPanel.py | 705 +++ .../Main/vwxGUI/PersonsItemPanel.py.bak | 704 +++ .../Tribler/Main/vwxGUI/SearchDetails.py | 123 + .../Tribler/Main/vwxGUI/SearchDetails.py.bak | 122 + .../Tribler/Main/vwxGUI/SearchGridManager.py | 675 +++ .../Main/vwxGUI/SearchGridManager.py.bak | 674 +++ .../Main/vwxGUI/SubscriptionsItemPanel.py | 506 +++ .../Main/vwxGUI/SubscriptionsItemPanel.py.bak | 505 +++ .../Tribler/Main/vwxGUI/Tab_graphs.xrc | 21 + tribler-mod/Tribler/Main/vwxGUI/TextButton.py | 279 ++ .../Tribler/Main/vwxGUI/TextButton.py.bak | 278 ++ tribler-mod/Tribler/Main/vwxGUI/TextEdit.py | 124 + .../Tribler/Main/vwxGUI/TextEdit.py.bak | 123 + .../Tribler/Main/vwxGUI/TopSearchPanel.py | 653 +++ .../Tribler/Main/vwxGUI/TopSearchPanel.py.bak | 652 +++ .../Tribler/Main/vwxGUI/TopSearchPanel.xrc | 358 ++ .../Tribler/Main/vwxGUI/TriblerProgressbar.py | 90 + .../Main/vwxGUI/TriblerProgressbar.py.bak | 89 + .../Tribler/Main/vwxGUI/TriblerStyles.py | 169 + .../Tribler/Main/vwxGUI/TriblerStyles.py.bak | 168 + tribler-mod/Tribler/Main/vwxGUI/__init__.py | 3 + .../Tribler/Main/vwxGUI/__init__.py.bak | 2 + tribler-mod/Tribler/Main/vwxGUI/bgPanel.py | 155 + .../Tribler/Main/vwxGUI/bgPanel.py.bak | 154 + .../Tribler/Main/vwxGUI/btn_DetailsHeader.py | 204 + .../Main/vwxGUI/btn_DetailsHeader.py.bak | 203 + .../Tribler/Main/vwxGUI/deleteTorrent.xrc | 71 + tribler-mod/Tribler/Main/vwxGUI/dummy.xrc | 23 + .../Tribler/Main/vwxGUI/filesDetails.xrc | 487 ++ .../Tribler/Main/vwxGUI/filesItemPanel.py | 1111 +++++ .../Tribler/Main/vwxGUI/filesItemPanel.py.bak | 1110 +++++ .../Tribler/Main/vwxGUI/filesTab_files.py | 88 + .../Tribler/Main/vwxGUI/filesTab_files.py.bak | 87 + .../Tribler/Main/vwxGUI/filesTab_files.xrc | 120 + .../Tribler/Main/vwxGUI/filterStandard.py | 284 ++ .../Tribler/Main/vwxGUI/filterStandard.py.bak | 283 ++ tribler-mod/Tribler/Main/vwxGUI/font.py | 27 + tribler-mod/Tribler/Main/vwxGUI/font.py.bak | 26 + .../Tribler/Main/vwxGUI/images/5.0/None.png | Bin 0 -> 165 bytes .../Main/vwxGUI/images/5.0/SRgradient.png | Bin 0 -> 1754 bytes .../Main/vwxGUI/images/5.0/SRgradient_new.png | Bin 0 -> 582 bytes .../vwxGUI/images/5.0/SRgradient_new_win.png | Bin 0 -> 562 bytes .../Tribler/Main/vwxGUI/images/5.0/SRind.png | Bin 0 -> 271 bytes .../Tribler/Main/vwxGUI/images/5.0/SRind2.png | Bin 0 -> 217 bytes .../Main/vwxGUI/images/5.0/SRind2_win.png | Bin 0 -> 235 bytes .../Main/vwxGUI/images/5.0/SRindicator.png | Bin 0 -> 1468 bytes .../vwxGUI/images/5.0/SRindicator_left.png | Bin 0 -> 1468 bytes .../vwxGUI/images/5.0/SRindicator_right.png | Bin 0 -> 1468 bytes .../vwxGUI/images/5.0/advanced_filtering.png | Bin 0 -> 332 bytes .../Main/vwxGUI/images/5.0/average_win.png | Bin 0 -> 1063 bytes .../Main/vwxGUI/images/5.0/black_spacer.png | Bin 0 -> 208 bytes .../Main/vwxGUI/images/5.0/files_friends.png | Bin 0 -> 316 bytes .../Tribler/Main/vwxGUI/images/5.0/go.png | Bin 0 -> 569 bytes .../Main/vwxGUI/images/5.0/good_win.png | Bin 0 -> 
906 bytes .../Tribler/Main/vwxGUI/images/5.0/help.png | Bin 0 -> 1662 bytes .../Main/vwxGUI/images/5.0/help_win.png | Bin 0 -> 566 bytes .../Main/vwxGUI/images/5.0/iconSaved.png | Bin 0 -> 1427 bytes .../Tribler/Main/vwxGUI/images/5.0/left.png | Bin 0 -> 267 bytes .../Tribler/Main/vwxGUI/images/5.0/line3.png | Bin 0 -> 187 bytes .../Main/vwxGUI/images/5.0/poor_win.png | Bin 0 -> 733 bytes .../Tribler/Main/vwxGUI/images/5.0/right.png | Bin 0 -> 253 bytes .../Main/vwxGUI/images/5.0/search_files.png | Bin 0 -> 690 bytes .../Main/vwxGUI/images/5.0/search_new.gif | Bin 0 -> 682 bytes .../vwxGUI/images/5.0/search_new_windows.gif | Bin 0 -> 673 bytes .../Main/vwxGUI/images/5.0/seperator.png | Bin 0 -> 231 bytes .../Main/vwxGUI/images/5.0/seperator_win.png | Bin 0 -> 210 bytes .../vwxGUI/images/5.0/sharing_reputation.png | Bin 0 -> 863 bytes .../images/5.0/sharing_reputation_win.png | Bin 0 -> 1943 bytes .../Main/vwxGUI/images/5.0/top_image.png | Bin 0 -> 2580 bytes .../Tribler/Main/vwxGUI/images/5.0/video.gif | Bin 0 -> 8238 bytes .../Main/vwxGUI/images/5.0/welcome.png | Bin 0 -> 382 bytes .../Main/vwxGUI/images/5.0/wrapCorBL.png | Bin 0 -> 237 bytes .../Main/vwxGUI/images/5.0/wrapCorBR.png | Bin 0 -> 232 bytes .../Main/vwxGUI/images/5.0/wrapCorTL.png | Bin 0 -> 236 bytes .../Main/vwxGUI/images/5.0/wrapCorTR.png | Bin 0 -> 234 bytes .../Tribler/Main/vwxGUI/images/Save.png | Bin 0 -> 984 bytes .../Main/vwxGUI/images/Save_clicked.png | Bin 0 -> 931 bytes .../Tribler/Main/vwxGUI/images/Search_new.png | Bin 0 -> 675 bytes .../Main/vwxGUI/images/Search_new_clicked.png | Bin 0 -> 714 bytes .../Main/vwxGUI/images/Search_new_win.png | Bin 0 -> 682 bytes .../vwxGUI/images/Search_new_win_clicked.png | Bin 0 -> 676 bytes .../Tribler/Main/vwxGUI/images/basic.png | Bin 0 -> 265 bytes .../Main/vwxGUI/images/basicEnabled.png | Bin 0 -> 265 bytes .../vwxGUI/images/basicEnabled_clicked.png | Bin 0 -> 264 bytes .../Main/vwxGUI/images/basic_clicked.png | Bin 0 -> 264 bytes .../Tribler/Main/vwxGUI/images/bcicon.png | Bin 0 -> 421 bytes .../Main/vwxGUI/images/black_top_left.png | Bin 0 -> 200 bytes .../Main/vwxGUI/images/black_top_right.png | Bin 0 -> 199 bytes .../Tribler/Main/vwxGUI/images/blue_long.png | Bin 0 -> 1209 bytes .../Tribler/Main/vwxGUI/images/browse.png | Bin 0 -> 629 bytes .../Main/vwxGUI/images/browse_clicked.png | Bin 0 -> 464 bytes .../Main/vwxGUI/images/defaultThumb.png | Bin 0 -> 1735 bytes .../vwxGUI/images/defaultThumbFriends.png | Bin 0 -> 615 bytes .../Main/vwxGUI/images/defaultThumbL.png | Bin 0 -> 4404 bytes .../vwxGUI/images/defaultThumbL_audio.png | Bin 0 -> 2130 bytes .../images/defaultThumbL_compressed.png | Bin 0 -> 2349 bytes .../vwxGUI/images/defaultThumbL_hidden.png | Bin 0 -> 3415 bytes .../vwxGUI/images/defaultThumbL_other.png | Bin 0 -> 1266 bytes .../vwxGUI/images/defaultThumbL_video.png | Bin 0 -> 2103 bytes .../Main/vwxGUI/images/defaultThumbL_xxx.png | Bin 0 -> 2686 bytes .../vwxGUI/images/defaultThumbLibrary.png | Bin 0 -> 895 bytes .../Main/vwxGUI/images/defaultThumbPeer.png | Bin 0 -> 1080 bytes .../Main/vwxGUI/images/defaultThumbPeerS.png | Bin 0 -> 294 bytes .../vwxGUI/images/defaultThumbS_audio.png | Bin 0 -> 474 bytes .../images/defaultThumbS_compressed.png | Bin 0 -> 332 bytes .../vwxGUI/images/defaultThumbS_hidden.png | Bin 0 -> 400 bytes .../vwxGUI/images/defaultThumbS_other.png | Bin 0 -> 179 bytes .../vwxGUI/images/defaultThumbS_video.png | Bin 0 -> 417 bytes .../Main/vwxGUI/images/defaultThumbS_xxx.png | Bin 0 -> 351 bytes 
.../Main/vwxGUI/images/defaultThumb_audio.png | Bin 0 -> 612 bytes .../vwxGUI/images/defaultThumb_compressed.png | Bin 0 -> 948 bytes .../vwxGUI/images/defaultThumb_hidden.png | Bin 0 -> 1546 bytes .../Main/vwxGUI/images/defaultThumb_other.png | Bin 0 -> 616 bytes .../Main/vwxGUI/images/defaultThumb_video.png | Bin 0 -> 623 bytes .../Main/vwxGUI/images/defaultThumb_xxx.png | Bin 0 -> 899 bytes .../Tribler/Main/vwxGUI/images/download.png | Bin 0 -> 1672 bytes .../Main/vwxGUI/images/download_clicked.png | Bin 0 -> 575 bytes .../Tribler/Main/vwxGUI/images/edit.png | Bin 0 -> 1671 bytes .../Main/vwxGUI/images/edit_clicked.png | Bin 0 -> 574 bytes .../Main/vwxGUI/images/edit_clicked_old.png | Bin 0 -> 262 bytes .../Tribler/Main/vwxGUI/images/edit_old.png | Bin 0 -> 650 bytes .../Tribler/Main/vwxGUI/images/fake.png | Bin 0 -> 1693 bytes .../Main/vwxGUI/images/fake_clicked.png | Bin 0 -> 1685 bytes .../Main/vwxGUI/images/familyfilter.png | Bin 0 -> 332 bytes .../vwxGUI/images/familyfilterEnabled.png | Bin 0 -> 294 bytes .../images/familyfilterEnabled_clicked.png | Bin 0 -> 294 bytes .../vwxGUI/images/familyfilter_clicked.png | Bin 0 -> 332 bytes .../Main/vwxGUI/images/familyfilter_win.png | Bin 0 -> 1484 bytes .../vwxGUI/images/familyfilter_winEnabled.png | Bin 0 -> 1578 bytes .../familyfilter_winEnabled_clicked.png | Bin 0 -> 1578 bytes .../images/familyfilter_win_clicked.png | Bin 0 -> 1484 bytes .../Tribler/Main/vwxGUI/images/fiftyUp.png | Bin 0 -> 598 bytes .../Main/vwxGUI/images/fiftyUp_clicked.png | Bin 0 -> 397 bytes .../vwxGUI/images/firewallStatus_state1.png | Bin 0 -> 601 bytes .../vwxGUI/images/firewallStatus_state2.png | Bin 0 -> 459 bytes .../vwxGUI/images/firewallStatus_state3.png | Bin 0 -> 556 bytes tribler-mod/Tribler/Main/vwxGUI/images/go.png | Bin 0 -> 1690 bytes .../Tribler/Main/vwxGUI/images/goEnabled.png | Bin 0 -> 1978 bytes .../Tribler/Main/vwxGUI/images/go_backup.png | Bin 0 -> 1187 bytes .../Tribler/Main/vwxGUI/images/go_clicked.png | Bin 0 -> 1694 bytes .../Tribler/Main/vwxGUI/images/hline.png | Bin 0 -> 127 bytes .../Tribler/Main/vwxGUI/images/hundredUp.png | Bin 0 -> 588 bytes .../Main/vwxGUI/images/hundredUp_clicked.png | Bin 0 -> 383 bytes .../Main/vwxGUI/images/iconSaved_state3.png | Bin 0 -> 1846 bytes .../Main/vwxGUI/images/library_play.png | Bin 0 -> 590 bytes .../vwxGUI/images/library_play_clicked.png | Bin 0 -> 586 bytes .../Tribler/Main/vwxGUI/images/line.png | Bin 0 -> 139 bytes .../Tribler/Main/vwxGUI/images/line2.png | Bin 0 -> 139 bytes .../Tribler/Main/vwxGUI/images/line3.png | Bin 0 -> 139 bytes .../Tribler/Main/vwxGUI/images/line4.png | Bin 0 -> 139 bytes .../Tribler/Main/vwxGUI/images/logo4video.png | Bin 0 -> 5890 bytes .../Main/vwxGUI/images/logo4video2.png | Bin 0 -> 1317 bytes .../Main/vwxGUI/images/logo4video2_win.png | Bin 0 -> 1524 bytes .../Tribler/Main/vwxGUI/images/mainBG.png | Bin 0 -> 29076 bytes tribler-mod/Tribler/Main/vwxGUI/images/mt.png | Bin 0 -> 934 bytes .../Tribler/Main/vwxGUI/images/mtEnabled.png | Bin 0 -> 949 bytes .../Main/vwxGUI/images/mtEnabled_clicked.png | Bin 0 -> 949 bytes .../Tribler/Main/vwxGUI/images/mt_clicked.png | Bin 0 -> 934 bytes .../Tribler/Main/vwxGUI/images/mute.png | Bin 0 -> 230 bytes .../Main/vwxGUI/images/mute_clicked.png | Bin 0 -> 232 bytes .../Tribler/Main/vwxGUI/images/my_files.png | Bin 0 -> 250 bytes .../Main/vwxGUI/images/my_filesEnabled.png | Bin 0 -> 1372 bytes .../vwxGUI/images/my_filesEnabled_clicked.png | Bin 0 -> 1372 bytes .../Main/vwxGUI/images/my_files_clicked.png | Bin 0 -> 1372 bytes 
.../Main/vwxGUI/images/my_files_win.png | Bin 0 -> 1120 bytes .../Main/vwxGUI/images/my_files_winBlank.png | Bin 0 -> 1120 bytes .../vwxGUI/images/my_files_winEnabled.png | Bin 0 -> 1208 bytes .../images/my_files_winEnabled_clicked.png | Bin 0 -> 1208 bytes .../vwxGUI/images/my_files_win_clicked.png | Bin 0 -> 1208 bytes .../Tribler/Main/vwxGUI/images/nextpage.png | Bin 0 -> 1748 bytes .../Tribler/Main/vwxGUI/images/nextpage2.png | Bin 0 -> 1304 bytes .../Main/vwxGUI/images/nextpageEnabled.png | Bin 0 -> 593 bytes .../vwxGUI/images/nextpageEnabled_clicked.png | Bin 0 -> 627 bytes .../Main/vwxGUI/images/nextpage_clicked.png | Bin 0 -> 1748 bytes .../Main/vwxGUI/images/play_button.png | Bin 0 -> 1700 bytes .../vwxGUI/images/play_button_clicked.png | Bin 0 -> 1697 bytes .../Tribler/Main/vwxGUI/images/playbig.png | Bin 0 -> 913 bytes .../Main/vwxGUI/images/playbigEnabled.png | Bin 0 -> 2034 bytes .../vwxGUI/images/playbigEnabled_clicked.png | Bin 0 -> 949 bytes .../Main/vwxGUI/images/playbig_clicked.png | Bin 0 -> 928 bytes .../Main/vwxGUI/images/playsmallEnabled.png | Bin 0 -> 1560 bytes .../images/playsmallEnabled_clicked.png | Bin 0 -> 1560 bytes .../Main/vwxGUI/images/popularity1.png | Bin 0 -> 1378 bytes .../Main/vwxGUI/images/popularity10.png | Bin 0 -> 1311 bytes .../Main/vwxGUI/images/popularity2.png | Bin 0 -> 1389 bytes .../Main/vwxGUI/images/popularity3.png | Bin 0 -> 1392 bytes .../Main/vwxGUI/images/popularity4.png | Bin 0 -> 1392 bytes .../Main/vwxGUI/images/popularity5.png | Bin 0 -> 1390 bytes .../Main/vwxGUI/images/popularity6.png | Bin 0 -> 1393 bytes .../Main/vwxGUI/images/popularity7.png | Bin 0 -> 1392 bytes .../Main/vwxGUI/images/popularity8.png | Bin 0 -> 1391 bytes .../Main/vwxGUI/images/popularity9.png | Bin 0 -> 1377 bytes .../Tribler/Main/vwxGUI/images/prevpage.png | Bin 0 -> 1744 bytes .../Tribler/Main/vwxGUI/images/prevpage2.png | Bin 0 -> 1304 bytes .../Main/vwxGUI/images/prevpageEnabled.png | Bin 0 -> 587 bytes .../vwxGUI/images/prevpageEnabled_clicked.png | Bin 0 -> 621 bytes .../Main/vwxGUI/images/prevpage_clicked.png | Bin 0 -> 1744 bytes .../Tribler/Main/vwxGUI/images/real.png | Bin 0 -> 1715 bytes .../Main/vwxGUI/images/real_clicked.png | Bin 0 -> 1708 bytes .../Tribler/Main/vwxGUI/images/remove.png | Bin 0 -> 782 bytes .../Main/vwxGUI/images/remove_clicked.png | Bin 0 -> 733 bytes .../Main/vwxGUI/images/results_win.png | Bin 0 -> 1646 bytes .../Main/vwxGUI/images/results_winBlank.png | Bin 0 -> 485 bytes .../Main/vwxGUI/images/results_winEnabled.png | Bin 0 -> 1840 bytes .../Tribler/Main/vwxGUI/images/search.png | Bin 0 -> 722 bytes .../Main/vwxGUI/images/search_clicked.png | Bin 0 -> 722 bytes .../Main/vwxGUI/images/select_files.png | Bin 0 -> 2824 bytes .../vwxGUI/images/select_files_clicked.png | Bin 0 -> 2823 bytes .../Tribler/Main/vwxGUI/images/settings.png | Bin 0 -> 257 bytes .../Main/vwxGUI/images/settingsEnabled.png | Bin 0 -> 259 bytes .../vwxGUI/images/settingsEnabled_clicked.png | Bin 0 -> 259 bytes .../Main/vwxGUI/images/settings_clicked.png | Bin 0 -> 259 bytes .../Main/vwxGUI/images/settings_win.png | Bin 0 -> 1245 bytes .../Main/vwxGUI/images/settings_winBlank.png | Bin 0 -> 1245 bytes .../vwxGUI/images/settings_winEnabled.png | Bin 0 -> 1385 bytes .../images/settings_winEnabled_clicked.png | Bin 0 -> 1339 bytes .../vwxGUI/images/settings_win_clicked.png | Bin 0 -> 1339 bytes .../Main/vwxGUI/images/seventyfiveDown.png | Bin 0 -> 556 bytes .../vwxGUI/images/seventyfiveDown_clicked.png | Bin 0 -> 391 bytes 
.../Main/vwxGUI/images/sixhundreddDown.png | Bin 0 -> 566 bytes .../vwxGUI/images/sixhundreddDown_clicked.png | Bin 0 -> 411 bytes .../Tribler/Main/vwxGUI/images/sr_average.png | Bin 0 -> 2904 bytes .../Tribler/Main/vwxGUI/images/sr_good.png | Bin 0 -> 2845 bytes .../Tribler/Main/vwxGUI/images/sr_poor.png | Bin 0 -> 2608 bytes .../Tribler/Main/vwxGUI/images/tenUp.png | Bin 0 -> 585 bytes .../Main/vwxGUI/images/tenUp_clicked.png | Bin 0 -> 376 bytes .../Main/vwxGUI/images/threehundredDown.png | Bin 0 -> 535 bytes .../images/threehundredDown_clicked.png | Bin 0 -> 390 bytes .../Tribler/Main/vwxGUI/images/thumb.png | Bin 0 -> 1241 bytes .../Tribler/Main/vwxGUI/images/thumbField.png | Bin 0 -> 3672 bytes .../Tribler/Main/vwxGUI/images/topBG.png | Bin 0 -> 405 bytes .../Tribler/Main/vwxGUI/images/top_bg.png | Bin 0 -> 27912 bytes .../Tribler/Main/vwxGUI/images/top_search.png | Bin 0 -> 27912 bytes .../Main/vwxGUI/images/top_search_grey.png | Bin 0 -> 691 bytes .../Main/vwxGUI/images/unlimitedDown.png | Bin 0 -> 688 bytes .../vwxGUI/images/unlimitedDown_clicked.png | Bin 0 -> 476 bytes .../Main/vwxGUI/images/unlimitedUp.png | Bin 0 -> 688 bytes .../vwxGUI/images/unlimitedUp_clicked.png | Bin 0 -> 476 bytes .../Tribler/Main/vwxGUI/images/zeroDown.png | Bin 0 -> 542 bytes .../Main/vwxGUI/images/zeroDown_clicked.png | Bin 0 -> 334 bytes .../Tribler/Main/vwxGUI/images/zeroUp.png | Bin 0 -> 542 bytes .../Main/vwxGUI/images/zeroUp_clicked.png | Bin 0 -> 333 bytes .../Tribler/Main/vwxGUI/libraryDetails.xrc | 451 ++ .../Tribler/Main/vwxGUI/personsDetails.xrc | 424 ++ .../Tribler/Main/vwxGUI/personsItem.xrc | 61 + .../Main/vwxGUI/personsTab_advanced.xrc | 144 + .../Tribler/Main/vwxGUI/profileDetails.xrc | 116 + .../Main/vwxGUI/profileDetails_Download.xrc | 402 ++ .../Main/vwxGUI/profileDetails_Files.xrc | 157 + .../Main/vwxGUI/profileDetails_Persons.xrc | 148 + .../Main/vwxGUI/profileDetails_Presence.xrc | 289 ++ .../Main/vwxGUI/profileDetails_Quality.xrc | 150 + .../Tribler/Main/vwxGUI/settingsOverview.xrc | 500 +++ .../Main/vwxGUI/settingsOverviewPanel.py | 468 ++ .../Main/vwxGUI/settingsOverviewPanel.py.bak | 467 ++ .../Tribler/Main/vwxGUI/standardDetails.py | 2090 +++++++++ .../Main/vwxGUI/standardDetails.py.bak | 2089 +++++++++ .../Tribler/Main/vwxGUI/standardFilter.py | 161 + .../Tribler/Main/vwxGUI/standardFilter.py.bak | 160 + .../Tribler/Main/vwxGUI/standardGrid.py | 1000 +++++ .../Tribler/Main/vwxGUI/standardGrid.py.bak | 999 +++++ .../Tribler/Main/vwxGUI/standardOverview.py | 775 ++++ .../Main/vwxGUI/standardOverview.py.bak | 774 ++++ .../Tribler/Main/vwxGUI/standardPager.py | 287 ++ .../Tribler/Main/vwxGUI/standardPager.py.bak | 286 ++ .../Tribler/Main/vwxGUI/statusDownloads.xrc | 237 + .../Main/vwxGUI/subscriptionsDetails.xrc | 200 + .../Tribler/Main/vwxGUI/tribler_List.py | 324 ++ .../Tribler/Main/vwxGUI/tribler_List.py.bak | 323 ++ .../Tribler/Main/vwxGUI/tribler_topButton.py | 907 ++++ .../Main/vwxGUI/tribler_topButton.py.bak | 906 ++++ .../Tribler/Main/vwxGUI/uploadTab_details.xrc | 66 + tribler-mod/Tribler/Main/vwxGUI/web2.py | 173 + tribler-mod/Tribler/Main/vwxGUI/web2.py.bak | 172 + .../Main/vwxGUI/zudeo_torrent_description.txt | 54 + tribler-mod/Tribler/Player/BaseApp.py | 573 +++ tribler-mod/Tribler/Player/BaseApp.py.bak | 572 +++ .../Tribler/Player/Build/Mac/Info.plist | 57 + tribler-mod/Tribler/Player/Build/Mac/Makefile | 116 + .../Player/Build/Mac/SLAResources.rsrc | Bin 0 -> 46262 bytes .../Tribler/Player/Build/Mac/TriblerDoc.icns | Bin 0 -> 36476 bytes 
.../Tribler/Player/Build/Mac/VolumeIcon.icns | Bin 0 -> 37339 bytes .../Tribler/Player/Build/Mac/background.png | Bin 0 -> 7512 bytes .../Player/Build/Mac/icon_sources/appicon.png | Bin 0 -> 14993 bytes .../Player/Build/Mac/icon_sources/appicon.psd | Bin 0 -> 295912 bytes .../Mac/icon_sources/default_document.png | Bin 0 -> 5282 bytes .../Mac/icon_sources/default_volumeicon.png | Bin 0 -> 5744 bytes .../Player/Build/Mac/icon_sources/dmgicon.png | Bin 0 -> 15026 bytes .../Player/Build/Mac/icon_sources/dmgicon.psd | Bin 0 -> 73560 bytes .../Player/Build/Mac/icon_sources/docicon.png | Bin 0 -> 13135 bytes .../Player/Build/Mac/icon_sources/docicon.psd | Bin 0 -> 223697 bytes .../Tribler/Player/Build/Mac/mkinstalldirs | 111 + .../Tribler/Player/Build/Mac/process_libs | 34 + .../Player/Build/Mac/setuptriblermac.py | 121 + .../Player/Build/Mac/setuptriblermac.py.bak | 120 + .../Tribler/Player/Build/Mac/smart_lipo_merge | 46 + .../Tribler/Player/Build/Mac/smart_lipo_thin | 19 + .../Tribler/Player/Build/Mac/tribler.icns | Bin 0 -> 39131 bytes .../Player/Build/Mac/vlc-macosx-compile.patch | 509 +++ .../Tribler/Player/Build/Ubuntu/changelog | 5 + .../Tribler/Player/Build/Ubuntu/compat | 1 + .../Tribler/Player/Build/Ubuntu/control | 16 + .../Tribler/Player/Build/Ubuntu/copyright | 630 +++ tribler-mod/Tribler/Player/Build/Ubuntu/files | 1 + tribler-mod/Tribler/Player/Build/Ubuntu/prerm | 47 + .../Tribler/Player/Build/Ubuntu/pycompat | 1 + tribler-mod/Tribler/Player/Build/Ubuntu/rules | 85 + .../Tribler/Player/Build/Ubuntu/swarmplayer.1 | 22 + .../Player/Build/Ubuntu/swarmplayer.desktop | 8 + .../Player/Build/Ubuntu/swarmplayer.manpages | 1 + .../Player/Build/Ubuntu/swarmplayer.menu | 4 + .../Ubuntu/swarmplayer.postinst.debhelper | 5 + .../Build/Ubuntu/swarmplayer.postrm.debhelper | 3 + .../Player/Build/Ubuntu/swarmplayer.sh | 31 + .../Player/Build/Ubuntu/swarmplayer.xpm | 257 ++ .../Player/Build/Ubuntu/swarmplayer_big.xpm | 563 +++ .../Tribler/Player/Build/Win32/heading.bmp | Bin 0 -> 25818 bytes .../Player/Build/Win32/setuptriblerplay.py | 53 + .../Build/Win32/setuptriblerplay.py.bak | 52 + .../Build/Win32/swarmplayer.exe.manifest | 23 + .../Player/Build/Win32/triblerplay.nsi | 222 + tribler-mod/Tribler/Player/Reporter.py | 164 + tribler-mod/Tribler/Player/Reporter.py.bak | 163 + tribler-mod/Tribler/Player/UtilityStub.py | 38 + tribler-mod/Tribler/Player/UtilityStub.py.bak | 37 + tribler-mod/Tribler/Player/__init__.py | 3 + tribler-mod/Tribler/Player/__init__.py.bak | 2 + tribler-mod/Tribler/Player/swarmplayer.py | 620 +++ tribler-mod/Tribler/Player/swarmplayer.py.bak | 619 +++ tribler-mod/Tribler/Player/systray.py | 189 + tribler-mod/Tribler/Player/systray.py.bak | 188 + .../Tribler/Plugin/BackgroundProcess.py | 441 ++ .../Tribler/Plugin/BackgroundProcess.py.bak | 440 ++ .../Tribler/Plugin/Build/Win32/setupBGexe.py | 14 + .../Plugin/Build/Win32/setupBGexe.py.bak | 13 + .../Tribler/Plugin/pluginemulator-http.py | 46 + .../Tribler/Plugin/pluginemulator-http.py.bak | 45 + tribler-mod/Tribler/Plugin/pluginemulator.py | 45 + .../Tribler/Plugin/pluginemulator.py.bak | 44 + tribler-mod/Tribler/Policies/RateManager.py | 301 ++ .../Tribler/Policies/RateManager.py.bak | 300 ++ .../Tribler/Policies/SeedingManager.py | 210 + .../Tribler/Policies/SeedingManager.py.bak | 209 + .../Tribler/Policies/UploadLimitation.py | 256 ++ .../Tribler/Policies/UploadLimitation.py.bak | 255 ++ tribler-mod/Tribler/Policies/__init__.py | 3 + tribler-mod/Tribler/Policies/__init__.py.bak | 2 + 
tribler-mod/Tribler/Subscriptions/__init__.py | 4 + .../Tribler/Subscriptions/__init__.py.bak | 3 + .../Tribler/Subscriptions/rss_client.py | 551 +++ .../Tribler/Subscriptions/rss_client.py.bak | 550 +++ .../Tribler/Test/API/contentdir/file.avi | 1371 ++++++ .../Tribler/Test/API/contentdir/file.txt | 5 + tribler-mod/Tribler/Test/API/ec.pem | 5 + tribler-mod/Tribler/Test/API/ecpub.pem | 4 + tribler-mod/Tribler/Test/API/file.wmv | Bin 0 -> 82948 bytes tribler-mod/Tribler/Test/API/file2.wmv | Bin 0 -> 377860 bytes tribler-mod/Tribler/Test/API/test_api.bat | 7 + tribler-mod/Tribler/Test/API/test_api.sh | 11 + .../Tribler/Test/API/test_remote_torrent.py | 397 ++ .../Test/API/test_remote_torrent.py.bak | 396 ++ tribler-mod/Tribler/Test/API/test_seeding.py | 165 + .../Tribler/Test/API/test_seeding.py.bak | 164 + .../Tribler/Test/API/test_seeding_live.py | 208 + .../Tribler/Test/API/test_seeding_live.py.bak | 207 + .../Tribler/Test/API/test_seeding_vod.py | 162 + .../Tribler/Test/API/test_seeding_vod.py.bak | 161 + tribler-mod/Tribler/Test/API/test_tdef.py | 345 ++ tribler-mod/Tribler/Test/API/test_tdef.py.bak | 344 ++ tribler-mod/Tribler/Test/API/test_tracking.py | 91 + .../Tribler/Test/API/test_tracking.py.bak | 90 + .../Tribler/Test/API/test_upload_limit.py | 472 ++ .../Tribler/Test/API/test_upload_limit.py.bak | 471 ++ tribler-mod/Tribler/Test/API/test_vod.py | 63 + tribler-mod/Tribler/Test/API/test_vod.py.bak | 62 + tribler-mod/Tribler/Test/API/thumb.jpg | Bin 0 -> 4812 bytes tribler-mod/Tribler/Test/TESTSUITE.txt | 93 + tribler-mod/Tribler/Test/__init__.py | 3 + tribler-mod/Tribler/Test/__init__.py.bak | 2 + tribler-mod/Tribler/Test/bencode.py | 319 ++ tribler-mod/Tribler/Test/bencode.py.bak | 318 ++ tribler-mod/Tribler/Test/btconn.py | 157 + tribler-mod/Tribler/Test/btconn.py.bak | 156 + tribler-mod/Tribler/Test/buddycast_data.py | 342 ++ .../Tribler/Test/buddycast_data.py.bak | 341 ++ .../Test/extend_db_dir/bak_multiple.torrent | Bin 0 -> 85933 bytes .../Test/extend_db_dir/bak_single.torrent | Bin 0 -> 1200 bytes .../Test/extend_db_dir/bak_tribler.tar.gz | Bin 0 -> 5676157 bytes .../Tribler/Test/extend_db_dir/bsddb.tar.gz | Bin 0 -> 12498136 bytes .../superpeer120070902sp7001.log | 560 +++ .../Test/extend_db_dir/url_to_bsddb.txt | 3 + .../Test/extend_db_dir/url_to_tribler.sdb.txt | 4 + .../extend_hs_dir/dummydata.merkle.torrent | 1 + tribler-mod/Tribler/Test/log_parser.py | 146 + tribler-mod/Tribler/Test/log_parser.py.bak | 145 + tribler-mod/Tribler/Test/olconn.py | 108 + tribler-mod/Tribler/Test/olconn.py.bak | 107 + tribler-mod/Tribler/Test/test.bat | 37 + tribler-mod/Tribler/Test/test.sh | 41 + .../Tribler/Test/test_TimedTaskQueue.py | 81 + .../Tribler/Test/test_TimedTaskQueue.py.bak | 80 + tribler-mod/Tribler/Test/test_as_server.py | 74 + .../Tribler/Test/test_as_server.py.bak | 73 + tribler-mod/Tribler/Test/test_bartercast.py | 215 + .../Tribler/Test/test_bartercast.py.bak | 214 + tribler-mod/Tribler/Test/test_bsddb2sqlite.py | 123 + .../Tribler/Test/test_bsddb2sqlite.py.bak | 122 + tribler-mod/Tribler/Test/test_buddycast.py | 218 + .../Tribler/Test/test_buddycast.py.bak | 217 + .../Test/test_buddycast2_datahandler.bat | 3 + .../Test/test_buddycast2_datahandler.py | 263 ++ .../Test/test_buddycast2_datahandler.py.bak | 262 ++ .../Test/test_buddycast2_datahandler.sh | 12 + tribler-mod/Tribler/Test/test_buddycast4.py | 377 ++ .../Tribler/Test/test_buddycast4.py.bak | 376 ++ .../Test/test_buddycast4_stresstest.py | 243 + .../Test/test_buddycast4_stresstest.py.bak | 242 + 
.../Tribler/Test/test_buddycast_msg.bat | 7 + .../Tribler/Test/test_buddycast_msg.py | 497 +++ .../Tribler/Test/test_buddycast_msg.py.bak | 496 +++ .../Tribler/Test/test_buddycast_msg.sh | 16 + tribler-mod/Tribler/Test/test_cachedb.py | 266 ++ tribler-mod/Tribler/Test/test_cachedb.py.bak | 265 ++ .../Tribler/Test/test_cachedbhandler.py | 125 + .../Tribler/Test/test_cachedbhandler.py.bak | 124 + .../Tribler/Test/test_connect_overlay.py | 151 + .../Tribler/Test/test_connect_overlay.py.bak | 150 + tribler-mod/Tribler/Test/test_crawler.py | 306 ++ tribler-mod/Tribler/Test/test_crawler.py.bak | 305 ++ .../Test/test_dialback_conn_handler.bat | 15 + .../Test/test_dialback_conn_handler.py | 492 +++ .../Test/test_dialback_conn_handler.py.bak | 491 +++ .../Test/test_dialback_conn_handler.sh | 25 + .../Test/test_dialback_reply_active.bat | 12 + .../Test/test_dialback_reply_active.py | 252 ++ .../Test/test_dialback_reply_active.py.bak | 251 ++ .../Test/test_dialback_reply_active.sh | 21 + .../Test/test_dialback_reply_active2.py | 45 + .../Test/test_dialback_reply_active2.py.bak | 44 + .../Tribler/Test/test_dialback_request.py | 162 + .../Tribler/Test/test_dialback_request.py.bak | 161 + tribler-mod/Tribler/Test/test_dlhelp.bat | 12 + tribler-mod/Tribler/Test/test_dlhelp.py | 414 ++ tribler-mod/Tribler/Test/test_dlhelp.py.bak | 413 ++ tribler-mod/Tribler/Test/test_dlhelp.sh | 21 + tribler-mod/Tribler/Test/test_extend_hs.py | 318 ++ .../Tribler/Test/test_extend_hs.py.bak | 317 ++ .../Tribler/Test/test_extend_hs_t350.py | 181 + .../Tribler/Test/test_extend_hs_t350.py.bak | 180 + tribler-mod/Tribler/Test/test_friend.py | 137 + tribler-mod/Tribler/Test/test_friend.py.bak | 136 + tribler-mod/Tribler/Test/test_friendship.bat | 16 + tribler-mod/Tribler/Test/test_friendship.py | 752 ++++ .../Tribler/Test/test_friendship.py.bak | 751 ++++ tribler-mod/Tribler/Test/test_friendship.sh | 25 + .../Tribler/Test/test_friendship_crawler.py | 95 + .../Test/test_friendship_crawler.py.bak | 94 + tribler-mod/Tribler/Test/test_g2g.py | 283 ++ tribler-mod/Tribler/Test/test_g2g.py.bak | 282 ++ tribler-mod/Tribler/Test/test_gui_server.py | 68 + .../Tribler/Test/test_gui_server.py.bak | 67 + tribler-mod/Tribler/Test/test_merkle.py | 316 ++ tribler-mod/Tribler/Test/test_merkle.py.bak | 315 ++ tribler-mod/Tribler/Test/test_na_extend_hs.py | 184 + .../Tribler/Test/test_na_extend_hs.py.bak | 183 + tribler-mod/Tribler/Test/test_na_extend_hs.sh | 13 + tribler-mod/Tribler/Test/test_natcheck.py | 75 + tribler-mod/Tribler/Test/test_natcheck.py.bak | 74 + tribler-mod/Tribler/Test/test_osutils.py | 76 + tribler-mod/Tribler/Test/test_osutils.py.bak | 75 + .../Tribler/Test/test_overlay_bridge.py | 64 + .../Tribler/Test/test_overlay_bridge.py.bak | 63 + .../Tribler/Test/test_overlay_bridge.sh | 29 + tribler-mod/Tribler/Test/test_permid.py | 349 ++ tribler-mod/Tribler/Test/test_permid.py.bak | 348 ++ .../Tribler/Test/test_permid_response1.py | 436 ++ .../Tribler/Test/test_permid_response1.py.bak | 435 ++ tribler-mod/Tribler/Test/test_remote_query.py | 260 ++ .../Tribler/Test/test_remote_query.py.bak | 259 ++ .../Tribler/Test/test_rquery_reply_active.bat | 4 + .../Tribler/Test/test_rquery_reply_active.py | 181 + .../Test/test_rquery_reply_active.py.bak | 180 + .../Tribler/Test/test_rquery_reply_active.sh | 13 + .../Tribler/Test/test_secure_overlay.bat | 23 + .../Tribler/Test/test_secure_overlay.py | 690 +++ .../Tribler/Test/test_secure_overlay.py.bak | 689 +++ .../Tribler/Test/test_secure_overlay.sh | 31 + 
.../Tribler/Test/test_seeding_stats.py | 94 + .../Tribler/Test/test_seeding_stats.py.bak | 93 + tribler-mod/Tribler/Test/test_sim.py | 83 + tribler-mod/Tribler/Test/test_sim.py.bak | 82 + .../Tribler/Test/test_social_overlap.py | 314 ++ .../Tribler/Test/test_social_overlap.py.bak | 313 ++ .../Tribler/Test/test_sqlitecachedb.py | 879 ++++ .../Tribler/Test/test_sqlitecachedb.py.bak | 878 ++++ .../Tribler/Test/test_sqlitecachedbhandler.py | 1114 +++++ .../Test/test_sqlitecachedbhandler.py.bak | 1113 +++++ .../Tribler/Test/test_sqlitecachedbhandler.sh | 58 + tribler-mod/Tribler/Test/test_superpeers.py | 90 + .../Tribler/Test/test_superpeers.py.bak | 89 + .../Tribler/Test/test_torrentcollecting.py | 106 + .../Test/test_torrentcollecting.py.bak | 105 + .../Tribler/Test/test_tracker_checking.py | 25 + .../Tribler/Test/test_tracker_checking.py.bak | 24 + tribler-mod/Tribler/Test/test_ut_pex.py | 397 ++ tribler-mod/Tribler/Test/test_ut_pex.py.bak | 396 ++ tribler-mod/Tribler/Test/testdata.txt | 1004 +++++ tribler-mod/Tribler/Test/usericon-ok.jpg | Bin 0 -> 1129 bytes tribler-mod/Tribler/Tools/__init__.py | 3 + tribler-mod/Tribler/Tools/__init__.py.bak | 2 + tribler-mod/Tribler/Tools/addplaytime.py | 97 + tribler-mod/Tribler/Tools/addplaytime.py.bak | 96 + .../Tribler/Tools/bitbucket-live-noauth.py | 72 + .../Tools/bitbucket-live-noauth.py.bak | 71 + tribler-mod/Tribler/Tools/bitbucket-live.py | 66 + .../Tribler/Tools/bitbucket-live.py.bak | 65 + tribler-mod/Tribler/Tools/btshowmetainfo.py | 107 + .../Tribler/Tools/btshowmetainfo.py.bak | 106 + tribler-mod/Tribler/Tools/cmdlinedl.py | 140 + tribler-mod/Tribler/Tools/cmdlinedl.py.bak | 134 + .../Tribler/Tools/createlivestream-noauth.py | 156 + .../Tools/createlivestream-noauth.py.bak | 155 + tribler-mod/Tribler/Tools/createlivestream.py | 152 + .../Tribler/Tools/createlivestream.py.bak | 151 + tribler-mod/Tribler/Tools/dirtrackerseeder.py | 156 + .../Tribler/Tools/dirtrackerseeder.py.bak | 155 + .../Tribler/Tools/pipe-arnocam-home.sh | 2 + tribler-mod/Tribler/Tools/pipe-arnocam-jip.sh | 2 + .../Tools/pipe-babscam-h264-aac-gop-sync.sh | 7 + .../Tribler/Tools/pipe-babscam-h264-mp3.sh | 3 + .../pipe-babscam-h264-nosound-mencoder.sh | 7 + .../Tools/pipe-babscam-mpeg4-mp3-sync.sh | 3 + tribler-mod/Tribler/Tools/superpeer.py | 104 + tribler-mod/Tribler/Tools/superpeer.py.bak | 103 + .../TrackerChecking/TorrentChecking.py | 212 + .../TrackerChecking/TorrentChecking.py.bak | 211 + .../TrackerChecking/TrackerChecking.py | 162 + .../TrackerChecking/TrackerChecking.py.bak | 161 + .../Tribler/TrackerChecking/__init__.py | 3 + .../Tribler/TrackerChecking/__init__.py.bak | 2 + .../Tribler/Utilities/Instance2Instance.py | 225 + .../Utilities/Instance2Instance.py.bak | 224 + .../Utilities/LinuxSingleInstanceChecker.py | 25 + .../LinuxSingleInstanceChecker.py.bak | 24 + .../Tribler/Utilities/TimedTaskQueue.py | 106 + .../Tribler/Utilities/TimedTaskQueue.py.bak | 105 + tribler-mod/Tribler/Utilities/__init__.py | 3 + tribler-mod/Tribler/Utilities/__init__.py.bak | 2 + tribler-mod/Tribler/Utilities/configreader.py | 465 ++ .../Tribler/Utilities/configreader.py.bak | 464 ++ tribler-mod/Tribler/Video/Buttons.py | 317 ++ tribler-mod/Tribler/Video/Buttons.py.bak | 316 ++ tribler-mod/Tribler/Video/EmbeddedPlayer.py | 733 ++++ .../Tribler/Video/EmbeddedPlayer.py.bak | 732 ++++ .../Tribler/Video/EmbeddedPlayer4Frame.py | 318 ++ .../Tribler/Video/EmbeddedPlayer4Frame.py.bak | 317 ++ .../Tribler/Video/Images/4framebackground.png | Bin 0 -> 164 bytes 
.../Tribler/Video/Images/4framesliderDot.png | Bin 0 -> 352 bytes .../Video/Images/4framesliderDot_dis.png | Bin 0 -> 352 bytes .../Video/Images/4framesliderVolume.png | Bin 0 -> 292 bytes .../Tribler/Video/Images/background.png | Bin 0 -> 152 bytes .../Tribler/Video/Images/fullScreen-hover.png | Bin 0 -> 1660 bytes .../Tribler/Video/Images/fullScreen.png | Bin 0 -> 1690 bytes .../Tribler/Video/Images/fullScreen_dis.png | Bin 0 -> 548 bytes .../Tribler/Video/Images/fullScreen_hover.png | Bin 0 -> 568 bytes tribler-mod/Tribler/Video/Images/pause.png | Bin 0 -> 598 bytes .../Tribler/Video/Images/pause_dis.png | Bin 0 -> 519 bytes .../Tribler/Video/Images/pause_hover.png | Bin 0 -> 551 bytes tribler-mod/Tribler/Video/Images/play.png | Bin 0 -> 590 bytes tribler-mod/Tribler/Video/Images/play_dis.png | Bin 0 -> 548 bytes .../Tribler/Video/Images/play_hover.png | Bin 0 -> 597 bytes .../Tribler/Video/Images/sliderDot.png | Bin 0 -> 437 bytes .../Tribler/Video/Images/sliderDot_dis.png | Bin 0 -> 366 bytes .../Tribler/Video/Images/sliderDot_hover.png | Bin 0 -> 444 bytes .../Tribler/Video/Images/sliderVolume.png | Bin 0 -> 199 bytes tribler-mod/Tribler/Video/Images/vol0.png | Bin 0 -> 193 bytes .../Tribler/Video/Images/vol0Enabled.png | Bin 0 -> 193 bytes .../Video/Images/vol0Enabled_clicked.png | Bin 0 -> 193 bytes .../Tribler/Video/Images/vol0_clicked.png | Bin 0 -> 193 bytes tribler-mod/Tribler/Video/Images/vol1.png | Bin 0 -> 220 bytes .../Tribler/Video/Images/vol1Enabled.png | Bin 0 -> 220 bytes .../Video/Images/vol1Enabled_clicked.png | Bin 0 -> 220 bytes .../Tribler/Video/Images/vol1_hover.png | Bin 0 -> 220 bytes tribler-mod/Tribler/Video/Images/vol2.png | Bin 0 -> 224 bytes .../Tribler/Video/Images/vol2Enabled.png | Bin 0 -> 225 bytes .../Video/Images/vol2Enabled_clicked.png | Bin 0 -> 224 bytes .../Tribler/Video/Images/vol2_hover.png | Bin 0 -> 225 bytes tribler-mod/Tribler/Video/Images/vol3.png | Bin 0 -> 224 bytes .../Tribler/Video/Images/vol3Enabled.png | Bin 0 -> 225 bytes .../Video/Images/vol3Enabled_clicked.png | Bin 0 -> 224 bytes .../Tribler/Video/Images/vol3_hover.png | Bin 0 -> 225 bytes tribler-mod/Tribler/Video/Images/vol4.png | Bin 0 -> 226 bytes .../Tribler/Video/Images/vol4Enabled.png | Bin 0 -> 226 bytes .../Video/Images/vol4Enabled_clicked.png | Bin 0 -> 226 bytes .../Tribler/Video/Images/vol4_hover.png | Bin 0 -> 226 bytes tribler-mod/Tribler/Video/Images/vol5.png | Bin 0 -> 221 bytes .../Video/Images/vol5Enabled_clicked.png | Bin 0 -> 226 bytes .../Tribler/Video/Images/vol5_hover.png | Bin 0 -> 227 bytes tribler-mod/Tribler/Video/Images/vol6.png | Bin 0 -> 233 bytes .../Video/Images/vol6Enabled_clicked.png | Bin 0 -> 233 bytes .../Tribler/Video/Images/vol6_hover.png | Bin 0 -> 232 bytes tribler-mod/Tribler/Video/Progress.py | 559 +++ tribler-mod/Tribler/Video/Progress.py.bak | 558 +++ tribler-mod/Tribler/Video/VLCWrapper.py | 388 ++ tribler-mod/Tribler/Video/VLCWrapper.py.bak | 387 ++ tribler-mod/Tribler/Video/VideoFrame.py | 189 + tribler-mod/Tribler/Video/VideoFrame.py.bak | 188 + tribler-mod/Tribler/Video/VideoPlayer.py | 895 ++++ tribler-mod/Tribler/Video/VideoPlayer.py.bak | 894 ++++ tribler-mod/Tribler/Video/VideoServer.py | 274 ++ tribler-mod/Tribler/Video/VideoServer.py.bak | 273 ++ tribler-mod/Tribler/Video/__init__.py | 3 + tribler-mod/Tribler/Video/__init__.py.bak | 2 + tribler-mod/Tribler/Video/defs.py | 17 + tribler-mod/Tribler/Video/defs.py.bak | 16 + tribler-mod/Tribler/Video/utils.py | 121 + tribler-mod/Tribler/Video/utils.py.bak | 120 + 
tribler-mod/Tribler/Web2/__init__.py | 20 + tribler-mod/Tribler/Web2/__init__.py.bak | 19 + tribler-mod/Tribler/Web2/photo/__init__.py | 3 + .../Tribler/Web2/photo/__init__.py.bak | 2 + tribler-mod/Tribler/Web2/photo/flickr.py | 242 + tribler-mod/Tribler/Web2/photo/flickr.py.bak | 241 + tribler-mod/Tribler/Web2/photo/photo.py | 82 + tribler-mod/Tribler/Web2/photo/photo.py.bak | 81 + tribler-mod/Tribler/Web2/photo/settings.py | 20 + .../Tribler/Web2/photo/settings.py.bak | 19 + tribler-mod/Tribler/Web2/photo/zooomr.py | 127 + tribler-mod/Tribler/Web2/photo/zooomr.py.bak | 126 + tribler-mod/Tribler/Web2/util/__init__.py | 3 + tribler-mod/Tribler/Web2/util/__init__.py.bak | 2 + tribler-mod/Tribler/Web2/util/codec.py | 373 ++ tribler-mod/Tribler/Web2/util/codec.py.bak | 372 ++ tribler-mod/Tribler/Web2/util/config.py | 27 + tribler-mod/Tribler/Web2/util/config.py.bak | 26 + tribler-mod/Tribler/Web2/util/db.py | 606 +++ tribler-mod/Tribler/Web2/util/db.py.bak | 605 +++ tribler-mod/Tribler/Web2/util/download.py | 335 ++ tribler-mod/Tribler/Web2/util/download.py.bak | 334 ++ tribler-mod/Tribler/Web2/util/history.py | 111 + tribler-mod/Tribler/Web2/util/history.py.bak | 110 + tribler-mod/Tribler/Web2/util/log.py | 24 + tribler-mod/Tribler/Web2/util/log.py.bak | 23 + tribler-mod/Tribler/Web2/util/mypartial.py | 14 + .../Tribler/Web2/util/mypartial.py.bak | 13 + tribler-mod/Tribler/Web2/util/observer.py | 79 + tribler-mod/Tribler/Web2/util/observer.py.bak | 78 + tribler-mod/Tribler/Web2/util/update.py | 120 + tribler-mod/Tribler/Web2/util/update.py.bak | 119 + tribler-mod/Tribler/Web2/util/utilsettings.py | 8 + .../Tribler/Web2/util/utilsettings.py.bak | 7 + tribler-mod/Tribler/Web2/video/__init__.py | 3 + .../Tribler/Web2/video/__init__.py.bak | 2 + .../Tribler/Web2/video/genericsearch.py | 267 ++ .../Tribler/Web2/video/genericsearch.py.bak | 266 ++ tribler-mod/Tribler/Web2/video/settings.py | 18 + .../Tribler/Web2/video/settings.py.bak | 17 + tribler-mod/Tribler/Web2/video/video.py | 108 + tribler-mod/Tribler/Web2/video/video.py.bak | 107 + tribler-mod/Tribler/Web2/web2definitions.conf | 77 + tribler-mod/Tribler/__init__.py | 6 + tribler-mod/Tribler/__init__.py.bak | 5 + tribler-mod/Tribler/binary-LICENSE.txt | 1998 +++++++++ tribler-mod/Tribler/readme.txt | 242 + tribler-mod/Tribler/tribler_sdb_v2.sql | 314 ++ 1154 files changed, 226305 insertions(+) create mode 100644 tribler-mod/Tribler/Category/Category.py create mode 100644 tribler-mod/Tribler/Category/Category.py.bak create mode 100644 tribler-mod/Tribler/Category/FamilyFilter.py create mode 100644 tribler-mod/Tribler/Category/FamilyFilter.py.bak create mode 100644 tribler-mod/Tribler/Category/TestCategory.py create mode 100644 tribler-mod/Tribler/Category/TestCategory.py.bak create mode 100644 tribler-mod/Tribler/Category/__init__.py create mode 100644 tribler-mod/Tribler/Category/__init__.py.bak create mode 100644 tribler-mod/Tribler/Category/category.conf create mode 100644 tribler-mod/Tribler/Category/filter_terms.filter create mode 100644 tribler-mod/Tribler/Category/init_category.py create mode 100644 tribler-mod/Tribler/Category/init_category.py.bak create mode 100644 tribler-mod/Tribler/Category/porncat.txt create mode 100644 tribler-mod/Tribler/Core/API.py create mode 100644 tribler-mod/Tribler/Core/API.py.bak create mode 100644 tribler-mod/Tribler/Core/APIImplementation/DownloadImpl.py create mode 100644 tribler-mod/Tribler/Core/APIImplementation/DownloadImpl.py.bak create mode 100644 
tribler-mod/Tribler/Core/APIImplementation/DownloadRuntimeConfig.py create mode 100644 tribler-mod/Tribler/Core/APIImplementation/DownloadRuntimeConfig.py.bak create mode 100644 tribler-mod/Tribler/Core/APIImplementation/LaunchManyCore.py create mode 100644 tribler-mod/Tribler/Core/APIImplementation/LaunchManyCore.py.bak create mode 100644 tribler-mod/Tribler/Core/APIImplementation/SessionRuntimeConfig.py create mode 100644 tribler-mod/Tribler/Core/APIImplementation/SessionRuntimeConfig.py.bak create mode 100644 tribler-mod/Tribler/Core/APIImplementation/SingleDownload.py create mode 100644 tribler-mod/Tribler/Core/APIImplementation/SingleDownload.py.bak create mode 100644 tribler-mod/Tribler/Core/APIImplementation/ThreadPool.py create mode 100644 tribler-mod/Tribler/Core/APIImplementation/ThreadPool.py.bak create mode 100644 tribler-mod/Tribler/Core/APIImplementation/UserCallbackHandler.py create mode 100644 tribler-mod/Tribler/Core/APIImplementation/UserCallbackHandler.py.bak create mode 100644 tribler-mod/Tribler/Core/APIImplementation/__init__.py create mode 100644 tribler-mod/Tribler/Core/APIImplementation/__init__.py.bak create mode 100644 tribler-mod/Tribler/Core/APIImplementation/maketorrent.py create mode 100644 tribler-mod/Tribler/Core/APIImplementation/maketorrent.py.bak create mode 100644 tribler-mod/Tribler/Core/APIImplementation/miscutils.py create mode 100644 tribler-mod/Tribler/Core/APIImplementation/miscutils.py.bak create mode 100644 tribler-mod/Tribler/Core/Base.py create mode 100644 tribler-mod/Tribler/Core/Base.py.bak create mode 100644 tribler-mod/Tribler/Core/BitTornado/BT1/Choker.py create mode 100644 tribler-mod/Tribler/Core/BitTornado/BT1/Choker.py.bak create mode 100644 tribler-mod/Tribler/Core/BitTornado/BT1/Connecter.py create mode 100644 tribler-mod/Tribler/Core/BitTornado/BT1/Connecter.py.bak create mode 100644 tribler-mod/Tribler/Core/BitTornado/BT1/Downloader.py create mode 100644 tribler-mod/Tribler/Core/BitTornado/BT1/Downloader.py.bak create mode 100644 tribler-mod/Tribler/Core/BitTornado/BT1/DownloaderFeedback.py create mode 100644 tribler-mod/Tribler/Core/BitTornado/BT1/DownloaderFeedback.py.bak create mode 100644 tribler-mod/Tribler/Core/BitTornado/BT1/Encrypter.py create mode 100644 tribler-mod/Tribler/Core/BitTornado/BT1/Encrypter.py.bak create mode 100644 tribler-mod/Tribler/Core/BitTornado/BT1/FileSelector.py create mode 100644 tribler-mod/Tribler/Core/BitTornado/BT1/FileSelector.py.bak create mode 100644 tribler-mod/Tribler/Core/BitTornado/BT1/Filter.py create mode 100644 tribler-mod/Tribler/Core/BitTornado/BT1/Filter.py.bak create mode 100644 tribler-mod/Tribler/Core/BitTornado/BT1/HTTPDownloader.py create mode 100644 tribler-mod/Tribler/Core/BitTornado/BT1/HTTPDownloader.py.bak create mode 100644 tribler-mod/Tribler/Core/BitTornado/BT1/MessageID.py create mode 100644 tribler-mod/Tribler/Core/BitTornado/BT1/MessageID.py.bak create mode 100644 tribler-mod/Tribler/Core/BitTornado/BT1/NatCheck.py create mode 100644 tribler-mod/Tribler/Core/BitTornado/BT1/NatCheck.py.bak create mode 100644 tribler-mod/Tribler/Core/BitTornado/BT1/PiecePicker.py create mode 100644 tribler-mod/Tribler/Core/BitTornado/BT1/PiecePicker.py.bak create mode 100644 tribler-mod/Tribler/Core/BitTornado/BT1/Rerequester.py create mode 100644 tribler-mod/Tribler/Core/BitTornado/BT1/Rerequester.py.bak create mode 100644 tribler-mod/Tribler/Core/BitTornado/BT1/Statistics.py create mode 100644 tribler-mod/Tribler/Core/BitTornado/BT1/Statistics.py.bak create mode 100644 
tribler-mod/Tribler/Core/BitTornado/BT1/Storage.py create mode 100644 tribler-mod/Tribler/Core/BitTornado/BT1/Storage.py.bak create mode 100644 tribler-mod/Tribler/Core/BitTornado/BT1/StorageWrapper.py create mode 100644 tribler-mod/Tribler/Core/BitTornado/BT1/StorageWrapper.py.bak create mode 100644 tribler-mod/Tribler/Core/BitTornado/BT1/T2T.py create mode 100644 tribler-mod/Tribler/Core/BitTornado/BT1/T2T.py.bak create mode 100644 tribler-mod/Tribler/Core/BitTornado/BT1/Uploader.py create mode 100644 tribler-mod/Tribler/Core/BitTornado/BT1/Uploader.py.bak create mode 100644 tribler-mod/Tribler/Core/BitTornado/BT1/__init__.py create mode 100644 tribler-mod/Tribler/Core/BitTornado/BT1/__init__.py.bak create mode 100644 tribler-mod/Tribler/Core/BitTornado/BT1/btformats.py create mode 100644 tribler-mod/Tribler/Core/BitTornado/BT1/btformats.py.bak create mode 100644 tribler-mod/Tribler/Core/BitTornado/BT1/convert.py create mode 100644 tribler-mod/Tribler/Core/BitTornado/BT1/convert.py.bak create mode 100644 tribler-mod/Tribler/Core/BitTornado/BT1/fakeopen.py create mode 100644 tribler-mod/Tribler/Core/BitTornado/BT1/fakeopen.py.bak create mode 100644 tribler-mod/Tribler/Core/BitTornado/BT1/track.py create mode 100644 tribler-mod/Tribler/Core/BitTornado/BT1/track.py.bak create mode 100644 tribler-mod/Tribler/Core/BitTornado/CurrentRateMeasure.py create mode 100644 tribler-mod/Tribler/Core/BitTornado/CurrentRateMeasure.py.bak create mode 100644 tribler-mod/Tribler/Core/BitTornado/HTTPHandler.py create mode 100644 tribler-mod/Tribler/Core/BitTornado/HTTPHandler.py.bak create mode 100644 tribler-mod/Tribler/Core/BitTornado/PSYCO.py create mode 100644 tribler-mod/Tribler/Core/BitTornado/PSYCO.py.bak create mode 100644 tribler-mod/Tribler/Core/BitTornado/RateLimiter.py create mode 100644 tribler-mod/Tribler/Core/BitTornado/RateLimiter.py.bak create mode 100644 tribler-mod/Tribler/Core/BitTornado/RateMeasure.py create mode 100644 tribler-mod/Tribler/Core/BitTornado/RateMeasure.py.bak create mode 100644 tribler-mod/Tribler/Core/BitTornado/RawServer.py create mode 100644 tribler-mod/Tribler/Core/BitTornado/RawServer.py.bak create mode 100644 tribler-mod/Tribler/Core/BitTornado/ServerPortHandler.py create mode 100644 tribler-mod/Tribler/Core/BitTornado/ServerPortHandler.py.bak create mode 100644 tribler-mod/Tribler/Core/BitTornado/SocketHandler.py create mode 100644 tribler-mod/Tribler/Core/BitTornado/SocketHandler.py.bak create mode 100644 tribler-mod/Tribler/Core/BitTornado/__init__.py create mode 100644 tribler-mod/Tribler/Core/BitTornado/__init__.py.bak create mode 100644 tribler-mod/Tribler/Core/BitTornado/bencode.py create mode 100644 tribler-mod/Tribler/Core/BitTornado/bencode.py.bak create mode 100644 tribler-mod/Tribler/Core/BitTornado/bitfield.py create mode 100644 tribler-mod/Tribler/Core/BitTornado/bitfield.py.bak create mode 100644 tribler-mod/Tribler/Core/BitTornado/clock.py create mode 100644 tribler-mod/Tribler/Core/BitTornado/clock.py.bak create mode 100644 tribler-mod/Tribler/Core/BitTornado/download_bt1.py create mode 100644 tribler-mod/Tribler/Core/BitTornado/download_bt1.py.bak create mode 100644 tribler-mod/Tribler/Core/BitTornado/inifile.py create mode 100644 tribler-mod/Tribler/Core/BitTornado/inifile.py.bak create mode 100644 tribler-mod/Tribler/Core/BitTornado/iprangeparse.py create mode 100644 tribler-mod/Tribler/Core/BitTornado/iprangeparse.py.bak create mode 100644 tribler-mod/Tribler/Core/BitTornado/natpunch.py create mode 100644 
tribler-mod/Tribler/Core/BitTornado/natpunch.py.bak create mode 100644 tribler-mod/Tribler/Core/BitTornado/parseargs.py create mode 100644 tribler-mod/Tribler/Core/BitTornado/parseargs.py.bak create mode 100644 tribler-mod/Tribler/Core/BitTornado/parsedir.py create mode 100644 tribler-mod/Tribler/Core/BitTornado/parsedir.py.bak create mode 100644 tribler-mod/Tribler/Core/BitTornado/piecebuffer.py create mode 100644 tribler-mod/Tribler/Core/BitTornado/piecebuffer.py.bak create mode 100644 tribler-mod/Tribler/Core/BitTornado/selectpoll.py create mode 100644 tribler-mod/Tribler/Core/BitTornado/selectpoll.py.bak create mode 100644 tribler-mod/Tribler/Core/BitTornado/subnetparse.py create mode 100644 tribler-mod/Tribler/Core/BitTornado/subnetparse.py.bak create mode 100644 tribler-mod/Tribler/Core/BitTornado/torrentlistparse.py create mode 100644 tribler-mod/Tribler/Core/BitTornado/torrentlistparse.py.bak create mode 100644 tribler-mod/Tribler/Core/BitTornado/zurllib.py create mode 100644 tribler-mod/Tribler/Core/BitTornado/zurllib.py.bak create mode 100644 tribler-mod/Tribler/Core/BuddyCast/TorrentCollecting.py create mode 100644 tribler-mod/Tribler/Core/BuddyCast/TorrentCollecting.py.bak create mode 100644 tribler-mod/Tribler/Core/BuddyCast/__init__.py create mode 100644 tribler-mod/Tribler/Core/BuddyCast/__init__.py.bak create mode 100644 tribler-mod/Tribler/Core/BuddyCast/bartercast.py create mode 100644 tribler-mod/Tribler/Core/BuddyCast/bartercast.py.bak create mode 100644 tribler-mod/Tribler/Core/BuddyCast/buddycast.py create mode 100644 tribler-mod/Tribler/Core/BuddyCast/buddycast.py.bak create mode 100644 tribler-mod/Tribler/Core/BuddyCast/moderationcast.py create mode 100644 tribler-mod/Tribler/Core/BuddyCast/moderationcast.py.bak create mode 100644 tribler-mod/Tribler/Core/BuddyCast/moderationcast_util.py create mode 100644 tribler-mod/Tribler/Core/BuddyCast/moderationcast_util.py.bak create mode 100644 tribler-mod/Tribler/Core/BuddyCast/similarity.py create mode 100644 tribler-mod/Tribler/Core/BuddyCast/similarity.py.bak create mode 100644 tribler-mod/Tribler/Core/BuddyCast/votecast.py create mode 100644 tribler-mod/Tribler/Core/BuddyCast/votecast.py.bak create mode 100644 tribler-mod/Tribler/Core/CacheDB/BsdCacheDBHandler.py create mode 100644 tribler-mod/Tribler/Core/CacheDB/BsdCacheDBHandler.py.bak create mode 100644 tribler-mod/Tribler/Core/CacheDB/CacheDBHandler.py create mode 100644 tribler-mod/Tribler/Core/CacheDB/CacheDBHandler.py.bak create mode 100644 tribler-mod/Tribler/Core/CacheDB/EditDist.py create mode 100644 tribler-mod/Tribler/Core/CacheDB/EditDist.py.bak create mode 100644 tribler-mod/Tribler/Core/CacheDB/Notifier.py create mode 100644 tribler-mod/Tribler/Core/CacheDB/Notifier.py.bak create mode 100644 tribler-mod/Tribler/Core/CacheDB/SqliteCacheDBHandler.py create mode 100644 tribler-mod/Tribler/Core/CacheDB/SqliteCacheDBHandler.py.bak create mode 100644 tribler-mod/Tribler/Core/CacheDB/SqliteFriendshipStatsCacheDB.py create mode 100644 tribler-mod/Tribler/Core/CacheDB/SqliteFriendshipStatsCacheDB.py.bak create mode 100644 tribler-mod/Tribler/Core/CacheDB/SqliteSeedingStatsCacheDB.py create mode 100644 tribler-mod/Tribler/Core/CacheDB/SqliteSeedingStatsCacheDB.py.bak create mode 100644 tribler-mod/Tribler/Core/CacheDB/SqliteVideoPlaybackStatsCacheDB.py create mode 100644 tribler-mod/Tribler/Core/CacheDB/SqliteVideoPlaybackStatsCacheDB.py.bak create mode 100644 tribler-mod/Tribler/Core/CacheDB/__init__.py create mode 100644 
tribler-mod/Tribler/Core/CacheDB/__init__.py.bak create mode 100644 tribler-mod/Tribler/Core/CacheDB/bsdcachedb.py create mode 100644 tribler-mod/Tribler/Core/CacheDB/bsdcachedb.py.bak create mode 100644 tribler-mod/Tribler/Core/CacheDB/bsddb2sqlite.py create mode 100644 tribler-mod/Tribler/Core/CacheDB/bsddb2sqlite.py.bak create mode 100644 tribler-mod/Tribler/Core/CacheDB/cachedb.py create mode 100644 tribler-mod/Tribler/Core/CacheDB/cachedb.py.bak create mode 100644 tribler-mod/Tribler/Core/CacheDB/friends.py create mode 100644 tribler-mod/Tribler/Core/CacheDB/friends.py.bak create mode 100644 tribler-mod/Tribler/Core/CacheDB/maxflow.py create mode 100644 tribler-mod/Tribler/Core/CacheDB/maxflow.py.bak create mode 100644 tribler-mod/Tribler/Core/CacheDB/read_db.py create mode 100644 tribler-mod/Tribler/Core/CacheDB/read_db.py.bak create mode 100644 tribler-mod/Tribler/Core/CacheDB/sqlitecachedb.py create mode 100644 tribler-mod/Tribler/Core/CacheDB/sqlitecachedb.py.bak create mode 100644 tribler-mod/Tribler/Core/CacheDB/unicode.py create mode 100644 tribler-mod/Tribler/Core/CacheDB/unicode.py.bak create mode 100644 tribler-mod/Tribler/Core/CoopDownload/Coordinator.py create mode 100644 tribler-mod/Tribler/Core/CoopDownload/Coordinator.py.bak create mode 100644 tribler-mod/Tribler/Core/CoopDownload/CoordinatorMessageHandler.py create mode 100644 tribler-mod/Tribler/Core/CoopDownload/CoordinatorMessageHandler.py.bak create mode 100644 tribler-mod/Tribler/Core/CoopDownload/Helper.py create mode 100644 tribler-mod/Tribler/Core/CoopDownload/Helper.py.bak create mode 100644 tribler-mod/Tribler/Core/CoopDownload/HelperMessageHandler.py create mode 100644 tribler-mod/Tribler/Core/CoopDownload/HelperMessageHandler.py.bak create mode 100644 tribler-mod/Tribler/Core/CoopDownload/Logger.py create mode 100644 tribler-mod/Tribler/Core/CoopDownload/Logger.py.bak create mode 100644 tribler-mod/Tribler/Core/CoopDownload/RatePredictor.py create mode 100644 tribler-mod/Tribler/Core/CoopDownload/RatePredictor.py.bak create mode 100644 tribler-mod/Tribler/Core/CoopDownload/__init__.py create mode 100644 tribler-mod/Tribler/Core/CoopDownload/__init__.py.bak create mode 100644 tribler-mod/Tribler/Core/DecentralizedTracking/__init__.py create mode 100644 tribler-mod/Tribler/Core/DecentralizedTracking/__init__.py.bak create mode 100644 tribler-mod/Tribler/Core/DecentralizedTracking/mainlineDHT.py create mode 100644 tribler-mod/Tribler/Core/DecentralizedTracking/mainlineDHT.py.bak create mode 100644 tribler-mod/Tribler/Core/DecentralizedTracking/mainlineDHTChecker.py create mode 100644 tribler-mod/Tribler/Core/DecentralizedTracking/mainlineDHTChecker.py.bak create mode 100644 tribler-mod/Tribler/Core/DecentralizedTracking/rsconvert.py create mode 100644 tribler-mod/Tribler/Core/DecentralizedTracking/rsconvert.py.bak create mode 100644 tribler-mod/Tribler/Core/DecentralizedTracking/ut_pex.py create mode 100644 tribler-mod/Tribler/Core/DecentralizedTracking/ut_pex.py.bak create mode 100644 tribler-mod/Tribler/Core/Download.py create mode 100644 tribler-mod/Tribler/Core/Download.py.bak create mode 100644 tribler-mod/Tribler/Core/DownloadConfig.py create mode 100644 tribler-mod/Tribler/Core/DownloadConfig.py.bak create mode 100644 tribler-mod/Tribler/Core/DownloadState.py create mode 100644 tribler-mod/Tribler/Core/DownloadState.py.bak create mode 100644 tribler-mod/Tribler/Core/LiveSourceAuthConfig.py create mode 100644 tribler-mod/Tribler/Core/LiveSourceAuthConfig.py.bak create mode 100644 
tribler-mod/Tribler/Core/Merkle/__init__.py create mode 100644 tribler-mod/Tribler/Core/Merkle/__init__.py.bak create mode 100644 tribler-mod/Tribler/Core/Merkle/merkle.py create mode 100644 tribler-mod/Tribler/Core/Merkle/merkle.py.bak create mode 100644 tribler-mod/Tribler/Core/NATFirewall/ConnectionCheck.py create mode 100644 tribler-mod/Tribler/Core/NATFirewall/ConnectionCheck.py.bak create mode 100644 tribler-mod/Tribler/Core/NATFirewall/DialbackMsgHandler.py create mode 100644 tribler-mod/Tribler/Core/NATFirewall/DialbackMsgHandler.py.bak create mode 100644 tribler-mod/Tribler/Core/NATFirewall/NatCheck.py create mode 100644 tribler-mod/Tribler/Core/NATFirewall/NatCheck.py.bak create mode 100644 tribler-mod/Tribler/Core/NATFirewall/NatCheckMsgHandler.py create mode 100644 tribler-mod/Tribler/Core/NATFirewall/NatCheckMsgHandler.py.bak create mode 100644 tribler-mod/Tribler/Core/NATFirewall/NatTraversal.py create mode 100644 tribler-mod/Tribler/Core/NATFirewall/NatTraversal.py.bak create mode 100644 tribler-mod/Tribler/Core/NATFirewall/ReturnConnHandler.py create mode 100644 tribler-mod/Tribler/Core/NATFirewall/ReturnConnHandler.py.bak create mode 100644 tribler-mod/Tribler/Core/NATFirewall/TimeoutCheck.py create mode 100644 tribler-mod/Tribler/Core/NATFirewall/TimeoutCheck.py.bak create mode 100644 tribler-mod/Tribler/Core/NATFirewall/UPnPThread.py create mode 100644 tribler-mod/Tribler/Core/NATFirewall/UPnPThread.py.bak create mode 100644 tribler-mod/Tribler/Core/NATFirewall/__init__.py create mode 100644 tribler-mod/Tribler/Core/NATFirewall/__init__.py.bak create mode 100644 tribler-mod/Tribler/Core/NATFirewall/guessip.py create mode 100644 tribler-mod/Tribler/Core/NATFirewall/guessip.py.bak create mode 100644 tribler-mod/Tribler/Core/NATFirewall/upnp.py create mode 100644 tribler-mod/Tribler/Core/NATFirewall/upnp.py.bak create mode 100644 tribler-mod/Tribler/Core/Overlay/MetadataHandler.py create mode 100644 tribler-mod/Tribler/Core/Overlay/MetadataHandler.py.bak create mode 100644 tribler-mod/Tribler/Core/Overlay/OverlayApps.py create mode 100644 tribler-mod/Tribler/Core/Overlay/OverlayApps.py.bak create mode 100644 tribler-mod/Tribler/Core/Overlay/OverlayThreadingBridge.py create mode 100644 tribler-mod/Tribler/Core/Overlay/OverlayThreadingBridge.py.bak create mode 100644 tribler-mod/Tribler/Core/Overlay/SecureOverlay.py create mode 100644 tribler-mod/Tribler/Core/Overlay/SecureOverlay.py.bak create mode 100644 tribler-mod/Tribler/Core/Overlay/__init__.py create mode 100644 tribler-mod/Tribler/Core/Overlay/__init__.py.bak create mode 100644 tribler-mod/Tribler/Core/Overlay/permid.py create mode 100644 tribler-mod/Tribler/Core/Overlay/permid.py.bak create mode 100644 tribler-mod/Tribler/Core/RequestPolicy.py create mode 100644 tribler-mod/Tribler/Core/RequestPolicy.py.bak create mode 100644 tribler-mod/Tribler/Core/Search/KeywordSearch.py create mode 100644 tribler-mod/Tribler/Core/Search/KeywordSearch.py.bak create mode 100644 tribler-mod/Tribler/Core/Search/Reranking.py create mode 100644 tribler-mod/Tribler/Core/Search/Reranking.py.bak create mode 100644 tribler-mod/Tribler/Core/Search/SearchManager.py create mode 100644 tribler-mod/Tribler/Core/Search/SearchManager.py.bak create mode 100644 tribler-mod/Tribler/Core/Search/__init__.py create mode 100644 tribler-mod/Tribler/Core/Search/__init__.py.bak create mode 100644 tribler-mod/Tribler/Core/Session.py create mode 100644 tribler-mod/Tribler/Core/Session.py.bak create mode 100644 tribler-mod/Tribler/Core/SessionConfig.py create 
mode 100644 tribler-mod/Tribler/Core/SessionConfig.py.bak create mode 100644 tribler-mod/Tribler/Core/SocialNetwork/FriendshipMsgHandler.py create mode 100644 tribler-mod/Tribler/Core/SocialNetwork/FriendshipMsgHandler.py.bak create mode 100644 tribler-mod/Tribler/Core/SocialNetwork/OverlapMsgHandler.py create mode 100644 tribler-mod/Tribler/Core/SocialNetwork/OverlapMsgHandler.py.bak create mode 100644 tribler-mod/Tribler/Core/SocialNetwork/RemoteQueryMsgHandler.py create mode 100644 tribler-mod/Tribler/Core/SocialNetwork/RemoteQueryMsgHandler.py.bak create mode 100644 tribler-mod/Tribler/Core/SocialNetwork/RemoteTorrentHandler.py create mode 100644 tribler-mod/Tribler/Core/SocialNetwork/RemoteTorrentHandler.py.bak create mode 100644 tribler-mod/Tribler/Core/SocialNetwork/SocialNetworkMsgHandler.py create mode 100644 tribler-mod/Tribler/Core/SocialNetwork/SocialNetworkMsgHandler.py.bak create mode 100644 tribler-mod/Tribler/Core/SocialNetwork/__init__.py create mode 100644 tribler-mod/Tribler/Core/SocialNetwork/__init__.py.bak create mode 100644 tribler-mod/Tribler/Core/Statistics/Crawler.py create mode 100644 tribler-mod/Tribler/Core/Statistics/Crawler.py.bak create mode 100644 tribler-mod/Tribler/Core/Statistics/DatabaseCrawler.py create mode 100644 tribler-mod/Tribler/Core/Statistics/DatabaseCrawler.py.bak create mode 100644 tribler-mod/Tribler/Core/Statistics/FriendshipCrawler.py create mode 100644 tribler-mod/Tribler/Core/Statistics/FriendshipCrawler.py.bak create mode 100644 tribler-mod/Tribler/Core/Statistics/Logger.py create mode 100644 tribler-mod/Tribler/Core/Statistics/Logger.py.bak create mode 100644 tribler-mod/Tribler/Core/Statistics/SeedingStatsCrawler.py create mode 100644 tribler-mod/Tribler/Core/Statistics/SeedingStatsCrawler.py.bak create mode 100644 tribler-mod/Tribler/Core/Statistics/VideoPlaybackCrawler.py create mode 100644 tribler-mod/Tribler/Core/Statistics/VideoPlaybackCrawler.py.bak create mode 100644 tribler-mod/Tribler/Core/Statistics/__init__.py create mode 100644 tribler-mod/Tribler/Core/Statistics/__init__.py.bak create mode 100644 tribler-mod/Tribler/Core/Statistics/crawler.txt create mode 100644 tribler-mod/Tribler/Core/Statistics/tribler_friendship_stats_sdb.sql create mode 100644 tribler-mod/Tribler/Core/Statistics/tribler_seedingstats_sdb.sql create mode 100644 tribler-mod/Tribler/Core/Statistics/tribler_videoplayback_stats.sql create mode 100644 tribler-mod/Tribler/Core/TorrentDef.py create mode 100644 tribler-mod/Tribler/Core/TorrentDef.py.bak create mode 100644 tribler-mod/Tribler/Core/Utilities/__init__.py create mode 100644 tribler-mod/Tribler/Core/Utilities/__init__.py.bak create mode 100644 tribler-mod/Tribler/Core/Utilities/timeouturlopen.py create mode 100644 tribler-mod/Tribler/Core/Utilities/timeouturlopen.py.bak create mode 100644 tribler-mod/Tribler/Core/Utilities/unicode.py create mode 100644 tribler-mod/Tribler/Core/Utilities/unicode.py.bak create mode 100644 tribler-mod/Tribler/Core/Utilities/utilities.py create mode 100644 tribler-mod/Tribler/Core/Utilities/utilities.py.bak create mode 100644 tribler-mod/Tribler/Core/Utilities/win32regchecker.py create mode 100644 tribler-mod/Tribler/Core/Utilities/win32regchecker.py.bak create mode 100644 tribler-mod/Tribler/Core/Video/LiveSourceAuth.py create mode 100644 tribler-mod/Tribler/Core/Video/LiveSourceAuth.py.bak create mode 100644 tribler-mod/Tribler/Core/Video/MovieTransport.py create mode 100644 tribler-mod/Tribler/Core/Video/MovieTransport.py.bak create mode 100644 
tribler-mod/Tribler/Core/Video/PiecePickerStreaming.py create mode 100644 tribler-mod/Tribler/Core/Video/PiecePickerStreaming.py.bak create mode 100644 tribler-mod/Tribler/Core/Video/VideoOnDemand.py create mode 100644 tribler-mod/Tribler/Core/Video/VideoOnDemand.py.bak create mode 100644 tribler-mod/Tribler/Core/Video/VideoSource.py create mode 100644 tribler-mod/Tribler/Core/Video/VideoSource.py.bak create mode 100644 tribler-mod/Tribler/Core/Video/VideoStatus.py create mode 100644 tribler-mod/Tribler/Core/Video/VideoStatus.py.bak create mode 100644 tribler-mod/Tribler/Core/Video/__init__.py create mode 100644 tribler-mod/Tribler/Core/Video/__init__.py.bak create mode 100644 tribler-mod/Tribler/Core/__init__.py create mode 100644 tribler-mod/Tribler/Core/__init__.py.bak create mode 100644 tribler-mod/Tribler/Core/defaults.py create mode 100644 tribler-mod/Tribler/Core/defaults.py.bak create mode 100644 tribler-mod/Tribler/Core/exceptions.py create mode 100644 tribler-mod/Tribler/Core/exceptions.py.bak create mode 100644 tribler-mod/Tribler/Core/osutils.py create mode 100644 tribler-mod/Tribler/Core/osutils.py.bak create mode 100644 tribler-mod/Tribler/Core/simpledefs.py create mode 100644 tribler-mod/Tribler/Core/simpledefs.py.bak create mode 100644 tribler-mod/Tribler/Core/superpeer.txt create mode 100644 tribler-mod/Tribler/Debug/__init__.py create mode 100644 tribler-mod/Tribler/Debug/__init__.py.bak create mode 100644 tribler-mod/Tribler/Debug/console.py create mode 100644 tribler-mod/Tribler/Debug/console.py.bak create mode 100644 tribler-mod/Tribler/Images/SwarmPlayerIcon.ico create mode 100644 tribler-mod/Tribler/Images/SwarmPlayerLogo.png create mode 100644 tribler-mod/Tribler/Images/SwarmPluginIcon.ico create mode 100644 tribler-mod/Tribler/Images/SwarmPluginLogo.png create mode 100644 tribler-mod/Tribler/Images/background.png create mode 100644 tribler-mod/Tribler/Images/fullScreen.png create mode 100644 tribler-mod/Tribler/Images/fullScreen_hover.png create mode 100644 tribler-mod/Tribler/Images/logoTribler.png create mode 100644 tribler-mod/Tribler/Images/logoTribler_small.png create mode 100644 tribler-mod/Tribler/Images/mute.png create mode 100644 tribler-mod/Tribler/Images/mute_hover.png create mode 100644 tribler-mod/Tribler/Images/pause.png create mode 100644 tribler-mod/Tribler/Images/pause_hover.png create mode 100644 tribler-mod/Tribler/Images/play.png create mode 100644 tribler-mod/Tribler/Images/play_hover.png create mode 100644 tribler-mod/Tribler/Images/save.png create mode 100644 tribler-mod/Tribler/Images/saveDisabled.png create mode 100644 tribler-mod/Tribler/Images/saveDisabled_hover.png create mode 100644 tribler-mod/Tribler/Images/save_hover.png create mode 100644 tribler-mod/Tribler/Images/sliderDot.png create mode 100644 tribler-mod/Tribler/Images/sliderVolume.png create mode 100644 tribler-mod/Tribler/Images/splash.jpg create mode 100644 tribler-mod/Tribler/Images/torrenticon.ico create mode 100644 tribler-mod/Tribler/Images/tribler.ico create mode 100644 tribler-mod/Tribler/Images/volume.png create mode 100644 tribler-mod/Tribler/Images/volume_hover.png create mode 100644 tribler-mod/Tribler/LICENSE.txt create mode 100644 tribler-mod/Tribler/Lang/__init__.py create mode 100644 tribler-mod/Tribler/Lang/__init__.py.bak create mode 100644 tribler-mod/Tribler/Lang/english.lang create mode 100644 tribler-mod/Tribler/Lang/lang.py create mode 100644 tribler-mod/Tribler/Lang/lang.py.bak create mode 100644 tribler-mod/Tribler/Main/Build/Mac/Info.plist create mode 
100644 tribler-mod/Tribler/Main/Build/Mac/Makefile create mode 100644 tribler-mod/Tribler/Main/Build/Mac/SLAResources.rsrc create mode 100644 tribler-mod/Tribler/Main/Build/Mac/TriblerDoc.icns create mode 100644 tribler-mod/Tribler/Main/Build/Mac/VolumeIcon.icns create mode 100644 tribler-mod/Tribler/Main/Build/Mac/background.png create mode 100644 tribler-mod/Tribler/Main/Build/Mac/icon_sources/appicon.png create mode 100644 tribler-mod/Tribler/Main/Build/Mac/icon_sources/appicon.psd create mode 100644 tribler-mod/Tribler/Main/Build/Mac/icon_sources/default_document.png create mode 100644 tribler-mod/Tribler/Main/Build/Mac/icon_sources/default_volumeicon.png create mode 100644 tribler-mod/Tribler/Main/Build/Mac/icon_sources/dmgicon.png create mode 100644 tribler-mod/Tribler/Main/Build/Mac/icon_sources/dmgicon.psd create mode 100644 tribler-mod/Tribler/Main/Build/Mac/icon_sources/docicon.png create mode 100644 tribler-mod/Tribler/Main/Build/Mac/icon_sources/docicon.psd create mode 100755 tribler-mod/Tribler/Main/Build/Mac/mkinstalldirs create mode 100755 tribler-mod/Tribler/Main/Build/Mac/process_libs create mode 100644 tribler-mod/Tribler/Main/Build/Mac/setuptriblermac.py create mode 100644 tribler-mod/Tribler/Main/Build/Mac/setuptriblermac.py.bak create mode 100755 tribler-mod/Tribler/Main/Build/Mac/smart_lipo_merge create mode 100755 tribler-mod/Tribler/Main/Build/Mac/smart_lipo_thin create mode 100644 tribler-mod/Tribler/Main/Build/Mac/tribler.icns create mode 100755 tribler-mod/Tribler/Main/Build/Mac/triblermac.command create mode 100644 tribler-mod/Tribler/Main/Build/Mac/vlc-macosx-compile.patch create mode 100644 tribler-mod/Tribler/Main/Build/Ubuntu/changelog create mode 100644 tribler-mod/Tribler/Main/Build/Ubuntu/compat create mode 100644 tribler-mod/Tribler/Main/Build/Ubuntu/control create mode 100644 tribler-mod/Tribler/Main/Build/Ubuntu/copyright create mode 100644 tribler-mod/Tribler/Main/Build/Ubuntu/files create mode 100644 tribler-mod/Tribler/Main/Build/Ubuntu/prerm create mode 100644 tribler-mod/Tribler/Main/Build/Ubuntu/pycompat create mode 100755 tribler-mod/Tribler/Main/Build/Ubuntu/rules create mode 100644 tribler-mod/Tribler/Main/Build/Ubuntu/tribler.1 create mode 100644 tribler-mod/Tribler/Main/Build/Ubuntu/tribler.desktop create mode 100644 tribler-mod/Tribler/Main/Build/Ubuntu/tribler.manpages create mode 100644 tribler-mod/Tribler/Main/Build/Ubuntu/tribler.menu create mode 100644 tribler-mod/Tribler/Main/Build/Ubuntu/tribler.postinst.debhelper create mode 100644 tribler-mod/Tribler/Main/Build/Ubuntu/tribler.postrm.debhelper create mode 100644 tribler-mod/Tribler/Main/Build/Ubuntu/tribler.prerm.debhelper create mode 100755 tribler-mod/Tribler/Main/Build/Ubuntu/tribler.sh create mode 100644 tribler-mod/Tribler/Main/Build/Ubuntu/tribler.substvars create mode 100644 tribler-mod/Tribler/Main/Build/Ubuntu/tribler.xpm create mode 100644 tribler-mod/Tribler/Main/Build/Ubuntu/tribler_big.xpm create mode 100644 tribler-mod/Tribler/Main/Build/Win32/heading.bmp create mode 100644 tribler-mod/Tribler/Main/Build/Win32/setuptribler.py create mode 100644 tribler-mod/Tribler/Main/Build/Win32/setuptribler.py.bak create mode 100644 tribler-mod/Tribler/Main/Build/Win32/tribler.exe.manifest create mode 100644 tribler-mod/Tribler/Main/Build/Win32/tribler.nsi create mode 100644 tribler-mod/Tribler/Main/Dialogs/BandwidthSelector.py create mode 100644 tribler-mod/Tribler/Main/Dialogs/BandwidthSelector.py.bak create mode 100644 tribler-mod/Tribler/Main/Dialogs/GUITaskQueue.py create mode 
100644 tribler-mod/Tribler/Main/Dialogs/GUITaskQueue.py.bak create mode 100644 tribler-mod/Tribler/Main/Dialogs/TorrentMaker.py create mode 100644 tribler-mod/Tribler/Main/Dialogs/TorrentMaker.py.bak create mode 100644 tribler-mod/Tribler/Main/Dialogs/__init__.py create mode 100644 tribler-mod/Tribler/Main/Dialogs/__init__.py.bak create mode 100644 tribler-mod/Tribler/Main/Dialogs/abcoption.py create mode 100644 tribler-mod/Tribler/Main/Dialogs/abcoption.py.bak create mode 100644 tribler-mod/Tribler/Main/Dialogs/aboutme.py create mode 100644 tribler-mod/Tribler/Main/Dialogs/aboutme.py.bak create mode 100644 tribler-mod/Tribler/Main/Dialogs/common.py create mode 100644 tribler-mod/Tribler/Main/Dialogs/common.py.bak create mode 100644 tribler-mod/Tribler/Main/Dialogs/dlhelperframe.py create mode 100644 tribler-mod/Tribler/Main/Dialogs/dlhelperframe.py.bak create mode 100644 tribler-mod/Tribler/Main/Dialogs/makefriends.py create mode 100644 tribler-mod/Tribler/Main/Dialogs/makefriends.py.bak create mode 100644 tribler-mod/Tribler/Main/Dialogs/regdialog.py create mode 100644 tribler-mod/Tribler/Main/Dialogs/regdialog.py.bak create mode 100644 tribler-mod/Tribler/Main/Dialogs/socnetmyinfo.py create mode 100644 tribler-mod/Tribler/Main/Dialogs/socnetmyinfo.py.bak create mode 100644 tribler-mod/Tribler/Main/Dialogs/systray.py create mode 100644 tribler-mod/Tribler/Main/Dialogs/systray.py.bak create mode 100644 tribler-mod/Tribler/Main/Utility/__init__.py create mode 100644 tribler-mod/Tribler/Main/Utility/__init__.py.bak create mode 100644 tribler-mod/Tribler/Main/Utility/compat.py create mode 100644 tribler-mod/Tribler/Main/Utility/compat.py.bak create mode 100644 tribler-mod/Tribler/Main/Utility/constants.py create mode 100644 tribler-mod/Tribler/Main/Utility/constants.py.bak create mode 100644 tribler-mod/Tribler/Main/Utility/getscrapedata.py create mode 100644 tribler-mod/Tribler/Main/Utility/getscrapedata.py.bak create mode 100644 tribler-mod/Tribler/Main/Utility/helpers.py create mode 100644 tribler-mod/Tribler/Main/Utility/helpers.py.bak create mode 100644 tribler-mod/Tribler/Main/Utility/regchecker.py create mode 100644 tribler-mod/Tribler/Main/Utility/regchecker.py.bak create mode 100644 tribler-mod/Tribler/Main/Utility/utility.py create mode 100644 tribler-mod/Tribler/Main/Utility/utility.py.bak create mode 100644 tribler-mod/Tribler/Main/__init__.py create mode 100644 tribler-mod/Tribler/Main/__init__.py.bak create mode 100644 tribler-mod/Tribler/Main/crawler.py create mode 100644 tribler-mod/Tribler/Main/crawler.py.bak create mode 100644 tribler-mod/Tribler/Main/globals.py create mode 100644 tribler-mod/Tribler/Main/globals.py.bak create mode 100644 tribler-mod/Tribler/Main/metadata-injector.py create mode 100644 tribler-mod/Tribler/Main/metadata-injector.py.bak create mode 100644 tribler-mod/Tribler/Main/notification.py create mode 100644 tribler-mod/Tribler/Main/notification.py.bak create mode 100644 tribler-mod/Tribler/Main/tribler.py create mode 100644 tribler-mod/Tribler/Main/tribler.py.bak create mode 100644 tribler-mod/Tribler/Main/vwxGUI/ColumnHeader.py create mode 100644 tribler-mod/Tribler/Main/vwxGUI/ColumnHeader.py.bak create mode 100644 tribler-mod/Tribler/Main/vwxGUI/FilesItemDetailsSummary.py create mode 100644 tribler-mod/Tribler/Main/vwxGUI/FilesItemDetailsSummary.py.bak create mode 100644 tribler-mod/Tribler/Main/vwxGUI/FriendsItemPanel.py create mode 100644 tribler-mod/Tribler/Main/vwxGUI/FriendsItemPanel.py.bak create mode 100644 
tribler-mod/Tribler/Main/vwxGUI/FriendshipManager.py create mode 100644 tribler-mod/Tribler/Main/vwxGUI/FriendshipManager.py.bak create mode 100644 tribler-mod/Tribler/Main/vwxGUI/GridState.py create mode 100644 tribler-mod/Tribler/Main/vwxGUI/GridState.py.bak create mode 100644 tribler-mod/Tribler/Main/vwxGUI/GuiUtility.py create mode 100644 tribler-mod/Tribler/Main/vwxGUI/GuiUtility.py.bak create mode 100644 tribler-mod/Tribler/Main/vwxGUI/IconsManager.py create mode 100644 tribler-mod/Tribler/Main/vwxGUI/IconsManager.py.bak create mode 100644 tribler-mod/Tribler/Main/vwxGUI/LibraryItemPanel.py create mode 100644 tribler-mod/Tribler/Main/vwxGUI/LibraryItemPanel.py.bak create mode 100644 tribler-mod/Tribler/Main/vwxGUI/LoadingDetails.py create mode 100644 tribler-mod/Tribler/Main/vwxGUI/LoadingDetails.py.bak create mode 100644 tribler-mod/Tribler/Main/vwxGUI/MainFrame.py create mode 100644 tribler-mod/Tribler/Main/vwxGUI/MainFrame.py.bak create mode 100644 tribler-mod/Tribler/Main/vwxGUI/MyFrame.xrc create mode 100644 tribler-mod/Tribler/Main/vwxGUI/MyPlayer.xrc create mode 100644 tribler-mod/Tribler/Main/vwxGUI/MyText.py create mode 100644 tribler-mod/Tribler/Main/vwxGUI/MyText.py.bak create mode 100644 tribler-mod/Tribler/Main/vwxGUI/NewStaticText.py create mode 100644 tribler-mod/Tribler/Main/vwxGUI/NewStaticText.py.bak create mode 100644 tribler-mod/Tribler/Main/vwxGUI/PersonsItemDetailsSummary.py create mode 100644 tribler-mod/Tribler/Main/vwxGUI/PersonsItemDetailsSummary.py.bak create mode 100644 tribler-mod/Tribler/Main/vwxGUI/PersonsItemPanel.py create mode 100644 tribler-mod/Tribler/Main/vwxGUI/PersonsItemPanel.py.bak create mode 100644 tribler-mod/Tribler/Main/vwxGUI/SearchDetails.py create mode 100644 tribler-mod/Tribler/Main/vwxGUI/SearchDetails.py.bak create mode 100644 tribler-mod/Tribler/Main/vwxGUI/SearchGridManager.py create mode 100644 tribler-mod/Tribler/Main/vwxGUI/SearchGridManager.py.bak create mode 100644 tribler-mod/Tribler/Main/vwxGUI/SubscriptionsItemPanel.py create mode 100644 tribler-mod/Tribler/Main/vwxGUI/SubscriptionsItemPanel.py.bak create mode 100644 tribler-mod/Tribler/Main/vwxGUI/Tab_graphs.xrc create mode 100644 tribler-mod/Tribler/Main/vwxGUI/TextButton.py create mode 100644 tribler-mod/Tribler/Main/vwxGUI/TextButton.py.bak create mode 100644 tribler-mod/Tribler/Main/vwxGUI/TextEdit.py create mode 100644 tribler-mod/Tribler/Main/vwxGUI/TextEdit.py.bak create mode 100644 tribler-mod/Tribler/Main/vwxGUI/TopSearchPanel.py create mode 100644 tribler-mod/Tribler/Main/vwxGUI/TopSearchPanel.py.bak create mode 100644 tribler-mod/Tribler/Main/vwxGUI/TopSearchPanel.xrc create mode 100644 tribler-mod/Tribler/Main/vwxGUI/TriblerProgressbar.py create mode 100644 tribler-mod/Tribler/Main/vwxGUI/TriblerProgressbar.py.bak create mode 100644 tribler-mod/Tribler/Main/vwxGUI/TriblerStyles.py create mode 100644 tribler-mod/Tribler/Main/vwxGUI/TriblerStyles.py.bak create mode 100644 tribler-mod/Tribler/Main/vwxGUI/__init__.py create mode 100644 tribler-mod/Tribler/Main/vwxGUI/__init__.py.bak create mode 100644 tribler-mod/Tribler/Main/vwxGUI/bgPanel.py create mode 100644 tribler-mod/Tribler/Main/vwxGUI/bgPanel.py.bak create mode 100644 tribler-mod/Tribler/Main/vwxGUI/btn_DetailsHeader.py create mode 100644 tribler-mod/Tribler/Main/vwxGUI/btn_DetailsHeader.py.bak create mode 100644 tribler-mod/Tribler/Main/vwxGUI/deleteTorrent.xrc create mode 100644 tribler-mod/Tribler/Main/vwxGUI/dummy.xrc create mode 100644 tribler-mod/Tribler/Main/vwxGUI/filesDetails.xrc create mode 
100644 tribler-mod/Tribler/Main/vwxGUI/filesItemPanel.py create mode 100644 tribler-mod/Tribler/Main/vwxGUI/filesItemPanel.py.bak create mode 100644 tribler-mod/Tribler/Main/vwxGUI/filesTab_files.py create mode 100644 tribler-mod/Tribler/Main/vwxGUI/filesTab_files.py.bak create mode 100644 tribler-mod/Tribler/Main/vwxGUI/filesTab_files.xrc create mode 100644 tribler-mod/Tribler/Main/vwxGUI/filterStandard.py create mode 100644 tribler-mod/Tribler/Main/vwxGUI/filterStandard.py.bak create mode 100644 tribler-mod/Tribler/Main/vwxGUI/font.py create mode 100644 tribler-mod/Tribler/Main/vwxGUI/font.py.bak create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/5.0/None.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/5.0/SRgradient.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/5.0/SRgradient_new.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/5.0/SRgradient_new_win.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/5.0/SRind.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/5.0/SRind2.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/5.0/SRind2_win.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/5.0/SRindicator.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/5.0/SRindicator_left.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/5.0/SRindicator_right.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/5.0/advanced_filtering.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/5.0/average_win.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/5.0/black_spacer.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/5.0/files_friends.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/5.0/go.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/5.0/good_win.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/5.0/help.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/5.0/help_win.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/5.0/iconSaved.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/5.0/left.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/5.0/line3.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/5.0/poor_win.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/5.0/right.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/5.0/search_files.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/5.0/search_new.gif create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/5.0/search_new_windows.gif create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/5.0/seperator.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/5.0/seperator_win.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/5.0/sharing_reputation.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/5.0/sharing_reputation_win.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/5.0/top_image.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/5.0/video.gif create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/5.0/welcome.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/5.0/wrapCorBL.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/5.0/wrapCorBR.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/5.0/wrapCorTL.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/5.0/wrapCorTR.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/Save.png create mode 100644 
tribler-mod/Tribler/Main/vwxGUI/images/Save_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/Search_new.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/Search_new_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/Search_new_win.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/Search_new_win_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/basic.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/basicEnabled.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/basicEnabled_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/basic_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/bcicon.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/black_top_left.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/black_top_right.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/blue_long.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/browse.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/browse_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/defaultThumb.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/defaultThumbFriends.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/defaultThumbL.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/defaultThumbL_audio.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/defaultThumbL_compressed.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/defaultThumbL_hidden.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/defaultThumbL_other.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/defaultThumbL_video.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/defaultThumbL_xxx.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/defaultThumbLibrary.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/defaultThumbPeer.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/defaultThumbPeerS.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/defaultThumbS_audio.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/defaultThumbS_compressed.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/defaultThumbS_hidden.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/defaultThumbS_other.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/defaultThumbS_video.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/defaultThumbS_xxx.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/defaultThumb_audio.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/defaultThumb_compressed.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/defaultThumb_hidden.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/defaultThumb_other.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/defaultThumb_video.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/defaultThumb_xxx.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/download.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/download_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/edit.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/edit_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/edit_clicked_old.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/edit_old.png create mode 100644 
tribler-mod/Tribler/Main/vwxGUI/images/fake.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/fake_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/familyfilter.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/familyfilterEnabled.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/familyfilterEnabled_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/familyfilter_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/familyfilter_win.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/familyfilter_winEnabled.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/familyfilter_winEnabled_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/familyfilter_win_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/fiftyUp.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/fiftyUp_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/firewallStatus_state1.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/firewallStatus_state2.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/firewallStatus_state3.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/go.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/goEnabled.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/go_backup.png create mode 100755 tribler-mod/Tribler/Main/vwxGUI/images/go_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/hline.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/hundredUp.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/hundredUp_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/iconSaved_state3.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/library_play.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/library_play_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/line.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/line2.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/line3.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/line4.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/logo4video.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/logo4video2.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/logo4video2_win.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/mainBG.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/mt.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/mtEnabled.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/mtEnabled_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/mt_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/mute.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/mute_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/my_files.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/my_filesEnabled.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/my_filesEnabled_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/my_files_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/my_files_win.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/my_files_winBlank.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/my_files_winEnabled.png create mode 100644 
tribler-mod/Tribler/Main/vwxGUI/images/my_files_winEnabled_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/my_files_win_clicked.png create mode 100755 tribler-mod/Tribler/Main/vwxGUI/images/nextpage.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/nextpage2.png create mode 100755 tribler-mod/Tribler/Main/vwxGUI/images/nextpageEnabled.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/nextpageEnabled_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/nextpage_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/play_button.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/play_button_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/playbig.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/playbigEnabled.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/playbigEnabled_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/playbig_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/playsmallEnabled.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/playsmallEnabled_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/popularity1.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/popularity10.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/popularity2.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/popularity3.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/popularity4.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/popularity5.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/popularity6.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/popularity7.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/popularity8.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/popularity9.png create mode 100755 tribler-mod/Tribler/Main/vwxGUI/images/prevpage.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/prevpage2.png create mode 100755 tribler-mod/Tribler/Main/vwxGUI/images/prevpageEnabled.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/prevpageEnabled_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/prevpage_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/real.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/real_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/remove.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/remove_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/results_win.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/results_winBlank.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/results_winEnabled.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/search.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/search_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/select_files.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/select_files_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/settings.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/settingsEnabled.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/settingsEnabled_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/settings_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/settings_win.png create mode 100644 
tribler-mod/Tribler/Main/vwxGUI/images/settings_winBlank.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/settings_winEnabled.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/settings_winEnabled_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/settings_win_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/seventyfiveDown.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/seventyfiveDown_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/sixhundreddDown.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/sixhundreddDown_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/sr_average.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/sr_good.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/sr_poor.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/tenUp.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/tenUp_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/threehundredDown.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/threehundredDown_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/thumb.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/thumbField.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/topBG.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/top_bg.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/top_search.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/top_search_grey.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/unlimitedDown.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/unlimitedDown_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/unlimitedUp.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/unlimitedUp_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/zeroDown.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/zeroDown_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/zeroUp.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/images/zeroUp_clicked.png create mode 100644 tribler-mod/Tribler/Main/vwxGUI/libraryDetails.xrc create mode 100644 tribler-mod/Tribler/Main/vwxGUI/personsDetails.xrc create mode 100644 tribler-mod/Tribler/Main/vwxGUI/personsItem.xrc create mode 100644 tribler-mod/Tribler/Main/vwxGUI/personsTab_advanced.xrc create mode 100644 tribler-mod/Tribler/Main/vwxGUI/profileDetails.xrc create mode 100644 tribler-mod/Tribler/Main/vwxGUI/profileDetails_Download.xrc create mode 100644 tribler-mod/Tribler/Main/vwxGUI/profileDetails_Files.xrc create mode 100644 tribler-mod/Tribler/Main/vwxGUI/profileDetails_Persons.xrc create mode 100644 tribler-mod/Tribler/Main/vwxGUI/profileDetails_Presence.xrc create mode 100644 tribler-mod/Tribler/Main/vwxGUI/profileDetails_Quality.xrc create mode 100644 tribler-mod/Tribler/Main/vwxGUI/settingsOverview.xrc create mode 100644 tribler-mod/Tribler/Main/vwxGUI/settingsOverviewPanel.py create mode 100644 tribler-mod/Tribler/Main/vwxGUI/settingsOverviewPanel.py.bak create mode 100644 tribler-mod/Tribler/Main/vwxGUI/standardDetails.py create mode 100644 tribler-mod/Tribler/Main/vwxGUI/standardDetails.py.bak create mode 100644 tribler-mod/Tribler/Main/vwxGUI/standardFilter.py create mode 100644 tribler-mod/Tribler/Main/vwxGUI/standardFilter.py.bak create mode 100644 tribler-mod/Tribler/Main/vwxGUI/standardGrid.py create mode 100644 
tribler-mod/Tribler/Main/vwxGUI/standardGrid.py.bak create mode 100644 tribler-mod/Tribler/Main/vwxGUI/standardOverview.py create mode 100644 tribler-mod/Tribler/Main/vwxGUI/standardOverview.py.bak create mode 100644 tribler-mod/Tribler/Main/vwxGUI/standardPager.py create mode 100644 tribler-mod/Tribler/Main/vwxGUI/standardPager.py.bak create mode 100644 tribler-mod/Tribler/Main/vwxGUI/statusDownloads.xrc create mode 100644 tribler-mod/Tribler/Main/vwxGUI/subscriptionsDetails.xrc create mode 100644 tribler-mod/Tribler/Main/vwxGUI/tribler_List.py create mode 100644 tribler-mod/Tribler/Main/vwxGUI/tribler_List.py.bak create mode 100644 tribler-mod/Tribler/Main/vwxGUI/tribler_topButton.py create mode 100644 tribler-mod/Tribler/Main/vwxGUI/tribler_topButton.py.bak create mode 100644 tribler-mod/Tribler/Main/vwxGUI/uploadTab_details.xrc create mode 100644 tribler-mod/Tribler/Main/vwxGUI/web2.py create mode 100644 tribler-mod/Tribler/Main/vwxGUI/web2.py.bak create mode 100644 tribler-mod/Tribler/Main/vwxGUI/zudeo_torrent_description.txt create mode 100644 tribler-mod/Tribler/Player/BaseApp.py create mode 100644 tribler-mod/Tribler/Player/BaseApp.py.bak create mode 100644 tribler-mod/Tribler/Player/Build/Mac/Info.plist create mode 100644 tribler-mod/Tribler/Player/Build/Mac/Makefile create mode 100644 tribler-mod/Tribler/Player/Build/Mac/SLAResources.rsrc create mode 100644 tribler-mod/Tribler/Player/Build/Mac/TriblerDoc.icns create mode 100644 tribler-mod/Tribler/Player/Build/Mac/VolumeIcon.icns create mode 100644 tribler-mod/Tribler/Player/Build/Mac/background.png create mode 100644 tribler-mod/Tribler/Player/Build/Mac/icon_sources/appicon.png create mode 100644 tribler-mod/Tribler/Player/Build/Mac/icon_sources/appicon.psd create mode 100644 tribler-mod/Tribler/Player/Build/Mac/icon_sources/default_document.png create mode 100644 tribler-mod/Tribler/Player/Build/Mac/icon_sources/default_volumeicon.png create mode 100644 tribler-mod/Tribler/Player/Build/Mac/icon_sources/dmgicon.png create mode 100644 tribler-mod/Tribler/Player/Build/Mac/icon_sources/dmgicon.psd create mode 100644 tribler-mod/Tribler/Player/Build/Mac/icon_sources/docicon.png create mode 100644 tribler-mod/Tribler/Player/Build/Mac/icon_sources/docicon.psd create mode 100755 tribler-mod/Tribler/Player/Build/Mac/mkinstalldirs create mode 100755 tribler-mod/Tribler/Player/Build/Mac/process_libs create mode 100644 tribler-mod/Tribler/Player/Build/Mac/setuptriblermac.py create mode 100644 tribler-mod/Tribler/Player/Build/Mac/setuptriblermac.py.bak create mode 100755 tribler-mod/Tribler/Player/Build/Mac/smart_lipo_merge create mode 100755 tribler-mod/Tribler/Player/Build/Mac/smart_lipo_thin create mode 100644 tribler-mod/Tribler/Player/Build/Mac/tribler.icns create mode 100644 tribler-mod/Tribler/Player/Build/Mac/vlc-macosx-compile.patch create mode 100644 tribler-mod/Tribler/Player/Build/Ubuntu/changelog create mode 100644 tribler-mod/Tribler/Player/Build/Ubuntu/compat create mode 100644 tribler-mod/Tribler/Player/Build/Ubuntu/control create mode 100644 tribler-mod/Tribler/Player/Build/Ubuntu/copyright create mode 100644 tribler-mod/Tribler/Player/Build/Ubuntu/files create mode 100644 tribler-mod/Tribler/Player/Build/Ubuntu/prerm create mode 100644 tribler-mod/Tribler/Player/Build/Ubuntu/pycompat create mode 100755 tribler-mod/Tribler/Player/Build/Ubuntu/rules create mode 100644 tribler-mod/Tribler/Player/Build/Ubuntu/swarmplayer.1 create mode 100644 tribler-mod/Tribler/Player/Build/Ubuntu/swarmplayer.desktop create mode 100644 
tribler-mod/Tribler/Player/Build/Ubuntu/swarmplayer.manpages create mode 100644 tribler-mod/Tribler/Player/Build/Ubuntu/swarmplayer.menu create mode 100644 tribler-mod/Tribler/Player/Build/Ubuntu/swarmplayer.postinst.debhelper create mode 100644 tribler-mod/Tribler/Player/Build/Ubuntu/swarmplayer.postrm.debhelper create mode 100755 tribler-mod/Tribler/Player/Build/Ubuntu/swarmplayer.sh create mode 100644 tribler-mod/Tribler/Player/Build/Ubuntu/swarmplayer.xpm create mode 100644 tribler-mod/Tribler/Player/Build/Ubuntu/swarmplayer_big.xpm create mode 100644 tribler-mod/Tribler/Player/Build/Win32/heading.bmp create mode 100644 tribler-mod/Tribler/Player/Build/Win32/setuptriblerplay.py create mode 100644 tribler-mod/Tribler/Player/Build/Win32/setuptriblerplay.py.bak create mode 100644 tribler-mod/Tribler/Player/Build/Win32/swarmplayer.exe.manifest create mode 100644 tribler-mod/Tribler/Player/Build/Win32/triblerplay.nsi create mode 100644 tribler-mod/Tribler/Player/Reporter.py create mode 100644 tribler-mod/Tribler/Player/Reporter.py.bak create mode 100644 tribler-mod/Tribler/Player/UtilityStub.py create mode 100644 tribler-mod/Tribler/Player/UtilityStub.py.bak create mode 100644 tribler-mod/Tribler/Player/__init__.py create mode 100644 tribler-mod/Tribler/Player/__init__.py.bak create mode 100644 tribler-mod/Tribler/Player/swarmplayer.py create mode 100644 tribler-mod/Tribler/Player/swarmplayer.py.bak create mode 100644 tribler-mod/Tribler/Player/systray.py create mode 100644 tribler-mod/Tribler/Player/systray.py.bak create mode 100644 tribler-mod/Tribler/Plugin/BackgroundProcess.py create mode 100644 tribler-mod/Tribler/Plugin/BackgroundProcess.py.bak create mode 100644 tribler-mod/Tribler/Plugin/Build/Win32/setupBGexe.py create mode 100644 tribler-mod/Tribler/Plugin/Build/Win32/setupBGexe.py.bak create mode 100644 tribler-mod/Tribler/Plugin/pluginemulator-http.py create mode 100644 tribler-mod/Tribler/Plugin/pluginemulator-http.py.bak create mode 100644 tribler-mod/Tribler/Plugin/pluginemulator.py create mode 100644 tribler-mod/Tribler/Plugin/pluginemulator.py.bak create mode 100644 tribler-mod/Tribler/Policies/RateManager.py create mode 100644 tribler-mod/Tribler/Policies/RateManager.py.bak create mode 100644 tribler-mod/Tribler/Policies/SeedingManager.py create mode 100644 tribler-mod/Tribler/Policies/SeedingManager.py.bak create mode 100644 tribler-mod/Tribler/Policies/UploadLimitation.py create mode 100644 tribler-mod/Tribler/Policies/UploadLimitation.py.bak create mode 100644 tribler-mod/Tribler/Policies/__init__.py create mode 100644 tribler-mod/Tribler/Policies/__init__.py.bak create mode 100644 tribler-mod/Tribler/Subscriptions/__init__.py create mode 100644 tribler-mod/Tribler/Subscriptions/__init__.py.bak create mode 100644 tribler-mod/Tribler/Subscriptions/rss_client.py create mode 100644 tribler-mod/Tribler/Subscriptions/rss_client.py.bak create mode 100644 tribler-mod/Tribler/Test/API/contentdir/file.avi create mode 100644 tribler-mod/Tribler/Test/API/contentdir/file.txt create mode 100644 tribler-mod/Tribler/Test/API/ec.pem create mode 100644 tribler-mod/Tribler/Test/API/ecpub.pem create mode 100644 tribler-mod/Tribler/Test/API/file.wmv create mode 100644 tribler-mod/Tribler/Test/API/file2.wmv create mode 100644 tribler-mod/Tribler/Test/API/test_api.bat create mode 100755 tribler-mod/Tribler/Test/API/test_api.sh create mode 100644 tribler-mod/Tribler/Test/API/test_remote_torrent.py create mode 100644 tribler-mod/Tribler/Test/API/test_remote_torrent.py.bak create mode 100644 
tribler-mod/Tribler/Test/API/test_seeding.py create mode 100644 tribler-mod/Tribler/Test/API/test_seeding.py.bak create mode 100644 tribler-mod/Tribler/Test/API/test_seeding_live.py create mode 100644 tribler-mod/Tribler/Test/API/test_seeding_live.py.bak create mode 100644 tribler-mod/Tribler/Test/API/test_seeding_vod.py create mode 100644 tribler-mod/Tribler/Test/API/test_seeding_vod.py.bak create mode 100644 tribler-mod/Tribler/Test/API/test_tdef.py create mode 100644 tribler-mod/Tribler/Test/API/test_tdef.py.bak create mode 100644 tribler-mod/Tribler/Test/API/test_tracking.py create mode 100644 tribler-mod/Tribler/Test/API/test_tracking.py.bak create mode 100644 tribler-mod/Tribler/Test/API/test_upload_limit.py create mode 100644 tribler-mod/Tribler/Test/API/test_upload_limit.py.bak create mode 100644 tribler-mod/Tribler/Test/API/test_vod.py create mode 100644 tribler-mod/Tribler/Test/API/test_vod.py.bak create mode 100644 tribler-mod/Tribler/Test/API/thumb.jpg create mode 100644 tribler-mod/Tribler/Test/TESTSUITE.txt create mode 100644 tribler-mod/Tribler/Test/__init__.py create mode 100644 tribler-mod/Tribler/Test/__init__.py.bak create mode 100644 tribler-mod/Tribler/Test/bencode.py create mode 100644 tribler-mod/Tribler/Test/bencode.py.bak create mode 100644 tribler-mod/Tribler/Test/btconn.py create mode 100644 tribler-mod/Tribler/Test/btconn.py.bak create mode 100644 tribler-mod/Tribler/Test/buddycast_data.py create mode 100644 tribler-mod/Tribler/Test/buddycast_data.py.bak create mode 100644 tribler-mod/Tribler/Test/extend_db_dir/bak_multiple.torrent create mode 100644 tribler-mod/Tribler/Test/extend_db_dir/bak_single.torrent create mode 100644 tribler-mod/Tribler/Test/extend_db_dir/bak_tribler.tar.gz create mode 100644 tribler-mod/Tribler/Test/extend_db_dir/bsddb.tar.gz create mode 100644 tribler-mod/Tribler/Test/extend_db_dir/superpeer120070902sp7001.log create mode 100644 tribler-mod/Tribler/Test/extend_db_dir/url_to_bsddb.txt create mode 100644 tribler-mod/Tribler/Test/extend_db_dir/url_to_tribler.sdb.txt create mode 100644 tribler-mod/Tribler/Test/extend_hs_dir/dummydata.merkle.torrent create mode 100644 tribler-mod/Tribler/Test/log_parser.py create mode 100644 tribler-mod/Tribler/Test/log_parser.py.bak create mode 100644 tribler-mod/Tribler/Test/olconn.py create mode 100644 tribler-mod/Tribler/Test/olconn.py.bak create mode 100644 tribler-mod/Tribler/Test/test.bat create mode 100644 tribler-mod/Tribler/Test/test.sh create mode 100644 tribler-mod/Tribler/Test/test_TimedTaskQueue.py create mode 100644 tribler-mod/Tribler/Test/test_TimedTaskQueue.py.bak create mode 100644 tribler-mod/Tribler/Test/test_as_server.py create mode 100644 tribler-mod/Tribler/Test/test_as_server.py.bak create mode 100644 tribler-mod/Tribler/Test/test_bartercast.py create mode 100644 tribler-mod/Tribler/Test/test_bartercast.py.bak create mode 100644 tribler-mod/Tribler/Test/test_bsddb2sqlite.py create mode 100644 tribler-mod/Tribler/Test/test_bsddb2sqlite.py.bak create mode 100644 tribler-mod/Tribler/Test/test_buddycast.py create mode 100644 tribler-mod/Tribler/Test/test_buddycast.py.bak create mode 100755 tribler-mod/Tribler/Test/test_buddycast2_datahandler.bat create mode 100644 tribler-mod/Tribler/Test/test_buddycast2_datahandler.py create mode 100644 tribler-mod/Tribler/Test/test_buddycast2_datahandler.py.bak create mode 100755 tribler-mod/Tribler/Test/test_buddycast2_datahandler.sh create mode 100644 tribler-mod/Tribler/Test/test_buddycast4.py create mode 100644 
tribler-mod/Tribler/Test/test_buddycast4.py.bak create mode 100644 tribler-mod/Tribler/Test/test_buddycast4_stresstest.py create mode 100644 tribler-mod/Tribler/Test/test_buddycast4_stresstest.py.bak create mode 100644 tribler-mod/Tribler/Test/test_buddycast_msg.bat create mode 100644 tribler-mod/Tribler/Test/test_buddycast_msg.py create mode 100644 tribler-mod/Tribler/Test/test_buddycast_msg.py.bak create mode 100644 tribler-mod/Tribler/Test/test_buddycast_msg.sh create mode 100644 tribler-mod/Tribler/Test/test_cachedb.py create mode 100644 tribler-mod/Tribler/Test/test_cachedb.py.bak create mode 100644 tribler-mod/Tribler/Test/test_cachedbhandler.py create mode 100644 tribler-mod/Tribler/Test/test_cachedbhandler.py.bak create mode 100644 tribler-mod/Tribler/Test/test_connect_overlay.py create mode 100644 tribler-mod/Tribler/Test/test_connect_overlay.py.bak create mode 100644 tribler-mod/Tribler/Test/test_crawler.py create mode 100644 tribler-mod/Tribler/Test/test_crawler.py.bak create mode 100755 tribler-mod/Tribler/Test/test_dialback_conn_handler.bat create mode 100644 tribler-mod/Tribler/Test/test_dialback_conn_handler.py create mode 100644 tribler-mod/Tribler/Test/test_dialback_conn_handler.py.bak create mode 100755 tribler-mod/Tribler/Test/test_dialback_conn_handler.sh create mode 100755 tribler-mod/Tribler/Test/test_dialback_reply_active.bat create mode 100644 tribler-mod/Tribler/Test/test_dialback_reply_active.py create mode 100644 tribler-mod/Tribler/Test/test_dialback_reply_active.py.bak create mode 100755 tribler-mod/Tribler/Test/test_dialback_reply_active.sh create mode 100644 tribler-mod/Tribler/Test/test_dialback_reply_active2.py create mode 100644 tribler-mod/Tribler/Test/test_dialback_reply_active2.py.bak create mode 100644 tribler-mod/Tribler/Test/test_dialback_request.py create mode 100644 tribler-mod/Tribler/Test/test_dialback_request.py.bak create mode 100755 tribler-mod/Tribler/Test/test_dlhelp.bat create mode 100644 tribler-mod/Tribler/Test/test_dlhelp.py create mode 100644 tribler-mod/Tribler/Test/test_dlhelp.py.bak create mode 100755 tribler-mod/Tribler/Test/test_dlhelp.sh create mode 100644 tribler-mod/Tribler/Test/test_extend_hs.py create mode 100644 tribler-mod/Tribler/Test/test_extend_hs.py.bak create mode 100644 tribler-mod/Tribler/Test/test_extend_hs_t350.py create mode 100644 tribler-mod/Tribler/Test/test_extend_hs_t350.py.bak create mode 100644 tribler-mod/Tribler/Test/test_friend.py create mode 100644 tribler-mod/Tribler/Test/test_friend.py.bak create mode 100644 tribler-mod/Tribler/Test/test_friendship.bat create mode 100644 tribler-mod/Tribler/Test/test_friendship.py create mode 100644 tribler-mod/Tribler/Test/test_friendship.py.bak create mode 100755 tribler-mod/Tribler/Test/test_friendship.sh create mode 100644 tribler-mod/Tribler/Test/test_friendship_crawler.py create mode 100644 tribler-mod/Tribler/Test/test_friendship_crawler.py.bak create mode 100644 tribler-mod/Tribler/Test/test_g2g.py create mode 100644 tribler-mod/Tribler/Test/test_g2g.py.bak create mode 100644 tribler-mod/Tribler/Test/test_gui_server.py create mode 100644 tribler-mod/Tribler/Test/test_gui_server.py.bak create mode 100644 tribler-mod/Tribler/Test/test_merkle.py create mode 100644 tribler-mod/Tribler/Test/test_merkle.py.bak create mode 100644 tribler-mod/Tribler/Test/test_na_extend_hs.py create mode 100644 tribler-mod/Tribler/Test/test_na_extend_hs.py.bak create mode 100644 tribler-mod/Tribler/Test/test_na_extend_hs.sh create mode 100644 tribler-mod/Tribler/Test/test_natcheck.py 
create mode 100644 tribler-mod/Tribler/Test/test_natcheck.py.bak create mode 100644 tribler-mod/Tribler/Test/test_osutils.py create mode 100644 tribler-mod/Tribler/Test/test_osutils.py.bak create mode 100644 tribler-mod/Tribler/Test/test_overlay_bridge.py create mode 100644 tribler-mod/Tribler/Test/test_overlay_bridge.py.bak create mode 100755 tribler-mod/Tribler/Test/test_overlay_bridge.sh create mode 100644 tribler-mod/Tribler/Test/test_permid.py create mode 100644 tribler-mod/Tribler/Test/test_permid.py.bak create mode 100644 tribler-mod/Tribler/Test/test_permid_response1.py create mode 100644 tribler-mod/Tribler/Test/test_permid_response1.py.bak create mode 100644 tribler-mod/Tribler/Test/test_remote_query.py create mode 100644 tribler-mod/Tribler/Test/test_remote_query.py.bak create mode 100755 tribler-mod/Tribler/Test/test_rquery_reply_active.bat create mode 100644 tribler-mod/Tribler/Test/test_rquery_reply_active.py create mode 100644 tribler-mod/Tribler/Test/test_rquery_reply_active.py.bak create mode 100755 tribler-mod/Tribler/Test/test_rquery_reply_active.sh create mode 100644 tribler-mod/Tribler/Test/test_secure_overlay.bat create mode 100644 tribler-mod/Tribler/Test/test_secure_overlay.py create mode 100644 tribler-mod/Tribler/Test/test_secure_overlay.py.bak create mode 100755 tribler-mod/Tribler/Test/test_secure_overlay.sh create mode 100644 tribler-mod/Tribler/Test/test_seeding_stats.py create mode 100644 tribler-mod/Tribler/Test/test_seeding_stats.py.bak create mode 100644 tribler-mod/Tribler/Test/test_sim.py create mode 100644 tribler-mod/Tribler/Test/test_sim.py.bak create mode 100644 tribler-mod/Tribler/Test/test_social_overlap.py create mode 100644 tribler-mod/Tribler/Test/test_social_overlap.py.bak create mode 100644 tribler-mod/Tribler/Test/test_sqlitecachedb.py create mode 100644 tribler-mod/Tribler/Test/test_sqlitecachedb.py.bak create mode 100644 tribler-mod/Tribler/Test/test_sqlitecachedbhandler.py create mode 100644 tribler-mod/Tribler/Test/test_sqlitecachedbhandler.py.bak create mode 100755 tribler-mod/Tribler/Test/test_sqlitecachedbhandler.sh create mode 100644 tribler-mod/Tribler/Test/test_superpeers.py create mode 100644 tribler-mod/Tribler/Test/test_superpeers.py.bak create mode 100644 tribler-mod/Tribler/Test/test_torrentcollecting.py create mode 100644 tribler-mod/Tribler/Test/test_torrentcollecting.py.bak create mode 100644 tribler-mod/Tribler/Test/test_tracker_checking.py create mode 100644 tribler-mod/Tribler/Test/test_tracker_checking.py.bak create mode 100644 tribler-mod/Tribler/Test/test_ut_pex.py create mode 100644 tribler-mod/Tribler/Test/test_ut_pex.py.bak create mode 100644 tribler-mod/Tribler/Test/testdata.txt create mode 100644 tribler-mod/Tribler/Test/usericon-ok.jpg create mode 100644 tribler-mod/Tribler/Tools/__init__.py create mode 100644 tribler-mod/Tribler/Tools/__init__.py.bak create mode 100644 tribler-mod/Tribler/Tools/addplaytime.py create mode 100644 tribler-mod/Tribler/Tools/addplaytime.py.bak create mode 100755 tribler-mod/Tribler/Tools/bitbucket-live-noauth.py create mode 100755 tribler-mod/Tribler/Tools/bitbucket-live-noauth.py.bak create mode 100755 tribler-mod/Tribler/Tools/bitbucket-live.py create mode 100755 tribler-mod/Tribler/Tools/bitbucket-live.py.bak create mode 100644 tribler-mod/Tribler/Tools/btshowmetainfo.py create mode 100644 tribler-mod/Tribler/Tools/btshowmetainfo.py.bak create mode 100644 tribler-mod/Tribler/Tools/cmdlinedl.py create mode 100644 tribler-mod/Tribler/Tools/cmdlinedl.py.bak create mode 100644 
tribler-mod/Tribler/Tools/createlivestream-noauth.py create mode 100644 tribler-mod/Tribler/Tools/createlivestream-noauth.py.bak create mode 100644 tribler-mod/Tribler/Tools/createlivestream.py create mode 100644 tribler-mod/Tribler/Tools/createlivestream.py.bak create mode 100644 tribler-mod/Tribler/Tools/dirtrackerseeder.py create mode 100644 tribler-mod/Tribler/Tools/dirtrackerseeder.py.bak create mode 100755 tribler-mod/Tribler/Tools/pipe-arnocam-home.sh create mode 100755 tribler-mod/Tribler/Tools/pipe-arnocam-jip.sh create mode 100755 tribler-mod/Tribler/Tools/pipe-babscam-h264-aac-gop-sync.sh create mode 100755 tribler-mod/Tribler/Tools/pipe-babscam-h264-mp3.sh create mode 100755 tribler-mod/Tribler/Tools/pipe-babscam-h264-nosound-mencoder.sh create mode 100755 tribler-mod/Tribler/Tools/pipe-babscam-mpeg4-mp3-sync.sh create mode 100644 tribler-mod/Tribler/Tools/superpeer.py create mode 100644 tribler-mod/Tribler/Tools/superpeer.py.bak create mode 100644 tribler-mod/Tribler/TrackerChecking/TorrentChecking.py create mode 100644 tribler-mod/Tribler/TrackerChecking/TorrentChecking.py.bak create mode 100644 tribler-mod/Tribler/TrackerChecking/TrackerChecking.py create mode 100644 tribler-mod/Tribler/TrackerChecking/TrackerChecking.py.bak create mode 100644 tribler-mod/Tribler/TrackerChecking/__init__.py create mode 100644 tribler-mod/Tribler/TrackerChecking/__init__.py.bak create mode 100644 tribler-mod/Tribler/Utilities/Instance2Instance.py create mode 100644 tribler-mod/Tribler/Utilities/Instance2Instance.py.bak create mode 100644 tribler-mod/Tribler/Utilities/LinuxSingleInstanceChecker.py create mode 100644 tribler-mod/Tribler/Utilities/LinuxSingleInstanceChecker.py.bak create mode 100644 tribler-mod/Tribler/Utilities/TimedTaskQueue.py create mode 100644 tribler-mod/Tribler/Utilities/TimedTaskQueue.py.bak create mode 100644 tribler-mod/Tribler/Utilities/__init__.py create mode 100644 tribler-mod/Tribler/Utilities/__init__.py.bak create mode 100644 tribler-mod/Tribler/Utilities/configreader.py create mode 100644 tribler-mod/Tribler/Utilities/configreader.py.bak create mode 100644 tribler-mod/Tribler/Video/Buttons.py create mode 100644 tribler-mod/Tribler/Video/Buttons.py.bak create mode 100644 tribler-mod/Tribler/Video/EmbeddedPlayer.py create mode 100644 tribler-mod/Tribler/Video/EmbeddedPlayer.py.bak create mode 100644 tribler-mod/Tribler/Video/EmbeddedPlayer4Frame.py create mode 100644 tribler-mod/Tribler/Video/EmbeddedPlayer4Frame.py.bak create mode 100644 tribler-mod/Tribler/Video/Images/4framebackground.png create mode 100644 tribler-mod/Tribler/Video/Images/4framesliderDot.png create mode 100644 tribler-mod/Tribler/Video/Images/4framesliderDot_dis.png create mode 100644 tribler-mod/Tribler/Video/Images/4framesliderVolume.png create mode 100644 tribler-mod/Tribler/Video/Images/background.png create mode 100644 tribler-mod/Tribler/Video/Images/fullScreen-hover.png create mode 100644 tribler-mod/Tribler/Video/Images/fullScreen.png create mode 100644 tribler-mod/Tribler/Video/Images/fullScreen_dis.png create mode 100644 tribler-mod/Tribler/Video/Images/fullScreen_hover.png create mode 100644 tribler-mod/Tribler/Video/Images/pause.png create mode 100644 tribler-mod/Tribler/Video/Images/pause_dis.png create mode 100644 tribler-mod/Tribler/Video/Images/pause_hover.png create mode 100644 tribler-mod/Tribler/Video/Images/play.png create mode 100644 tribler-mod/Tribler/Video/Images/play_dis.png create mode 100644 tribler-mod/Tribler/Video/Images/play_hover.png create mode 100644 
tribler-mod/Tribler/Video/Images/sliderDot.png create mode 100644 tribler-mod/Tribler/Video/Images/sliderDot_dis.png create mode 100644 tribler-mod/Tribler/Video/Images/sliderDot_hover.png create mode 100644 tribler-mod/Tribler/Video/Images/sliderVolume.png create mode 100644 tribler-mod/Tribler/Video/Images/vol0.png create mode 100644 tribler-mod/Tribler/Video/Images/vol0Enabled.png create mode 100644 tribler-mod/Tribler/Video/Images/vol0Enabled_clicked.png create mode 100644 tribler-mod/Tribler/Video/Images/vol0_clicked.png create mode 100644 tribler-mod/Tribler/Video/Images/vol1.png create mode 100644 tribler-mod/Tribler/Video/Images/vol1Enabled.png create mode 100644 tribler-mod/Tribler/Video/Images/vol1Enabled_clicked.png create mode 100644 tribler-mod/Tribler/Video/Images/vol1_hover.png create mode 100644 tribler-mod/Tribler/Video/Images/vol2.png create mode 100644 tribler-mod/Tribler/Video/Images/vol2Enabled.png create mode 100644 tribler-mod/Tribler/Video/Images/vol2Enabled_clicked.png create mode 100644 tribler-mod/Tribler/Video/Images/vol2_hover.png create mode 100644 tribler-mod/Tribler/Video/Images/vol3.png create mode 100644 tribler-mod/Tribler/Video/Images/vol3Enabled.png create mode 100644 tribler-mod/Tribler/Video/Images/vol3Enabled_clicked.png create mode 100644 tribler-mod/Tribler/Video/Images/vol3_hover.png create mode 100644 tribler-mod/Tribler/Video/Images/vol4.png create mode 100644 tribler-mod/Tribler/Video/Images/vol4Enabled.png create mode 100644 tribler-mod/Tribler/Video/Images/vol4Enabled_clicked.png create mode 100644 tribler-mod/Tribler/Video/Images/vol4_hover.png create mode 100644 tribler-mod/Tribler/Video/Images/vol5.png create mode 100644 tribler-mod/Tribler/Video/Images/vol5Enabled_clicked.png create mode 100644 tribler-mod/Tribler/Video/Images/vol5_hover.png create mode 100644 tribler-mod/Tribler/Video/Images/vol6.png create mode 100644 tribler-mod/Tribler/Video/Images/vol6Enabled_clicked.png create mode 100644 tribler-mod/Tribler/Video/Images/vol6_hover.png create mode 100644 tribler-mod/Tribler/Video/Progress.py create mode 100644 tribler-mod/Tribler/Video/Progress.py.bak create mode 100644 tribler-mod/Tribler/Video/VLCWrapper.py create mode 100644 tribler-mod/Tribler/Video/VLCWrapper.py.bak create mode 100644 tribler-mod/Tribler/Video/VideoFrame.py create mode 100644 tribler-mod/Tribler/Video/VideoFrame.py.bak create mode 100644 tribler-mod/Tribler/Video/VideoPlayer.py create mode 100644 tribler-mod/Tribler/Video/VideoPlayer.py.bak create mode 100644 tribler-mod/Tribler/Video/VideoServer.py create mode 100644 tribler-mod/Tribler/Video/VideoServer.py.bak create mode 100644 tribler-mod/Tribler/Video/__init__.py create mode 100644 tribler-mod/Tribler/Video/__init__.py.bak create mode 100644 tribler-mod/Tribler/Video/defs.py create mode 100644 tribler-mod/Tribler/Video/defs.py.bak create mode 100644 tribler-mod/Tribler/Video/utils.py create mode 100644 tribler-mod/Tribler/Video/utils.py.bak create mode 100644 tribler-mod/Tribler/Web2/__init__.py create mode 100644 tribler-mod/Tribler/Web2/__init__.py.bak create mode 100644 tribler-mod/Tribler/Web2/photo/__init__.py create mode 100644 tribler-mod/Tribler/Web2/photo/__init__.py.bak create mode 100644 tribler-mod/Tribler/Web2/photo/flickr.py create mode 100644 tribler-mod/Tribler/Web2/photo/flickr.py.bak create mode 100644 tribler-mod/Tribler/Web2/photo/photo.py create mode 100644 tribler-mod/Tribler/Web2/photo/photo.py.bak create mode 100644 tribler-mod/Tribler/Web2/photo/settings.py create mode 100644 
tribler-mod/Tribler/Web2/photo/settings.py.bak create mode 100644 tribler-mod/Tribler/Web2/photo/zooomr.py create mode 100644 tribler-mod/Tribler/Web2/photo/zooomr.py.bak create mode 100644 tribler-mod/Tribler/Web2/util/__init__.py create mode 100644 tribler-mod/Tribler/Web2/util/__init__.py.bak create mode 100644 tribler-mod/Tribler/Web2/util/codec.py create mode 100644 tribler-mod/Tribler/Web2/util/codec.py.bak create mode 100644 tribler-mod/Tribler/Web2/util/config.py create mode 100644 tribler-mod/Tribler/Web2/util/config.py.bak create mode 100644 tribler-mod/Tribler/Web2/util/db.py create mode 100644 tribler-mod/Tribler/Web2/util/db.py.bak create mode 100644 tribler-mod/Tribler/Web2/util/download.py create mode 100644 tribler-mod/Tribler/Web2/util/download.py.bak create mode 100644 tribler-mod/Tribler/Web2/util/history.py create mode 100644 tribler-mod/Tribler/Web2/util/history.py.bak create mode 100644 tribler-mod/Tribler/Web2/util/log.py create mode 100644 tribler-mod/Tribler/Web2/util/log.py.bak create mode 100644 tribler-mod/Tribler/Web2/util/mypartial.py create mode 100644 tribler-mod/Tribler/Web2/util/mypartial.py.bak create mode 100644 tribler-mod/Tribler/Web2/util/observer.py create mode 100644 tribler-mod/Tribler/Web2/util/observer.py.bak create mode 100644 tribler-mod/Tribler/Web2/util/update.py create mode 100644 tribler-mod/Tribler/Web2/util/update.py.bak create mode 100644 tribler-mod/Tribler/Web2/util/utilsettings.py create mode 100644 tribler-mod/Tribler/Web2/util/utilsettings.py.bak create mode 100644 tribler-mod/Tribler/Web2/video/__init__.py create mode 100644 tribler-mod/Tribler/Web2/video/__init__.py.bak create mode 100644 tribler-mod/Tribler/Web2/video/genericsearch.py create mode 100644 tribler-mod/Tribler/Web2/video/genericsearch.py.bak create mode 100644 tribler-mod/Tribler/Web2/video/settings.py create mode 100644 tribler-mod/Tribler/Web2/video/settings.py.bak create mode 100644 tribler-mod/Tribler/Web2/video/video.py create mode 100644 tribler-mod/Tribler/Web2/video/video.py.bak create mode 100644 tribler-mod/Tribler/Web2/web2definitions.conf create mode 100644 tribler-mod/Tribler/__init__.py create mode 100644 tribler-mod/Tribler/__init__.py.bak create mode 100644 tribler-mod/Tribler/binary-LICENSE.txt create mode 100644 tribler-mod/Tribler/readme.txt create mode 100644 tribler-mod/Tribler/tribler_sdb_v2.sql diff --git a/tribler-mod/Tribler/Category/Category.py b/tribler-mod/Tribler/Category/Category.py new file mode 100644 index 0000000..633a978 --- /dev/null +++ b/tribler-mod/Tribler/Category/Category.py @@ -0,0 +1,386 @@ +from time import localtime, strftime +# written by Yuan Yuan, Jelle Roozenburg +# see LICENSE.txt for license information + +import os, re +from Tribler.Category.init_category import getCategoryInfo +from FamilyFilter import XXXFilter +from traceback import print_exc + +import sys + +from Tribler.__init__ import LIBRARYNAME + +DEBUG=False +category_file = "category.conf" + + +class Category: + + # Code to make this a singleton + __single = None + __size_change = 1024 * 1024 + + def __init__(self, install_dir='.'): + + if Category.__single: + raise RuntimeError, "Category is singleton" + filename = os.path.join(install_dir,LIBRARYNAME, 'Category', category_file) + Category.__single = self + self.utility = None + #self.torrent_db = TorrentDBHandler.getInstance() # Arno, 2009-01-30: apparently unused + try: + self.category_info = getCategoryInfo(filename) + self.category_info.sort(rankcmp) + except: + self.category_info = [] + if DEBUG: + 
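+ # category.conf could not be parsed: dump the traceback when debugging; the client continues with an empty category list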
print_exc() + + self.xxx_filter = XXXFilter(install_dir) + + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","category: Categories defined by user",self.getCategoryNames() + + + # return Category instance + def getInstance(*args, **kw): + if Category.__single is None: + Category(*args, **kw) + return Category.__single + getInstance = staticmethod(getInstance) + + def register(self,metadata_handler): + self.metadata_handler = metadata_handler + + def init_from_main(self, utility): + self.utility = utility + self.set_family_filter(None) # init family filter to saved state + + """ + # check to see whether need to resort torrent file + # return bool + def checkResort(self, data_manager): + data = data_manager.data +#=============================================================================== +# if not data: +# data = data_manager.torrent_db.getRecommendedTorrents(all = True) +#=============================================================================== + if not data: + return False + +# data = data_manager.torrent_db.getRecommendedTorrents(all = True) +# self.reSortAll(data) +# return True + torrent = data[0] + if torrent["category"] == ["?"]: + #data = data_manager.torrent_db.getRecommendedTorrents(all = True) + self.reSortAll(data) +# del data + return True + + begin = time() + for item in data: + if len(item['category']) > 1: + #data = data_manager.torrent_db.getRecommendedTorrents(all = True) + self.reSortAll(data) +# del data + return True + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'torrcoll: Checking of %d torrents costs: %f s' % (len(data), time() - begin) + return False + + # recalculate category of all torrents, remove torrents from db if not existed + def reSortAll(self, data, parent = None): + + max = len(data) + if max == 0: + return + import wx + dlgHolder = [] + event = Event() + def makeDialog(): + dlg = wx.ProgressDialog("Upgrading Database", + "Upgrading Old Database to New Database", + maximum = max, + parent = None, + style = wx.PD_AUTO_HIDE + | wx.PD_ELAPSED_TIME + | wx.PD_REMAINING_TIME + ) + dlgHolder.append(dlg) + event.set() + + + wx.CallAfter(makeDialog) + + # Wait for dialog to be ready + event.wait() + dlg = dlgHolder[0] + + count = 0 + step = int(float(max) / 20) + 1 + + # sort each torrent file + for i in xrange(len(data)): + count += 1 + if count % step == 0: + wx.CallAfter(dlg.Update, [count]) + try: + # try alternative dir if bsddb doesnt match with current Tribler install + rec = data[i] + (torrent_dir,torrent_name) = self.metadata_handler.get_std_torrent_dir_name(rec) + + # read the torrent file + filesrc = os.path.join(torrent_dir,torrent_name) + +# print filesrc + f = open(filesrc, "rb") + torrentdata = f.read() # torrent decoded string + f.close() + except IOError: # torrent file not found + # delete the info from db + self.torrent_db.deleteTorrent(data[i]['infohash']) + continue + + # decode the data + torrent_dict = bencode.bdecode(torrentdata) + content_name = dunno2unicode(torrent_dict["info"].get('name', '?')) + + category_belong = [] + category_belong = self.calculateCategory(torrent_dict, content_name) + + if (category_belong == []): + category_belong = ['other'] + + data[i]['category'] = category_belong # should have updated self.data + self.torrent_db.updateTorrent(data[i]['infohash'], updateFlag=False, category=category_belong) + self.torrent_db.sync() + wx.CallAfter(dlg.Destroy) + """ + + + def getCategoryKeys(self): + if self.category_info is None: + return [] + keys = 
[] + keys.append("All") + keys.append("other") + for category in self.category_info: + keys.append(category['name']) + keys.sort() + return keys + + def getCategoryNames(self): + if self.category_info is None: + return [] + keys = [] + for category in self.category_info: + rank = category['rank'] + if rank == -1: + break + keys.append((category['name'],category['displayname'])) + return keys + + def hasActiveCategory(self, torrent): + try: + name = torrent['category'][0] + except: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'Torrent: %s has no valid category' % `torrent['content_name']` + return False + for category in [{'name':'other', 'rank':1}]+self.category_info: + rank = category['rank'] + if rank == -1: + break + if name.lower() == category['name'].lower(): + return True + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'Category: %s was not in %s' % (name.lower(), [a['name'].lower() for a in self.category_info if a['rank'] != -1]) + return False + + def getCategoryRank(self,cat): + for category in self.category_info: + if category['name'] == cat: + return category['rank'] + return None + + # calculate the category for a given torrent_dict of a torrent file + # return list + def calculateCategory(self, torrent_dict, display_name): + # torrent_dict is the dict of + # a torrent file + # return value: list of category the torrent belongs to + torrent_category = None + + files_list = [] + try: + # the multi-files mode + for ifiles in torrent_dict['info']["files"]: + files_list.append((ifiles['path'][-1], ifiles['length'] / float(self.__size_change))) + except KeyError: + # single mode + files_list.append((torrent_dict['info']["name"],torrent_dict['info']['length'] / float(self.__size_change))) + + # Check xxx + try: + tracker = torrent_dict.get('announce') + if not tracker: + tracker = torrent_dict.get('announce-list',[['']])[0][0] + if self.xxx_filter.isXXXTorrent(files_list, display_name, torrent_dict.get('announce'), torrent_dict.get('comment')): + return ['xxx'] + except: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'Category: Exception in explicit terms filter in torrent: %s' % torrent_dict + print_exc() + + # filename_list ready + strongest_cat = 0.0 + for category in self.category_info: # for each category + (decision, strength) = self.judge(category, files_list, display_name) + if decision and (strength > strongest_cat): + torrent_category = [category['name']] + strongest_cat = strength + + if torrent_category == None: + torrent_category = ['other'] + + return torrent_category + + # judge whether a torrent file belongs to a certain category + # return bool + def judge(self, category, files_list, display_name = ''): + + # judge file keywords + display_name = display_name.lower() + factor = 1.0 + fileKeywords = self._getWords(display_name) + + for ikeywords in category['keywords'].keys(): + try: + fileKeywords.index(ikeywords) + factor *= 1 - category['keywords'][ikeywords] + except: + pass + if (1 - factor) > 0.5: + if 'strength' in category: + return (True, category['strength']) + else: + return (True, (1- factor)) + + # judge each file + matchSize = 0 + totalSize = 1e-19 + for name, length in files_list: + totalSize += length + # judge file size + if ( length < category['minfilesize'] ) or \ + (category['maxfilesize'] > 0 and length > category['maxfilesize'] ): + continue + + # judge file suffix + OK = False + for isuffix in category['suffix']: + if name.lower().endswith( isuffix ): + OK = True + break + if 
OK: + matchSize += length + continue + + # judge file keywords + factor = 1.0 + fileKeywords = self._getWords(name.lower()) + + for ikeywords in category['keywords'].keys(): +# pass + try: + fileKeywords.index(ikeywords) + #print ikeywords + factor *= 1 - category['keywords'][ikeywords] + except: + pass + if factor < 0.5: + # print filename_list[index] + '#######################' + matchSize += length + + # match file + if (matchSize / totalSize) >= category['matchpercentage']: + if 'strength' in category: + return (True, category['strength']) + else: + return (True, (matchSize/ totalSize)) + + return (False, 0) + + + WORDS_REGEXP = re.compile('[a-zA-Z0-9]+') + def _getWords(self, string): + return self.WORDS_REGEXP.findall(string) + + + def family_filter_enabled(self): + """ + Return is xxx filtering is enabled in this client + """ + if self.utility is None: + return False + state = self.utility.config.Read('family_filter') + if state in ('1', '0'): + return state == '1' + else: + self.utility.config.Write('family_filter', '1') + self.utility.config.Flush() + return True + + def set_family_filter(self, b=None): + assert b in (True, False, None) + old = self.family_filter_enabled() + if b != old or b is None: # update category data if initial call, or if state changes + if b is None: + b=old + if self.utility is None: + return + #print >> sys.stderr , b + if b: + self.utility.config.Write('family_filter', '1') + else: + self.utility.config.Write('family_filter', '0') + self.utility.config.Flush() + # change category data + for category in self.category_info: + if category['name'] == 'xxx': + if b: + category['old-rank'] = category['rank'] + category['rank'] = -1 + elif category['rank'] == -1: + category['rank'] = category['old-rank'] + break + + + def get_family_filter_sql(self, _getCategoryID, table_name=''): + if self.family_filter_enabled(): + forbiddencats = [cat['name'] for cat in self.category_info if cat['rank'] == -1] + if table_name: + table_name+='.' 
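+ # categories hidden by the family filter carry rank == -1; the clause below excludes their category_ids from queries, e.g. " and category_id not in (7)" (the id value is only illustrative)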
+ if forbiddencats: + return " and %scategory_id not in (%s)" % (table_name, ','.join([str(_getCategoryID([cat])) for cat in forbiddencats])) + return '' + + + + +def rankcmp(a,b): + if not ('rank' in a): + return 1 + elif not ('rank' in b): + return -1 + elif a['rank'] == -1: + return 1 + elif b['rank'] == -1: + return -1 + elif a['rank'] == b['rank']: + return 0 + elif a['rank'] < b['rank']: + return -1 + else: + return 1 + diff --git a/tribler-mod/Tribler/Category/Category.py.bak b/tribler-mod/Tribler/Category/Category.py.bak new file mode 100644 index 0000000..9ca7b77 --- /dev/null +++ b/tribler-mod/Tribler/Category/Category.py.bak @@ -0,0 +1,385 @@ +# written by Yuan Yuan, Jelle Roozenburg +# see LICENSE.txt for license information + +import os, re +from Tribler.Category.init_category import getCategoryInfo +from FamilyFilter import XXXFilter +from traceback import print_exc + +import sys + +from Tribler.__init__ import LIBRARYNAME + +DEBUG=False +category_file = "category.conf" + + +class Category: + + # Code to make this a singleton + __single = None + __size_change = 1024 * 1024 + + def __init__(self, install_dir='.'): + + if Category.__single: + raise RuntimeError, "Category is singleton" + filename = os.path.join(install_dir,LIBRARYNAME, 'Category', category_file) + Category.__single = self + self.utility = None + #self.torrent_db = TorrentDBHandler.getInstance() # Arno, 2009-01-30: apparently unused + try: + self.category_info = getCategoryInfo(filename) + self.category_info.sort(rankcmp) + except: + self.category_info = [] + if DEBUG: + print_exc() + + self.xxx_filter = XXXFilter(install_dir) + + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","category: Categories defined by user",self.getCategoryNames() + + + # return Category instance + def getInstance(*args, **kw): + if Category.__single is None: + Category(*args, **kw) + return Category.__single + getInstance = staticmethod(getInstance) + + def register(self,metadata_handler): + self.metadata_handler = metadata_handler + + def init_from_main(self, utility): + self.utility = utility + self.set_family_filter(None) # init family filter to saved state + + """ + # check to see whether need to resort torrent file + # return bool + def checkResort(self, data_manager): + data = data_manager.data +#=============================================================================== +# if not data: +# data = data_manager.torrent_db.getRecommendedTorrents(all = True) +#=============================================================================== + if not data: + return False + +# data = data_manager.torrent_db.getRecommendedTorrents(all = True) +# self.reSortAll(data) +# return True + torrent = data[0] + if torrent["category"] == ["?"]: + #data = data_manager.torrent_db.getRecommendedTorrents(all = True) + self.reSortAll(data) +# del data + return True + + begin = time() + for item in data: + if len(item['category']) > 1: + #data = data_manager.torrent_db.getRecommendedTorrents(all = True) + self.reSortAll(data) +# del data + return True + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'torrcoll: Checking of %d torrents costs: %f s' % (len(data), time() - begin) + return False + + # recalculate category of all torrents, remove torrents from db if not existed + def reSortAll(self, data, parent = None): + + max = len(data) + if max == 0: + return + import wx + dlgHolder = [] + event = Event() + def makeDialog(): + dlg = wx.ProgressDialog("Upgrading Database", + "Upgrading 
Old Database to New Database", + maximum = max, + parent = None, + style = wx.PD_AUTO_HIDE + | wx.PD_ELAPSED_TIME + | wx.PD_REMAINING_TIME + ) + dlgHolder.append(dlg) + event.set() + + + wx.CallAfter(makeDialog) + + # Wait for dialog to be ready + event.wait() + dlg = dlgHolder[0] + + count = 0 + step = int(float(max) / 20) + 1 + + # sort each torrent file + for i in xrange(len(data)): + count += 1 + if count % step == 0: + wx.CallAfter(dlg.Update, [count]) + try: + # try alternative dir if bsddb doesnt match with current Tribler install + rec = data[i] + (torrent_dir,torrent_name) = self.metadata_handler.get_std_torrent_dir_name(rec) + + # read the torrent file + filesrc = os.path.join(torrent_dir,torrent_name) + +# print filesrc + f = open(filesrc, "rb") + torrentdata = f.read() # torrent decoded string + f.close() + except IOError: # torrent file not found + # delete the info from db + self.torrent_db.deleteTorrent(data[i]['infohash']) + continue + + # decode the data + torrent_dict = bencode.bdecode(torrentdata) + content_name = dunno2unicode(torrent_dict["info"].get('name', '?')) + + category_belong = [] + category_belong = self.calculateCategory(torrent_dict, content_name) + + if (category_belong == []): + category_belong = ['other'] + + data[i]['category'] = category_belong # should have updated self.data + self.torrent_db.updateTorrent(data[i]['infohash'], updateFlag=False, category=category_belong) + self.torrent_db.sync() + wx.CallAfter(dlg.Destroy) + """ + + + def getCategoryKeys(self): + if self.category_info is None: + return [] + keys = [] + keys.append("All") + keys.append("other") + for category in self.category_info: + keys.append(category['name']) + keys.sort() + return keys + + def getCategoryNames(self): + if self.category_info is None: + return [] + keys = [] + for category in self.category_info: + rank = category['rank'] + if rank == -1: + break + keys.append((category['name'],category['displayname'])) + return keys + + def hasActiveCategory(self, torrent): + try: + name = torrent['category'][0] + except: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'Torrent: %s has no valid category' % `torrent['content_name']` + return False + for category in [{'name':'other', 'rank':1}]+self.category_info: + rank = category['rank'] + if rank == -1: + break + if name.lower() == category['name'].lower(): + return True + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'Category: %s was not in %s' % (name.lower(), [a['name'].lower() for a in self.category_info if a['rank'] != -1]) + return False + + def getCategoryRank(self,cat): + for category in self.category_info: + if category['name'] == cat: + return category['rank'] + return None + + # calculate the category for a given torrent_dict of a torrent file + # return list + def calculateCategory(self, torrent_dict, display_name): + # torrent_dict is the dict of + # a torrent file + # return value: list of category the torrent belongs to + torrent_category = None + + files_list = [] + try: + # the multi-files mode + for ifiles in torrent_dict['info']["files"]: + files_list.append((ifiles['path'][-1], ifiles['length'] / float(self.__size_change))) + except KeyError: + # single mode + files_list.append((torrent_dict['info']["name"],torrent_dict['info']['length'] / float(self.__size_change))) + + # Check xxx + try: + tracker = torrent_dict.get('announce') + if not tracker: + tracker = torrent_dict.get('announce-list',[['']])[0][0] + if self.xxx_filter.isXXXTorrent(files_list, display_name, 
torrent_dict.get('announce'), torrent_dict.get('comment')): + return ['xxx'] + except: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'Category: Exception in explicit terms filter in torrent: %s' % torrent_dict + print_exc() + + # filename_list ready + strongest_cat = 0.0 + for category in self.category_info: # for each category + (decision, strength) = self.judge(category, files_list, display_name) + if decision and (strength > strongest_cat): + torrent_category = [category['name']] + strongest_cat = strength + + if torrent_category == None: + torrent_category = ['other'] + + return torrent_category + + # judge whether a torrent file belongs to a certain category + # return bool + def judge(self, category, files_list, display_name = ''): + + # judge file keywords + display_name = display_name.lower() + factor = 1.0 + fileKeywords = self._getWords(display_name) + + for ikeywords in category['keywords'].keys(): + try: + fileKeywords.index(ikeywords) + factor *= 1 - category['keywords'][ikeywords] + except: + pass + if (1 - factor) > 0.5: + if 'strength' in category: + return (True, category['strength']) + else: + return (True, (1- factor)) + + # judge each file + matchSize = 0 + totalSize = 1e-19 + for name, length in files_list: + totalSize += length + # judge file size + if ( length < category['minfilesize'] ) or \ + (category['maxfilesize'] > 0 and length > category['maxfilesize'] ): + continue + + # judge file suffix + OK = False + for isuffix in category['suffix']: + if name.lower().endswith( isuffix ): + OK = True + break + if OK: + matchSize += length + continue + + # judge file keywords + factor = 1.0 + fileKeywords = self._getWords(name.lower()) + + for ikeywords in category['keywords'].keys(): +# pass + try: + fileKeywords.index(ikeywords) + #print ikeywords + factor *= 1 - category['keywords'][ikeywords] + except: + pass + if factor < 0.5: + # print filename_list[index] + '#######################' + matchSize += length + + # match file + if (matchSize / totalSize) >= category['matchpercentage']: + if 'strength' in category: + return (True, category['strength']) + else: + return (True, (matchSize/ totalSize)) + + return (False, 0) + + + WORDS_REGEXP = re.compile('[a-zA-Z0-9]+') + def _getWords(self, string): + return self.WORDS_REGEXP.findall(string) + + + def family_filter_enabled(self): + """ + Return is xxx filtering is enabled in this client + """ + if self.utility is None: + return False + state = self.utility.config.Read('family_filter') + if state in ('1', '0'): + return state == '1' + else: + self.utility.config.Write('family_filter', '1') + self.utility.config.Flush() + return True + + def set_family_filter(self, b=None): + assert b in (True, False, None) + old = self.family_filter_enabled() + if b != old or b is None: # update category data if initial call, or if state changes + if b is None: + b=old + if self.utility is None: + return + #print >> sys.stderr , b + if b: + self.utility.config.Write('family_filter', '1') + else: + self.utility.config.Write('family_filter', '0') + self.utility.config.Flush() + # change category data + for category in self.category_info: + if category['name'] == 'xxx': + if b: + category['old-rank'] = category['rank'] + category['rank'] = -1 + elif category['rank'] == -1: + category['rank'] = category['old-rank'] + break + + + def get_family_filter_sql(self, _getCategoryID, table_name=''): + if self.family_filter_enabled(): + forbiddencats = [cat['name'] for cat in self.category_info if cat['rank'] == -1] + if 
table_name: + table_name+='.' + if forbiddencats: + return " and %scategory_id not in (%s)" % (table_name, ','.join([str(_getCategoryID([cat])) for cat in forbiddencats])) + return '' + + + + +def rankcmp(a,b): + if not ('rank' in a): + return 1 + elif not ('rank' in b): + return -1 + elif a['rank'] == -1: + return 1 + elif b['rank'] == -1: + return -1 + elif a['rank'] == b['rank']: + return 0 + elif a['rank'] < b['rank']: + return -1 + else: + return 1 + diff --git a/tribler-mod/Tribler/Category/FamilyFilter.py b/tribler-mod/Tribler/Category/FamilyFilter.py new file mode 100644 index 0000000..b1134c6 --- /dev/null +++ b/tribler-mod/Tribler/Category/FamilyFilter.py @@ -0,0 +1,112 @@ +from time import localtime, strftime +# Written by Jelle Roozenburg +# see LICENSE.txt for license information + +import re, sys, os +from traceback import print_exc + +from Tribler.__init__ import LIBRARYNAME + +WORDS_REGEXP = re.compile('[a-zA-Z0-9]+') +DEBUG = False + +class XXXFilter: + def __init__(self, install_dir): + termfilename = os.path.join(install_dir, LIBRARYNAME, 'Category','filter_terms.filter') + self.xxx_terms, self.xxx_searchterms = self.initTerms(termfilename) + + def initTerms(self, filename): + terms = set() + searchterms = set() + + try: + f = file(filename, 'r') + lines = f.read().lower().splitlines() + + for line in lines: + if line.startswith('*'): + searchterms.add(line[1:]) + else: + terms.add(line) + f.close() + except: + if DEBUG: + print_exc() + + if DEBUG: + print 'Read %d XXX terms from file %s' % (len(terms)+len(searchterms), filename) + return terms, searchterms + + def _getWords(self, string): + return [a.lower() for a in WORDS_REGEXP.findall(string)] + + + def isXXXTorrent(self, files_list, torrent_name, tracker, comment=None): + if tracker: + tracker = tracker.lower().replace('http://', '').replace('announce','') + else: + tracker = '' + terms = [a[0].lower() for a in files_list] + is_xxx = (len(filter(self.isXXX, terms)) > 0 or + self.isXXX(torrent_name, False) or + self.isXXX(tracker, False) or + (comment and self.isXXX(comment, False)) + ) + if DEBUG: + if is_xxx: + print 'Torrent is XXX: %s %s' % (torrent_name, tracker) + else: + print 'Torrent is NOT XXX: %s %s' % (torrent_name, tracker) + return is_xxx + + + def isXXX(self, s, isFilename=True): + s = s.lower() + if self.isXXXTerm(s): # We have also put some full titles in the filter file + return True + if not self.isAudio(s) and self.foundXXXTerm(s): + return True + words = self._getWords(s) + words2 = [' '.join(words[i:i+2]) for i in xrange(0, len(words)-1)] + num_xxx = len([w for w in words+words2 if self.isXXXTerm(w, s)]) + if isFilename and self.isAudio(s): + return num_xxx > 2 # almost never classify mp3 as porn + else: + return num_xxx > 0 + + def foundXXXTerm(self, s): + for term in self.xxx_searchterms: + if term in s: + if DEBUG: + print 'XXXFilter: Found term "%s" in %s' % (term, s) + return True + return False + + def isXXXTerm(self, s, title=None): + # check if term-(e)s is in xxx-terms + s = s.lower() + if s in self.xxx_terms: + if DEBUG: + print 'XXXFilter: "%s" is dirty%s' % (s, title and ' in %s' % title or '') + return True + if s.endswith('es'): + if s[:-2] in self.xxx_terms: + if DEBUG: + print 'XXXFilter: "%s" is dirty%s' % (s[:-2], title and ' in %s' % title or '') + return True + elif s.endswith('s') or s.endswith('n'): + if s[:-1] in self.xxx_terms: + if DEBUG: + print 'XXXFilter: "%s" is dirty%s' % (s[:-1], title and ' in %s' % title or '') + return True + + return False + + audio_extensions 
= ['cda', 'flac', 'm3u', 'mp2', 'mp3', 'md5', 'vorbis', 'wav', 'wma', 'ogg'] + def isAudio(self, s): + return s[s.rfind('.')+1:] in self.audio_extensions + + + + + diff --git a/tribler-mod/Tribler/Category/FamilyFilter.py.bak b/tribler-mod/Tribler/Category/FamilyFilter.py.bak new file mode 100644 index 0000000..6d9176a --- /dev/null +++ b/tribler-mod/Tribler/Category/FamilyFilter.py.bak @@ -0,0 +1,111 @@ +# Written by Jelle Roozenburg +# see LICENSE.txt for license information + +import re, sys, os +from traceback import print_exc + +from Tribler.__init__ import LIBRARYNAME + +WORDS_REGEXP = re.compile('[a-zA-Z0-9]+') +DEBUG = False + +class XXXFilter: + def __init__(self, install_dir): + termfilename = os.path.join(install_dir, LIBRARYNAME, 'Category','filter_terms.filter') + self.xxx_terms, self.xxx_searchterms = self.initTerms(termfilename) + + def initTerms(self, filename): + terms = set() + searchterms = set() + + try: + f = file(filename, 'r') + lines = f.read().lower().splitlines() + + for line in lines: + if line.startswith('*'): + searchterms.add(line[1:]) + else: + terms.add(line) + f.close() + except: + if DEBUG: + print_exc() + + if DEBUG: + print 'Read %d XXX terms from file %s' % (len(terms)+len(searchterms), filename) + return terms, searchterms + + def _getWords(self, string): + return [a.lower() for a in WORDS_REGEXP.findall(string)] + + + def isXXXTorrent(self, files_list, torrent_name, tracker, comment=None): + if tracker: + tracker = tracker.lower().replace('http://', '').replace('announce','') + else: + tracker = '' + terms = [a[0].lower() for a in files_list] + is_xxx = (len(filter(self.isXXX, terms)) > 0 or + self.isXXX(torrent_name, False) or + self.isXXX(tracker, False) or + (comment and self.isXXX(comment, False)) + ) + if DEBUG: + if is_xxx: + print 'Torrent is XXX: %s %s' % (torrent_name, tracker) + else: + print 'Torrent is NOT XXX: %s %s' % (torrent_name, tracker) + return is_xxx + + + def isXXX(self, s, isFilename=True): + s = s.lower() + if self.isXXXTerm(s): # We have also put some full titles in the filter file + return True + if not self.isAudio(s) and self.foundXXXTerm(s): + return True + words = self._getWords(s) + words2 = [' '.join(words[i:i+2]) for i in xrange(0, len(words)-1)] + num_xxx = len([w for w in words+words2 if self.isXXXTerm(w, s)]) + if isFilename and self.isAudio(s): + return num_xxx > 2 # almost never classify mp3 as porn + else: + return num_xxx > 0 + + def foundXXXTerm(self, s): + for term in self.xxx_searchterms: + if term in s: + if DEBUG: + print 'XXXFilter: Found term "%s" in %s' % (term, s) + return True + return False + + def isXXXTerm(self, s, title=None): + # check if term-(e)s is in xxx-terms + s = s.lower() + if s in self.xxx_terms: + if DEBUG: + print 'XXXFilter: "%s" is dirty%s' % (s, title and ' in %s' % title or '') + return True + if s.endswith('es'): + if s[:-2] in self.xxx_terms: + if DEBUG: + print 'XXXFilter: "%s" is dirty%s' % (s[:-2], title and ' in %s' % title or '') + return True + elif s.endswith('s') or s.endswith('n'): + if s[:-1] in self.xxx_terms: + if DEBUG: + print 'XXXFilter: "%s" is dirty%s' % (s[:-1], title and ' in %s' % title or '') + return True + + return False + + audio_extensions = ['cda', 'flac', 'm3u', 'mp2', 'mp3', 'md5', 'vorbis', 'wav', 'wma', 'ogg'] + def isAudio(self, s): + return s[s.rfind('.')+1:] in self.audio_extensions + + + + + diff --git a/tribler-mod/Tribler/Category/TestCategory.py b/tribler-mod/Tribler/Category/TestCategory.py new file mode 100644 index 0000000..fe7ea77 --- 
/dev/null +++ b/tribler-mod/Tribler/Category/TestCategory.py @@ -0,0 +1,149 @@ +from time import localtime, strftime +# Written by Yuan Yuan +# see LICENSE.txt for license information + +import sys, os +execpath = os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), '..', '..') +sys.path.append(execpath) +#print sys.path +from Utility.utility import getMetainfo +from Tribler.Category.Category import Category + +DEBUG = False + +def testFilter(catfilename, torrentpath): + readCategorisationFile(catfilename) + #print 'Install_dir is %s' % execpath + c = Category.getInstance(execpath, None) + total = porn = fn = fp = 0 + for tfilename,isporn in tdict.items(): + torrent = getMetainfo(os.path.join(torrentpath,tfilename)) + name = torrent['info']['name'] + cat = c.calculateCategory(torrent, name) + fporn = (cat == ['xxx']) + total+= 1 + porn += int(isporn) + if isporn == fporn: + if DEBUG: + print (isporn, fporn), 'good', name + + elif isporn and not fporn: + fn+=1 + print 'FALSE NEGATIVE' + showTorrent(os.path.join(torrentpath,tfilename)) + elif not isporn and fporn: + fp +=1 + print 'FALSE POSITIVE' + showTorrent(os.path.join(torrentpath,tfilename)) + + print """ + Total torrents: %(total)d + XXX torrents: %(porn)d + Correct filtered: %(good)d + False negatives: %(fn)d + False positives: %(fp)d + """ % {'total':total, 'porn':porn, 'fn':fn,'fp':fp,'good':total-fn-fp} + +def readCategorisationFile(filename): + global tdict + tdict = {} + try: + f = file(filename, 'r') + lines = f.read().splitlines() + for line in lines: + if line: + parts = line.split('\t') + tdict[parts[0]] = bool(int(parts[1])) + f.close() + except IOError: + print 'No file %s found, starting with empty file' % filename + +def getTorrentData(path, max_num=-1): + torrents= [] + i = 0 + for fname in os.listdir(path): + if fname.endswith('.torrent'): + torrents.append(os.path.join(path,fname)) + if i%1000 == 0 and i: + print 'Loaded: %d torrents' % i + if i == int(max_num): + break + i+=1 + print 'Loaded %d torrents' % len(torrents) + return torrents + +def showTorrent(path): + torrent = getMetainfo(os.path.join(path)) + name = torrent['info']['name'] + print '------------------------------' + print '\tfiles :' + files_list = [] + __size_change = 1024 + try: + # the multi-files mode + for ifiles in torrent['info']["files"]: + files_list.append((ifiles['path'][-1], ifiles['length'] / float(__size_change))) + except KeyError: + # single mode + files_list.append((torrent['info']["name"],torrent['info']['length'] / float(__size_change))) + for fname, fsize in files_list: + print'\t\t%s\t%d kb' % (fname, fsize) + print 'Torrent name: %s' % name + print '\ttracker:%s' % torrent['announce'] + print '------------------------------' + +def createTorrentDataSet(filename, torrentpath): + initSaveFile(filename) + f_out = file(filename, 'a') + torrents = getTorrentData(torrentpath) + for torrent in torrents: + if os.path.split(torrent)[-1] in tset: # already done + continue + showTorrent(torrent) + ans = None + while ans not in ['q', 'y','n']: + print 'Is this torrent porn? 
(y/n/q)' + ans = sys.stdin.readline()[:-1].lower() + if ans == 'q': + break + else: + saveTorrent(f_out, torrent, (ans=='y')) + f_out.close() + +def saveTorrent(f_out, torrent, boolean): + if torrent in tset: + return + tfilename = os.path.split(torrent)[-1] + assert tfilename + f_out.write('%s\t%d\n' % (tfilename, int(boolean))) + f_out.flush() + tset.add(torrent) + +def initSaveFile(filename): + global tset + tset = set() + try: + f = file(filename, 'r') + lines = f.read().splitlines() + for line in lines: + tset.add(line.split('\t')[0]) + f.close() + except IOError: + print 'No file %s found, starting with empty file' % filename + + + +def main(args): + if len(args) != 4 or args[1] not in ['categorise', 'test']: + print 'Usage 1: %s categorise [torrent-dir] [torrent-data-file]' % args[0] + print 'Usage 2: %s test [torrent-dir] [torrent-data-file]' % args[0] + sys.exit(1) + if args[1] == 'categorise': + createTorrentDataSet(args[3], args[2]) + elif args[1] == 'test': + testFilter(args[3], args[2]) + print 'ready' + + +if __name__ == '__main__': + main(sys.argv) diff --git a/tribler-mod/Tribler/Category/TestCategory.py.bak b/tribler-mod/Tribler/Category/TestCategory.py.bak new file mode 100644 index 0000000..84c323b --- /dev/null +++ b/tribler-mod/Tribler/Category/TestCategory.py.bak @@ -0,0 +1,148 @@ +# Written by Yuan Yuan +# see LICENSE.txt for license information + +import sys, os +execpath = os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), '..', '..') +sys.path.append(execpath) +#print sys.path +from Utility.utility import getMetainfo +from Tribler.Category.Category import Category + +DEBUG = False + +def testFilter(catfilename, torrentpath): + readCategorisationFile(catfilename) + #print 'Install_dir is %s' % execpath + c = Category.getInstance(execpath, None) + total = porn = fn = fp = 0 + for tfilename,isporn in tdict.items(): + torrent = getMetainfo(os.path.join(torrentpath,tfilename)) + name = torrent['info']['name'] + cat = c.calculateCategory(torrent, name) + fporn = (cat == ['xxx']) + total+= 1 + porn += int(isporn) + if isporn == fporn: + if DEBUG: + print (isporn, fporn), 'good', name + + elif isporn and not fporn: + fn+=1 + print 'FALSE NEGATIVE' + showTorrent(os.path.join(torrentpath,tfilename)) + elif not isporn and fporn: + fp +=1 + print 'FALSE POSITIVE' + showTorrent(os.path.join(torrentpath,tfilename)) + + print """ + Total torrents: %(total)d + XXX torrents: %(porn)d + Correct filtered: %(good)d + False negatives: %(fn)d + False positives: %(fp)d + """ % {'total':total, 'porn':porn, 'fn':fn,'fp':fp,'good':total-fn-fp} + +def readCategorisationFile(filename): + global tdict + tdict = {} + try: + f = file(filename, 'r') + lines = f.read().splitlines() + for line in lines: + if line: + parts = line.split('\t') + tdict[parts[0]] = bool(int(parts[1])) + f.close() + except IOError: + print 'No file %s found, starting with empty file' % filename + +def getTorrentData(path, max_num=-1): + torrents= [] + i = 0 + for fname in os.listdir(path): + if fname.endswith('.torrent'): + torrents.append(os.path.join(path,fname)) + if i%1000 == 0 and i: + print 'Loaded: %d torrents' % i + if i == int(max_num): + break + i+=1 + print 'Loaded %d torrents' % len(torrents) + return torrents + +def showTorrent(path): + torrent = getMetainfo(os.path.join(path)) + name = torrent['info']['name'] + print '------------------------------' + print '\tfiles :' + files_list = [] + __size_change = 1024 + try: + # the multi-files mode + for ifiles in torrent['info']["files"]: + 
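+ # keep (basename, size in KB) per file; single-file torrents are handled by the KeyError branch below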
files_list.append((ifiles['path'][-1], ifiles['length'] / float(__size_change))) + except KeyError: + # single mode + files_list.append((torrent['info']["name"],torrent['info']['length'] / float(__size_change))) + for fname, fsize in files_list: + print'\t\t%s\t%d kb' % (fname, fsize) + print 'Torrent name: %s' % name + print '\ttracker:%s' % torrent['announce'] + print '------------------------------' + +def createTorrentDataSet(filename, torrentpath): + initSaveFile(filename) + f_out = file(filename, 'a') + torrents = getTorrentData(torrentpath) + for torrent in torrents: + if os.path.split(torrent)[-1] in tset: # already done + continue + showTorrent(torrent) + ans = None + while ans not in ['q', 'y','n']: + print 'Is this torrent porn? (y/n/q)' + ans = sys.stdin.readline()[:-1].lower() + if ans == 'q': + break + else: + saveTorrent(f_out, torrent, (ans=='y')) + f_out.close() + +def saveTorrent(f_out, torrent, boolean): + if torrent in tset: + return + tfilename = os.path.split(torrent)[-1] + assert tfilename + f_out.write('%s\t%d\n' % (tfilename, int(boolean))) + f_out.flush() + tset.add(torrent) + +def initSaveFile(filename): + global tset + tset = set() + try: + f = file(filename, 'r') + lines = f.read().splitlines() + for line in lines: + tset.add(line.split('\t')[0]) + f.close() + except IOError: + print 'No file %s found, starting with empty file' % filename + + + +def main(args): + if len(args) != 4 or args[1] not in ['categorise', 'test']: + print 'Usage 1: %s categorise [torrent-dir] [torrent-data-file]' % args[0] + print 'Usage 2: %s test [torrent-dir] [torrent-data-file]' % args[0] + sys.exit(1) + if args[1] == 'categorise': + createTorrentDataSet(args[3], args[2]) + elif args[1] == 'test': + testFilter(args[3], args[2]) + print 'ready' + + +if __name__ == '__main__': + main(sys.argv) diff --git a/tribler-mod/Tribler/Category/__init__.py b/tribler-mod/Tribler/Category/__init__.py new file mode 100644 index 0000000..eec068a --- /dev/null +++ b/tribler-mod/Tribler/Category/__init__.py @@ -0,0 +1,3 @@ +from time import localtime, strftime +# Written by Yuan Yuan +# see LICENSE.txt for license information diff --git a/tribler-mod/Tribler/Category/__init__.py.bak b/tribler-mod/Tribler/Category/__init__.py.bak new file mode 100644 index 0000000..316dcff --- /dev/null +++ b/tribler-mod/Tribler/Category/__init__.py.bak @@ -0,0 +1,2 @@ +# Written by Yuan Yuan +# see LICENSE.txt for license information diff --git a/tribler-mod/Tribler/Category/category.conf b/tribler-mod/Tribler/Category/category.conf new file mode 100644 index 0000000..4f9000f --- /dev/null +++ b/tribler-mod/Tribler/Category/category.conf @@ -0,0 +1,62 @@ +[xxx] +rank = 10 +displayname = XXX +matchpercentage = 0.001 +strength = 1.1 +# Keywords are in seperate file: filter_content.filter + + +[Video] +rank = 1 +displayname = Video Files +suffix = asf, asp, avi, flc, fli, flic, mkv, mov, movie, mpeg, mpg, qicktime, ram, rm, rmvb, rpm, vob, wma, wmv +minfilesize = 50 +maxfilesize = 10000000 +matchpercentage = 0.5 + +*divx = 1 +*xvid = 1 +*rmvb = 1 + +[VideoClips] +rank = 2 +displayname = Video Clips +suffix = asf, asp, avi, flc, fli, flic, mkv, mov, movie, mpeg, mpg, qicktime, ram, rm, rmvb, rpm, vob, wma, wmv, mp4, flv +minfilesize = 0 +maxfilesize = 50 +matchpercentage = 0.5 + +[Audio] +rank = 3 +displayname = Audio +suffix = cda, flac, m3u, mp2, mp3, vorbis, wav, wma, ogg, ape +matchpercentage = 0.8 + +[Document] +rank = 5 +displayname = Documents +suffix = doc, pdf, ppt, ps, tex, txt, vsd +matchpercentage = 0.8 + 
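The TestCategory.py tool added above keeps its ground truth in a plain tab-separated label file: saveTorrent() appends one "<torrent file name>\t<0 or 1>" line per torrent, and readCategorisationFile() parses the same format back into a dict. A minimal, self-contained sketch of that round trip follows; the file name and entries below are hypothetical, not part of the patch.

    # Sketch of the label-file format used by saveTorrent()/readCategorisationFile()
    # in TestCategory.py above. LABEL_FILE and the sample entries are hypothetical.
    LABEL_FILE = 'example_labels.txt'

    def write_labels(path, labels):
        # labels: {torrent file name: bool}; written as "<name>\t<0 or 1>" per line
        out = open(path, 'w')
        for tfilename, isporn in labels.items():
            out.write('%s\t%d\n' % (tfilename, int(isporn)))
        out.close()

    def read_labels(path):
        # Mirrors readCategorisationFile(): returns {torrent file name: bool}
        tdict = {}
        for line in open(path).read().splitlines():
            if line:
                parts = line.split('\t')
                tdict[parts[0]] = bool(int(parts[1]))
        return tdict

    write_labels(LABEL_FILE, {'aaaa.torrent': True, 'bbbb.torrent': False})
    print(read_labels(LABEL_FILE))
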
+[Compressed] +rank = 4 +displayname = Compressed +suffix = ace, bin, bwt, cab, ccd, cdi, cue, gzip, iso, jar, mdf, mds, nrg, rar, tar, vcd, z, zip +matchpercentage = 0.8 + +*.r0 = 1 +*.r1 = 1 +*.r2 = 1 +*.r3 = 1 +*.r4 = 1 +*.r5 = 1 +*.r6 = 1 +*.r7 = 1 +*.r8 = 1 +*.r9 = 1 + +[Picture] +rank = 6 +displayname = Pictures +suffix = bmp, dib, dwg, gif, ico, jpeg, jpg, pic, png, swf, tif, tiff +matchpercentage = 0.8 diff --git a/tribler-mod/Tribler/Category/filter_terms.filter b/tribler-mod/Tribler/Category/filter_terms.filter new file mode 100644 index 0000000..4d50f7b --- /dev/null +++ b/tribler-mod/Tribler/Category/filter_terms.filter @@ -0,0 +1,3903 @@ +*adult +*fuck +*gay +*lesbian +*porn +*sex +*shemale +*slut +*sperm +*tits +*whore +*xxx +12creampie +18plus +18yo +20yo +2girls +3er +3gp +3some +3way +3zum +4heather +4sexdates +69s +abgefickt +abgespritz2 +abgespritzt +abgewichst +abpritzen +abspritzen +abspritzen2 +abspritzer +abuse +accidental +action +adel miller +adriana +adrianna +aduld +adultcams +adultfriendfinder +adultgames +adultlinks +adultmatch +adults +adultsex +adulttv +advertenties +afro +afscheiding +aftrekken +agustina +akiba +akira +alba +albanian +alektra +alektra blue +alex +alex divine +alexa +alexandra +alexandra18 +alexis +alexis silver +alicia rhodes +alina +alisha +alison +aliyah +allanah starr +alleslutscher +allhotgirls +allysin embers +alolita +alsscan +alysha leigh +alyssa +alyssa chase +amanda +amanda dawkins +amanda white +amanda3 +amant +amante +amater +amateur +amateuraufnahm +amateurbloot +amateurchick +amateurchicks +amateurcouples +amateurcreampi +amateurfick +amateurgirl +amateurgirls +amateurhoer +amateurhoeren +amateurhoertje +amateurhoertjes +amateurin +amateurkut +amateurkutje +amateurkutjes +amateurlesben +amateurmeisjes +amateurmovies_ +amateurnaakt +amateurpics +amateurporn +amateurprostitue +amateurprostitutie +amateurs +amateursex +amateursexfilm +amateursexfilmpjes +amateursexfilms +amateursexfotos +amateursexvideo +amateursletjes +amateurtieners +amator +amatoriale +amatuer +amature +amber +amber michaels +amber rayne +amor +amore +ampland +amputee +amy +anaal +anaallikken +anaalneuken +anaalridder +anaalsex +anaalvingeren +anabel chong +anal +analcreampie +anale +analed +analesex +analfick +analfisting +analfuck +analgirl +anally +analneuken +analnow5 +analpassion +analthe +anderson +andrea +andrea spinks +anetta keys +anette +angel eyes +angel long +angela +angelica +angelica sin +angelina +angespritzt +angewichst +angewixt +angie +angie george +angus +anika +animalporno +animalsex +animalsexcom +animalsexverhalen +anime +anita +anita crystal +anja +anjali +anna +anna malle +anne +anneke +annika +antistress +antonia deona +antwan +anulingus +anuslikken +anuslikker +anusrijder +anyone but my husband (1975).avi +anywebcam +aoki +apodacab +arab +arabian +archieven +archiv +argentina +argentine +argentino +ariana +ariana jollee +army +arsch +arschbesamung +arschdildo +arschfick +arschficken +arschfickerin +arschgeil +arschspritzer +arse +ashley +ashley blue +ashley long +asia +asia carrera +asiagirl +asian +asianbabecams +asianbabes +asianlolita +asianporn +asianporno +asians +asiansex +asiansexyshemales +asianteen +ass +ass2 +assasian +assdoggy +asses +assess +assfisting +assfuck +assfucked +assfucking +asshole +asslick +asslickgesichtsbesamu +assmovies +assparede +asss +asturbating_in +audrey hollander +aufblasbarer +aufgenommen +autorally +autosex +autostoelen +ava divine +ava vincent +avalon +avnads +awesome +awetdream +ayana angel +aydemir 
+aziatische +aziesex +azlea +baarmoederverzakking +babe +babebody +babepics +babes +babewatch +babysitter +backdoor +backside +badboysfilms +badezimmer +baffen +baggett +bakire2002 +balkon +balls +balmoral +banana +banane +bananen +bang +bangbros +bangbross +bangbus +bangbuscom +banged +banging +bangladeshi +banned +barbara summer +barbie +bare +barebacking +barefoot +barely +barquisimeto +barranquillera +barsinghausen +bastard +bath +bathing +bathroom +bathtub +bbs +bbw +bdsm +bdsmfilms +bdsmmovies +bdsmpagina +bdsmshop +bdsmstartpagina +bdsmverhalen +bdsmvideosnet +bdsmzaken +beach +beachgirls +beastiality +beavis +become +bed +bedroom +bedtime +beefy +beffen +behaarde +beine +bejaardenseks +bejaardensex +bekendbloot +bekijksex +bekommt +belgian +belgischeporno +bella +bella starr +belladonna +bellydance +benassi +benikgeil +beritten +berlinerin +besoffen +bess +bestialiteit +bestiality +besuch_beim_na +bethroom +betty +bev cox +bianca +bianca black +biatch +bibibi +bichon +bidet +big titted +bigboobs +bigcock +bigcocks +bigdick +bignaturals +bigtits +bigtitscom +bigtitsroundasses +bikini +billen +billige +bimannen +bimbo +bimbolinks +bimbos +bimbosex +binken +birape +bisex +bisexcam +bisexfilms +bisexual +bisexueel +bisexverhalen +bitch +bitches +bitchvirtual +bitchy +bitttchhhhhh +bizar +bjsextremesex +blackgirls +blackporno +blacksex +blas +blase +blasen +blasende +blasi +blasluder +blass +blasschlampe +blindfolded +blond +blonde +blonde1 +blonde2 +blonde3 +blonde4 +blondegirl +blondes +blondi +blondie +blondine +blondje +blondsweety +blondwomen +blondy +bloot +blootje +blote +blotevrouwen +blow +blow job +blowing +blowjo +blowjob +blowjob1 +blowjob10 +blowjob12 +blowjob2 +blowjob3 +blowjob4 +blowjob5 +blowjob6 +blowjob7 +blowjob8 +blowjob9 +blowjob_trixie +blowjobclips +blowjobcum +blowjobcute +blowjobgeil +blowjobmeine +blowjobmovie +blowjobmovies +blowjobs +blowjon +blowlight +blown +blowputa +blows +bobbi eden +bobbie +bocca +body +bodypanty +bodystocking +boerensex +boerinnen +bolas +bondage +bondagescape +bondagewizard +bonnie simon +boob +boobed +boobies +booble +boobs +boobster +boobstight +boogo +boots +booty +bootytalk +bootytorrents +bordelen +borst +borsten +borstenforum +borstjes +botergeil +botergeile +boxing +boyfriend +boyfriends +boyfuckmom +boyz +bra +brandi +brandibelle +brasil +brasilian +brazil +braziliaanse +brazilian +brazzers +breast +breasted +breasts +breezahchicks +breezersletjes +briana banks +bridget +brigitte +brinquedinho +britney spears +brittany andrews +brittney skye +brooke +brooke alexander +brooke haven +brunete +brunette +brunettes +brutal +brutaldildos +brutalviolence +bubble +bud +buitenbloot +buitensex +buitensexlocaties +buitensexplaatsen +bukake +bukkake +bukkakeshop +bukkakeshopcom +bunny +bunnyteens +busen +bust +busty +bustysolo +butt +butterflykisse +buttfuck +buttfucked +buttfucking +butthead +butthole +buttman +buttplug +butts +buurjongen +buurvrouw +byrne +c700 +cabalgata +cachonda +calabria +calentita +caliente +calientes +cambabe +cambang +camchat +camcrush +cameltoe +cameltoes +cameras +cameron +camgirl +camgirls +camgirlyoung +camhoer +camhonk +camilla +camjockva +cammen +campingsex +cams +camsex +camslet +camsletjes +camsletten +camwithher +canaal +canaaldigitaal +canal +canaldigitaal +canaldigital +canalplus +candi +candice +candice paris +candid +candy +carly +carmel +carmen electra +caroline +carrot +carsex +cartoonsex +carupaneras +casanova +casera +cassandra +cassia riley +casting +castingsex +catalina +cathy barry +catsuit 
+caught +cearense +celeb +celebrities +celebrity +celebs +celeste star +celia +centerfolds +chachi +chandigarhdicke +changingroom +channel +chantal +chantelle stevens +charisma +charlestonkleding +charlie +charlie holays +charlie laine +charlotte +charming +chatbabe +chathonk +chatrooms +chatsex +chatten +chattijd +cheating +cheatingnice +cheerleader +chenfick +cherry +chica +chicca +chick +chicks +chickslole +childfuck +childporn +chilena +chinagirls +chineessex +chinese +chinesische +chix +chloe dior +chris charming +christina +christina brooks +christine +christines +chubby +chupando +chupgirl +cindy crawford +ciuccia +claire +claires +clara +claudia +claudia rossi +cleopatra +cliphunter +clit +clitclub +clitlikken +clitoride +clitoris +clitorispiercing +clitorus +clits +closeup +clubclubseventeen +clubseventeencom +cn_200 +cock +cocked +cockman +cockring +cocks +cockselfmade +cocksucker +cocksucking +cocktail +cocktails +cocktales +coed +coeds +cogiendo +cojida +cojiendo +coke +colombia +colombiana +comicsex +coming +communicatietraining +como +compilationamateur +compilationmasturbation +condom +congolese +connelly +connie +conoces +contactadvertenties +contacten +contakt +contractions +convulsions +coolios +copines +coppia +coral +cornelia +corrida +corset +cosplay +couch +couple +couplefreundin +couples +coupleyoung +courtney simpson +cowgirl +cowgurl +cowlist +crave +cream +creamasia +creamed +creampie +creampiecom +creampied +creampies +creamy +cristiana +cristina +cristina bella +croatian +cuckold +cucumber +culito +culo +cum +cum swap +cumblast +cumblasting +cumbot +cumbria +cumbustion +cumdrinking +cumface +cumfacial +cumfacialmovie +cumfacialmovies +cumfacials +cumfiesta +cuming +cumm +cumm swap +cummed +cumming +cummovie +cummovies +cumms +cummz +cumn +cumnina +cumpilation +cumpilationcum +cumpiled +cums +cums7 +cumshot +cumshot01alessia_marcuz +cumshot2 +cumshotangie +cumshotblonde +cumshotcomp_1 +cumshotfeel +cumshotfilms +cumshotmovie +cumshotmovies +cumshots +cumslut +cumsluts +cuni +cunny +cunt +cuntlicker +cursus +cute +cutie +cuties +cuty +cutypussy +cyber +cyberseks +cybersex +cybersex2 +cynthia +cytherea +cytheria +dafreexxxmovies +daisy rock +dames +danielle louise kelson +danish +daphne rosen +daria glower +darla crane +daryn +dasha +daten +datewereld +dayana kill +debbie +debbie tomlins +debora +deborah +deborandome +deep +deepest +deepthroat +deepthroatblowjob +deepthroating +deepthroats +delante +delilah stone +denise +desi +desire +despues +destiny deville +destiny st claire +deutsche +devasso +devinn lane +devon +dianah +dianamontage +dianapost +dianita +diaper +dick +dick1 +dicke +dicken +dicker +dicklaugh +dicks +dienstmeid +diep +diepanaal +dierensex +dierensexfilms +dierensexverhalen +dieresex +diersex +dike +dikelul +dikke +dikkedames +dikkelul +dikkelullen +dikketieten +dildi +dildo +dildobusty +dildofick +dildofuck +dildohot +dildoing +dildomachines +dildophillipienisch +dildos +dildoserbian +dildoserver +dildoservercom +dildosex +dildoslet +dildoslip +dildoslut +dildowife +dineren +dirty +disciplin +discogirls +discomeile +dixie +dizney +dochter +doctor adventure +doctors adventures +doggie +dogging +doggle +doggy +doggy1 +doggystile +doggystyle +dogsex +doityourself +dolitha +dolithas +doll +dolls +domai +domina +dominica leoni +dominicana +donenad +dong +donna derriere +donna marie +dora venter +dorm +doubleblow +douche +downblouse +dp +drachtige +drag queen +dreamgirl +dreamgirlz +dreamteens +dreier +dreilochfick +dressing +dripping +dronkenvrouwen 
+dru berrymore +drugsverslaving +drunksexorgycom +dubbel +dubbele +ducha +dumpert +dungeon +durchgefickt +durrty +dusche +duschen +dutchgangbang +dutchgirl +dutchliteroticacom +dutchpussy +dutchroxy +dutchstuds +dwerg +dwergen +dwergenseks +dyanna lauren +eating +ebony +ebonylive +ebonysex +edc +eden +ehefotze +ehefrau +ehepaar +eier +eigenstart +eikel +ejaculatie +ejaculating +ejaculation +ejaculations +elektricitiet +elena +elisabeth +elizabeth lawrence +elle brook +elwebbs +emanuelle +emily marilyn +emmanuelle +empornium +endometriose +enema +enjoying +enjoys +enormepikken +entjungfert +erection +erica +erika +ero +erocams +erochat +erodating +erolips +erolog +eros +erotic +erotica +eroticabeurs +eroticanu +eroticaonline +eroticaprive +eroticlive +erotico +erotiek +erotiekop5 +erotiekwinkels +erotik +erotikmesse +erotisch +erotische +erotischemassage +erotischeverhalen +erotischeverhalencom +erotishe +eroverhalen +erozone +escort +escorts +escortservice +escortsletje +esposa +estelle +euro angel +euromillions +eutube +eva angelina +eva vortex +eve +everysexhasits +evgeniya +executies +exfrau +exgirlfriend +exhib +exhibitionist +exibicionista +exotic +exotische +expert +extreem +extreemsex +extrem +extreme +extremesex +exwife +faccia +facefuck +facefull +faceshot +facial +facialgirls +facialized +facials +facialsusana +facialthreesome +familie +familiesex +famous +famouspornstars +fanny +fart +farts +fatchick +faustfick +favos +faye dixie +faye rampton +feesten +feet +feetjob +felecia +felix20 +fellatio +felsen +female +females +femalworld +femdom +femdomcity +femdomlinx +femjoy +femke +femme +ferraz +fetish +fetishbank +fetishes +feuchte +ffm +fichen +fick +fickarsch +ficke +ficken +fickende +fickendezwei +fickenmalaysian +fickfotze +fickmaschine +fickquick +ficksau +fickschlampe +fickt +fiesta +fiki +filipina +filipinas +filipino +filippina +filmpje +filmpjes +finder +finger +fingere +fingered +fingerfick +fingerfuck01 +fingerfucked +fingering +fingeringcouple +fingern +fingers +fingerspielela +fingert +fiorela +firstanal +firsttime +firsttimeanal +fishnet +fishnets +fissesangen +fisten +fistfeest +fistfuck +fistfucking +fistfuking +fisting +flabber +flamoes +flasche +flaschenfick +flasher +flashing +flashingjewish +fleshlight +flex +flexy +flikken +flinger +flirk +flirt +floral +flower tucci +follada +follando +foot +footjob +footjob1 +footjobbrazilian +footjobs +fotogallerie +fotografie +fotos +fotosexverhalen +fototoestel +fotovanmijnex +fotoverhalen +fotze +fotzenbesamung +fourfingerz +foursome +foxy +fran lord +francaise +francesa +francine +frankfurt +frankie +fraportgrenzsc +frauen +freaksofcock +freeanimalsex +freegaysexcom +freegayspace +freelolitas +freemovies +freepics +freepornmovies +freesex +freesexcam +freesexcams +freesexchat +freesexcom +freesexfilm +freesexfilms +freesexmovie +freesexmovies +freesexpics +freesexx +freesexxx +freeteens +freetrailers +freewaygesichtsbesamu +freien +fremdgefickt +fremdgepoppt +frenchup +freundin +freundinn +freya lee +frise +frota +ftvgirls +fucing +fucksession +fuckt +fucktango +fucktime +fuckunbelievable +fujiko kano +fukalot +fuked +fun4two +funberry +funkybabes +funnel +funpaleis +fusker +g spot +gaaaaaaaaaaaaay +gabi +gabriella banks +gaby +gag +gaggerskates +gal +galleries +gamiola +gang +gangbang +gangbangbus +gangbanged +gangbangs +gangrape +ganny +gardameer +garnalencocktail +gasman +gasmus +gaybelgie +gayboys +gayfilms +gayfotos +gayporn +gays +gayscat +gayseks +gaysex +gaysexcam +gaysexcams +gaysexplaatjes +gaysexverhalen 
+gayshop +gayshorties +gaysite +gaysites +gaysluts +gaystartpagina +gaystories +gbmwt +geblasen +gebumst +gedikoglu20 +gefickt +gefilmt +gefingert +geil +geil2 +geile +geilejongen +geilejongen2 +geilejongens +geilekutjes +geilen +geileplaatjes +geilepussysau +geiler +geiler_blowjob +geiles +geileverhalen +geileverhalentk +geilevrouwen +geilewijven +geilheid +geilheit +geiligheid +geill +geilste +geilster +geinrapers +geisha +geitenoog +geleckt +gen padova +geneukt +geneukte +georgette neale +georgie +georgina +georgina smith +gepiercte +gepijpt +gepisst +geritten +germanfuck +gertibaldi +gesaugt +geschapen +geschoren +gesicht +gesichtsbesamu +gespierd +gespreizt +gespritzt +gevoelige +gewichst +gia paloma +gianna +giant +gigolo +gijl +gilly sampson +giovanni +girl +girlblowjob +girlfreind +girlfriend +girlfriends +girlfucking +girlmilking +girls +girlsanal +girlslikegirls +girlsprive +girly +girman +gladde +gloryhohle +gloryhole +glunderende +gluren +gluurder +gluurders +goddess +godiva +goldenshower +golfa +good_suck +gorgeous +gostosa +gothicgirl +gourmande +gozando +grande +grannies +granny +gratis +gratiscam +gratiscams +gratischatten +gratisdierensex +gratisex +gratishomofilms +gratislangesexfilms +gratismovies +gratisneukfilmpjes +gratisneukfilms +gratisneukplaatjes +gratisomasex +gratisplaatjes +gratisplassex +gratisporno +gratispornoclips +gratispornofilm +gratispornofilmpjes +gratispornofilms +gratispornoplaatjes +gratispornovideos +gratispornovideosbe +gratisseksfilmpjes +gratisseksfilms +gratisseksverhalen +gratissex +gratissexcam +gratissexcams +gratissexchat +gratissexcontact +gratissexcontacten +gratissexdating +gratissexfilm +gratissexfilmpjes +gratissexfilms +gratissexfoto +gratissexfotos +gratissexpagina +gratissexplaatjes +gratissexverhalen +gratistieners +gratistienersex +gratistrailers +gratisverhalen +gratiswebcamsex +gratisxxx +greece +greekvsenglish +grieksesex +grilfriend +grlfrnd +groentehoer +groepseks +groepsex +groepsexverhalen +groepssex +grootste +groteborsten +grotelullen +grotetieten +groupie +groupsex +gspot +guadalajara +gummipuppe +gummipussy +gumshot +gung +gurke +gurken +gurkenfick +gusta +hailey +hailies +hairbrush +hairtrigger +hairy +haley +haley paige +haley scott +halsema +hamster +hanai +handarbeit +handjob +handjobgina +handjobs +handjop +handycam +hangtieten +hannah harper +hardcore +hardcorekanjers +hardcorelolita +hardcoreplaatjes +hardcoresex +hardcoreshe +harde +hardeporno +hardeseks +harige +harigepoes +harmony +harmony hex +hart +hathaway +having_fun_wit +headfuck +headjob +headstocking +heather +heels +hegre +heidi +heimlich +heiss +heissen +heisser +heisses +heleen +helena +hella +hentai +hermano +hermaphrodite +herself +hete +heterosexverhalen +hetesletjes +hillary scott +hilton +hindoe +hinten +hinten2 +hintencream +hintern +hisfirstgaysex +hittin +ho +hobbyhoer +hobbyhoeren +hobbysex +hoe +hoer +hoeren +hoerengids +hoerentest +hoerland +hoersex +hoertje +hoertjes +hoes +hogtied +hole +holes +hollandse +hollandsesex +homefuck +homefucking +homemade +homemadeporntorrents +homemadeporntorrents2 +homemades +homesex +homesluts +hometape +homevideo +homevids +homeviedeo +homo +homoboys +homochat +homodating +homoescort +homoplaatjes +homoporno +homosex +homosexfilmpjes +homosexplaatjes +homosexverhalen +homoverhalen +hondenneuken +hondentrimmen +hondenzaad +honey +honeymoon +honeysuckle +hooker +hooker2 +hookers +hookers4you +hookerscom +hooters +hopkorrels +horney +horniest +horny +horny_lesbians +hornycrocodile +horse +horseback 
+horsefuck +hose +hotelfick +hot mom +hotelroom +hotelschnecke +hotescort +hotgirl +hotgirls +hotlovers +hotmarijke +hotswingers +hott +hotteens +hotter +hottest +hottestlist +hottie +hotties +hottiewatch +hotwife +housewife +housewives +hubbies +hubby +huge_dildo_in_ +hugecocks +hugh +huisvrouw +huisvrouwen +huisvrouwensex +huisvrouwtjes +hulst +humorsex +hump +hure +husbands +hustler +hustlercom +hustlermagazine +hustlertv +ideepthroat +ideepthroatcom +idols +inari vachs +incest +incestporno +incestsex +inclusive +incubus +indian +indonesian +industrion +ingetrokken +ingoio +innocent +inserted +insertinons +insertion +integratie +interacial +interraciaal +interracial +intiem +invalide +iranerin +isabel ice +isabelle +ishotmyself +ispycameltoe +itdadsday1 +jacking +jackoff +jaculation +jacuzzi +jada fire +jade newman +jamie brooks +jamie huxley +jamie lynn +jammu +janca +jane +jane darling +jane whitehouse +janina +janine +japanesedildo +jasmine +jasminlive +jassie +jean val jean +jeanie +jelly +jem stone +jenaveve +jenaveve jolie +jenn +jenna +jenna jameson +jenni +jennifer +jennifer luv +jenny +jenny loveitt +jerk +jerking +jerkoff +jerks +jessica +jessica love +jessucker +jessy +jezebelle bond +jill +jizz +jizzboat +jizzbombcom +jizzed +jizzhut +jizzhutcom +jngfrau +joanna angel +joanna jet +jobchick +jodie +joelean +jong +jonge +jongemeisjes +jongens +jongesex +jordan fleiss +jordan haze +jtm +jugends +juggs +juicing +juicyjuice +julia +julian +julie +julie night +julie robbins +julieta +juna +junge +junges +k4 +kaal +kaalgeschoren +kadena +kale +kalepoes +kamagra +kamasutra +kamasutrabeurs +kanaalkiezer +kanaalxxx +kanalen +kanalenkiezer +kardashian +kardinaalsmuts +karen wood +kari +karim +karima +karina +karina alexa +karina clarke +karla romano +karmasutra +karoline +kasumi +kat +kat varga +kataclizm +kate +kate frost +kates +katha +kathi +kathy +katie morgan +katja +katja kassin +katjas +katrina +katsumi +katy +katy caro +keelneuken +keiharde +keira +kekilli +kellemarie +kelly +kelly bell +kendra +keyra +kick +kiki +kim holland +kimberly kane +kimholland +kindgirls +kinky +kinky kerry +kinkydating +kinkygirls +kinkyroly +kinkysex +kinkytijd +kira +kira kener +kirsten +kissing +kisslolita +kitten +kitty +klaar +klaarkomen +kleedkamer +kleindochter +kleine +kleineborsten +kleinekutjes +kleinetieten +klitjes +klitoris +klitpiercing +knippen +koika +kont +konten +kontje +kontjeneuken +kontjes +kontlikken +kontneuken +kontneukende +kontplaza +kontsex +kontzuigen +kopen +korfbalgays +koridallos +korporaal +korte +kreisverkehr +kreme +krisciel +krystal +kunstkut +kunstvagina +kurzfick +kut +kutcam +kuthaar +kutje +kutjes +kutlikken +kutlikkers +kutsap +kutten +kutzuigen +kyla +kylie +kym morgan +l_gets_fucked_ +laatjerukken +lacey love +lacie +lacie heart +lactacting +lactating +ladies +ladung +ladyboy +laetitia +laid +laine +lana croft +lana li +lane +lange +langefilms +lanken +lanny barby +lap +lapdance +lapiova +lara +laren +lass +lasses +latex +latexsex +latina +latincouple +latino +laura +laura giotto +laura hermenson +laura michaels +lauren +lauren phoenix +layla jade +lea walker +leabian +leah +leah caprice +leather +leatherladiesde +leatherman +leathervideos +lecca +lecken +lecker +leckt +leeuwarden +legs +lekker +lekkere +lekkerekutten +lekkeretieten +lekkerklaarkomen +lekkerneuken +lekkershowen +lemayzing +lerares +lesbe +lesben +lesbenspiele +lesbian +lesbianas +lesbianpink +lesbians +lesbianswebcam +lesbisch +lesbische +lesbischsexstart +lesbisex +lesbo +lesbos +lesbosex 
+leslie taylor +leticia +lewdmatures +lexie marie +lexmond +lezcuties +lezley zen +libby ellis +libido7 +lick +licked +lickherassoffmydick +lickherassofmydick +lickin +licking +lickingcool +licks +liebeskugeln +liebt +liefdesring +liefhebbers +lieke +lieske +likken +lilliputsex +lillys +lily +lily thai +lindsay +lingerie +linh +linsey dawn mckenzie +lips +lipstick +lisa +lisa daniels +lisa marie +literotica +literoticacom +litova +livecam +livecammen +livecams +livesex +livesexcam +livesexcams +livesexfeed +livesexlist +livesexlistcom +livesexonline +livesexparty +livesexshow +livesexshows +liveshow +liveteens +livewebsex +liz +lkkr +lochgeil +loira +lola +lolita +lolitaatjes +lolitacam +lolitafuck +lolitalive +lolitampegs +lolitaplaatjes +lolitas +lolitasex +loliti +lollipop +lolly badcock +lombardi +londonamateur +long dong silver +lords +lorna lace +lory +lotion +louise evans +louise hodges +lovem +lube +lubricando +lucia +luciana +lucky +lucy gresty +lucy law +lucygirl +luder +luigi +luisa +lul +lulita +lullen +lunettes +lupavideos +lust +lutschen +lutscht +luxi +lyndsey love +lynn +lynn stone +lynsey +madame sindi +mafia +maid +maids +maidstone +majella shepard +male +mamada +mamadas +mamadora +mamando +mandingo +mandy +mandy bright +mania +mannen +mannengelul +mannenplaatjes +mannenpret +manon +manuela +marcela +margit +margot +maria +mariana +marianela +marianna +maribel +marie +marie louise +marie luv +marina +marketa +marktpenetratie +marquetta jewel +martina +maschinenarsch +masiert +masochisme +mason +massage +massaged +massages +massaging +mast +masterbate +masterbates +masterbatig +masterbating +masterbation +masterberen +mastru +mastrubate +mastrubation +mastubandoce +mastubiert +masturb +masturba +masturbacion +masturbandome +masturbate +masturbated +masturbates +masturbatie +masturbating +masturbation +masturbazione +masturbeertjes +masturberen +masturberende +masturbieren +masturbiert +masturbire +matina +matsushima +maturbation +mature +matures +maturesex +maturesexcom +mauritian +maus +mckenzie lee +mcumshot +mecocksucking +medellin +meester +meesteres +meesteressen +mefuck +meid +meiden +meiner +meisje +meisjes +melanie +melisa +melissa black +melissa lauren +mellons +mercedez +merel +messy +mexican +mexicana +mexicano +mia stone +michelle +michelle b +michelle thorne +microbikini +microkini +midget +mierlo +mika tan +milchkuh +milena +milf +milfcom +milfhunter +milfs +milfseeker +militairen +milked +milker +milking +milky +mindy +minefootjob +miranda +miriams +mirjam +mirjams +miss +missbunny +missionary +missy +mistreated +mistres +mistress +misty +miyah +mmmmm +moby +model +models +moeder +moeders +moese +moglie +mohootje +mokkels +molige +moms +mondneuken +moni +monica +monica sweetheart +monika +monique +monique alexander +monique covet +monsersofcock +monster meat +monstercock +monstercocks +monstertits +mooiebillen +mooievrouwen +moran +moremoms +morgane +morrigan hel +motel +motel69star +mouth +mouthful +movieprivate +mrbigdickshotchickscom +muenchen +muff +mund +mundfick +muschie +muschifingern +mya diamond +myself +mysexgames +mystique +naakt +naakte +naaktevrouwen +naaktfoto +naaktfotograaf +naaktfotografie +naaktfotos +naaktstrand +nachbarin +nachtcams +nachtclub +nachtparel +nacked +nackt +nadia +nadine +nahaufnahme +nailed +naked +nando +naomi +nasty +nat +natalia +natalie +natalie heck +natalie woods +natasha +natasha dolling +nate +natte +nattehoer +nattehoeren +nattekut +nattekutjes +nattekutten +nattepoes +nattepoesjes +nattespleet +natural +naturals 
+naturewonderwoman +naturisme +naturist +natuursteen +naughty +naughtyteens +naugthy +nederlandse +nedersex +neger +negerinen +negerinnen +negerinnensex +negerlul +negersex +nella +neuk +neukadressen +neukcontacten +neukdate +neukdoos +neuken +neukende +neukertjes +neukfilm +neukfilmpje +neukfilmpjes +neukfilms +neukfoto +neukfotos +neukgrot +neukhoer +neukhoeren +neukjehond +neukkut +neukkutjes +neukplaatje +neukplaatjes +neukreet +neukshow +neukshows +neuksletjes +neuksletten +neukstandjes +neukt +neukte +neukvagina +neukverhalen +neukvideo +neukvideos +neukwijf +neukwinkel +nicegeil +nicola holt +nicole +nicole sheridan +nightfly +nightfuck1 +nightie +niki blond +nikita +nikita devine +nikki +nikki hunter +nikki sands +nikki sun +nikky andersson +nimfomaan +nimpho +nina +nina hartley +nip +niple +nippel +nippelalarm +nipple +nipples +nkzaadslikken +nobull +noorse +nordhessen +nubiles +nuda +nude +nudedutchcelebs +nudehot_anal_fuckc +nudes +nudism +nudist +nudistfriends +nudistparadies +nudists +nurse +nursecouple +nurses +nuts +nuttenrasur +nylon +nylonfotze +nylons +nympho +nymphomane +nyomi marcela +oaxaca +offilia +oiled +oiling +oilwrestling +oily +oke_really_goo +olderbimbo +olderholland +oldies_but_gol +olga +olivia o'lovely +olivia saint +oma +omahoer +omakijkdoos +omapijpt +omaplaatjes +omas +omasex +omasexplaatjes +omasexs +omasexverhalen +omaslet +onanieren +ondeugende +ontop +opa +opwindend +oraal +oral +orale +oralgen +oralsex +orgamus +orgasm +orgasmbest +orgasme +orgasmen +orgasmes +orgasmic +orgasmo +orgasms +orgasmus +orgie +orgies +orgy +oriental +oudegokkasten +ouderensex +ouderesex +oudewijvensex +outdoor +outdoors +outside +ouwe +ouwehoeren +ozawa +p0rn +paardelul +paardenkut +paardenlul +paardenneuker +paardenneukers +paardensex +pabo +paige +pamela +pantera +panties +pantiescouch +panty +pantyhose +pantyhosesex +parade +pareja +parenclub +parenclubs +parensex +paris +parkeerplaatshoer +parkeerplaatssex +parkeersex +parker williams +parkingsex +parte +partyfuck +partyhardcore +partysex +passion +patricia +patriotic +pecorina +pelicula +penelopex74 +penelopex74suc +penetracion +penetrated +penetratie +penetraties +penetration +penetrationmelissa +penetrationsee +penetrazione +penetreren +penis +penisbot +penispomp +penisring +penisvergroting +penny flame +penthouse +pepinos +perfectshots +perlenvibrator +perversius +peter north +peterspasses +petite +petra +petter +philippine +philippines +photoshoot +pics +pictures +piedina78 +piemel +piemels +piemeltje +piemeltjes +pierced +piercing +pijp +pijpamateur +pijpbeurt +pijpbeurten +pijpen +pijpende +pijpfilm +pijpfilmpjes +pijpfilms +pijphoer +pijphoeren +pijpme +pijpslet +pijpsletje +pijpsletjes +pijpsletten +pijpt +pik +pikzuigen +pimpin +pinay +pinklips +pinkpornstars +pinky +pipe +pis +piskut +pisshunters +pissing +pisssex +pisst +pixie +plaatjes +plaatsen +plas +plaskut +plaspret +plasseks +plassex +plassexfilms +plassexverhalen +playas +playbabe +playbabes +playboy +playboycom +playboytv +playful +playgirl +playgirls +playground +playmate +playmate81 +playmates +please_my_girl +pleasure +pleasured +pleasures +pleasuring +plek +plug +plumper +plumpers +poep +poepen +poepseks +poepsex +poes +poesjes +poland +polandbathroom +polderrape +polish +pompino +pompoarism +ponygirl +pool +poolbest +poolhappy +poolside +poppos +poppy morgan +porca +porcacionas +porgy +pornbabe +pornbabes +pornbig +porncams +pornchick +pornenmeer +pornfilm +pornfilms +porngirl +porngirls +pornhot +pornication +pornmovie +pornmovies +porno 
+pornoamateur +pornoamateurbe +pornoamateurs +pornoamateursbe +pornobloopers +pornochic +pornofilm +pornofilmpjes +pornofilms +pornofoto +pornofotos +pornogerrit +pornokanalen +pornolation +pornolijst +pornolinks +pornomeisje +pornomeisjes +pornomovies +pornopark +pornoplaatjes +pornorip +pornosex +pornosite +pornosites +pornostar +pornostart +pornostartpagina +pornoster +pornosterren +pornostream +pornostube +pornotube +pornoverhalen +pornovideo +pornovideos +pornpass +pornrip +pornstar +pornstarbook +pornstargals +pornstars +porntube +portugal +portuguesa +portuguese +posiert +position +positionblonde +postma +pounded +poundedblowjob +pounding +poutana +pov +pr0n +precum +preggo +preggy +pregnant +pregnantsex +prelolitas +premature +preteen +preteenporn +preteens +preteensex +pretnet +priv +privat +private +privatepornmovies +privates +privatexxx +privatvideo +prive +priveadressen +priveclubs +privedames +privefilms +privehuizen +privesex +privesexadressen +projectvoyeur +pron +pronchic +prono +prostate +prostituierte +prostitute +prostitutes +protesters +public +pump +pumpedcunts +punheta +pureporn +puretna +puss +pussi +pussies +pussy +pussyanal +pussyblow +pussycat +pussycatdolls +pussylicking +pussymy +pussypumped +pussys +pusy +puta +putaria +putas +puttana +pvc +quebecoise +quickie +quicky +rachel rockets +rachel rotten +rachel sykes +ragazza +ramona luv +randi wright +ranzig +rape +raped +rapedscene +rapefoto +rapemovie +rapesex +rapestorie +rasieren +rasiert +rasierte +ratemycameltoe +realbutts +realdoll +rebecca +rebecca love +rebekah dee +rebekah jordan +recital +redcoon +redhead +redheaded +redheads +redhot +redlight +redlips +regio +regiodating +regiosex +reid +reifen +reina leone +reingespritzt +reingesteckt +reiniging +reinundraus +reiten +reiter +reitet +renaissance +renee pornero +renee richards +rica +ridevibrator +riding +ridingcum +riesen +riesenschwanz +rijnwaarden +rijpe +rijpetiener +rijpetieners +rijpevrouwen +rimjob +rimmen +rimming +rimpelkut +rincon +ripresa +rita faltoyano +ritt +riya +rocco +rocco siffredi +roddick +rokende +rokje +rokjes +ron hightower +ron jeremy +ronny +rosa +rosi +rough +roxanne +roxanne hall +roxxxy rush +roxy +roxy deville +roxy jezel +roxy rare +rub +rubbed +rubbing +rubs +rukken +russan +russian +russianteen +russianteens +saana +sabrina +sado +safada +safadinha +saft +sahara +sahara knite +sally +salope +samantha +sambal +sammi jayne +sandie caine +sandra +sandra romain +sandra russo +sandramodel +sandwich +sandy +sapphic +sapphicerotica +sapphire raw +sara nice +sara stone +sarah +sarah young +sarahs +sasha +satifaction +satisfaction +sauna +savanna samson +savannah gold +sax +sborra +sborrata +scandal +scandals +scandinavian +scarlet haze +scat +scatsex +schaamhaar +schaamhaarkapsels +schaamlippen +schamlose +schapenneuker +schatzsuchearschficken +schaufeli +schaukelnblond +scheren +schizzetta +schlafender +schlampe +schlampen +schlampenknuts +schlauch +schlauchfrau +schlucken +schluckt +schmecker +schnecke +schnee +schnitte +schoolgirl +schoolsex +schoonmoeder +schoonmoedersex +schoorsteenkappen +schreit +schulmadchenreport +schulmaedchen +schuurman +schwanger +schwanz +schwanzgeilehu +schwester +schwimmbad +scopa +scopata +screwed +screwing +scrotum +searchbigtits +secretary +seduction +seductions +seeker +sekfilms +sekret +seks +seksclip +seksclipjes +seksclips +seksclub +seksfeest +seksfilm +seksfilmpjes +seksfilms +seksfims +seksmeisjes +seksplaatjes +sekssauna +seksspeeltjes +seksueel +seksverhalen +seksverhalenbe +seksvideo 
+seksvideos +seksvrouwen +selbst +selbstbefriedig +selena +selfmade +selfsuck +seniorensex +sensual +sensuous +serena marcus +serjio +service +seuxy +seventeen +seventeencom +seventeens +seventeensex +seventeenvideo +severina +sex1 +sexadres +sexadressen +sexadvertentie +sexadvertenties +sexafraid +sexartikelen +sexbabe +sexbegin +sexbejaarden +sexbijbel +sexbios +sexbioscoop +sexbladen +sexbom +sexcam +sexcamcafe +sexcamera +sexcamgirls +sexcammen +sexcams +sexcams4free +sexcartoon +sexcartoons +sexchat +sexchatten +sexclip +sexclipje +sexclipjes +sexclips +sexclub +sexclubs +sexcontact +sexcontactadvertentie +sexcontacten +sexcontakt +sexdate +sexdaten +sexdates +sexdating +sexdatingclub +sexdatingworld +sexdex +sexe +sexen +sexfeest +sexfeesten +sexfilm +sexfilmpjes +sexfilmpjesbe +sexfilms +sexfoot +sexfoto +sexfotos +sexfotoverhalen +sexfun +sexgalleries +sexgallerij +sexgame +sexgames +sexgein +sexgratis +sexhuis +sexhumor +sexhumorpagina +sexi +sexiest +sexjobs +sexkanjer +sexkanjers +sexkids +sexklinik +sexkristi +sexlap +sexlijn +sexlijnen +sexlingerie +sexlinks +sexlokatie +sexlusten +sexmaxx +sexmaxxcom +sexmeiden +sexmeisjes +sexmetdieren +sexmokkels +sexmovie +sexmovies +sexo +sexophetnet +sexorgie +sexorgies +sexorgy +sexoverzicht +sexpagina +sexpaginabe +sexpaginacom +sexpaginas +sexpaleis +sexparkeerplaatsen +sexparty +sexpics +sexplaatje +sexplaatjes +sexplaatjescom +sexplezier +sexpoezen +sexportal +sexposters +sexpuppen +sexs +sexsauna +sexservicegids +sexshop +sexshops +sexshow +sexshows +sexsite +sexsites +sexsnol +sexspeeltjes +sexspel +sexspellen +sexspelletjes +sexstandjes +sexstart +sexstartkabel +sexstartpagina +sexstartpunt +sexsuck +sextape +sexteens +sextheater +sexthumbs +sextien +sextieners +sextoon +sextooncom +sextoons +sextorrents +sextoys +sextrailers +sextube +sexual +sexualerziehun +sexueel +sexverhaal +sexverhaaltje +sexverhaaltjes +sexverhalen +sexverslaving +sexvideo +sexvideos +sexvidio +sexvilla +sexwebcam +sexwinkels +sexxx +sexxxstart +sexxxx +sexy +sexydane +sexydesktop +sexymasturbati +sexythea +sexyworld +sexyy +sexzsene +shag +shane hunter +shannie +shannon +sharon wild +shave +shaved +shaven +shaving +shay +shespot +shiny +shirtless +shit +shower +showering +showershaved +showing +shows +shy +shyla +siffredi +silicon +siliconen +silk +silkecumshot +silvia +silvia saint +silviawet +simona +simone +simone claire +sindee coxxx +single +sinterklaasmutsen +siteredinterracialcom +sjarapova +sjemale +skank2 +skirt +slaaf +slagroom +slamming +slanke +slavin +slavinnen +sleazydream +slet +sletje +sletjes +slettebakken +sletten +slettenhaar +slikken +slipbespritzun +slipjes +sloerie +sluts +slutsselbstbefriedi +slutty +smalltitsmix31 +smfilm +smfilms +smfoto +smhoer +smhoeren +smooth +smoothy +smothering +sms +smsporno +smssexdates +smssexdating +smstudio +smut +smutfun +smutstar +smutstarorg +smutvibes +snatch +snel +snelsex +snol +snolen +snollen +snuff +snuffmovie +snuffx +snuffxcom +soaked +sockjob +sodo +sodomie +sofa +softcore +softporn +solarium +solo +soloblasespielchen +somer +sonia +sophie +spanishfly +spanked +spanking +spannendeverhalen +spears +speedybee +speelfun +speksteen +sperma +spermabank +spermabesuch_beim_na +spermadonor +spermadusche +spermafest +spermakut +spermaladung +spermalehrgang +spermarausch2 +spermarausch3 +spermaschlacht +spermaslet +spermasletten +spermaslikken +spermaslikkers +sperme +speurders +spice +spies +spijkerkwartier +spiritual +spit +spleet +spleetjes +sportsex +spread +spreaded +spreading +springsteen 
+spritz +spritze +spritzen +spritzende +spritzer +spritzig +spritzkonzert +spritzt +spuiten +spuitend +spuitende +spuitenenslikken +spuitenenslikkenbnn +spuitkut +spuitorgasme +spunk +spur +spycam +spying +squad +squirt +squirten +squirter +squirting +squirting101 +squirts +squirtsamateur +squirtsikis +stabbin +stacey silver +stacy +stadskanaal +starr +startkabel +startsex +startsexpagina +startvagina +startvaginacom +startvaginacum +startvaginatv +steenbergen +steenstrips +steenwijk +stefan +stefani morgan +steffi +steiger +stel +stephanie +stepmother +sterksel +sterling +stewardess +stiefelschlamp +stiekem +stiekum +stieltjesplein +stiff +stiffy +stijve +stijvetepels +stilley +stimulation +stocking +stockings +stomorgyl +stoot +stopper +straatmeiden +strakke +strand +strandsex +strap +strapfilmed +strapon +strapon_1 +strapon_2 +strapon_4 +streaking +streetblowjobs +streptease +string +strip +stripclub +striper +stripper +stripping +strips +striptease +stripteases +stripteaseshow +striptese +stroke +strokes +stroking +strumpfhose +strumpfhosen +studente +studentin +studentsexparties +studentsexparty +stuffed +styleanal +stylez +submissive +succub +suck +suck2 +sucked +sucker +suckin +sucking +sucks +sue +suf +sufgeneukt +sugababes +sugarbabes +sunbathing +sunny lee +sunny leone +sunrise adams +supergeil +supermodel +supernippel +supersex +supplements +surprise +susan +susana +susi +susie tamworth +suzana +suzie best +suzie wildin +svetlana +swaffelen +swallow +swallowing +swallows +swapping +swede +swedish +sweedish +sweedish3 +sweet +sweetbabes +sweetest +sweetie +sweety +swing +swinger +swingerclubs +swingers +swingersclub +swingersfunclub +swingersnederland +swingerspagina +swinging +swingstart +swollen +swot +sybian +sybiantitfuck +sylvia +syndroom +syota +szexualis +taboo +taille +talladega +talon +tamara noon +tammy +tammy oldham +tanga +tania +tania russof +tanja +tannja +tante +tanya +tanzender +tara +taryn thomas +tarzan +tasucu +tawnee +tawny roberts +taylor jones +taylor morgan +taylor st claire +teagan +teagan presley +tease +teaser +teasing +techniek +teef +teefjes +teen +teenagers +teenagesex +teenagesexvideo +teenass +teenboys +teencams +teenersex +teengallery +teengirl +teengirls +teenhardcore +teenie +teenies +teenpics +teenpictures +teenpix +teenporn +teenporno +teenport +teenrape +teens +teensex +teensluts +teeny +teibolera +teil +teil2 +telcel +telefoonsex +teoni +tepels +tepelzuigen +tequila woods +tera bond +tera patrick +terra +terri +terri summers +tess +tetas +texas presley +thai +thaigirl +thailand +thaisau +thaise +thaiteen +thehun +thepornbay +thetruevoyeur +thong +threesome +threesome_gir +threesomefuck +threesomes +throat +throating +thuisontvangst +thuissex +tia tanaka +tiana lynn +tiener +tienergangbang +tienerhoer +tienerhoeren +tienerhoertjes +tienerkutje +tienerkutjes +tienermeisjes +tienermokkels +tienerpoesje +tienerporno +tieners +tienersbloot +tienerseksfilms +tienersex +tienersexfilm +tienersexfilms +tienersexverhalen +tienerslet +tienersletjes +tienertietjes +tienertjes +tienerwebcams +tiet +tieten +tietenclub +tietjes +tietneuken +tietse +tietze +tiffani +tiffany +tiffany rousso +tiffany taylor +tiffany walker +tight +tights +tijuana +tina +tiny +tinys +tippelgids +tips +tit +titfuck +titfucking +titsebony +titte +titted +titten +tittenfick +tittenmaus +tittenmonster +tittens +tittenschweste +titties +titts +titty +titworld +tj hart +toe +toes +toestel +toilet +toilets +toiletsubmissive +toilette +tongue +tongvibrator +tonny +tonya +toons 
+topdreamsex +tophookers +topless +toplist +topmodel +torture +tory lane +totally +touching +toy +toy_threesomemistress +toys +traci +tracy +tracy williams +traffic +trannies +tranny +tranny6 +trannys +trannysex +transseksueel +transsex +transsexueel +transsexuelen +traverstiet +travesti +travestiet +trekken +trekkenredheaded +trekplaatjes +trickfilm +trimmen +trimmendous +trina michaels +trinity post +trio +triocontact +triodating +triode +triodus +triootje +triootjes +triosex +trixie +troia +tsibouki +tugjob +tugjobs +turk +turkey +turkhisblowjob +turksh +twat +twentegirl +twopetra +tyla wynn +tyra banx +ucly +uitgaan +uitstellen +ultrasexmovies +ultraslut +umkleidekabine +uncensored +uncovered +underground +undies +unrealboobs +upskirt +upskirts +urlaubsfick +vacca +vader +vagina +vaginagib +vaginal +vaginale +vakantiecams +valeria +vanessa +vanessa freeman +vanessa lane +veele +velba +venezolana +venezolanas +venus +vergroting +verhaal +verhalen +verhalensex +verkrachtingsverhalen +veronica carso +veronica jett +veronica sinclair +veronika +veronika vanoza +veronique +verpassen +verschluckt +verslaving +verslavingen +verslavingszorg +verycutehandjo +vet +vette +vibe +vibrado +vibrated +vibrating +vibrations +vibrator +vibrators +vibratorsex +vibro +vibsi +vicki holloway +vicki richter +vicky +vicky powell +vicky valentine +vicky vette +victoria brown +victoria swinger +vid +videochat +videoswingers +videotheek +vidio +vids +vieswijf +vieze +viezespelletjes +village +vindsex +vingerde +vingeren +vingerende +vingert +virgin +virgins +virtual_nikkit +vivian west +vixen +vixen cheshire +volkomen +volkomenkut +volle +volleborsten +vollgespritzt +vollgewichste +voorbinddildo +votze +votzen +voyeur +voyeurorgasm +voyeurs +voyeursex +voyeurweb +voyeurwebcom +vriendin +vrijstel +vroegtijdige +vrouw +vrouwelijk +vrouwelijke +vrouwen +vrouwensex +vrouwtjes +vuckovic +vuile +vuistneuken +vuistneukfilms +vulva +wabcam +wakawaka +wamsexorgy +wanda curtis +wank +wanker +wanking +wanks +wannawatch +wasserflasche +waterbondage +waterboy +watergames +wax +webcam +webcambabes +webcamchat +webcamdames +webcamgirls +webcamhoer +webcamhoeren +webcamhoertjes +webcamnow +webcamplanet +webcams +webcamsex +webcamun +webgirl +weblog +weckdienst +wed +welovecock +wendy +wendy divine +wendy taylor +wentelteef +wentelteefje +wentelteefjes +wet +wetcam +wetcams +wetplay +wetpussies +wichse +wichsen +wichser +wicked +wiener +wifes +wifey +wiixen +wijf +wijven +wil +willenporno +wipkut +withwife +wix +wixen +wixt +wixxen +wixxer +wnn +woah +woman4you +women4jou +wonderwoman +worden +wordt +worldsex +worldsexcom +wrestling +wwwsex +xbabes +xfemdom +xfemdomcom +xfilms +xmissy +xtreme +xxlgazi +xxnx +xxxalone +xxxamateurs +xxxfilm +xxxfilme +xxxfilmpjes +xxxmovies +xxxpasswords +xxxporn +xxxsex +xxxx +xxxxx +xxxxxx +yasmine +younger +youngest +youngporn +youngpornmovies +youngteens +youngthroats +youporn +yourfilehost +yoursex +yuka +yvette +yvonne +zaad +zaadschieten +zaadslet +zaadsletten +zaadslik +zaadslikfilms +zaadslikken +zaadslikkende +zaadslikker +zaadslikkers +zehra +zeigt +zemanova +zeugen +zoeky +zogende +zomerteens +zonder +zonnen +zoon +zora banks +zorra +zucchini +zuig +zuigen +zuighoer +zuigteef +zukkabunny +zunge +zungenmaschine +zus +zuzanna +zwangeremeiden +zwangerensex +zwangeresex +zwangerevrouwensex +zweep diff --git a/tribler-mod/Tribler/Category/init_category.py b/tribler-mod/Tribler/Category/init_category.py new file mode 100644 index 0000000..798345c --- /dev/null +++ 
b/tribler-mod/Tribler/Category/init_category.py @@ -0,0 +1,58 @@ +from time import localtime, strftime +# Written by Yuan Yuan +# see LICENSE.txt for license information + +# give the initial category information + +import ConfigParser + +def splitList(string): + l = [] + for word in string.split(","): + word = word.strip() + l.append(word) + return l + +init_fun = {} +init_fun["minfilenumber"] = int +init_fun["maxfilenumber"] = int +init_fun["minfilesize"] = int +init_fun["maxfilesize"] = int +init_fun["suffix"] = splitList +init_fun["matchpercentage"] = float +init_fun["keywords"] = float +init_fun["strength"] = float +init_fun["displayname"] = str +init_fun["rank"] = int + +def getDefault(): + category = {} + category["name"] = "" + category["keywords"] ={} + category["suffix"] = [] + category["minfilesize"] = 0 + category["maxfilesize"] = -1 + return category + +def getCategoryInfo(filename): + config = ConfigParser.ConfigParser() + config.readfp(open(filename)) + + cate_list = [] + sections = config.sections() + + for isection in sections: + category = getDefault() + category["name"] = isection + for (name, value) in config.items(isection): + if name[0] != "*": + category[name] = init_fun[name](value) + else: + name = name[1:] + name = name.strip() + category["keywords"][name] = init_fun["keywords"](value) + cate_list.append(category) + +# print cate_list + return cate_list + diff --git a/tribler-mod/Tribler/Category/init_category.py.bak b/tribler-mod/Tribler/Category/init_category.py.bak new file mode 100644 index 0000000..1d8edfb --- /dev/null +++ b/tribler-mod/Tribler/Category/init_category.py.bak @@ -0,0 +1,57 @@ +# Written by Yuan Yuan +# see LICENSE.txt for license information + +# give the initial category information + +import ConfigParser + +def splitList(string): + l = [] + for word in string.split(","): + word = word.strip() + l.append(word) + return l + +init_fun = {} +init_fun["minfilenumber"] = int +init_fun["maxfilenumber"] = int +init_fun["minfilesize"] = int +init_fun["maxfilesize"] = int +init_fun["suffix"] = splitList +init_fun["matchpercentage"] = float +init_fun["keywords"] = float +init_fun["strength"] = float +init_fun["displayname"] = str +init_fun["rank"] = int + +def getDefault(): + category = {} + category["name"] = "" + category["keywords"] ={} + category["suffix"] = [] + category["minfilesize"] = 0 + category["maxfilesize"] = -1 + return category + +def getCategoryInfo(filename): + config = ConfigParser.ConfigParser() + config.readfp(open(filename)) + + cate_list = [] + sections = config.sections() + + for isection in sections: + category = getDefault() + category["name"] = isection + for (name, value) in config.items(isection): + if name[0] != "*": + category[name] = init_fun[name](value) + else: + name = name[1:] + name = name.strip() + category["keywords"][name] = init_fun["keywords"](value) + cate_list.append(category) + +# print cate_list + return cate_list + diff --git a/tribler-mod/Tribler/Category/porncat.txt b/tribler-mod/Tribler/Category/porncat.txt new file mode 100644 index 0000000..3bc4b25 --- /dev/null +++ b/tribler-mod/Tribler/Category/porncat.txt @@ -0,0 +1,1046 @@ +ccb0706121591094f84a69d9fe940572ec2cb639.torrent 0 +a4a6114591fa9310c5b8f5c48600a2c07cca1b8d.torrent 0 +ef2c3c82685de81b1c9255fb58c8259f4a86430e.torrent 0 +2b14de353d7db8bb60c527161213b3c475e6875a.torrent 0 +df44368956386e3867e7825783abe07547fd339c.torrent 1 +5a27d951445864cb14572d1480127135b5c0e8d3.torrent 0 +0a317564fc0ca7e1ba3ff46373011be3acf1c758.torrent 0 
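init_category.py above (and its .bak copy) loads category.conf with ConfigParser: ordinary options are coerced through the init_fun table (splitList for suffix, int/float for sizes and percentages), while options whose name starts with '*' have the star stripped and are collected into the category's keywords dict as float weights. A minimal usage sketch, assuming Python 2 (the module imports the old-style ConfigParser) and that the tribler-mod tree is on sys.path; the conf path below is only an example.

    # Hedged usage sketch for getCategoryInfo() from init_category.py above.
    # Assumes the tribler-mod source tree is importable; conf_path is illustrative.
    import os
    from Tribler.Category.init_category import getCategoryInfo

    conf_path = os.path.join('Tribler', 'Category', 'category.conf')
    for cat in getCategoryInfo(conf_path):
        # e.g. the [Video] section yields: name='Video', rank=1,
        # suffix=['asf', ..., 'wmv'], minfilesize=50, maxfilesize=10000000,
        # matchpercentage=0.5, keywords={'divx': 1.0, 'xvid': 1.0, 'rmvb': 1.0}
        print('%s: %d suffixes, %d keywords' % (
            cat['name'], len(cat['suffix']), len(cat['keywords'])))
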
+01717eca7a7de535683fb82ed452b713b39ad35c.torrent 0 +c7bcccf9cff385a7b02752568e611cc0a5aca681.torrent 0 +ef384c4be28f3e200ea7fc66addeb26b237cccce.torrent 0 +08269ab7a4a1b99b105e23ede58f99c343a8c777.torrent 0 +61ce5253dd1dcc850549e17cf03927424b46f8c2.torrent 0 +3777552a9d872d9822b45531491c6ef98249b29e.torrent 0 +ff1f15d3f47054b8cef6c93ed9cae7bc0cd3870b.torrent 1 +e0763d0d36615fec974252baaa080d435f54bff4.torrent 1 +b6a4eec2e7093c2d7e1aa01df4174ac2cd509b52.torrent 0 +8210c4dae0ebea35e2a27d75db1be27032542af3.torrent 1 +aaf3361c991bc7eb15f39912bb707992a8d9c843.torrent 0 +35ea49321cabc597b105247d1436c192468ad548.torrent 0 +4099759fd67ae91563140e81e894d9d32e1435cf.torrent 0 +8aef509ea106ce0d6eb034e9dc83109fbf882b17.torrent 1 +cfdd1b3f63ddd9550e0db124b03f5621e4e12308.torrent 1 +81fbd3c851ca2967767da9b506d7ef1848248ea5.torrent 0 +96763ae5f64bd474a51c84d8021c242a3ce34a81.torrent 0 +165be3c7d9f1603dc01f440e560fd519a4a4f551.torrent 0 +181d9fdcc9592610a6e784ebceb57a7428cc1a69.torrent 0 +cb31fdba4cc8f584d4908f879cf3f8f4f2fdaa11.torrent 0 +0c765a7e17f428a003bd0dd16aaf3affc53a5582.torrent 0 +d7f82b98efb9dd6fcadc95aca9c276c1313e42fa.torrent 0 +34aa11ccdf30e48c33843e481894eafb19f6476e.torrent 0 +f3763832ed5512b95f783ba7a12c830a264bd16d.torrent 1 +5b68d027b97c375042d66e38e32d38e5118e2d8e.torrent 1 +cf774b468fc237701cb7f71c0f2a92cea5da016d.torrent 0 +081cddfca3694dbcb23ebbf1f0d69b431e3c5264.torrent 0 +16e9235ff1b1237da2de0926fbb35d2f6c082114.torrent 0 +edf4f2a7f5aa6140c42b2e2f0b03cfaf974f1a61.torrent 0 +bbd45b97c936779e23286e354b7f194edf312090.torrent 0 +a35cb9c748f2a91a78003e461cb8bfa1677591d5.torrent 0 +87b0cf2d727cb3cb899eac519f863c84c2946787.torrent 0 +02f9684d23915d17fc63fe14ff9bb363b2cee1e2.torrent 0 +45ed31762e5f5b25cd17ec46a25067df25303d57.torrent 0 +a9c72cc44886d38a1a87f9e64b46da5b5504f69f.torrent 0 +94e0e6400cb288ff4b0776444e4ae858a26d833d.torrent 0 +8eec18d984c7d00072142e5c09d3a7a55725f520.torrent 0 +ff8a17b926ec7ef841a13493bc018e6144b5c809.torrent 0 +dac480412608883621e5a38f3065ef1f41dcd279.torrent 0 +5929f496b821ba3de7fd64e628f9edc87b6340be.torrent 0 +69422461d12ea4a025f24f0deabd043f80e66087.torrent 0 +aea1fa2f835533b80623fa101e6d84a6c6dd847c.torrent 0 +3b55c36f29574fe7c9c273f3c7efe827a167aa31.torrent 0 +c148c0bfe0f92264632dc9088f5b3e2b661051af.torrent 0 +1ef57ad4fbf464ab74be9747fe53a61427b5f46d.torrent 0 +6bf2b5e542de28a74ce8187f9c585e32c907f92b.torrent 0 +c26c42141a4338c565b34b3583199480a39eb6a4.torrent 0 +433314eeb4749fc6c23e955f7ddf4dccf923112f.torrent 0 +d804a21895b3986b25f609ca72cf4e602cca7eb9.torrent 0 +efe13c37fcc7c49ac4419699888d590fdb86bb1f.torrent 0 +2a5cf47b52714de82f4448a9a655b974a2b28b4c.torrent 0 +df6835183f3588ca5978bed6e81078339077d789.torrent 0 +d2178d1be3e69d45aa58c5b065a03d1ed0461def.torrent 0 +17c80def72397181442c8e6be6beb817aa1a339c.torrent 0 +d0124c4d4b084c0daa4579d78206840a13703b32.torrent 1 +1d7da3e85c6f5138a863324cc12e1c57e6627e41.torrent 0 +64a519e38170114349e9598e96d679d9bd25230b.torrent 0 +e1bee10ad3cc633d6a89474b8b5af45f1a648cca.torrent 0 +8931198f540e387bb43bc34f03dc58b3987b6b31.torrent 1 +9d8a6630f86861785b1b33ae56275c65db7f9fdd.torrent 0 +63878170d7baa6b51dbf26c7de7825f828945a10.torrent 0 +209c7b9b4e324bbae116fec0881e968512343800.torrent 0 +1bad757901dcd6ad3cccf3030785c6d57be1a96a.torrent 0 +fa866ff1799f019c1280a0291efdc7fef7776a40.torrent 0 +adff649f632f83375f46ec28ebb01f9b47f60c16.torrent 0 +10d740ff36698aca0ee9a44d3e66a32f08303191.torrent 0 +f8e6ee8123887caac24cd9f2abcb828ebf8a5904.torrent 1 +c1a06d594bd42a0a0806e313de8a65a7176742e5.torrent 0 
+4e39c8dbe88a34c3fbd639722d76c6e02a07ae39.torrent 1 +02d610e8a52f6b1c5ddf2c21ca25a22a4d740c98.torrent 0 +fcae192a8cf9a3dba411993a47515802bf18c8c7.torrent 0 +5f5ec2ac2009e52387f5ab245466b3bf1269ded1.torrent 0 +67cd57d0290d8caaabd5f10a26449fc0ff17cc8b.torrent 0 +9ac288fa780d608ff2b560539b09b29af1fa4c4e.torrent 0 +a8a9f7224d9c9ecdeb1b93dc2ba34831d78c1ae7.torrent 0 +f41dcfdae8d952d0789f45f95e009baf0ef570f1.torrent 0 +3ab014379b5490725fedda2769b50a404bf8f04a.torrent 1 +60101dbb5025c020ed1f3b21155d470ec8bc3f0a.torrent 0 +ae9e5022c9c49cc9fc9487ae87d357342206ce3e.torrent 0 +365a23bccd99c2a6a1e5e40d32770fa6e106b135.torrent 0 +e78b340f6ef41a6fcd36066e74aaa045aa04617b.torrent 0 +7ac78b56f02964d543540f642bc37b5b18beb75c.torrent 0 +b27a1a521a58d4405b1d30ee5f46c232f875d0c8.torrent 0 +0387f30b76f90352085b22d753c60dab858d4e45.torrent 0 +6b8a0c5e727d6bf0335dcde50da7f027f3364a77.torrent 1 +5d96e0accc735356741264c8b1fcc52b6003774b.torrent 0 +5bb331ed8a157c2c6ed82cb0ab0330acf7cfaf60.torrent 0 +87c10305335674ef87f2a10d0e2c7c810a1e4d7a.torrent 0 +810728c9809d9d3a5e3a205eb31cded7a3545883.torrent 0 +ce2140d74d4519a3abdda8ea5a0945602c8e1155.torrent 0 +1bdebd771789b9fb1199582e632cbee06edb24dc.torrent 0 +caa7711860611083aac092c7ab61983dae751b0f.torrent 0 +26b7ae9264393476a13f708e5f9b4528cdcd49e0.torrent 0 +8412126290d46097b908a4bf49563511f8f4aaae.torrent 0 +1b8af5c3609ffeb7dc8998224de003b67d81b9f9.torrent 0 +9488125765fe8394782a7327748942b6e00820b9.torrent 0 +7d254dd6e99b6e394786c7eadfd02fdf10956f6d.torrent 0 +b9df4d1866332814ac51ebb4fef2ed41b3ef967f.torrent 0 +d18d74c897aca3a083e998447370fac880a71978.torrent 0 +8c1792dff71981c8f0301a94f0abed7363c17660.torrent 1 +d5d5e8eebcbb51805aedb4c519641d466504e62c.torrent 0 +cf34f6d94930e3d280c4468450fac9693671fc18.torrent 1 +88108c34ec3a80b7fc20de282dc2f99f04cac344.torrent 0 +938e3114ca282d3d78562564212caed469862ccf.torrent 0 +a056d36bf4431fb51d261d61a2d82d794967d539.torrent 0 +912ec39948a41ac4a39865c995ddbea14d5af1d1.torrent 0 +6786ccf7a9c3a0703fad401a387fae404611229a.torrent 0 +623a7f639d4ae223dd9f87cab8917418f7a974ee.torrent 0 +679d9b356d19d25873329bb0213d606dff687ccd.torrent 0 +dd7ee2953ac834bd990b8ba020515f17fa16c08d.torrent 0 +0c64b165378c8ee7f2782735521002b1c5de83c1.torrent 0 +908b6d724b55423d5c06f937cf2babd06da12dfc.torrent 0 +8bedfdd00c0621367226512f024beff177b6b7e4.torrent 0 +471e254935e47029bd24801eed931f396242fb70.torrent 1 +c9e41acbe0f222aeaf887686cdfbd2e6ce6e0dd6.torrent 0 +7c11de3774205124e994da06471753ce50a0289b.torrent 0 +4b80f04acfa2c14a3bc4afb0d73d99d0d4bb92d3.torrent 0 +cf6fec8703ad5d56d58685bfecbd62f815102aa1.torrent 0 +d292e5a735f3d668496ed14d7f1e98155f6d066e.torrent 0 +d94ed4182bf2f266733d472259b924d4553d5a43.torrent 0 +1c79a9d079aaf64bde966f5fd10be50ad61bace9.torrent 0 +1c36730db8662e4ed8803caf30b8d34170f4a143.torrent 0 +298c7b7d88f961eced39fc4d4cce16ef5c923e30.torrent 0 +b458f32751001b06a18106e3d4fcdf730f1a658d.torrent 0 +058587840b687b9e0c6a9ed0af52b26376eba90d.torrent 0 +c0db589501ab6ce8eec8ba182157109dc6d1e8ad.torrent 0 +b5950f143b4c55317ca91c7b42386b3350dc2a11.torrent 0 +d6455a2d30863067accf9674e21046c0f1ec2ebd.torrent 0 +6b06e539bc0166959340618952f442293d0f4c1e.torrent 0 +992fc4c679d1f882d2427436e8d43997c4f1f977.torrent 0 +8a2840164275221f43c6a14705ab1d288d33687c.torrent 0 +5e976a20e95db0ad00cfe522dc74fc785cbeb8c7.torrent 0 +3a99f6d46a6ddecca4eef6d86a57644659462be4.torrent 1 +ba6a603e1fa3a7e314181a269d0f5893023c139c.torrent 0 +a8b078ad03c5fbd8e7f95984c2d8191e785957cd.torrent 0 +01c4aefe17d6be3a5e5c6508d8d175992d580b47.torrent 1 
+cc83c5ef613f353132422bd48ed048f07c730e05.torrent 1 +4d6d101f42740b1340bbe35e280996ea11acc1be.torrent 0 +b1052e7c1318767811bf16dea98770c74154494a.torrent 0 +4b3e9f40b5c731d61899eb4099177c88159ed73b.torrent 1 +34bea59ec8f7a1d7c647a9178ff2bf45d6768bac.torrent 1 +3350df2c03cf814e577053055daf005a5e602d8c.torrent 0 +3e21ba6305bdd42aeb85bfbe0eb9bab2f6bc6971.torrent 0 +3ee511ef54942b23a150d5b78dde19c80cbcbbe3.torrent 0 +105094b60e72341b20e1cac68adbe83ef4087b7b.torrent 0 +ed3c2bb4ef85b11feb19cf687243a624f822c1d9.torrent 0 +1a9cb3323727bf6e8d7546e09cf4e45f915d0b3c.torrent 0 +919ebbf2716b99303065ec09012914cc2b63828a.torrent 1 +45e2be6c8704056f99a9af4a99dd5d7925e90728.torrent 1 +1a346370257d52900085f6bb7b12eb3bff6bde6a.torrent 1 +639a35073ee50cc7e209e3d0ae9dae6e7ab84175.torrent 0 +be66e5c0ea417ee659e59ad1ba1d1269475126e0.torrent 1 +bc249a60f1703ab47f975801b8c99d0012464f41.torrent 0 +be51f6b118d68ae41b300f0867a594c1cff1340d.torrent 0 +0bebb745ce89a1ef34d4e21c876f5a60c626ea33.torrent 0 +be6a1a355f30b7e110d01cd6e0f7cb2c976e984e.torrent 0 +892a09b7b98e1e37a5c1bb2cb6d94d8fdaff10fb.torrent 0 +99ef62d009da7573e7913bed8a405a96a5f2f4b7.torrent 1 +b3c4bacad8ec19a1c9dd9c9e9d83341e5b89c636.torrent 0 +0b0763bdec846b815d2e68006af43c5d2d2e11d2.torrent 0 +16d78145b028c95a26bf35c2d74c9e386d34ba91.torrent 0 +164ea8f30f4e818f173688b2e9fc9da8b4df3035.torrent 0 +c2eb89264b4a16c4cd16d4db79aebca70446d8cc.torrent 0 +c9b8cdaa8d7dffa794547f96eb793525768478d1.torrent 0 +b98d57b0edf0ebadfd0dca0182f4ccd606d9fede.torrent 0 +e8ccecacc7fdbe27c069307c2b5c2cbc3291849e.torrent 0 +aa1abafc3cfdc4100f5a16296362c2bc2865644d.torrent 1 +aa95a0a9a229f824bfcc34c84e4c1d03e2af4cde.torrent 0 +b77f47352106bfbe8efea7bc25d67af99b565dc5.torrent 0 +ac820e814ca426dc99802595e6354cf31feb1de7.torrent 0 +efbc4c4d3ac4f72f884d34f1029455cda218ec52.torrent 0 +d16f039642a5281c8536a1b2e7e235b4afa8d94a.torrent 0 +98953bfbbb192cdaa4222e9945a0f78ebce4cae7.torrent 0 +315e8aebd3ade46ff871d51833fc05e3fb6c0e11.torrent 0 +1f588c1e7131f944280c0f65abfc445e666fb241.torrent 1 +8a06de7cde50f7eb5c3ed8076721e268c78a170b.torrent 1 +a723be15aa40b6bad3907f6cf03362e0eb06fb24.torrent 0 +e387cf732b0ef3fe3504c441ccc49e188a090c26.torrent 0 +7617615183143f6c0343a312cd14654f8fe05f29.torrent 0 +e48a70da08bb00edcd5e3a1fb160fdca8dfe8d85.torrent 0 +49bfabc6e5e3839241d9c13c229c469b83307ce3.torrent 0 +7b97210edce80021647ba6acbff779003af33322.torrent 0 +58b36b1480a5d19e29f51fd3b2df57b4168e9980.torrent 0 +db0bdd49544794dc455c2597ca11f505e6401753.torrent 0 +07bdca06a158e8d33b4cf3db3561c65159d24b0a.torrent 0 +6453590007bdff9e6369d52a2f4e2045976af031.torrent 0 +c1125babf356be5a70b743a7f3fc52d89e4022d3.torrent 0 +02f3aeb747e1a2dfbb5fb0e557c4ca84e737d00c.torrent 0 +81db30b4e4613f8ab666cc8bb17e40144fa20e2d.torrent 0 +44accd9aad532ed0a008019dd982b7ee66cfa66d.torrent 0 +0448bcfd0cb4515a0c3dc9e428a0195bcaee1265.torrent 1 +710126ccc2da05976c276ca0f7547f28cc86910c.torrent 0 +f945ade1179b6284386d0b183d65bb7d56a374f2.torrent 0 +24774067a1e40f2c5f5a422abc498a7ef4de7d65.torrent 1 +41348325a65a3dc1ff3af84c97fcca68f5c352c2.torrent 0 +b778e6fdd352d0ef29d3a051a2f9518b3a32710c.torrent 0 +154294b04162f04fbce45829ac63302503d2b155.torrent 0 +559fe69a048289f9c18d47ce279a4c1fbe68400e.torrent 0 +61d61ba17db766ed8991e071a3fc7b4b9af83a33.torrent 0 +cfe9aab63187abb8bd68fde76c19dfd49ed8c3dd.torrent 1 +ebab2854be3f03c270182f3777ac7f4b5ee62c57.torrent 0 +d8aa24e6b8d0c893edc39ddb5b0eba8124ae77ad.torrent 0 +f552c8d63d8b995a34dc1e9ae3a4808ab528a43e.torrent 0 +e24edb1a9f71ca8eb4e9c4308c5c82467e30a10a.torrent 1 
+a616216dc001f2336cf3471f49e7db9c3e1cc7b0.torrent 1 +d9e4e561bd84ef1cdb37e8113e4a7448a59d2c20.torrent 0 +29640e3c79b4425e0702a95e8f14ceb52c46edf1.torrent 0 +f9cd6a20ccd1c86f8c8133ed4a9f4cc741d72b01.torrent 0 +fd04abb7233e96674b408e9d5cc545bae3f35bb8.torrent 0 +474da22fed67b82aea216f89c9a948c7a655963f.torrent 1 +1a7bf70a1e68264bdf0c0415ffa598a06dbd7db8.torrent 0 +ea107faac63b2a50082e63c8b48c18cb9e171ca6.torrent 0 +31ca17cd5f062ded941fd2c978aaa5a28bb0371e.torrent 0 +8836105e81387893e09aefaf45af9235d9f05d0b.torrent 1 +6e64ec22801f84591519feff0009f14ac4bf1f4b.torrent 0 +aace8c34bf707d6894adfba1edabf66b9fdc3af6.torrent 0 +35305b6294f107cab333da65c71711e8d9a395b4.torrent 0 +6d1d3206536994eccd4047c36bcd9d4d4a878586.torrent 1 +848590401197c075f25e2f091c803044a538b483.torrent 0 +96d3e45705e337f33cc22d1d9dcd32eaf78811d6.torrent 0 +53850e227fadea8fd99ee54640059919e4910b66.torrent 1 +3b93c2ed1a53ba7fb9216a90d13ca55906cbdfde.torrent 0 +bd9e613e0fa584110b2b7db3d2df278ea7f2b987.torrent 0 +8ea16b4d85a69c42abe8e987e6b103af652e6447.torrent 0 +3d6300bac68750c693c52521791aaa7cfedc9e85.torrent 0 +a5236d192e7a80e2a9c8acabb08ac723e81bf812.torrent 0 +90868fbb0a5b4c30e402735da4dac397c67da04a.torrent 1 +905fab172f9fa10fca29e0f8302f83e2b1676687.torrent 0 +168d7278158768f5cbbc93805647453d215c22a9.torrent 0 +0962a20f421a9eed43079a8e18f2015e3cf37a26.torrent 0 +b4e92561ffee967a6bc467ad75e818db9b800953.torrent 0 +b1dc4dbf060f1199cb96ea27748e44cba2aa6895.torrent 0 +aa87b08010c160591ced490ea657128cae1ef491.torrent 0 +4715b5ec7dfe860aa3a6c05b97e6a18d514afa27.torrent 0 +d0424c911e1cd30766865aa569ad6e99af611ae9.torrent 1 +49a7317b502d6e18f76b9e58381a64f54ae69a0d.torrent 0 +d64d7314fbd1762be7397e11a2287633109f05ad.torrent 1 +f5156aad961ac65d40299c8c451e82c96c76a3d3.torrent 0 +bb9e4f49f719808054999035e1197c88374530ef.torrent 0 +a1a7684e56af510c781614e2083ac836fe49f5ed.torrent 0 +dbd8becb298a1ad467fed9626045d77d5c43c3c8.torrent 0 +173090566eb4df0733dd671abf297469ad3664c4.torrent 0 +3f58fae42d38f61f862e4d3561932dd754e1ce24.torrent 0 +1f29141c7642d87b0018a16438cbaee447a4375d.torrent 0 +6ead410df39a8f0bdd4be994f3b92c5412d93d7b.torrent 0 +3c72b158604b713e30d29894e7e5655fd88a0a9e.torrent 0 +d336c730a28d516de57e50eb0bc414e8f3c349ff.torrent 0 +f5969a57a2e6989dcc558af12f10b2fc2f16c4f8.torrent 0 +04e90f68ea73254201906637ca52cfd074988bbb.torrent 1 +d9b365cb44c73de6cc3ac4ba0360e17ef75027cf.torrent 0 +91ec1461d5f25d19ea225c56e51b1d27a187d32f.torrent 0 +7174f1ea4881b4d877acea27d277eaf9f6eac960.torrent 0 +6184c4728dbbe6e689b4fe7045ca8ad828b196a9.torrent 0 +1c1bf4e9d2599a7c73af8e4b2e2698215b7506d2.torrent 1 +94ccd8884de29a5fcab588f8d79d77b627ba148d.torrent 0 +74f4da40492b74476916a4a1e9976aaa0bcaa155.torrent 0 +76858583dfed4290031205a2207a519cd517c968.torrent 0 +04e211dc52970eeb404457bcdf5ddeefc64aa74e.torrent 0 +e923e16e6d364a528eba4c8dee40d9dcfeb665b7.torrent 0 +45410cdba9c8503202c5da632e510f3dd2c13057.torrent 0 +1180d97c840b92f8a4c2524ce9b0290d711976b9.torrent 1 +45f53515a9c6ba1dbf4007af6e47f201115ed64e.torrent 0 +a7aee2fd013642b8a4b1d6f4b4e8a7caf57a284f.torrent 0 +d4a32f00f8894c2446bf3719a4650eac707237c9.torrent 0 +6fdd252dd3a76ea58e00f3630639e5258ae0dd77.torrent 0 +a6fe7c7eff66e729d4d1e72e8bc04fb639bab99b.torrent 0 +b073b2a4ae5dff24f41725b20403e1d8154d048e.torrent 0 +be0079705a89e7d320827c13ff7b3e986a767160.torrent 0 +821c05c041e89a8d09513f823e00a159aecd3602.torrent 0 +da768802791f00756cf3a936e087dbb3ca032cf0.torrent 0 +bdc036bcabca4e041a6ade52935fa49637718017.torrent 0 +9bbb5bb758883eb41e329867c1e566718c443430.torrent 0 
+75083aaac58f80cd5c67867c2a609fcd9a830f02.torrent 0 +72b505d8fa0de98779cb6158b4fa7f7bc1909345.torrent 1 +7562c19f76300efc6e13bc87a794c66bcca41599.torrent 0 +b5ec58bbba1bac8c1e594c0a49ef0f31531c5a5a.torrent 0 +9d10d07a996043a9fd1fadb065fad7d29ab1fad6.torrent 1 +b560433528a70eb9341b90f2937ed02ccec8429e.torrent 0 +3372e561637aab9d090bea330ac6b86cf3c4cb37.torrent 0 +6f843bd78fbcefa73b0e2039ed6a1297ff94721e.torrent 0 +c0b76b7bc1beba5b9b2b8f02eec8803b79959e2a.torrent 0 +63b87b9e5cb8173948d6320ad3426430d39555a9.torrent 0 +b548c44df6bbf806cad6bf7dc1d0d5c935fbd878.torrent 1 +3a0afabb56c8a79d2167f80d2d40295933ca37b0.torrent 1 +dee8c500529e02a7a2b83e9ea66787de7fa372ed.torrent 0 +4909113e21f3453fd5016cdcefb7a5e0505ec2c2.torrent 0 +266bcc4b6af5b902da571dda82ddc3a207b72fa7.torrent 0 +2c372f00de1d4749ace698476af449d0357e0585.torrent 0 +cc607802092dced71149f7861659c46018dd4f0b.torrent 0 +3f5e181f56eb9baf100f7dd978f23854a1544cc6.torrent 1 +5fc74d251f2046e20061df8ec4d6c9e0189b1942.torrent 0 +8efb96e5419c9534b61e818e543add469550c5af.torrent 1 +88caaf1c390b43ddbcc0924f4ebf26e107288dd5.torrent 1 +1c711bace8bb45093bd741e1416a47be5d124fb5.torrent 1 +409ce2f176cd8bf922ac3dafb6fd1e2e138a3584.torrent 0 +6c8e6e389c5ffdf574bbac0ca0b2c005f33e0f44.torrent 0 +059f04005ca3cc07062a08c73f632c2b1d49cd4d.torrent 0 +696bf34a6333f52424ade29896aaaebca1230478.torrent 1 +768e9b850dc8123e98c84b7721141cbe7b6cc9f3.torrent 1 +0bcffbc9ecf6c57d5689165de30a32da1fe9ed54.torrent 0 +e1c1213d3b0849341128048391b211adc0e275fd.torrent 0 +b8d3144d89f0a3d6cb9c2cd44df97fcf4ba47975.torrent 1 +cfc8494f193c37a4667139762ff00903f5f1f6d0.torrent 0 +6a74d6839f506424aafbed10c274f6361a142ed1.torrent 0 +f75a5b326825b6f69914697b99aba0c910c48376.torrent 0 +1ec97454eefe711d701c7dff9412994c5ecf53e0.torrent 0 +b9bcd2d05e78ba25b14528d85054bf87e787ebf9.torrent 0 +490001cce4776d7ff082c5e8682d35c451cb70ba.torrent 0 +9a52654e2a46a19f1ef49261a2aff570bf8ca6d4.torrent 0 +583868737ee2a1c36c5b32fa8a4911d8779cd468.torrent 0 +d9390c9d1f31bb66bb4c6a51366974c520b51fd4.torrent 0 +2021a9824e6e5be659b978099f400db5e1db1f53.torrent 0 +b76f16205b303147b372d719956c0c758f8303f2.torrent 1 +10587854d610ef71fb9745aa16aaf190ee1bc698.torrent 1 +34321c300588ee3f8741a952aee6e0ed7aa699ba.torrent 0 +9090594351ada5a5de02bc4abb3da2454dca345d.torrent 0 +cfad02e519d8d46ef1dc12a6ac3f003eb782d4a8.torrent 1 +e16134da55841edcd376a8b97ca8e748388ec622.torrent 0 +de1aea691862e8ba915d483d6dc1326e2cb02e27.torrent 0 +8cacc96b74616a1bc16fa18b10b5d94b51629aa1.torrent 0 +706399bd30fc327e6bff8126a12b7a8e81423d3a.torrent 0 +904f2128a79589410debaa85e01f05fc193dde8c.torrent 0 +a908b2fb710227aaee765da2f18e34f1d0a1133c.torrent 0 +56937e3984c6fe4f187b359fec1fd568159fc686.torrent 0 +df1819bbd6fdbbfb3f3dbdaf79e70dccf7a2b960.torrent 0 +9b8b2c111f2dd62df157d792a53faf48ea54a856.torrent 0 +a15f1756e4e95b4ff9de9eda5fc8a8fc9dff89bc.torrent 0 +65ae7e2f1d5cf60acfa6d0ea57cd363c5d7985d3.torrent 0 +ead8a2e9cca00f976f30653e372e5be262d3fe95.torrent 0 +d64dcb6d5d0c2d9d6c161552592c0dc88f9fd04e.torrent 0 +55c3f4ac0bd3068dc353244912a335a9fd748683.torrent 0 +3a85275945be544260982b3247496549c25d25d1.torrent 0 +034783fface9882b2143751e56d98b509716c954.torrent 0 +73596050c86550145cf3f50f8474eae084c5dcb1.torrent 1 +5a4f1e8bb37b8abbbc19e84dc4d60d9baeb8ccaf.torrent 1 +f0328bceae3e7e23dab8f18b26d9b2422d75b952.torrent 0 +e22531017429ceb6c9a9a3d6ec0530a966dc5888.torrent 0 +a6b0a83984218947df78403b1db29f8bfd935282.torrent 0 +79cd00c6a5676b0ee6cad713721a695c48d010b0.torrent 0 +c94126acd59899124f8e0ee6185e3868a7c18f63.torrent 0 
+2f35ce11104737ca85b8662a801dde38f01ceb40.torrent 1 +71608b99e30a6e56c1cdf4b3279859193340f617.torrent 1 +0b9dea9a67efca638bd7d1102b444b40519c4dff.torrent 0 +ae10214e195edfc576720d33f1162f17077c18b3.torrent 0 +e60ef9e1515dbd87535343bef11eb08a29926180.torrent 1 +ac9b6244231614f4939ba9f0daf21badcfc3e7c2.torrent 0 +15f1abff9fe437baef4cbc0b4422ad454bb3499b.torrent 0 +e8351a4a22f4dde805f06ba1b90cbb17f40ea883.torrent 0 +bb9137329ac6a9616974b4d75959be06d223704b.torrent 0 +54a8228da48b103e7288980c5916461828fa9dc7.torrent 0 +95d0adbe97d8946b7f1a06f32fd08f7d6e34b038.torrent 0 +121034ccf4fe9777a02ef3d3ed35696bbf515515.torrent 0 +752e7094efc96cab8db5a1d29b235b5145f64b1c.torrent 0 +907eb2ae16b57da54906a546c570c8583bec197f.torrent 0 +5445042f7b9339547ec02a5683284c2214f1ae58.torrent 0 +42a475682e81a403a630c1e34c3368efdc19f487.torrent 0 +7ebaee22e0b6f5edec8239a3ac026fcce8f27eba.torrent 1 +3abbe48de625f5d805f3bfd468c3ede8318bd9f4.torrent 0 +c9cc7ec590663e9143a110e130290d097f2a50d2.torrent 0 +c6974d53bbb3d61072463d375a5878200158e4ff.torrent 0 +37ac4bfd0a6030d171af79b73d636e1beb2f15c9.torrent 0 +9b87b50c8f927f73485f89bcf58f226125a98527.torrent 0 +464e3a412b80d3d102f9024333597a544f6c4727.torrent 0 +e8b8113a7619b2747f8891c2e570c043107668de.torrent 0 +6d1968f6918d0fd7cfeaa9db7c5fbfdcdd84728d.torrent 0 +0df9a6f86685ef0320ee0060485e48b7cfa873e5.torrent 0 +d880e7333d3210fe05c4e7d27f410b5f8141197f.torrent 0 +fd304337f367ba2371699a1f20478abdc88f4f27.torrent 0 +8c7faedcd24269903ad7eb013b8d3151b44bd4d5.torrent 0 +a7f0c323090b3d700f0b1eefd5d6c2dfa816b57d.torrent 0 +d705dbc1e2c6358de6507b6a3e593835f3e50907.torrent 0 +829b1dd4821401a6f3613e696ab36f9b1222c5eb.torrent 0 +827b641632f2b9ba5cd529f9b462cca527a90126.torrent 1 +6042d0012f134aa3a43fa6f7acb7759c8dc6e44d.torrent 0 +3447944b443479d3dd8ee37f54ea576b7616fa49.torrent 0 +401da2041f9251299470387f5e3da93e85eb881c.torrent 0 +8c4b3f6f7742f8be794c758cab610bb5a69caf3b.torrent 0 +36e7496399228758dcaf5ca5f3cb9c4528e1509f.torrent 0 +6a440916caa42b78f99f58d4501bd06aff67b2c9.torrent 0 +64a8dd0e9f60ed058aa7f1b2780af6b1cbd973e8.torrent 0 +31c02ba637ff8ad9b51054cf7efebd16b5468e7b.torrent 0 +e4a951a533f1e11572114cc0d4d2fa9f806d6841.torrent 0 +921893f177ffb7202303c317361040ebc855622f.torrent 0 +4f34540f030c4dfffc1833f9bd8ced87e9e94c9f.torrent 1 +bedd3ed8de9f4366eea40a6bde425c752f960705.torrent 1 +2c3b32e743329331459ea100e34f2c6adf9d081b.torrent 0 +799c099aa065ba9f2541e1b55a45a63014e31837.torrent 0 +a764f5d53b1a00f4bc436b9a8da19592dcd2ef3d.torrent 0 +efb817c2d59edc31d143ff1172480238bed5f482.torrent 0 +e4dfaced5efc0659a04ee35c15ced1a2692a9ccc.torrent 1 +99b17ebdf63a29841501fa9e90dbdc5da33e7eda.torrent 0 +e4bb69aee925cc19f58383394a2724cb2d6f041f.torrent 0 +80239682b9551fc807c396aa3f203ae2c1161f16.torrent 0 +1aafb6220027d0f276c58e6853389032c025cf3f.torrent 0 +abd31aff25f0e70c85b56e51447856912b5d0e76.torrent 1 +f3e2c72ec33c9ccd8be3e810a235729680d13727.torrent 1 +23103984fac91b08dbf362732af36ea861def39b.torrent 0 +da22da2836fd88ff5832465a9288a9be1ae98834.torrent 0 +bb6139a2cd055dd01808ebf216db28f7d4b1f41c.torrent 0 +33d0960af4ff91d6bf866ee77088ee542f741ed9.torrent 0 +2057998625b81bfbe6b234085277ab3f8701de56.torrent 0 +1a510f701be5d1ed004f815fe02b0a662d443626.torrent 0 +7a3b34d30b28328500342110e698fbf94894e3c2.torrent 0 +510d895db16bdf3980faf647801d4147dc6a1ecc.torrent 0 +0b0d26be330b3b6e6f3828579e460f29c36635ba.torrent 1 +d9c8da488567c328d160dea0536db82e40c55314.torrent 1 +8292685cce16cba27523f58de9a6be0963755201.torrent 0 +9a54963e9245a31450569e3f10503937ca051218.torrent 0 
+86f77685a0007069eca7083f21bcf8f1fded1164.torrent 0 +f06b29eb5c8acb0cdbd60c6c3cd632ca380b889f.torrent 0 +cfc971570bfda17eab31c6e815e69a48238116c9.torrent 1 +b0a75b4a582824261ac775098096671e0ca59ae0.torrent 0 +df80b09448b5f507d6a71b969354b3b5b2517d9b.torrent 1 +0ba6919df0078de2906bb522cf662a339e48e8bf.torrent 0 +a76504a7cc1bc3964d25ff8031add4d511ce8243.torrent 0 +cc3fa0799c4dbcd6154079d1a59da78ea351ad99.torrent 0 +c12f829db6bf42e3bc771f7f667b658a4742ed98.torrent 0 +1371e66e3358ea64cfe8b0e0438c92078d0c993e.torrent 0 +52b7f5f9bc8c327e768842b0a8088a50990884ab.torrent 1 +19f6c869cce7f4dab96467f73efb9cb58389e9a8.torrent 0 +0241f83dd9d271cb1a92358dcfad5861d8b3a62f.torrent 0 +85940d26258d43d3e6d56990095f0590477dfc49.torrent 0 +9068395b3177c08a4a2c6fb75cefcd505ad0c5df.torrent 0 +f373817f7710895ea7af9025b73a713f12b3c973.torrent 0 +c153740ed1ab3c82f82801e35ee16bb6de5690d4.torrent 0 +265f0f60d570aa5b6bb6798bb16e3cf416573ba1.torrent 0 +102fba3ec9d923418e7f93c082e5068af59c7689.torrent 0 +3c079b6f8236fb16c936b862ed0103f8da9e6e1d.torrent 0 +0745635aec15b7e4268069a1d009feaa7cf56014.torrent 1 +816978d7c2ae4df2245a131449a0ab54f7e25c82.torrent 1 +f04ab203f4e401872faa557dedfd0b2c7ad59f3e.torrent 0 +241513d8df10c09909fd8ee1e492d422bd3edc72.torrent 0 +09cc948a3e97c0cb62aea16066046ca431834ec4.torrent 0 +6c6cdba3d0e476019cecb287279487ee0f1babe0.torrent 0 +4a290e75c4c5aa8a2fa9c610a4018abf95ac4b8b.torrent 1 +2e64d0565dbd86823162e5eac93589bd1b3c15af.torrent 0 +bd941b6977631261c2f4eb4eb920971c9b924c46.torrent 0 +0f61a0f12b110591ca81a562beb0379a77846daf.torrent 1 +eafe9d48300c8e28ec757323940151d15846166c.torrent 0 +da5d8b33303a0dc68d3fb21744962623e4ca28d9.torrent 0 +b9661b1c9b34060fe443021ac3a01a9bb84c3c9f.torrent 0 +a37bbc145b86ce743907b55129b9b08ade815ebe.torrent 0 +50fbc9490e3215921f7b321b27aeb9fa5b63ca2b.torrent 0 +e9e50b9de7d2baf39de27450da64c91ca202eab3.torrent 0 +d41c4ea9a8390ffaa2f5213480f3266f3fe23f5a.torrent 0 +2590a5b1db82f6ea36d86005fe04d60ed8568905.torrent 0 +b2b05830e09f75b4fb04ec37d42ae68a41f90a0f.torrent 1 +d61330aa0784d8c90ac5f28d27a59d4f504f647d.torrent 1 +09e207dae1b4bc0c62a06273d72f4e01b8ac549e.torrent 0 +84288821355325abe321f22e632375076e6b7124.torrent 0 +d4c224dfb9860e4b08bcb7714548a68f0adc9781.torrent 0 +eefa44bb145494b7f98207d53c28916576f5f9a9.torrent 1 +988daf6d53c44e2c5c296681870cab50c0f271b9.torrent 0 +b78d87a29517fe5539b66613c135ae9b23cc4036.torrent 1 +e1b6d25b4a433e151c417ba62ffcb5eded21bc84.torrent 1 +15661e37e741731185e4798933767b5e144cbb48.torrent 0 +fc481163e72bb93c1ab5b57ea8afa0b4c9af4156.torrent 1 +5e66ea45f4ca94ca3d377e894d3a7d770d68d88f.torrent 0 +f3b7d46ad16517c30ccbc3e97527730844f3f66d.torrent 1 +9e8faef9714a57b58846537a02ee26b7036bcb1e.torrent 0 +079158cd35232f28526634c0e88aec49111e4c17.torrent 0 +51c52f9c98ef461f024504ffe601c1dbdeec1364.torrent 0 +bdf74079c3404a3a1f0fa972ae689122542cdcea.torrent 1 +986fc32d3e44afc12a69909e99f2a6089d83e8f6.torrent 0 +6491f661a16964da7caaf182c7553f81c216ef2c.torrent 0 +465cd5bc67d697160ce1b0a6a51852e2f8071808.torrent 0 +46082d01153dbc052fa023aa9c9eadfc912cdc07.torrent 1 +d33b6c940b9eae29ea0a8a0688671074ee2c170a.torrent 0 +0b142abc41e296a45a55d1e2a20d37b0de5d9822.torrent 0 +640d51a9d075cb82efb79f7e7bd89016f1f76655.torrent 1 +ae20136eae6e25f96ec32af4fff61a3c944b12a3.torrent 0 +d55cf91570c1f004bc81f45b6fc0353a38dba51f.torrent 0 +38fdf5811bbcb88f82aae969f55f88b272bd5ae2.torrent 0 +691e891a7980dac130f81a1120f8102405b19db1.torrent 1 +3b93f445610d4fe92b4d6ec59b75b0ecd63ba119.torrent 0 +b25852145fb9ee59e2cca9a0d4cdd1387c872c80.torrent 1 
+441dfdf82f2a1dd1eb20b688062cf6577db6f087.torrent 0 +60542cd4d21b5e1516861fe81de8f8b40c9c8935.torrent 0 +d196d5a88dc07ef2f1ba978e800310594978392b.torrent 1 +d5b35720f9026100b0020cbd0e19174313fdf759.torrent 1 +694b01959765d8d24bc3f93676a91c041504c650.torrent 0 +4d0a16b4962cdf95963dbcdce17c3818a741d952.torrent 0 +c5e4bb89a612e60fe0db772661ce6e36ebe65d66.torrent 1 +ad3ad6692a08d0fe5de05b784fec2fec4f2fb613.torrent 1 +01e97ff1097e64f34bf48b4af1503c45dc50ddd2.torrent 0 +a34c71bc36a7b1f40a08768d694f924e8a323c63.torrent 0 +708cea93fca76d94d96e74ed89fb63a6baf08bf2.torrent 0 +fb9c84f63cd35b7e47c7b62825581cd56f18c7ee.torrent 0 +7f9380501af87f903bebab0b4931e60c427a5b19.torrent 0 +34ec934717784f4b4296692b4ae5f6b75ea59450.torrent 0 +acf26def9cdef5407aca74519af581af45416b73.torrent 0 +092dfe143865eb278b53ca5eebbf7533478b5395.torrent 1 +017862cf6ad5fc7ff4be3e374f597e34591dcd26.torrent 0 +079bcd5c7f6de2c42aafcd22a8985e38d0962d7a.torrent 0 +788bdfd237393e2e3167961093fb8da0d5c085ee.torrent 0 +2f4efca4a6d488ec12b38c0652594b4eb84d744e.torrent 0 +a273f043a4fdf6c6f3d10908ba27bf53f3071060.torrent 0 +018e084e42404fa56baad9e427d543be3dce8d97.torrent 1 +c82276cd880c4c32618902c9712a62881569b4f2.torrent 0 +e21e9315d9ff22846df9d623684ee20e1d00f798.torrent 0 +5a56fafe05a61b9aa8e71ac8427219067e1ffbcc.torrent 0 +2a5ea701157f3b1b4f12559e872a8f3eeaf728ce.torrent 0 +8a00d1b53a00ca5c728a153b5e93d3ad664d60ea.torrent 0 +a32568d76c1fce6dd654b01a317014d9f430072c.torrent 0 +4f8074dd5c2da2df794ba0f691138414db985ea4.torrent 0 +d60dfac8e2876482417d0f30c89ce29919bc98c0.torrent 0 +e77d9633f884a6df87b109e616f070c28c6e8f88.torrent 1 +3523acdd8508a00a8bcb9ff9790c3d42ac375459.torrent 0 +8bd6d989c4cdd064bcd6b5582e694bf2163fc680.torrent 0 +e8e077de555397141ecdf025b88d50234bb20e33.torrent 0 +69aa94f73d6e8511ca4b812009e6239082f16007.torrent 0 +bba85061d6fc311e0f3df857bd16e3b667c16fc6.torrent 0 +ea0c2306f3bed8f1f96bc45760ed5d19078cda22.torrent 1 +5ba477ecd6abcf387d74c5f52431482c292c03f0.torrent 0 +17b37292a769d31ccf1fc3ac3a362aa1bc300a2b.torrent 0 +d342e9e16a9e2cf92c16c0766ca18a861bb00b7d.torrent 0 +18ca0d0c3a438be6ee48df65387904b1ec76f1b9.torrent 0 +e486dbcbb0b7512fd9f93948d4357fcada80f003.torrent 0 +c222620b4c80a53c78f70593797373fa25bdf980.torrent 0 +2156962c32704a6603469045e32ce9f2473b9d3d.torrent 1 +3ddd44324609100d649bc42fffb28837add6541b.torrent 0 +1c9f79a8853f49b56ea3ce8ebffb044563545e90.torrent 0 +913d601fdf2ecdb94514308a032b154aaa8bd120.torrent 0 +8659a99888f11656b3d05309f5b8347054695027.torrent 0 +97680e230c65113054068e578a850125def1eadd.torrent 1 +0999c073637a03131ea8b9c2b06eb7aa7af67243.torrent 0 +30b097fab079c00518b3e7dacb1c0e6d0a400445.torrent 0 +b3249ef008a8ef802900139db1f8d3d7059321c6.torrent 1 +4b5ff9cec2934e00c5e633f19d2d60a25bb04e7f.torrent 0 +0c25a80743b8e3a108d6bb4c74b48e63af869971.torrent 0 +885836fc3385a56e79b36898a37cb122248ecace.torrent 1 +ab64703699b1d82b8f2617900cea2cc54eee70b2.torrent 0 +5cd06160a51bccb8c55c75ead1daf0483221650f.torrent 0 +25388b11e2d6cbd2ad3d6bb493b910d6e9f5f828.torrent 0 +f90b1f10511c6bca60c34f9ac060e5396460d277.torrent 0 +2038d768a02eb46c9505202c206ea74bd51b2482.torrent 1 +cf5bcde32820c135dfb03d4d4543d4e1533873f2.torrent 0 +bb8dcbf3d2416038e11df408077adcd3c2cb32b8.torrent 0 +a7436b7f77414828e41055a91077e7f68de40ccb.torrent 1 +b78e707cd17c096cbe19d82e3fb6c0588e9295d7.torrent 0 +ac7c7cb156b95df38d58869d77eac15e8e1e1602.torrent 0 +fcf92a2140e305560e43274a93a2d68f0f7d8c3d.torrent 0 +a851c6559f19c6e9594b81b979e74a519bfdb443.torrent 0 +1215218cffd56861186818891e022a72f66b7496.torrent 0 
+8d23c1e6d7ffe90074ef81a07fc9a5c3420c6a01.torrent 0 +7eeaf75f3a1416520f1e89eb93c1ef49bd2a5ba6.torrent 0 +f85f490320d6de9b24d4d94efb2d9149b2cd3e90.torrent 0 +e904116034b62f0c538829dab0c1422ad395f6a3.torrent 0 +db7f024a3093f636d617d6b1eb9434d3c38e899b.torrent 0 +ee61c10a2cd2e4c4be4f04cc0a1d7980e620cc1c.torrent 0 +4025b6d2238173d5de240deec56d26cb737e5601.torrent 0 +6ce7d92c64e6eaca3fee667f1f7b0b41e06ca13b.torrent 1 +edec3c09f9d4c419841ea426d671bbf558756afa.torrent 0 +c23a1b967c0c9318a0dd73c9d53b8b27e5d817fc.torrent 0 +172aace921a82117d340c831db20521546ef92f7.torrent 1 +f4ffdbe843b27ee92f03fb410113cca53aab351f.torrent 0 +49ee5e72676d1e4ab507a35454efd13d529d695f.torrent 0 +4c54c380caa507a34ac62081a9f9870f1cd3a20a.torrent 1 +74a7f8b4db5d2a2e65774998b0370b830422af69.torrent 1 +373b15142df3bab42df115ddb1d127a8647b3e37.torrent 0 +9349d81e4aa7aefee92f1947848b3f47220eef95.torrent 0 +e7ec1f9eb875603a78cc7e921f1f46041fe56581.torrent 0 +2e9fa49ceb1511e0a0b6949b674ba7aa2a09b395.torrent 1 +9495e2cb3b10dfd006b4b06d545cb0988737ecce.torrent 0 +dbb4b50af03b6e9a65fef20529cc494a529ea5b3.torrent 0 +2cc02b6b01f7a977ad987abb505e8611af21434e.torrent 0 +5a3ff03ec748aaeb3c9c60a7c78dcdab0534e29d.torrent 0 +7ca9e5c5277fcfd19d1f4b17ec385e0abffec411.torrent 0 +c4dac41388709493088d87d27fddff240b3a4a9a.torrent 0 +0a87ffefb1ddbc61245751cfa7edbe2b0669a331.torrent 0 +01ff91a0b0e348498f5405bfa45f6c6296561022.torrent 0 +396618757924600d0276884e2dc599455f5e278d.torrent 0 +cc6d7dd25c6ebd2beeef530018866722832bf74b.torrent 0 +c55292b5b864f054fe695bdeb94b99d87d9544f7.torrent 0 +fb32fa0a407cf6704c9ea6b8498a74514f9a1cd2.torrent 1 +3f87f7fcd63066fe677c570247061ab58fdda6f7.torrent 0 +5679f6a75491d114529dec117492beb9066e6a8e.torrent 1 +8a2fa028fea6f776665739393857312c365efcad.torrent 0 +3fa5284286d5d770671481cec206803ae79b8e2b.torrent 0 +496957ba5ae09d8dda2d6fc5d93629fd0a3b38aa.torrent 1 +871d2074f0097a0bcc85b9b789c41c88e7851d13.torrent 0 +07005e2f2b30b58a3d9cbd614430407caa2b1fca.torrent 0 +14c335618cee3cc8a598091470c129d74f2c67e6.torrent 0 +1677975f431cdf598d00d5b83393b67a217ede0e.torrent 0 +3ba392f8b1a72ff3fe6ebe5c7f75f701ff1bf10a.torrent 0 +6ad07c3b45b9a543e79470820562cbd08ae0380d.torrent 1 +cbc54d238a36e2744d7c7530f96b0b6730c1ae4b.torrent 1 +4f9bcb3168369ed32db3ee35864512d31eb04c8f.torrent 1 +1ecc21054051115c3da72a933fea7a87eed79c7a.torrent 0 +27091c3ae4d92c240e18ba3b3f40e756c2098ea8.torrent 1 +457f556355d02b78649495315dcbd76f9142cbf8.torrent 0 +74ba389f1ef274b7f8c4d40ad95204de168caae3.torrent 0 +18d2c05aa544378430e0df2cd50c260b60e868d4.torrent 1 +9e846fed88cc806892dc7ccc50870654c5d2374f.torrent 0 +b60ca26e9a58d5a1ee3d81d0dba22ffdd61ff9f3.torrent 1 +e6b864373b8c636754b79a9061fae57791925870.torrent 0 +a98cdc871d7bed19868c4f7bfe97da338bd390d2.torrent 0 +2bd3a63129294a43aaae13f5b755755c7c4dd286.torrent 1 +507596abf82fdddd37ad983f9fd60a5eb8087e45.torrent 0 +25be7aab776e04b889ca79951889ed277661622f.torrent 0 +1ea831ef1e1e9c04628701b50de97229d3a04695.torrent 0 +16b9aab81442c2c429f817af8755bfc29de487e5.torrent 0 +1e69173e765200e82b15c4134f652451cac109c4.torrent 0 +9a94dd36d34ab99397b1209bcc6c02812d07cfc8.torrent 0 +7c66e567e0dc8186008ce575e61c1be3bc5f4742.torrent 1 +ac1f32a7158d0a66f75dd973fbed84cae13ad0a2.torrent 0 +4ffe40ed69cc24d7f60296c59f67c50e4ddde6f1.torrent 0 +bf5432b23156a078e6fa0ba043115bcd726043ba.torrent 0 +adc911c0928099284f8a2ff72e79f26407adb41d.torrent 0 +9bd540e3886824f36a05fa4ef74bbe8266fd0166.torrent 1 +43c64e9b929dedf85361ff6c27ad851f3867c403.torrent 0 +53f12f2114e27631d70ea031d8e4a559715aec98.torrent 0 
+ec2b2594b44cba43cbc0aded93fdce59412e5ab4.torrent 0 +b8f57855ea9d611cd32f8679364ff94161b12f5f.torrent 0 +8ee4804f39d46f86021b835c3a8715fd804f61c8.torrent 0 +7cd945530687cad9141d8adfd7ae43a3bd826b37.torrent 0 +4844b96a847ddbc0ecc1638e3782b6ca08d926eb.torrent 0 +8b86b99193923f80523c7a4e1c2df286a18ba89e.torrent 1 +0408fced301c326e4a8250ff5890858313c06814.torrent 0 +c43ed0358ad64c1fdf2477b59fb772396dd7e168.torrent 0 +8c51a75af645c2cbda21e42cda9de93f8df864ef.torrent 0 +5e55e6485549ff25b911adb8a8bc8082aacc4e94.torrent 0 +defbf571e08cf1fe0fecc4bce59bfcf34eba8202.torrent 0 +4efb7043fba21febc6a53d8b934808fa954fdef8.torrent 0 +6015eddd30caa63dc3cd10d1d5d98236a534636d.torrent 0 +8f5c30ecb783979091a109786b6cef3863eea67b.torrent 0 +54ebb28fae815609f2d4965fa8da570ed77c0e01.torrent 1 +deb92a6b79959726a77a15c0de4e4fde20e6255d.torrent 0 +7b002da514767b2639b4a427b93b0b3b7282acd9.torrent 0 +739734d5ec2fd2d758b0e69bc9219292c411a07b.torrent 0 +0bf940ac9e5c339cccdccad9ddb391194ff9384c.torrent 1 +703eb11123ce22a3588221955d29cbe9420a628e.torrent 0 +500db35680f21ec91b76a6abb556eef629a4b7bd.torrent 0 +b6c4069d6943403c2048d4550241a2a75917c16c.torrent 0 +83f06151daa7652afade1340127f6e4448676172.torrent 0 +67c9d8536481b403b701ab5fde8e22256328eebf.torrent 0 +f546d7fd81ed0cad3e4eada125b1a2f1976c8fad.torrent 0 +9e513d8f6c7f274662d8983460a7398d41e7f7fe.torrent 0 +68157a0fcb1b9e2adb0f83810ab1ddcda711de12.torrent 0 +df600d0649381a6589aa71e74837224121f7ff8c.torrent 0 +95429107c259adc1e4cf42ee75c40a20c97e25d3.torrent 0 +3152aadfe992ca7eadaa73d5a27efa70c241ae63.torrent 0 +5d09253da958c77300946ffc1fef130f790223cf.torrent 1 +62f0fdb762591c3f8e48a05da4cb9682c489d52f.torrent 0 +f5c3a1dc21577278cb9fd6f203335d3085c78b41.torrent 1 +54e6317cb17961d473727199dba150d3d907e3bc.torrent 0 +7ceb6d7d9538974e0d05863cca9f91d29402c435.torrent 0 +7578f3cf32ede5f8bfd4884c5a63918857343a54.torrent 1 +cd512a8ea62277ddc9ed11b847b08f8eb399f54e.torrent 0 +c3eb903ba4f4094dca77330e824e537a64ed1e35.torrent 0 +48b07ea7043484a31619ca8377c42f7c32021137.torrent 0 +b32cc3bdd13d3826d91f4c433315e1ef5927fe34.torrent 0 +e82bdf81d9023003500f9195418157e8ec6c97c3.torrent 1 +7c6efd8e62c6768581e18dd39a747cdb59bdb5ca.torrent 0 +ca53686f418f19385ef7234c11cf13f15b7a3624.torrent 0 +00b274d1faaa04cd86ab71b59f127b068482127d.torrent 1 +6106fccafb09f9404a806a02957ad29b1fdd442e.torrent 0 +668500a6af1524b806cd80c6e6b94f186e589d01.torrent 0 +e6af0a583b577f7236f70016d8367e0eef411713.torrent 0 +cbd28805ffbfbb043b1ba3dc78673d668b0dc706.torrent 1 +1c9c1e97f2f23e7553e4378cf179ee0e34b6c232.torrent 0 +badb70304c45784d32567f8f136a8dd3eb0e059c.torrent 0 +7415d85cba2b5b202e4fd90de56fd214813fdf6c.torrent 1 +7ff0a1eb1d49c3cdac0ac63fd9f7e39cbf090def.torrent 0 +32e437fa249e39b37c61e791a783753c8ffa9bb2.torrent 0 +406fa6d2ccc72636b4e26208505ee7a54fe0a5f2.torrent 0 +fc30227c768ed0dc0fff10f3371d83d12be61d37.torrent 0 +280f2e5e59e7069cd4bdc3042c45a66423ef7c4b.torrent 0 +11e1ea731d0944cae8a5e947399299fbe9874f8a.torrent 0 +f786b8ccce6acb70a78ad06571003647e1c7f3b9.torrent 0 +070023f2113d0dff6d490cad6767b9da0a44bfa9.torrent 1 +52c551ca937f6d938aa11a0aef458c62ebe02a8a.torrent 0 +de781610f70a6d72f7e660754731c2fee72f6225.torrent 0 +16eb2e4a35354b709b40d180eb73850401f1c44a.torrent 0 +14ea2b8cfbf490c0086f6b586d8a7cbcc3065eee.torrent 0 +f655f8f5425f459b532aa8e04ae9bc12a63c15b9.torrent 0 +4a06a9245e31055fdb1b5d032b2efbeb0e62b39b.torrent 0 +747fb5bab0f5c1ddaca63346aeb7f91353e907e8.torrent 0 +cf3f8dee35481357e515e95d59d6352e154092f6.torrent 0 +886d91592ed3d556fb9d6b9e184803d5c067ccf9.torrent 0 
+6b4374b1e7384f127062ed582d92cc01a79690a0.torrent 0 +24c43fdca08456690e8b5c8091e0e1a0d8127853.torrent 0 +b632481ff41b9b639c86f828427fd2afa3411642.torrent 0 +08dcb7ef5c62da5fb8bb7dd45c9602957a08eb90.torrent 0 +553440c3a56a2936e120d9f98557911cffab5b20.torrent 1 +e568f2b581a45d3c5ce472c3bc05e357755d1f71.torrent 0 +d615828374d89e8e4f3913ed21f41846d741bdb3.torrent 0 +f3e56877d7a8f6d4384b28931646d60b044c45c4.torrent 0 +c796a1b0b39593a497b0dd6ed317452c27e87d5a.torrent 0 +58a6ca8d579385ca5ca8c648f5b4e144fe19e69a.torrent 0 +8004a74fb83413ff2c79cf637440fcd67ff85cf2.torrent 0 +097e9aeefee764233dcd35a966d7c72469107ac1.torrent 0 +0a664af7f5d07c6ce026e5630e77a1cf0b26d265.torrent 0 +540e9b99a4e4b9ba3b1646ec1e698ce32e3547f9.torrent 0 +32f070391e4f00e09afffc13cfbb91c441de526e.torrent 0 +7a22031ccfc5bd5d56d28218bdb6b3d9bbbd93a0.torrent 0 +9a37c7de98c9be499ca99d7431118cbc0d878c02.torrent 0 +8bba440937bf5c9e13901932904d6d0426ace8e5.torrent 0 +c28969e53ef7e93abcfb0007b78b25bcd0b7a36a.torrent 0 +b178550f3a9489d08de4826627b92f3886b624d5.torrent 0 +1dbd25f05882206bd86e2558c024402f5089fa7c.torrent 0 +fc99a2592b16c5dea6e16d0bec2a3b2a7ade3a4e.torrent 0 +6ab7f44cf8667d669781d4d34d3a4b21b2259303.torrent 0 +a95289f94b08c08741b800c77da03f44f1221043.torrent 1 +f7fbcae57797ba5021fe05531941f36880831741.torrent 1 +e89231cda0f83d467968b4cd2bf8497971854f34.torrent 1 +6056e0a17eaf63359ad90c7c887f9197c87bcccf.torrent 0 +21927dbaf0fa7d6a59914465e4ae387dd7da7d76.torrent 0 +9c83718243e2bd27679572b5727ca7a34d0d7025.torrent 0 +6b5c844cac4a870b14ee76c7e1628543e8f69ddb.torrent 0 +a846830b49fd21e5d8cbd675e2c520abe39f3f5e.torrent 0 +6975932653f44b4e6bf9de4d99f2bd1d70ed935f.torrent 0 +ff848fd8ec6cf61ae95479f4025dbd4042707af9.torrent 0 +24b5d1ddf8cab41fe12b59e560001a9f0478fbca.torrent 0 +432fdf0decf55bd4e99b05707b466ac1a1095b22.torrent 0 +4668a263b5f284f6071c125c45b13692b700b361.torrent 0 +e9eab16a7bb03cfc93592f5374e5e9a848e20ab2.torrent 0 +795e5248abfa4a5fb7340c46aee5d0abf3bbc750.torrent 0 +18cfb34b5063f06532ed2c2c1ebec9dd8fa044a6.torrent 0 +8392462bac13c8517ec77d6a7eeaf6dde6fff717.torrent 0 +a5b6d66b5e25ecb260015553888a7095483030e4.torrent 0 +b328b5fb64e0ac4b7d166a1ce3cdd2d6612536b5.torrent 1 +d510c30d7cce87dede0d2b1b13d5db6e8b43d649.torrent 0 +8059c24339887610b7f452b1780f555222157d1b.torrent 0 +32076e268831346a421878d929b71a58abf41c65.torrent 0 +06773b93b61a06b39442817a4006852de34bb340.torrent 0 +70ba981ca5cd109a919e56212c454d9cb954b489.torrent 0 +c4d3a805d4ef368979101f33fe51122526a4ccbe.torrent 0 +99c81697dc508a603ec73088c9ec10d6853bcfaf.torrent 0 +6976cf2abbe6da84196300e6061b837324b55329.torrent 0 +90612666c441ac19a1976f46aa368c8afa3dcd10.torrent 0 +bbbcdb6915b0415a6fd18710f31fcd270a5621ee.torrent 0 +f2dc91602f02984f49cfe4430546417679db8dbd.torrent 1 +769d1fb3f3562e6162c714620b389c61ef38d657.torrent 0 +9a5a3850d7ef9146075911050ca653841583a60d.torrent 0 +c24473f403dd42492b7d6f0e7593b918629942f1.torrent 0 +69b2e0ed0b3f78b8783ced47a266006aa0942c1d.torrent 0 +9c9c400c4f2240594f61c1914341d622b6fab567.torrent 0 +de37f00a88e72c850afc5849b0686f0675354fe0.torrent 1 +3468026ea12f5e7bdc3b4417547c4a2cc9ac0b7b.torrent 1 +7602edc8d51000ed58cdd1ed8765f384b84bc581.torrent 0 +f37153603fcc83b37231a320f92611cd6cb37ad0.torrent 0 +9d66dab69a9e3f457add6ad86f6ec0e6597929c3.torrent 0 +a7c008fabe48780a2a3cd8b8e248ea37c583f26a.torrent 0 +41c3912f02aa7406839ef59bc72c1e8a541bc27d.torrent 0 +833c9384beef3df14e73a9e1b05cb477732d0d82.torrent 0 +453c21b3e6508d8e9e56b3ad95a37f7f231397d0.torrent 0 +8ddc91d20efce572eb4c1bd108c3bf855f24a3e4.torrent 0 
+6ab67642a24ec7893521b346b897385996e4fd0f.torrent 0 +1439996b8a3a023b7c477d1ebd2be8f7afd7d4de.torrent 0 +b9cd3ed4c78ad03e7244fd45b29b64fd5ed5ac50.torrent 0 +3696046a0b4bbefbbe92045555b3818338b8a49a.torrent 1 +0e0e8ceb6c32ef9f573837bf50c13f588756ed32.torrent 0 +e0833d15f58bc8df1c525d8a79d98ac21f301bfc.torrent 1 +f27848004e4ed82317d2e4dfb32cfcd7c475cb2f.torrent 1 +db6b6b8246e8616fe595e2fe136800483bfcc288.torrent 0 +779e50bd88406cacd6c56babf432d91d2e63037f.torrent 0 +971de1bf4ea95303cc8b33462a0f09eaa6814bd1.torrent 0 +0bf097e736df5bcddcf1c58ee1953f9a01087594.torrent 0 +77ea117d9d3af9c4384dd6408e049408f62be698.torrent 0 +624348db99be3428c025b5cce9b393800d0ff5d3.torrent 0 +9fede771ddeeec5a83485ff4799b891fbdeeea5b.torrent 0 +3ba6435770317bb12598cef0b6dc91fcf19970de.torrent 0 +012bada5d52da6544f45cf5a6c9a654770bc4948.torrent 0 +0c337755dbee25f21b80e6d659f58157f30ee868.torrent 0 +423a1ef68fbaff7a34f03ba426cc5dafecf9a62d.torrent 0 +cafa8747668eed97df382f7ebf0ee7d5759e66c7.torrent 1 +fbacb23cebe67d5f35243541f964ae5ab0df744a.torrent 0 +d9221538e4453c75b772274add497781a3f3f73a.torrent 0 +b47bedda9d5f99d9602fe2668b5ebe8dd8761273.torrent 0 +a782e96c18862bf388091ab73f00b31c45906ab3.torrent 0 +7fceb894f7ff9799967a303cdf619b68a6721551.torrent 0 +039a63025d7fcb8d7716dd8fb393f899e9fd399f.torrent 0 +9697b40319e1995c0071f41180773e1cc6fd9b22.torrent 0 +ec1b93bea1748b91f2a03ef5c1d20aa098403626.torrent 0 +11f8a16fcb826f120b17776de5ea3287911fa8ed.torrent 0 +a895d3f4dfa0ca4a278441d683562cc4bd30beb1.torrent 0 +6141809f425cb4af8afb101793b854a770675903.torrent 0 +72b8cce298ced24966349dda7249d5471a4ed914.torrent 1 +d8b9c4a7fa3fba6bcff317fa1651250cfe490cc8.torrent 0 +cba4f74c177c956f1a98dea0951baad5b531a0dc.torrent 0 +c19eeb15da6c954a60cc101f645d6600ca6c0f27.torrent 0 +9b67018f6209d7d2083c4c9201db0daa91d40dc7.torrent 1 +b0f9f177d6ec3f069b799e872db44be0ba37dc33.torrent 1 +9d4249db16b9d7fdc84367bb972aaa07556b0eae.torrent 0 +0dc3acca961702591228482b0de41015b6d3b9d9.torrent 1 +ba56380469f4616e145edf49f517e9c7f6d5ddb8.torrent 0 +60bf6a6a33a3312ec701a36d6e95d3efee7041d5.torrent 0 +e0592f135681e7aa8e7f617baa5e27277e3460bf.torrent 0 +7288985558b2922594d326692d117ce14a553c9e.torrent 0 +54510a9c74b5bba8a1a6688d768ffaafb3ae07a5.torrent 0 +d3218af7a6b2b7de061ded468d4990e48b7c182a.torrent 0 +379b089bc6badce72a93ff68af20e00a20942af1.torrent 0 +89a08f72df85cae0d95752ff3fe37a78b76a1406.torrent 0 +7735650aa83a772b224b8c9a722266af9c96f0dd.torrent 0 +4ee8bafd4a9d270dea851f5aa87e2d558f3e2df2.torrent 0 +9060a1a85f0b024e74b679c0eb223a537f05b56e.torrent 0 +3fda7899566b25b9d8f99c3b497bf9a1bc6c1161.torrent 0 + +207f53a3ba488cbb062a993d52aef457fd57676f.torrent 1 +5a21c57912148907e0cca54f985428f5fb4d77f5.torrent 0 +9179a2faf3e594dc8c5d00bc286bdde23a716e3e.torrent 0 +6215794552e3319d5fa1005d98a0b4c339b4b9f1.torrent 0 +266bd4da3ecbb191fd767434724c98047a909260.torrent 0 +95f6f9f0d2c54fe0280b67cb5f2dd43ec5795759.torrent 1 +2c312ac7b3dc4207cc47d02d0bb8169a35bbaaa3.torrent 0 +2cd83bf98f5a27231a0b6d1d2fe720bf1ed6f57f.torrent 0 +17be3c2969b8be321cef83e821cfcc38d02c8cdc.torrent 0 +f2ef9f63c261e42fa5eefe3b9f7f06db1dda325d.torrent 1 +06459eaceb1508c09461ff8529f897b4ce200565.torrent 0 +3a536b2ea65cdefcff546f110da760c3c0732c8f.torrent 0 +cb3e8252ce363347980d9cad69accb572fd772f8.torrent 0 +9d21e3f61e9a7c8640ed187ec18c96a30961cba0.torrent 0 +5e7b25378e660fdba485b6141ea6ba809878f73c.torrent 0 +5584044b8d25f1047758ec95468057d7ccd767ab.torrent 0 +90ce012c61d1906d26d130e8a401529c7042f761.torrent 0 +69079da15e55b183f6ee9a67a0390657d51bea65.torrent 0 
+0389c7200e8188e31572b51e4bff320250ad9e15.torrent 0 +c019ab8400d95a70c12243670b3ea63f0e063da3.torrent 0 +967e52d027fcb4dfb4be49ec6956e7beb2b15dde.torrent 0 +02eee51952509ac24fb9d469759208e13d4e84fd.torrent 0 +35c7993800bee7dcc6d62090aded5adce6e87e5e.torrent 0 +edcc8dfce17ce2ab8070d0e1a77d7b9d97da8a21.torrent 0 +aae0fe6588dfb8d4f362da4c1d35efd97a0b98c1.torrent 1 +8ee7ea8e76e991c1424c4490e8bffc86ff8d9c1b.torrent 1 +a5af404150e005fbbf9acd0416480f3a8366a196.torrent 1 +ed8ffd943edd0c8cf515c0ccf31559333f8b2376.torrent 0 +73546999e67de155e4e43202b6519e4ad859b0af.torrent 0 +d952a89382d016b3f731dd4b189ef1de25127bc8.torrent 0 +423bebb81f693fbb4e740a213d30cff00eb7ed9b.torrent 0 +027ecfb36fdfda2a837683067003322fd8422dae.torrent 0 +1b9e72358c8a49c49d5327f6444db292c3b81391.torrent 0 +d5d432e19439ef9bbc5370a374ec766f6fb4fe4b.torrent 0 +4c34a3c3958d1435f2950efd8fb4218674dc55b5.torrent 1 +7967eaacd95e170131494c40eb1b4b8307a759fc.torrent 0 +d490873c12f1e91f60754be1ddbb250dbeba7ad0.torrent 0 +f66cd75a5ab993da814be7caecc34de786886f3a.torrent 1 +ba519adc28ac824c749b67587b0d151c8c01cf43.torrent 1 +595f7be5a13707d804c0e8c8553afb410ac72fc6.torrent 0 +37da86e182a1fbb473b1ff949b94489afc565469.torrent 0 +bd702be214cef5fba633622a0d73a79f8ca5f37c.torrent 0 +7c9d382cdba23e60462616bebdd15f1f499951cf.torrent 0 +f3fcc6396c1305c49a41e9ff10838821ed0f6712.torrent 1 +2eac539986483a0e6d862ee0684214291c8d78f2.torrent 1 +bcf50f3ca67f2182c7dd22010b7ecbf2bc5bf76c.torrent 0 +404f08c4f65216cc0ce5e4cfc8803307e8822ac7.torrent 1 +26b211d6527ac5f83294ace3b548c38d77df60bb.torrent 0 +639d096bbaa0cb329548bbb401a6a033c38638a3.torrent 0 +11bd90f6adb2414287b7c447fd9e68319bccb14e.torrent 0 +42935f99a8430a6ba6876da33ed9d136878f556b.torrent 0 +9048a5e60578da5a18f903f2f5644027ce57d7b8.torrent 0 +c34b9989d240665a648e2508ae2cb3b1ff35100d.torrent 0 +edb4796d8e72b9cc2f3a4971a6dfe7a493dba0ff.torrent 0 +9a0a0193eacc96d30a89df776f2f8c03a0e2d084.torrent 0 +78322e0aeb0739a6d9828e7e5d1bc874d1cdc082.torrent 0 +4f48b8d05cc033abd70478d4da761151aeafd6da.torrent 0 +aa90f092c2bc70c6348befab02424660a916339e.torrent 1 +5d33719d8ff97753251d36715cc49376438a424a.torrent 0 +e0647411dbd3113fca8a23fb45d968ae14dee2bf.torrent 0 +f255e60cb48bd0e474fcf0378596e721b6fc2eb6.torrent 0 +29e52d19b4d20d52b2f335b21f46ad3169b69e9b.torrent 0 +2e8d98315647966917feb71ef7dfc33d6c7f391c.torrent 0 +5de80eff372cc421f6179f576717cdd7af5f382b.torrent 0 +2ee3c5a738b2bedef5a99c7153ef914a8636f731.torrent 0 +7e3e5d8ea222b6754adefb84276b39cc26619d3d.torrent 0 +c656171b65bf6d728567f98f3b7c520fe685c6d7.torrent 0 +47594441ce8aec2707954fe8060ecbb33677276a.torrent 1 +26d43b76c7cd263587277b944c50502e984d531d.torrent 0 +ea34decd4043b5243949ec364bc48d6df4f1970c.torrent 0 +49af95ed245775987fb48cf4d4484832e77c15a8.torrent 0 +f125dc0e2315391ec2f48b5eeb232729521059c3.torrent 0 +df7590938ccf6614d3a551779d54015c3f591dde.torrent 0 +05c3b9b4748c3feaae1e1df0350172c451baeba8.torrent 0 +3e1d95b5b953f3a514fe0d237783025174836fa5.torrent 0 +6e5de140a2a3c02bf7b53b7b22e74187de027091.torrent 0 +70ed3ecc1638eeac598658e52733a58bacb47b72.torrent 0 +73bba5e1619adeeac44355038904551cf24bd7d2.torrent 0 +70a8557f18118c9893aa42e18d9ef7dde54163cf.torrent 0 +7a809b651fbd03edfe165728192fb17a300af0fe.torrent 0 +856fdf93bbe20a191c3d95bfe5b5dbcdf1c8abe0.torrent 0 +6e30e90748012adffd5927beb6115dc99523375d.torrent 0 +7dc17ea06e85b6128772cbd5ce6c7074a66c8a76.torrent 0 +f94741b3f81c75dd7ba799022eadf927db1bc254.torrent 0 +903078049d0fb77ed74f23c946d9f811920e886f.torrent 0 +15f233a3da472321c245040d2200ada3d89140d9.torrent 0 
+b86c0970d4fd0943c6826eddb3f9b14134e2f983.torrent 0 +1c564727484d362b6565a5575e0091996ff8c510.torrent 0 +bd61049a90dcddd666b9dc6f63f714bf5200df2e.torrent 0 +d60a14d3e15ed7b8c802e1fdb1563b1eed653140.torrent 0 +a626cbe37c1bd1aed4a3e37e8261a5b546ffc083.torrent 0 +616df771315bbceb2a1ec62d8c745389746efbbf.torrent 0 +a1031460942e58fe62f4f6ebca2b0255907a2c0b.torrent 0 +d22995c0f1a4e1d57e54f155b494e72dffdf6a32.torrent 0 +8d54167a68e4bd5406ca52ed1d9fff2ed09f76a8.torrent 0 +a1ecfe1f58561faa0b3406e59cb85d4edead3a73.torrent 0 +4cd398a724a018af426612de4e682be0c5fdea54.torrent 0 +35bfebbcf97706077f30414e04304e84af32f0c2.torrent 0 +1ca8172a09f8b447cf4272b093b4906bd6f5eed8.torrent 0 +a833f389b1d4d8684d22c1e86aaf0e87f0020131.torrent 0 +9c5eb440729153c64c66b4069060dd02d5324ac3.torrent 0 +eaea8afa9b56a2ed1ef4e061fa9aa3ef3863a89e.torrent 0 +e8464c25bc066eb724d0f0041140ba7f82cfa415.torrent 0 +1e355e26974790320fe43548c573cd4584566d97.torrent 0 +7d99c289f08caa145f567f058c8a3aeed746860f.torrent 0 +6c1092e942a208c5f2479c241a80dfbbf17a170e.torrent 0 +77f21de57d1bf926b0b646b2c6bcc79fbfdded6b.torrent 0 +56d7976250bf50d9c5da173ba32e7f3ea2e23ba0.torrent 0 +3d42342e1f67622d29c9eba55b8919c9c0ff8a9c.torrent 0 +d7a0c3fab3307b6d7b607007887632efdfa20d23.torrent 0 +f564751d49ef68b98f21c6499443e2dfac65049c.torrent 1 +a6d9d5f32b6185fa7e9a6acb264a51bf25431599.torrent 1 +5e9f230010e903e7e698517498e6a50b336895d4.torrent 0 +b95b9e6731f956792b30753a83e50edaecd675fd.torrent 1 +8e0f214e67a1594f22f2ba1f67566e5141bf2ba6.torrent 0 +6ab8eb342bfbe1d42dc4563acbcda4322cf0f2f8.torrent 1 +d22a0d55da44c9155a041282fadcb336b193dcaa.torrent 0 +d648382bee706d662d4703b6c7bf2538825eb2dc.torrent 0 +e9106f69ea30e860ca30c4e271143e1f4ba8b96c.torrent 0 +7cefbb2f42219e298fe657df2363f558f3b81f93.torrent 0 +5104ffa96a9ba5f21fe76b117a8fb801a5c03970.torrent 0 +32c92c96038ef22b13b02775b2de0db9fa403024.torrent 1 +2a8875a6c9b9c4ad26c3b2685ec1d6c4a45dd363.torrent 0 +f5173f39f00b0f7a1e195d902880b2c47b4c67c2.torrent 0 +b18e08408ea34c530cd64dac76133c4aeb221633.torrent 0 +71c405e6f2c004d524d93dfd0bf1ea78738ed26b.torrent 1 +9d8f92d1bbe469e85aa53833defa6f9bf7d69862.torrent 0 +3672d932737d8db194c7178a04ea81eadeee149f.torrent 0 +5b53c66e6d0aae3f6740bf3645669d0b3bae8573.torrent 0 +4b9806cc82b9b1b77a89cc2dadb9284d5fabd56a.torrent 1 +57af579ca5d44b7df9a3ba105e2d41cee83fea14.torrent 1 +f060f5197529936236d03c57e286cc7e81623511.torrent 0 +73a2c5aa3af73352110441f24cea430bfd774811.torrent 0 +92a9d8f854b1b8966ff9e70a7d9b43f14cd41270.torrent 0 +b0bb24e608c0595be61851eff3e12b0046085171.torrent 0 +371426e02ba403a3e9684eefc69645c60b98902a.torrent 0 +aa146b9459c01ce8a2501017afa2033619b63e2d.torrent 0 +b0c5044c23ec186c41938af019dd1feceec74015.torrent 0 +b20757705cdbb441fd9170ab78ffab6d561f4829.torrent 1 +db96c3121a088caff99b601288023c50fa4b95d7.torrent 0 +4207bcc1cffc24b2f1b753060f7bd0326363a3f4.torrent 0 +2c7084b813d5bec70567aa3428bbe0aec8b16cf5.torrent 1 +88a5d7109ca272b14f21a1ce7c36732fa6c75a75.torrent 0 +45cdc8e351c922eff5e1069924f499a6d17b34d8.torrent 0 +5dd70f9f532b1292b37a32dd223c1353c55eca12.torrent 0 +302b959d79246514e3f9b5dd9fe0bd92f5ff4e77.torrent 0 +54324bdfd509de5585274d27d62823a885a3bd29.torrent 0 +db23b639c5753e5fe0ca06749a0f99d031adcf3e.torrent 0 +5f7333996b08738e77311b259b17b6a1a485d3d7.torrent 0 +041e7b8c72a1dc16525e959f7a48ee935632d88e.torrent 0 +fcce57aee58e7500d8c80e277ea2bc213aa9a38c.torrent 0 +e27046897ea36576b42e94dd954804cafefc4c0f.torrent 0 +1ee7c407bd7019bd455096e223ba3ec0bfbdd6e2.torrent 0 +0250b64a97e24f7b244a167020265c05657d0e20.torrent 0 
+4809826fb8a824c9d5820a279c6ccecd1229eb0f.torrent 0 +7c25817c0430c8a107aa60e78dec23c99807be7f.torrent 0 +dec9edd2af12e86decdb097df5e32cae5760ac3a.torrent 0 +c24d4c888f17c03312b18b7b877d69a5df68f5cb.torrent 0 +f4d0de3c0e6ead45051b323dad009790615806aa.torrent 0 +a0ad5489fbb6169ee99df9b1abe91ce78d63bc76.torrent 0 +58264f7d52bb68e4ad597e84f66af38cff8f70f8.torrent 0 +7a0f003476938f10964f52df49425ec9b4176023.torrent 0 +c92974951157eb77bef4d4aea6d1d4a1a77f813c.torrent 0 +7a226d1d38e27c22aa5dc5af0699ecb9eff9beeb.torrent 0 +2c761cb39aed26ca0cbb1faeb4f1cbcee9d94cc0.torrent 1 +6e1b0d68bfbd6be1ad6d74cfeb6c2c683807f6ba.torrent 0 +7efdb2f1662fbd875593f9ed1a64869daf9cbc13.torrent 0 +40a7ddb3771799a9d0c24a0be8b3b92c686fdc72.torrent 0 +b1125cd9d8b962424ab3e9c3e89e50ed22c4aa0d.torrent 1 +9972c4891df095a501613ded4705b734c12efc54.torrent 0 +74597c5d7c66b687adf061fe4b5e36bb072e2648.torrent 0 +973918360a5085b534b289068e66bf25e25550e3.torrent 0 +e5701fcc6950f1df31b921089df81dc388531a8d.torrent 0 +29ff1c712bf6dd7e6f4f49e10a7abfabfc7c6123.torrent 0 +4c991624ca3861adfa3ccb3d220efc2d93c4a366.torrent 0 +f24d97ef1c39f2379d92da71b213cc99ee536b91.torrent 0 +0d7d4c0ad591e53e7439f9d37774a461b12d9a2c.torrent 0 +4d6a15dcd9e04424782c096292d7475f885910a2.torrent 0 +67b2b891def55a09fb92dc3c66dd50714e130410.torrent 0 +7ec04d09302663e7e4d3bee0dd674e490c1f55fd.torrent 0 +d71524fc824b0bfcdc5219626eb69feead234f25.torrent 0 +4f8fac311167ce769a34690541670c240d4acd3c.torrent 0 +21a5ffcb922ca92bac29087c81e4629063494471.torrent 0 +303c83fc1c2fadd5e355a177fa39c405b9460c9c.torrent 1 +611a9eadd71cb2e29ccd8a68f1ab6cf57556eb57.torrent 0 +93c83933b4270371020f2687e82b7c0960f234cd.torrent 1 +83a0219c2321dbb62e5f1b70fadccd4250c7f77e.torrent 0 +c42829ac2d35da9d34912b2259dbd198cddcbfdf.torrent 1 +81af068dde1462b216788bce4c8e430d12029744.torrent 0 +a0054c7940c75137dd4e2a859289f6d125ad0457.torrent 0 +976206f6b63b809807ca730136e79b169a2ac5e8.torrent 0 +77433717f1fa992509afdd02ee421e1e097daf42.torrent 0 +7c770b0ed129890833ba922ceca1160889376b70.torrent 0 +075ebb74478c2f6bde9e83c267dee2e32f37d973.torrent 0 +e2391031b3e2a473e5663133da31500acf3da353.torrent 0 +9c79eafb650961b48a4be77c6965b088d882f049.torrent 0 +d16686c35ca1d99689c6bfc659a7b12fb125c056.torrent 0 +9fc763cd567c105134d66b56bc8a5f589e2e9395.torrent 0 +8a6dedaafe0445f15baffa0789051c2f882465d2.torrent 0 +29b14e5d79a5a56ce6c61bc84deaace4c529d57a.torrent 0 +a2d83f944ea85ead6ea296793f73ef87de118e91.torrent 0 +f66eef280ae8fdf3b1a35e2d3a8f9d633470b21c.torrent 0 +b2538fb9171c2334bc20824b294861f5593536d1.torrent 0 +a7a1e9dc8d45252489f6454eeaf8d5d93f6ee63f.torrent 0 +deff5b8dbdbc1a4df614d99056ac5fef8f76db3c.torrent 0 +821236a480c35b034fc15ea4aa9361967b08d2f9.torrent 0 +2f51d2b73451a242efc4e28b605167999c591687.torrent 0 +1d2fe11ef58a1eda102efb1c9cb1b58cbf14037c.torrent 0 +47b7513047cdc7d178b156f675b87556446e749a.torrent 0 +2bc22fffb49587a84ed865fe076677dc960ff1b7.torrent 0 +388689e2967d267cc2d6c3d5e12e077032957015.torrent 0 +81734158afb88925526c9cd25941503f94c18797.torrent 0 +fc25d29c5a77265df68801368d7e56c4122d2aad.torrent 0 +37fe1d8551dbc20c4505c8430c7fafca5e4dd07f.torrent 0 +a45bc2c2b02d2d33310d34b9ad0ac2d9b064f52e.torrent 0 +2082d65c99aa192f5a463f9702aae835a8c0ebe2.torrent 0 +9bdb4887ad943141f5ba3a273f4c8991f4fba9e4.torrent 0 +2e3e09e9cec9c952a3662a1505a48c7a7500a7de.torrent 0 +760e7a08aad86952a77deac93af518123807c854.torrent 0 +48a350941f1130f5ed79132f42aad7df2422bb4c.torrent 0 +f2a8a49f56971e3f69bf726c670e4084d9b7e54e.torrent 0 +b4fedda725ed56a823309879b1514c358b7f97c6.torrent 0 
+2462a307135fba344a3c73389f5d02341aa62741.torrent 0
+d795283b83104d275f3daef52cd6290e5a24d4a1.torrent 0
+a6d04a707ada1a44a78ea624dcbad8d9def218eb.torrent 1
+698aa0ff702258b93f1e01924d25f0851e9d7652.torrent 0
+9790bd794a6215351b4c1fb9e83aeb30af2ee835.torrent 0
+e0714586be96394c26ba3c12e57af33addfae6da.torrent 0
+58e2557cb276c9817e06e51b3e88f05cfa61ee47.torrent 0
+72226b34e32bd5f11814e16762560e72d112c0c7.torrent 0
+cd74da8aa52fb389751b2e9cfc88a752dbf1d5d9.torrent 0
+891dd836a4fc5bdc19a05a77949c1cee5f6affd1.torrent 1
+b7c13e6116ee0e44dc96b9dead49ed2afa1e6c15.torrent 1
+1748eb33a8b230fc0b530350b672b5f998847b19.torrent 0
+b577db41222e0e6c422696cb0ff3e44ce20345ee.torrent 0
+c37799a41da6df84d6ebf9b1d891acc28e5aa8cc.torrent 0
+60708d2aacb818ae14bf12a2ffb3c35dcaa0db71.torrent 1
+d9f992bd456d84747d691b35c83c42976f00d5b1.torrent 0
+b1ea6b6f99a940db9811a0c0c85fd47ef1ea7008.torrent 0
+63e18e8b75670fbc312e93673dcdd8cb224e258e.torrent 1
diff --git a/tribler-mod/Tribler/Core/API.py b/tribler-mod/Tribler/Core/API.py
new file mode 100644
index 0000000..e8c6de3
--- /dev/null
+++ b/tribler-mod/Tribler/Core/API.py
@@ -0,0 +1,72 @@
+from time import localtime, strftime
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+#
+# To use the Tribler Core just do:
+# from Tribler.Core.API import *
+#
+""" Tribler Core API v1.0.2rc6, May 15, 2009. Import this to use the API """
+
+# History:
+#
+# 1.0.2rc6 Added: torrent_size (size of the .torrent file) to the remote
+# torrent search response
+#
+# Timeline disruption: API v1.0.3 was released with Next-Share M16 on April 30.
+#
+# 1.0.2rc4 Added: Session.get_externally_reachable() which tells whether the
+# listen port is reachable from the Internet.
+#
+# 1.0.2rc3 Added: Session.has_shutdown() which tells whether it is already
+# safe to quit the process the Session was running in.
+#
+# 1.0.2rc2 Removed: [s/g]et_puncturing_coordinators in SessionConfig.
+# Bugfix: [s/g]et_puncturing_private_port in SessionConfig renamed to
+# [s/g]et_puncturing_internal_port.
+#
+# 1.0.2rc1 Added: set_same_nat_try_internal(). If set Tribler will
+# check if other Tribler peers it meets in a swarm are behind the
+# same NAT and if so, replace the connection with a connection over
+# the internal network. Also added set_unchoke_bias_for_internal()
+#
+# 1.0.1 Released with Tribler 4.5.0
+#
+# 1.0.1rc4 Added: friendship extension to Session API.
+# Added: 'gracetime' parameter to Session shutdown.
+#
+# 1.0.1rc3 Bugfix: [s/g]et_internaltracker in SessionRuntimeConfig renamed to
+# [s/g]et_internal_tracker.
+#
+# Added/bugfix: [s/g]et_mainline_dht in SessionConfigInterface to
+# control whether mainline DHT support is activated.
+#
+# 1.0.1rc2 Added: set_seeding_policy() to Download class to dynamically set
+# different seeding policies.
+#
+# Added: Methods to SessionConfigInterface for Network Address
+# Translator detection, see also Session.get_nat_type()
+#
+# 1.0.1rc1 Bugfix: The query passed to the callback function for
+# query_connected_peers() is now the original query, rather than
+# the query with "SIMPLE " stripped off.
+#
+# 1.0.0 Released with SwarmPlayer 1.0
+#
+# 1.0.0rc5 Added option to define auxiliary seeding servers for live stream
+# (=these servers are always unchoked at the source server).
+#
+# 1.0.0rc4 Changed DownloadConfig.set_vod_start_callback() to a generic
+# event-driven interface.
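The header above says that embedding the Tribler Core amounts to a single wildcard import of this module. For orientation, the sketch below is an editorial illustration rather than part of the patch: the state directory, the download directory and the example.torrent path are placeholders, it assumes a working Tribler installation on the Python path, and the (delay, request-peerlist) return value of the state callback follows the set_state_callback plumbing visible later in DownloadImpl.py. All class names used here come from the wildcard imports at the bottom of this module.

import sys
import time

from Tribler.Core.API import *

def state_callback(ds):
    # ds is a DownloadState; report coarse progress, then ask to be polled
    # again in 1.0 second without the (more expensive) peer list.
    status = dlstatus_strings[ds.get_status()]
    print >>sys.stderr, status, "%.1f%%" % (ds.get_progress() * 100.0)
    return (1.0, False)

if __name__ == "__main__":
    sscfg = SessionStartupConfig()
    sscfg.set_state_dir('/tmp/tribler-state')       # placeholder path
    session = Session(sscfg)

    tdef = TorrentDef.load('example.torrent')       # placeholder .torrent file
    dscfg = DownloadStartupConfig()
    dscfg.set_dest_dir('/tmp/tribler-downloads')    # placeholder path
    download = session.start_download(tdef, dscfg)
    download.set_state_callback(state_callback, getpeerlist=False)

    # The engine runs on background threads; keep the main thread alive
    # until interrupted, then give the Session a moment to checkpoint.
    try:
        while True:
            time.sleep(30)
    except KeyboardInterrupt:
        session.shutdown()
        time.sleep(3)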
+ + +from Tribler.Core.simpledefs import * +from Tribler.Core.Base import * +from Tribler.Core.Session import * +from Tribler.Core.SessionConfig import * +from Tribler.Core.Download import * +from Tribler.Core.DownloadConfig import * +from Tribler.Core.DownloadState import * +from Tribler.Core.exceptions import * +from Tribler.Core.RequestPolicy import * +from Tribler.Core.TorrentDef import * +from Tribler.Core.LiveSourceAuthConfig import * diff --git a/tribler-mod/Tribler/Core/API.py.bak b/tribler-mod/Tribler/Core/API.py.bak new file mode 100644 index 0000000..beb17ff --- /dev/null +++ b/tribler-mod/Tribler/Core/API.py.bak @@ -0,0 +1,71 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information +# +# To use the Tribler Core just do: +# from Tribler.Core.API import * +# +""" Tribler Core API v1.0.2rc6, May 15, 2009. Import this to use the API """ + +# History: +# +# 1.0.2rc6 Added: torrent_size (size of the .torrent file) to the remote +# torrent search responce +# +# Timeline disruption: API v1.0.3 was release with Next-Share M16 on April 30. +# +# 1.0.2rc4 Added: Session.get_externally_reachable() which tells whether the +# listen port is reachable from the Internet. +# +# 1.0.2rc3 Added: Session.has_shutdown() which tells whether it is already +# safe to quit the process the Session was running in. +# +# 1.0.2rc2 Removed: [s/g]et_puncturing_coordinators in SessionConfig. +# Bugfix: [s/g]et_puncturing_private_port in SessionConfig renamed to +# [s/g]et_puncturing_internal_port. +# +# 1.0.2rc1 Added: set_same_nat_try_internal(). If set Tribler will +# check if other Tribler peers it meets in a swarm are behind the +# same NAT and if so, replace the connection with an connection over +# the internal network. Also added set_unchoke_bias_for_internal() +# +# 1.0.1 Released with Tribler 4.5.0 +# +# 1.0.1rc4 Added: friendship extension to Session API. +# Added: 'gracetime' parameter to Session shutdown. +# +# 1.0.1rc3 Bugfix: [s/g]et_internaltracker in SessionRuntimeConfig renamed to +# [s/g]et_internal_tracker. +# +# Added/bugfix: [s/g]et_mainline_dht in SessionConfigInterface to +# control whether mainline DHT support is activated. +# +# 1.0.1rc2 Added: set_seeding_policy() to Download class to dynamically set +# different seeding policies. +# +# Added: Methods to SessionConfigInterface for Network Address +# Translator detection, see also Session.get_nat_type() +# +# 1.0.1rc1 Bugfix: The query passed to the callback function for +# query_connected_peers() is now the original query, rather than +# the query with "SIMPLE " stripped off. +# +# 1.0.0 Released with SwarmPlayer 1.0 +# +# 1.0.0rc5 Added option to define auxiliary seeding servers for live stream +# (=these servers are always unchoked at the source server). +# +# 1.0.0rc4 Changed DownloadConfig.set_vod_start_callback() to a generic +# event-driven interface. 
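The 1.0.1rc1 entry in the history above documents the remote keyword search path. As a caller-side illustration (again editorial, not part of the patch), a sketch follows; the callback parameter order and the exact layout of the hits structure are assumptions that go beyond what the history records, so they are flagged as such in the comments.

import sys

def remote_search(session, keywords):
    # Remote queries carry the "SIMPLE " prefix; per the 1.0.1rc1 note above,
    # the callback receives that original query string unmodified.
    def query_callback(permid, query, hits):
        # Assumed layout: hits maps an infohash to a dict of torrent fields
        # (for example 'content_name', and since 1.0.2rc6 'torrent_size').
        print >>sys.stderr, query, "->", len(hits), "hit(s) from one peer"

    session.query_connected_peers("SIMPLE " + keywords, query_callback)

Calling remote_search(session, "creative commons") would then fan the query out to whatever overlay peers the Session is connected to at that moment.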
+ + +from Tribler.Core.simpledefs import * +from Tribler.Core.Base import * +from Tribler.Core.Session import * +from Tribler.Core.SessionConfig import * +from Tribler.Core.Download import * +from Tribler.Core.DownloadConfig import * +from Tribler.Core.DownloadState import * +from Tribler.Core.exceptions import * +from Tribler.Core.RequestPolicy import * +from Tribler.Core.TorrentDef import * +from Tribler.Core.LiveSourceAuthConfig import * diff --git a/tribler-mod/Tribler/Core/APIImplementation/DownloadImpl.py b/tribler-mod/Tribler/Core/APIImplementation/DownloadImpl.py new file mode 100644 index 0000000..2e1719a --- /dev/null +++ b/tribler-mod/Tribler/Core/APIImplementation/DownloadImpl.py @@ -0,0 +1,562 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information + +import sys +import os +import copy +from traceback import print_exc,print_stack +from threading import RLock,Condition,Event,Thread,currentThread + +from Tribler.Core.DownloadState import DownloadState +from Tribler.Core.DownloadConfig import DownloadStartupConfig +from Tribler.Core.simpledefs import * +from Tribler.Core.exceptions import * +from Tribler.Core.osutils import * +from Tribler.Core.APIImplementation.SingleDownload import SingleDownload +import Tribler.Core.APIImplementation.maketorrent as maketorrent +from Tribler.Core.Utilities.unicode import metainfoname2unicode + +from Tribler.Video.utils import win32_retrieve_video_play_command + +DEBUG = False + +class DownloadImpl: + + def __init__(self,session,tdef): + self.dllock = RLock() + # just enough so error saving and get_state() works + self.error = None + self.sd = None # hack + # To be able to return the progress of a stopped torrent, how far it got. + self.progressbeforestop = 0.0 + self.filepieceranges = [] + self.pstate_for_restart = None # h4x0r to remember resumedata + + # Copy tdef, so we get an infohash + self.session = session + self.tdef = tdef.copy() + self.tdef.readonly = True + + # + # Creating a Download + # + def setup(self,dcfg=None,pstate=None,initialdlstatus=None,lmcreatedcallback=None,lmvodeventcallback=None): + """ + Create a Download object. Used internally by Session. + @param dcfg DownloadStartupConfig or None (in which case + a new DownloadConfig() is created and the result + becomes the runtime config of this Download. 
+ """ + # Called by any thread + try: + self.dllock.acquire() # not really needed, no other threads know of this object + + metainfo = self.get_def().get_metainfo() + # H4xor this so the 'name' field is safe + (namekey,uniname) = metainfoname2unicode(metainfo) + self.correctedinfoname = fix_filebasename(uniname) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Download: setup: piece size",metainfo['info']['piece length'] + + # See if internal tracker used + itrackerurl = self.session.get_internal_tracker_url() + #infohash = self.tdef.get_infohash() + metainfo = self.tdef.get_metainfo() + usingitracker = False + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Download: setup: internal tracker?",metainfo['announce'],itrackerurl,"#" + + if itrackerurl.endswith('/'): + slashless = itrackerurl[:-1] + else: + slashless = itrackerurl + if metainfo['announce'] == itrackerurl or metainfo['announce'] == slashless: + usingitracker = True + elif 'announce-list' in metainfo: + for tier in metainfo['announce-list']: + if itrackerurl in tier or slashless in tier: + usingitracker = True + break + + if usingitracker: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Download: setup: Using internal tracker" + # Copy .torrent to state_dir/itracker so the tracker thread + # finds it and accepts peer registrations for it. + # + self.session.add_to_internal_tracker(self.tdef) + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Download: setup: Not using internal tracker" + + # Copy dlconfig, from default if not specified + if dcfg is None: + cdcfg = DownloadStartupConfig() + else: + cdcfg = dcfg + self.dlconfig = copy.copy(cdcfg.dlconfig) + # Copy sessconfig into dlconfig, such that BitTornado.BT1.Connecter, etc. + # knows whether overlay is on, etc. 
+ # + for (k,v) in self.session.get_current_startup_config_copy().sessconfig.iteritems(): + self.dlconfig.setdefault(k,v) + + self.set_filepieceranges(metainfo) + + # Things that only exist at runtime + self.dlruntimeconfig= {} + self.dlruntimeconfig['max_desired_upload_rate'] = 0 + self.dlruntimeconfig['max_desired_download_rate'] = 0 + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","DownloadImpl: setup: initialdlstatus",`self.tdef.get_name_as_unicode()`,initialdlstatus + + # Set progress + if pstate is not None and pstate.has_key('dlstate'): + self.progressbeforestop = pstate['dlstate'].get('progress', 0.0) + + # Note: initialdlstatus now only works for STOPPED + if initialdlstatus != DLSTATUS_STOPPED: + if pstate is None or pstate['dlstate']['status'] != DLSTATUS_STOPPED: + # Also restart on STOPPED_ON_ERROR, may have been transient + self.create_engine_wrapper(lmcreatedcallback,pstate,lmvodeventcallback) + + self.pstate_for_restart = pstate + + self.dllock.release() + except Exception,e: + print_exc() + self.set_error(e) + self.dllock.release() + + def create_engine_wrapper(self,lmcreatedcallback,pstate,lmvodeventcallback): + """ Called by any thread, assume dllock already acquired """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Download: create_engine_wrapper()" + + # all thread safe + infohash = self.get_def().get_infohash() + metainfo = copy.deepcopy(self.get_def().get_metainfo()) + + # H4xor this so the 'name' field is safe + (namekey,uniname) = metainfoname2unicode(metainfo) + metainfo['info'][namekey] = metainfo['info']['name'] = self.correctedinfoname + + multihandler = self.session.lm.multihandler + listenport = self.session.get_listen_port() + vapath = self.session.get_video_analyser_path() + + # Note: BT1Download is started with copy of d.dlconfig, not direct access + kvconfig = copy.copy(self.dlconfig) + + # Define which file to DL in VOD mode + live = self.get_def().get_live() + vodfileindex = { + 'index':-1, + 'inpath':None, + 'bitrate':0.0, + 'live':live, + 'usercallback':None, + 'userevents': [], + 'outpath':None} + + # --- streaming settings + if self.dlconfig['mode'] == DLMODE_VOD or self.dlconfig['video_source']: + # video file present which is played or produced + multi = False + if 'files' in metainfo['info']: + multi = True + + # Determine bitrate + if multi and len(self.dlconfig['selected_files']) == 0: + # Multi-file torrent, but no file selected + raise VODNoFileSelectedInMultifileTorrentException() + + if not multi: + # single-file torrent + file = self.get_def().get_name() + idx = -1 + bitrate = self.get_def().get_bitrate() + else: + # multi-file torrent + file = self.dlconfig['selected_files'][0] + idx = self.get_def().get_index_of_file_in_files(file) + bitrate = self.get_def().get_bitrate(file) + + # Determine MIME type + mimetype = self.get_mimetype(file) + # Arno: don't encode mimetype in lambda, allow for dynamic + # determination by videoanalyser + vod_usercallback_wrapper = lambda event,params:self.session.uch.perform_vod_usercallback(self,self.dlconfig['vod_usercallback'],event,params) + + vodfileindex['index'] = idx + vodfileindex['inpath'] = file + vodfileindex['bitrate'] = bitrate + vodfileindex['mimetype'] = mimetype + vodfileindex['usercallback'] = vod_usercallback_wrapper + vodfileindex['userevents'] = self.dlconfig['vod_userevents'][:] + elif live: + # live torrents must be streamed or produced, but not just downloaded + raise LiveTorrentRequiresUsercallbackException() + 
else: + vodfileindex['mimetype'] = 'application/octet-stream' + + # Delegate creation of engine wrapper to network thread + network_create_engine_wrapper_lambda = lambda:self.network_create_engine_wrapper(infohash,metainfo,kvconfig,multihandler,listenport,vapath,vodfileindex,lmcreatedcallback,pstate,lmvodeventcallback) + self.session.lm.rawserver.add_task(network_create_engine_wrapper_lambda,0) + + + def network_create_engine_wrapper(self,infohash,metainfo,kvconfig,multihandler,listenport,vapath,vodfileindex,lmcallback,pstate,lmvodeventcallback): + """ Called by network thread """ + self.dllock.acquire() + try: + self.sd = SingleDownload(infohash,metainfo,kvconfig,multihandler,self.session.lm.get_ext_ip,listenport,vapath,vodfileindex,self.set_error,pstate,lmvodeventcallback,self.session.lm.hashcheck_done) + sd = self.sd + exc = self.error + if lmcallback is not None: + lmcallback(self,sd,exc,pstate) + finally: + self.dllock.release() + + # + # Public method + # + def get_def(self): + # No lock because attrib immutable and return value protected + return self.tdef + + # + # Retrieving DownloadState + # + def set_state_callback(self,usercallback,getpeerlist=False): + """ Called by any thread """ + self.dllock.acquire() + try: + network_get_state_lambda = lambda:self.network_get_state(usercallback,getpeerlist) + # First time on general rawserver + self.session.lm.rawserver.add_task(network_get_state_lambda,0.0) + finally: + self.dllock.release() + + + def network_get_state(self,usercallback,getpeerlist,sessioncalling=False): + """ Called by network thread """ + self.dllock.acquire() + try: + if self.sd is None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","DownloadImpl: network_get_state: Download not running" + ds = DownloadState(self,DLSTATUS_STOPPED,self.error,self.progressbeforestop) + else: + + (status,stats,logmsgs,coopdl_helpers,coopdl_coordinator) = self.sd.get_stats(getpeerlist) + ds = DownloadState(self,status,self.error,0.0,stats=stats,filepieceranges=self.filepieceranges,logmsgs=logmsgs,coopdl_helpers=coopdl_helpers,coopdl_coordinator=coopdl_coordinator,peerid=self.sd.peerid,videoinfo=self.sd.videoinfo) + self.progressbeforestop = ds.get_progress() + + if sessioncalling: + return ds + + # Invoke the usercallback function via a new thread. + # After the callback is invoked, the return values will be passed to + # the returncallback for post-callback processing. + self.session.uch.perform_getstate_usercallback(usercallback,ds,self.sesscb_get_state_returncallback) + finally: + self.dllock.release() + + + def sesscb_get_state_returncallback(self,usercallback,when,newgetpeerlist): + """ Called by SessionCallbackThread """ + self.dllock.acquire() + try: + if when > 0.0: + # Schedule next invocation, either on general or DL specific + # TODO: ensure this continues when dl is stopped. Should be OK. 
+ network_get_state_lambda = lambda:self.network_get_state(usercallback,newgetpeerlist) + if self.sd is None: + self.session.lm.rawserver.add_task(network_get_state_lambda,when) + else: + self.sd.dlrawserver.add_task(network_get_state_lambda,when) + finally: + self.dllock.release() + + # + # Download stop/resume + # + def stop(self): + """ Called by any thread """ + self.stop_remove(removestate=False,removecontent=False) + + def stop_remove(self,removestate=False,removecontent=False): + """ Called by any thread """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","DownloadImpl: stop_remove:",`self.tdef.get_name_as_unicode()`,"state",removestate,"content",removecontent + self.dllock.acquire() + try: + network_stop_lambda = lambda:self.network_stop(removestate,removecontent) + self.session.lm.rawserver.add_task(network_stop_lambda,0.0) + finally: + self.dllock.release() + + def network_stop(self,removestate,removecontent): + """ Called by network thread """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","DownloadImpl: network_stop",`self.tdef.get_name_as_unicode()` + self.dllock.acquire() + try: + infohash = self.tdef.get_infohash() + pstate = self.network_get_persistent_state() + if self.sd is not None: + pstate['engineresumedata'] = self.sd.shutdown() + self.sd = None + self.pstate_for_restart = pstate + else: + # This method is also called at Session shutdown, where one may + # choose to checkpoint its Download. If the Download was + # stopped before, pstate_for_restart contains its resumedata. + # and that should be written into the checkpoint. + # + if self.pstate_for_restart is not None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","DownloadImpl: network_stop: Reusing previously saved engineresume data for checkpoint" + # Don't copy full pstate_for_restart, as the torrent + # may have gone from e.g. HASHCHECK at startup to STOPPED + # now, at shutdown. In other words, it was never active + # in this session and the pstate_for_restart still says + # HASHCHECK. + pstate['engineresumedata'] = self.pstate_for_restart['engineresumedata'] + + # Offload the removal of the content and other disk cleanup to another thread + if removestate: + contentdest = self.get_content_dest() + self.session.uch.perform_removestate_callback(infohash,contentdest,removecontent) + + return (infohash,pstate) + finally: + self.dllock.release() + + + def restart(self): + """ Restart the Download. Technically this action does not need to be + delegated to the network thread, but does so removes some concurrency + problems. By scheduling both stops and restarts via the network task + queue we ensure that they are executed in the order they were called. + Called by any thread """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","DownloadImpl: restart:",`self.tdef.get_name_as_unicode()` + self.dllock.acquire() + try: + self.session.lm.rawserver.add_task(self.network_restart,0.0) + finally: + self.dllock.release() + + def network_restart(self): + """ Called by network thread """ + # Must schedule the hash check via lm. In some cases we have batch stops + # and restarts, e.g. 
we have stop all-but-one & restart-all for VOD) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","DownloadImpl: network_restart",`self.tdef.get_name_as_unicode()` + self.dllock.acquire() + try: + if self.sd is None: + self.error = None # assume fatal error is reproducible + # h4xor: restart using earlier loaded resumedata + self.create_engine_wrapper(self.session.lm.network_engine_wrapper_created_callback,pstate=self.pstate_for_restart,lmvodeventcallback=self.session.lm.network_vod_event_callback) + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","DownloadImpl: network_restart: SingleDownload already running",`self` + + # No exception if already started, for convenience + finally: + self.dllock.release() + + # + # Config parameters that only exists at runtime + # + def set_max_desired_speed(self,direct,speed): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Download: set_max_desired_speed",direct,speed + #if speed < 10: + # print_stack() + + self.dllock.acquire() + if direct == UPLOAD: + self.dlruntimeconfig['max_desired_upload_rate'] = speed + else: + self.dlruntimeconfig['max_desired_download_rate'] = speed + self.dllock.release() + + def get_max_desired_speed(self,direct): + self.dllock.acquire() + try: + if direct == UPLOAD: + return self.dlruntimeconfig['max_desired_upload_rate'] + else: + return self.dlruntimeconfig['max_desired_download_rate'] + finally: + self.dllock.release() + + def get_dest_files(self, exts=None): + """ We could get this from BT1Download.files (see BT1Download.saveAs()), + but that object is the domain of the network thread. + You can give a list of extensions to return. If None: return all dest_files + """ + + def get_ext(filename): + (prefix,ext) = os.path.splitext(filename) + if ext != '' and ext[0] == '.': + ext = ext[1:] + return ext + + self.dllock.acquire() + try: + f2dlist = [] + metainfo = self.tdef.get_metainfo() + if 'files' not in metainfo['info']: + # single-file torrent + diskfn = self.get_content_dest() + f2dtuple = (None, diskfn) + ext = get_ext(diskfn) + if exts is None or ext in exts: + f2dlist.append(f2dtuple) + else: + # multi-file torrent + if len(self.dlconfig['selected_files']) > 0: + fnlist = self.dlconfig['selected_files'] + else: + fnlist = self.tdef.get_files(exts=exts) + + for filename in fnlist: + filerec = maketorrent.get_torrentfilerec_from_metainfo(filename,metainfo) + savepath = maketorrent.torrentfilerec2savefilename(filerec) + diskfn = maketorrent.savefilenames2finaldest(self.get_content_dest(),savepath) + ext = get_ext(diskfn) + if exts is None or ext in exts: + f2dtuple = (filename,diskfn) + f2dlist.append(f2dtuple) + + return f2dlist + finally: + self.dllock.release() + + + + + + # + # Persistence + # + def network_checkpoint(self): + """ Called by network thread """ + self.dllock.acquire() + try: + pstate = self.network_get_persistent_state() + if self.sd is None: + resdata = None + else: + resdata = self.sd.checkpoint() + pstate['engineresumedata'] = resdata + return (self.tdef.get_infohash(),pstate) + finally: + self.dllock.release() + + + def network_get_persistent_state(self): + """ Assume dllock already held """ + pstate = {} + pstate['version'] = PERSISTENTSTATE_CURRENTVERSION + pstate['metainfo'] = self.tdef.get_metainfo() # assumed immutable + dlconfig = copy.copy(self.dlconfig) + # Reset unpicklable params + dlconfig['vod_usercallback'] = None + dlconfig['mode'] = DLMODE_NORMAL # no callback, no VOD + 
pstate['dlconfig'] = dlconfig + + pstate['dlstate'] = {} + ds = self.network_get_state(None,False,sessioncalling=True) + pstate['dlstate']['status'] = ds.get_status() + pstate['dlstate']['progress'] = ds.get_progress() + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Download: netw_get_pers_state: status",dlstatus_strings[ds.get_status()],"progress",ds.get_progress() + + pstate['engineresumedata'] = None + return pstate + + # + # Coop download + # + def get_coopdl_role_object(self,role): + """ Called by network thread """ + role_object = None + self.dllock.acquire() + try: + if self.sd is not None: + role_object = self.sd.get_coopdl_role_object(role) + finally: + self.dllock.release() + return role_object + + + + # + # Internal methods + # + def set_error(self,e): + self.dllock.acquire() + self.error = e + self.dllock.release() + + + def set_filepieceranges(self,metainfo): + """ Determine which file maps to which piece ranges for progress info """ + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Download: set_filepieceranges:",self.dlconfig['selected_files'] + (length,self.filepieceranges) = maketorrent.get_length_filepieceranges_from_metainfo(metainfo,self.dlconfig['selected_files']) + + def get_content_dest(self): + """ Returns the file (single-file torrent) or dir (multi-file torrent) + to which the downloaded content is saved. """ + return os.path.join(self.dlconfig['saveas'],self.correctedinfoname) + + # ARNOCOMMENT: better if we removed this from Core, user knows which + # file he selected to play, let him figure out MIME type + def get_mimetype(self,file): + (prefix,ext) = os.path.splitext(file) + ext = ext.lower() + mimetype = None + if sys.platform == 'win32': + # TODO: Use Python's mailcap facility on Linux to find player + try: + [mimetype,playcmd] = win32_retrieve_video_play_command(ext,file) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","DownloadImpl: Win32 reg said MIME type is",mimetype + except: + print_exc() + else: + try: + import mimetypes + homedir = os.path.expandvars('${HOME}') + homemapfile = os.path.join(homedir,'.mimetypes') + mapfiles = [homemapfile] + mimetypes.knownfiles + mimetypes.init(mapfiles) + (mimetype,encoding) = mimetypes.guess_type(file) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","DownloadImpl: /etc/mimetypes+ said MIME type is",mimetype,file + except: + print_exc() + + # if auto detect fails + if mimetype is None: + if ext == '.avi': + mimetype = 'video/avi' + elif ext == '.mpegts': + mimetype = 'video/mp2t' + elif ext == '.mkv': + mimetype = 'video/x-matroska' + else: + mimetype = 'video/mpeg' + return mimetype + diff --git a/tribler-mod/Tribler/Core/APIImplementation/DownloadImpl.py.bak b/tribler-mod/Tribler/Core/APIImplementation/DownloadImpl.py.bak new file mode 100644 index 0000000..672c3ec --- /dev/null +++ b/tribler-mod/Tribler/Core/APIImplementation/DownloadImpl.py.bak @@ -0,0 +1,561 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information + +import sys +import os +import copy +from traceback import print_exc,print_stack +from threading import RLock,Condition,Event,Thread,currentThread + +from Tribler.Core.DownloadState import DownloadState +from Tribler.Core.DownloadConfig import DownloadStartupConfig +from Tribler.Core.simpledefs import * +from Tribler.Core.exceptions import * +from Tribler.Core.osutils import * +from Tribler.Core.APIImplementation.SingleDownload import SingleDownload 
+import Tribler.Core.APIImplementation.maketorrent as maketorrent +from Tribler.Core.Utilities.unicode import metainfoname2unicode + +from Tribler.Video.utils import win32_retrieve_video_play_command + +DEBUG = False + +class DownloadImpl: + + def __init__(self,session,tdef): + self.dllock = RLock() + # just enough so error saving and get_state() works + self.error = None + self.sd = None # hack + # To be able to return the progress of a stopped torrent, how far it got. + self.progressbeforestop = 0.0 + self.filepieceranges = [] + self.pstate_for_restart = None # h4x0r to remember resumedata + + # Copy tdef, so we get an infohash + self.session = session + self.tdef = tdef.copy() + self.tdef.readonly = True + + # + # Creating a Download + # + def setup(self,dcfg=None,pstate=None,initialdlstatus=None,lmcreatedcallback=None,lmvodeventcallback=None): + """ + Create a Download object. Used internally by Session. + @param dcfg DownloadStartupConfig or None (in which case + a new DownloadConfig() is created and the result + becomes the runtime config of this Download. + """ + # Called by any thread + try: + self.dllock.acquire() # not really needed, no other threads know of this object + + metainfo = self.get_def().get_metainfo() + # H4xor this so the 'name' field is safe + (namekey,uniname) = metainfoname2unicode(metainfo) + self.correctedinfoname = fix_filebasename(uniname) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Download: setup: piece size",metainfo['info']['piece length'] + + # See if internal tracker used + itrackerurl = self.session.get_internal_tracker_url() + #infohash = self.tdef.get_infohash() + metainfo = self.tdef.get_metainfo() + usingitracker = False + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Download: setup: internal tracker?",metainfo['announce'],itrackerurl,"#" + + if itrackerurl.endswith('/'): + slashless = itrackerurl[:-1] + else: + slashless = itrackerurl + if metainfo['announce'] == itrackerurl or metainfo['announce'] == slashless: + usingitracker = True + elif 'announce-list' in metainfo: + for tier in metainfo['announce-list']: + if itrackerurl in tier or slashless in tier: + usingitracker = True + break + + if usingitracker: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Download: setup: Using internal tracker" + # Copy .torrent to state_dir/itracker so the tracker thread + # finds it and accepts peer registrations for it. + # + self.session.add_to_internal_tracker(self.tdef) + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Download: setup: Not using internal tracker" + + # Copy dlconfig, from default if not specified + if dcfg is None: + cdcfg = DownloadStartupConfig() + else: + cdcfg = dcfg + self.dlconfig = copy.copy(cdcfg.dlconfig) + # Copy sessconfig into dlconfig, such that BitTornado.BT1.Connecter, etc. + # knows whether overlay is on, etc. 
+ # + for (k,v) in self.session.get_current_startup_config_copy().sessconfig.iteritems(): + self.dlconfig.setdefault(k,v) + + self.set_filepieceranges(metainfo) + + # Things that only exist at runtime + self.dlruntimeconfig= {} + self.dlruntimeconfig['max_desired_upload_rate'] = 0 + self.dlruntimeconfig['max_desired_download_rate'] = 0 + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","DownloadImpl: setup: initialdlstatus",`self.tdef.get_name_as_unicode()`,initialdlstatus + + # Set progress + if pstate is not None and pstate.has_key('dlstate'): + self.progressbeforestop = pstate['dlstate'].get('progress', 0.0) + + # Note: initialdlstatus now only works for STOPPED + if initialdlstatus != DLSTATUS_STOPPED: + if pstate is None or pstate['dlstate']['status'] != DLSTATUS_STOPPED: + # Also restart on STOPPED_ON_ERROR, may have been transient + self.create_engine_wrapper(lmcreatedcallback,pstate,lmvodeventcallback) + + self.pstate_for_restart = pstate + + self.dllock.release() + except Exception,e: + print_exc() + self.set_error(e) + self.dllock.release() + + def create_engine_wrapper(self,lmcreatedcallback,pstate,lmvodeventcallback): + """ Called by any thread, assume dllock already acquired """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Download: create_engine_wrapper()" + + # all thread safe + infohash = self.get_def().get_infohash() + metainfo = copy.deepcopy(self.get_def().get_metainfo()) + + # H4xor this so the 'name' field is safe + (namekey,uniname) = metainfoname2unicode(metainfo) + metainfo['info'][namekey] = metainfo['info']['name'] = self.correctedinfoname + + multihandler = self.session.lm.multihandler + listenport = self.session.get_listen_port() + vapath = self.session.get_video_analyser_path() + + # Note: BT1Download is started with copy of d.dlconfig, not direct access + kvconfig = copy.copy(self.dlconfig) + + # Define which file to DL in VOD mode + live = self.get_def().get_live() + vodfileindex = { + 'index':-1, + 'inpath':None, + 'bitrate':0.0, + 'live':live, + 'usercallback':None, + 'userevents': [], + 'outpath':None} + + # --- streaming settings + if self.dlconfig['mode'] == DLMODE_VOD or self.dlconfig['video_source']: + # video file present which is played or produced + multi = False + if 'files' in metainfo['info']: + multi = True + + # Determine bitrate + if multi and len(self.dlconfig['selected_files']) == 0: + # Multi-file torrent, but no file selected + raise VODNoFileSelectedInMultifileTorrentException() + + if not multi: + # single-file torrent + file = self.get_def().get_name() + idx = -1 + bitrate = self.get_def().get_bitrate() + else: + # multi-file torrent + file = self.dlconfig['selected_files'][0] + idx = self.get_def().get_index_of_file_in_files(file) + bitrate = self.get_def().get_bitrate(file) + + # Determine MIME type + mimetype = self.get_mimetype(file) + # Arno: don't encode mimetype in lambda, allow for dynamic + # determination by videoanalyser + vod_usercallback_wrapper = lambda event,params:self.session.uch.perform_vod_usercallback(self,self.dlconfig['vod_usercallback'],event,params) + + vodfileindex['index'] = idx + vodfileindex['inpath'] = file + vodfileindex['bitrate'] = bitrate + vodfileindex['mimetype'] = mimetype + vodfileindex['usercallback'] = vod_usercallback_wrapper + vodfileindex['userevents'] = self.dlconfig['vod_userevents'][:] + elif live: + # live torrents must be streamed or produced, but not just downloaded + raise LiveTorrentRequiresUsercallbackException() + 
else: + vodfileindex['mimetype'] = 'application/octet-stream' + + # Delegate creation of engine wrapper to network thread + network_create_engine_wrapper_lambda = lambda:self.network_create_engine_wrapper(infohash,metainfo,kvconfig,multihandler,listenport,vapath,vodfileindex,lmcreatedcallback,pstate,lmvodeventcallback) + self.session.lm.rawserver.add_task(network_create_engine_wrapper_lambda,0) + + + def network_create_engine_wrapper(self,infohash,metainfo,kvconfig,multihandler,listenport,vapath,vodfileindex,lmcallback,pstate,lmvodeventcallback): + """ Called by network thread """ + self.dllock.acquire() + try: + self.sd = SingleDownload(infohash,metainfo,kvconfig,multihandler,self.session.lm.get_ext_ip,listenport,vapath,vodfileindex,self.set_error,pstate,lmvodeventcallback,self.session.lm.hashcheck_done) + sd = self.sd + exc = self.error + if lmcallback is not None: + lmcallback(self,sd,exc,pstate) + finally: + self.dllock.release() + + # + # Public method + # + def get_def(self): + # No lock because attrib immutable and return value protected + return self.tdef + + # + # Retrieving DownloadState + # + def set_state_callback(self,usercallback,getpeerlist=False): + """ Called by any thread """ + self.dllock.acquire() + try: + network_get_state_lambda = lambda:self.network_get_state(usercallback,getpeerlist) + # First time on general rawserver + self.session.lm.rawserver.add_task(network_get_state_lambda,0.0) + finally: + self.dllock.release() + + + def network_get_state(self,usercallback,getpeerlist,sessioncalling=False): + """ Called by network thread """ + self.dllock.acquire() + try: + if self.sd is None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","DownloadImpl: network_get_state: Download not running" + ds = DownloadState(self,DLSTATUS_STOPPED,self.error,self.progressbeforestop) + else: + + (status,stats,logmsgs,coopdl_helpers,coopdl_coordinator) = self.sd.get_stats(getpeerlist) + ds = DownloadState(self,status,self.error,0.0,stats=stats,filepieceranges=self.filepieceranges,logmsgs=logmsgs,coopdl_helpers=coopdl_helpers,coopdl_coordinator=coopdl_coordinator,peerid=self.sd.peerid,videoinfo=self.sd.videoinfo) + self.progressbeforestop = ds.get_progress() + + if sessioncalling: + return ds + + # Invoke the usercallback function via a new thread. + # After the callback is invoked, the return values will be passed to + # the returncallback for post-callback processing. + self.session.uch.perform_getstate_usercallback(usercallback,ds,self.sesscb_get_state_returncallback) + finally: + self.dllock.release() + + + def sesscb_get_state_returncallback(self,usercallback,when,newgetpeerlist): + """ Called by SessionCallbackThread """ + self.dllock.acquire() + try: + if when > 0.0: + # Schedule next invocation, either on general or DL specific + # TODO: ensure this continues when dl is stopped. Should be OK. 
+ network_get_state_lambda = lambda:self.network_get_state(usercallback,newgetpeerlist) + if self.sd is None: + self.session.lm.rawserver.add_task(network_get_state_lambda,when) + else: + self.sd.dlrawserver.add_task(network_get_state_lambda,when) + finally: + self.dllock.release() + + # + # Download stop/resume + # + def stop(self): + """ Called by any thread """ + self.stop_remove(removestate=False,removecontent=False) + + def stop_remove(self,removestate=False,removecontent=False): + """ Called by any thread """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","DownloadImpl: stop_remove:",`self.tdef.get_name_as_unicode()`,"state",removestate,"content",removecontent + self.dllock.acquire() + try: + network_stop_lambda = lambda:self.network_stop(removestate,removecontent) + self.session.lm.rawserver.add_task(network_stop_lambda,0.0) + finally: + self.dllock.release() + + def network_stop(self,removestate,removecontent): + """ Called by network thread """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","DownloadImpl: network_stop",`self.tdef.get_name_as_unicode()` + self.dllock.acquire() + try: + infohash = self.tdef.get_infohash() + pstate = self.network_get_persistent_state() + if self.sd is not None: + pstate['engineresumedata'] = self.sd.shutdown() + self.sd = None + self.pstate_for_restart = pstate + else: + # This method is also called at Session shutdown, where one may + # choose to checkpoint its Download. If the Download was + # stopped before, pstate_for_restart contains its resumedata. + # and that should be written into the checkpoint. + # + if self.pstate_for_restart is not None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","DownloadImpl: network_stop: Reusing previously saved engineresume data for checkpoint" + # Don't copy full pstate_for_restart, as the torrent + # may have gone from e.g. HASHCHECK at startup to STOPPED + # now, at shutdown. In other words, it was never active + # in this session and the pstate_for_restart still says + # HASHCHECK. + pstate['engineresumedata'] = self.pstate_for_restart['engineresumedata'] + + # Offload the removal of the content and other disk cleanup to another thread + if removestate: + contentdest = self.get_content_dest() + self.session.uch.perform_removestate_callback(infohash,contentdest,removecontent) + + return (infohash,pstate) + finally: + self.dllock.release() + + + def restart(self): + """ Restart the Download. Technically this action does not need to be + delegated to the network thread, but does so removes some concurrency + problems. By scheduling both stops and restarts via the network task + queue we ensure that they are executed in the order they were called. + Called by any thread """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","DownloadImpl: restart:",`self.tdef.get_name_as_unicode()` + self.dllock.acquire() + try: + self.session.lm.rawserver.add_task(self.network_restart,0.0) + finally: + self.dllock.release() + + def network_restart(self): + """ Called by network thread """ + # Must schedule the hash check via lm. In some cases we have batch stops + # and restarts, e.g. 
we have stop all-but-one & restart-all for VOD) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","DownloadImpl: network_restart",`self.tdef.get_name_as_unicode()` + self.dllock.acquire() + try: + if self.sd is None: + self.error = None # assume fatal error is reproducible + # h4xor: restart using earlier loaded resumedata + self.create_engine_wrapper(self.session.lm.network_engine_wrapper_created_callback,pstate=self.pstate_for_restart,lmvodeventcallback=self.session.lm.network_vod_event_callback) + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","DownloadImpl: network_restart: SingleDownload already running",`self` + + # No exception if already started, for convenience + finally: + self.dllock.release() + + # + # Config parameters that only exists at runtime + # + def set_max_desired_speed(self,direct,speed): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Download: set_max_desired_speed",direct,speed + #if speed < 10: + # print_stack() + + self.dllock.acquire() + if direct == UPLOAD: + self.dlruntimeconfig['max_desired_upload_rate'] = speed + else: + self.dlruntimeconfig['max_desired_download_rate'] = speed + self.dllock.release() + + def get_max_desired_speed(self,direct): + self.dllock.acquire() + try: + if direct == UPLOAD: + return self.dlruntimeconfig['max_desired_upload_rate'] + else: + return self.dlruntimeconfig['max_desired_download_rate'] + finally: + self.dllock.release() + + def get_dest_files(self, exts=None): + """ We could get this from BT1Download.files (see BT1Download.saveAs()), + but that object is the domain of the network thread. + You can give a list of extensions to return. If None: return all dest_files + """ + + def get_ext(filename): + (prefix,ext) = os.path.splitext(filename) + if ext != '' and ext[0] == '.': + ext = ext[1:] + return ext + + self.dllock.acquire() + try: + f2dlist = [] + metainfo = self.tdef.get_metainfo() + if 'files' not in metainfo['info']: + # single-file torrent + diskfn = self.get_content_dest() + f2dtuple = (None, diskfn) + ext = get_ext(diskfn) + if exts is None or ext in exts: + f2dlist.append(f2dtuple) + else: + # multi-file torrent + if len(self.dlconfig['selected_files']) > 0: + fnlist = self.dlconfig['selected_files'] + else: + fnlist = self.tdef.get_files(exts=exts) + + for filename in fnlist: + filerec = maketorrent.get_torrentfilerec_from_metainfo(filename,metainfo) + savepath = maketorrent.torrentfilerec2savefilename(filerec) + diskfn = maketorrent.savefilenames2finaldest(self.get_content_dest(),savepath) + ext = get_ext(diskfn) + if exts is None or ext in exts: + f2dtuple = (filename,diskfn) + f2dlist.append(f2dtuple) + + return f2dlist + finally: + self.dllock.release() + + + + + + # + # Persistence + # + def network_checkpoint(self): + """ Called by network thread """ + self.dllock.acquire() + try: + pstate = self.network_get_persistent_state() + if self.sd is None: + resdata = None + else: + resdata = self.sd.checkpoint() + pstate['engineresumedata'] = resdata + return (self.tdef.get_infohash(),pstate) + finally: + self.dllock.release() + + + def network_get_persistent_state(self): + """ Assume dllock already held """ + pstate = {} + pstate['version'] = PERSISTENTSTATE_CURRENTVERSION + pstate['metainfo'] = self.tdef.get_metainfo() # assumed immutable + dlconfig = copy.copy(self.dlconfig) + # Reset unpicklable params + dlconfig['vod_usercallback'] = None + dlconfig['mode'] = DLMODE_NORMAL # no callback, no VOD + 
pstate['dlconfig'] = dlconfig + + pstate['dlstate'] = {} + ds = self.network_get_state(None,False,sessioncalling=True) + pstate['dlstate']['status'] = ds.get_status() + pstate['dlstate']['progress'] = ds.get_progress() + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Download: netw_get_pers_state: status",dlstatus_strings[ds.get_status()],"progress",ds.get_progress() + + pstate['engineresumedata'] = None + return pstate + + # + # Coop download + # + def get_coopdl_role_object(self,role): + """ Called by network thread """ + role_object = None + self.dllock.acquire() + try: + if self.sd is not None: + role_object = self.sd.get_coopdl_role_object(role) + finally: + self.dllock.release() + return role_object + + + + # + # Internal methods + # + def set_error(self,e): + self.dllock.acquire() + self.error = e + self.dllock.release() + + + def set_filepieceranges(self,metainfo): + """ Determine which file maps to which piece ranges for progress info """ + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Download: set_filepieceranges:",self.dlconfig['selected_files'] + (length,self.filepieceranges) = maketorrent.get_length_filepieceranges_from_metainfo(metainfo,self.dlconfig['selected_files']) + + def get_content_dest(self): + """ Returns the file (single-file torrent) or dir (multi-file torrent) + to which the downloaded content is saved. """ + return os.path.join(self.dlconfig['saveas'],self.correctedinfoname) + + # ARNOCOMMENT: better if we removed this from Core, user knows which + # file he selected to play, let him figure out MIME type + def get_mimetype(self,file): + (prefix,ext) = os.path.splitext(file) + ext = ext.lower() + mimetype = None + if sys.platform == 'win32': + # TODO: Use Python's mailcap facility on Linux to find player + try: + [mimetype,playcmd] = win32_retrieve_video_play_command(ext,file) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","DownloadImpl: Win32 reg said MIME type is",mimetype + except: + print_exc() + else: + try: + import mimetypes + homedir = os.path.expandvars('${HOME}') + homemapfile = os.path.join(homedir,'.mimetypes') + mapfiles = [homemapfile] + mimetypes.knownfiles + mimetypes.init(mapfiles) + (mimetype,encoding) = mimetypes.guess_type(file) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","DownloadImpl: /etc/mimetypes+ said MIME type is",mimetype,file + except: + print_exc() + + # if auto detect fails + if mimetype is None: + if ext == '.avi': + mimetype = 'video/avi' + elif ext == '.mpegts': + mimetype = 'video/mp2t' + elif ext == '.mkv': + mimetype = 'video/x-matroska' + else: + mimetype = 'video/mpeg' + return mimetype + diff --git a/tribler-mod/Tribler/Core/APIImplementation/DownloadRuntimeConfig.py b/tribler-mod/Tribler/Core/APIImplementation/DownloadRuntimeConfig.py new file mode 100644 index 0000000..3710bfc --- /dev/null +++ b/tribler-mod/Tribler/Core/APIImplementation/DownloadRuntimeConfig.py @@ -0,0 +1,518 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information + +import sys + +from Tribler.Core.DownloadConfig import DownloadConfigInterface +from Tribler.Core.exceptions import OperationNotPossibleAtRuntimeException + +DEBUG = False + +class DownloadRuntimeConfig(DownloadConfigInterface): + """ + Implements the Tribler.Core.DownloadConfig.DownloadConfigInterface + + Use these to change the download config at runtime. 
+ + DownloadConfigInterface: All methods called by any thread + """ + def set_max_speed(self,direct,speed): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Download: set_max_speed",`self.get_def().get_metainfo()['info']['name']`,direct,speed + #print_stack() + + self.dllock.acquire() + try: + # Don't need to throw an exception when stopped, we then just save the new value and + # use it at (re)startup. + if self.sd is not None: + set_max_speed_lambda = lambda:self.sd.set_max_speed(direct,speed,None) + self.session.lm.rawserver.add_task(set_max_speed_lambda,0) + + # At the moment we can't catch any errors in the engine that this + # causes, so just assume it always works. + DownloadConfigInterface.set_max_speed(self,direct,speed) + finally: + self.dllock.release() + + def get_max_speed(self,direct): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_max_speed(self,direct) + finally: + self.dllock.release() + + def set_saveas(self,path): + raise OperationNotPossibleAtRuntimeException() + + def set_video_event_callback(self,usercallback): + """ Note: this currently works only when the download is stopped. """ + self.dllock.acquire() + try: + DownloadConfigInterface.set_video_event_callback(self,usercallback) + finally: + self.dllock.release() + + def set_video_events(self,events): + """ Note: this currently works only when the download is stopped. """ + self.dllock.acquire() + try: + DownloadConfigInterface.set_video_events(self,events) + finally: + self.dllock.release() + + def set_mode(self,mode): + """ Note: this currently works only when the download is stopped. """ + self.dllock.acquire() + try: + DownloadConfigInterface.set_mode(self,mode) + finally: + self.dllock.release() + + def get_mode(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_mode(self) + finally: + self.dllock.release() + + def get_video_event_callback(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_video_event_callback(self) + finally: + self.dllock.release() + + def get_video_events(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_video_events(self) + finally: + self.dllock.release() + + def set_selected_files(self,files): + """ Note: this currently works only when the download is stopped. 
""" + self.dllock.acquire() + try: + DownloadConfigInterface.set_selected_files(self,files) + self.set_filepieceranges(self.tdef.get_metainfo()) + finally: + self.dllock.release() + + + def get_selected_files(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_selected_files(self) + finally: + self.dllock.release() + + def set_max_conns_to_initiate(self,nconns): + self.dllock.acquire() + try: + if self.sd is not None: + set_max_conns2init_lambda = lambda:self.sd.set_max_conns_to_initiate(nconns,None) + self.session.lm.rawserver.add_task(set_max_conns2init_lambda,0.0) + DownloadConfigInterface.set_max_conns_to_initiate(self,nconns) + finally: + self.dllock.release() + + def get_max_conns_to_initiate(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_max_conns_to_initiate(self) + finally: + self.dllock.release() + + def set_max_conns(self,nconns): + self.dllock.acquire() + try: + if self.sd is not None: + set_max_conns_lambda = lambda:self.sd.set_max_conns(nconns,None) + self.session.lm.rawserver.add_task(set_max_conns_lambda,0.0) + DownloadConfigInterface.set_max_conns(self,nconns) + finally: + self.dllock.release() + + def get_max_conns(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_max_conns(self) + finally: + self.dllock.release() + + # + # Advanced download parameters + # + def set_max_uploads(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_max_uploads(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_max_uploads(self) + finally: + self.dllock.release() + + def set_keepalive_interval(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_keepalive_interval(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_keepalive_interval(self) + finally: + self.dllock.release() + + def set_download_slice_size(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_download_slice_size(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_download_slice_size(self) + finally: + self.dllock.release() + + def set_upload_unit_size(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_upload_unit_size(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_upload_unit_size(self) + finally: + self.dllock.release() + + def set_request_backlog(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_request_backlog(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_request_backlog(self) + finally: + self.dllock.release() + + def set_max_message_length(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_max_message_length(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_max_message_length(self) + finally: + self.dllock.release() + + def set_max_slice_length(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_max_slice_length(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_max_slice_length(self) + finally: + self.dllock.release() + + def set_max_rate_period(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_max_rate_period(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_max_rate_period(self) + finally: + self.dllock.release() + + def set_upload_rate_fudge(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_upload_rate_fudge(self): + self.dllock.acquire() + try: + 
return DownloadConfigInterface.get_upload_rate_fudge(self) + finally: + self.dllock.release() + + def set_tcp_ack_fudge(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tcp_ack_fudge(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_tcp_ack_fudge(self) + finally: + self.dllock.release() + + def set_rerequest_interval(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_rerequest_interval(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_rerequest_interval(self) + finally: + self.dllock.release() + + def set_min_peers(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_min_peers(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_min_peers(self) + finally: + self.dllock.release() + + def set_http_timeout(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_http_timeout(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_http_timeout(self) + finally: + self.dllock.release() + + def set_check_hashes(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_check_hashes(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_check_hashes(self) + finally: + self.dllock.release() + + def set_alloc_type(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_alloc_type(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_alloc_type(self) + finally: + self.dllock.release() + + def set_alloc_rate(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_alloc_rate(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_alloc_rate(self) + finally: + self.dllock.release() + + def set_buffer_reads(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_buffer_reads(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_buffer_reads(self) + finally: + self.dllock.release() + + def set_write_buffer_size(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_write_buffer_size(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_write_buffer_size(self) + finally: + self.dllock.release() + + def set_breakup_seed_bitfield(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_breakup_seed_bitfield(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_breakup_seed_bitfield(self) + finally: + self.dllock.release() + + def set_snub_time(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_snub_time(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_snub_time(self) + finally: + self.dllock.release() + + def set_rarest_first_cutoff(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_rarest_first_cutoff(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_rarest_first_cutoff(self) + finally: + self.dllock.release() + + def set_rarest_first_priority_cutoff(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_rarest_first_priority_cutoff(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_rarest_first_priority_cutoff(self) + finally: + self.dllock.release() + + def set_min_uploads(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_min_uploads(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_min_uploads(self) + finally: + 
self.dllock.release() + + def set_max_files_open(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_max_files_open(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_max_files_open(self) + finally: + self.dllock.release() + + def set_round_robin_period(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_round_robin_period(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_round_robin_period(self) + finally: + self.dllock.release() + + def set_super_seeder(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_super_seeder(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_super_seeder(self) + finally: + self.dllock.release() + + def set_security(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_security(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_security(self) + finally: + self.dllock.release() + + def set_auto_kick(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_auto_kick(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_auto_kick(self) + finally: + self.dllock.release() + + def set_double_check_writes(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_double_check_writes(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_double_check_writes(self) + finally: + self.dllock.release() + + def set_triple_check_writes(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_triple_check_writes(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_triple_check_writes(self) + finally: + self.dllock.release() + + def set_lock_files(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_lock_files(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_lock_files(self) + finally: + self.dllock.release() + + def set_lock_while_reading(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_lock_while_reading(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_lock_while_reading(self) + finally: + self.dllock.release() + + def set_auto_flush(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_auto_flush(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_auto_flush(self) + finally: + self.dllock.release() + + def set_exclude_ips(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_exclude_ips(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_exclude_ips(self) + finally: + self.dllock.release() + + def set_ut_pex_max_addrs_from_peer(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_ut_pex_max_addrs_from_peer(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_ut_pex_max_addrs_from_peer(self) + finally: + self.dllock.release() + + def set_same_nat_try_internal(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_same_nat_try_internal(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_same_nat_try_internal(self) + finally: + self.dllock.release() + + + def set_unchoke_bias_for_internal(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_unchoke_bias_for_internal(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_unchoke_bias_for_internal(self) + finally: + self.dllock.release() 
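
The runtime-config pattern above is uniform: every getter takes dllock and returns the stored DownloadConfigInterface value, setters the engine can honour at runtime schedule a lambda on the network thread's rawserver, and everything else raises OperationNotPossibleAtRuntimeException. A minimal caller-side sketch of that contract follows; it is not part of the patch, and it assumes a Download object d that mixes in the DownloadRuntimeConfig interface above, plus the UPLOAD constant coming from Tribler.Core.simpledefs.

import sys
from Tribler.Core.simpledefs import UPLOAD                      # assumed location of UPLOAD
from Tribler.Core.exceptions import OperationNotPossibleAtRuntimeException

def throttle_upload(d, kbps):
    # Permitted while running: DownloadRuntimeConfig forwards the new limit
    # to the engine by scheduling sd.set_max_speed() on the network thread.
    d.set_max_speed(UPLOAD, kbps)
    return d.get_max_speed(UPLOAD)

def change_destination(d, path):
    # Not permitted while running: set_saveas() is one of the settings that
    # raises OperationNotPossibleAtRuntimeException; stop and restart instead.
    try:
        d.set_saveas(path)
    except OperationNotPossibleAtRuntimeException:
        print >> sys.stderr, "saveas is fixed at runtime; stop the download first"

The design choice here is that a stopped engine never makes a setter fail: the value is simply stored and picked up at the next (re)start, which is why the setters save into DownloadConfigInterface even when self.sd is None.
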
+ diff --git a/tribler-mod/Tribler/Core/APIImplementation/DownloadRuntimeConfig.py.bak b/tribler-mod/Tribler/Core/APIImplementation/DownloadRuntimeConfig.py.bak new file mode 100644 index 0000000..bc5a53a --- /dev/null +++ b/tribler-mod/Tribler/Core/APIImplementation/DownloadRuntimeConfig.py.bak @@ -0,0 +1,517 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information + +import sys + +from Tribler.Core.DownloadConfig import DownloadConfigInterface +from Tribler.Core.exceptions import OperationNotPossibleAtRuntimeException + +DEBUG = False + +class DownloadRuntimeConfig(DownloadConfigInterface): + """ + Implements the Tribler.Core.DownloadConfig.DownloadConfigInterface + + Use these to change the download config at runtime. + + DownloadConfigInterface: All methods called by any thread + """ + def set_max_speed(self,direct,speed): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Download: set_max_speed",`self.get_def().get_metainfo()['info']['name']`,direct,speed + #print_stack() + + self.dllock.acquire() + try: + # Don't need to throw an exception when stopped, we then just save the new value and + # use it at (re)startup. + if self.sd is not None: + set_max_speed_lambda = lambda:self.sd.set_max_speed(direct,speed,None) + self.session.lm.rawserver.add_task(set_max_speed_lambda,0) + + # At the moment we can't catch any errors in the engine that this + # causes, so just assume it always works. + DownloadConfigInterface.set_max_speed(self,direct,speed) + finally: + self.dllock.release() + + def get_max_speed(self,direct): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_max_speed(self,direct) + finally: + self.dllock.release() + + def set_saveas(self,path): + raise OperationNotPossibleAtRuntimeException() + + def set_video_event_callback(self,usercallback): + """ Note: this currently works only when the download is stopped. """ + self.dllock.acquire() + try: + DownloadConfigInterface.set_video_event_callback(self,usercallback) + finally: + self.dllock.release() + + def set_video_events(self,events): + """ Note: this currently works only when the download is stopped. """ + self.dllock.acquire() + try: + DownloadConfigInterface.set_video_events(self,events) + finally: + self.dllock.release() + + def set_mode(self,mode): + """ Note: this currently works only when the download is stopped. """ + self.dllock.acquire() + try: + DownloadConfigInterface.set_mode(self,mode) + finally: + self.dllock.release() + + def get_mode(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_mode(self) + finally: + self.dllock.release() + + def get_video_event_callback(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_video_event_callback(self) + finally: + self.dllock.release() + + def get_video_events(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_video_events(self) + finally: + self.dllock.release() + + def set_selected_files(self,files): + """ Note: this currently works only when the download is stopped. 
""" + self.dllock.acquire() + try: + DownloadConfigInterface.set_selected_files(self,files) + self.set_filepieceranges(self.tdef.get_metainfo()) + finally: + self.dllock.release() + + + def get_selected_files(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_selected_files(self) + finally: + self.dllock.release() + + def set_max_conns_to_initiate(self,nconns): + self.dllock.acquire() + try: + if self.sd is not None: + set_max_conns2init_lambda = lambda:self.sd.set_max_conns_to_initiate(nconns,None) + self.session.lm.rawserver.add_task(set_max_conns2init_lambda,0.0) + DownloadConfigInterface.set_max_conns_to_initiate(self,nconns) + finally: + self.dllock.release() + + def get_max_conns_to_initiate(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_max_conns_to_initiate(self) + finally: + self.dllock.release() + + def set_max_conns(self,nconns): + self.dllock.acquire() + try: + if self.sd is not None: + set_max_conns_lambda = lambda:self.sd.set_max_conns(nconns,None) + self.session.lm.rawserver.add_task(set_max_conns_lambda,0.0) + DownloadConfigInterface.set_max_conns(self,nconns) + finally: + self.dllock.release() + + def get_max_conns(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_max_conns(self) + finally: + self.dllock.release() + + # + # Advanced download parameters + # + def set_max_uploads(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_max_uploads(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_max_uploads(self) + finally: + self.dllock.release() + + def set_keepalive_interval(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_keepalive_interval(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_keepalive_interval(self) + finally: + self.dllock.release() + + def set_download_slice_size(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_download_slice_size(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_download_slice_size(self) + finally: + self.dllock.release() + + def set_upload_unit_size(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_upload_unit_size(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_upload_unit_size(self) + finally: + self.dllock.release() + + def set_request_backlog(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_request_backlog(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_request_backlog(self) + finally: + self.dllock.release() + + def set_max_message_length(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_max_message_length(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_max_message_length(self) + finally: + self.dllock.release() + + def set_max_slice_length(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_max_slice_length(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_max_slice_length(self) + finally: + self.dllock.release() + + def set_max_rate_period(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_max_rate_period(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_max_rate_period(self) + finally: + self.dllock.release() + + def set_upload_rate_fudge(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_upload_rate_fudge(self): + self.dllock.acquire() + try: + 
return DownloadConfigInterface.get_upload_rate_fudge(self) + finally: + self.dllock.release() + + def set_tcp_ack_fudge(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tcp_ack_fudge(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_tcp_ack_fudge(self) + finally: + self.dllock.release() + + def set_rerequest_interval(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_rerequest_interval(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_rerequest_interval(self) + finally: + self.dllock.release() + + def set_min_peers(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_min_peers(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_min_peers(self) + finally: + self.dllock.release() + + def set_http_timeout(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_http_timeout(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_http_timeout(self) + finally: + self.dllock.release() + + def set_check_hashes(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_check_hashes(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_check_hashes(self) + finally: + self.dllock.release() + + def set_alloc_type(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_alloc_type(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_alloc_type(self) + finally: + self.dllock.release() + + def set_alloc_rate(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_alloc_rate(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_alloc_rate(self) + finally: + self.dllock.release() + + def set_buffer_reads(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_buffer_reads(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_buffer_reads(self) + finally: + self.dllock.release() + + def set_write_buffer_size(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_write_buffer_size(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_write_buffer_size(self) + finally: + self.dllock.release() + + def set_breakup_seed_bitfield(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_breakup_seed_bitfield(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_breakup_seed_bitfield(self) + finally: + self.dllock.release() + + def set_snub_time(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_snub_time(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_snub_time(self) + finally: + self.dllock.release() + + def set_rarest_first_cutoff(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_rarest_first_cutoff(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_rarest_first_cutoff(self) + finally: + self.dllock.release() + + def set_rarest_first_priority_cutoff(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_rarest_first_priority_cutoff(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_rarest_first_priority_cutoff(self) + finally: + self.dllock.release() + + def set_min_uploads(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_min_uploads(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_min_uploads(self) + finally: + 
self.dllock.release() + + def set_max_files_open(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_max_files_open(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_max_files_open(self) + finally: + self.dllock.release() + + def set_round_robin_period(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_round_robin_period(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_round_robin_period(self) + finally: + self.dllock.release() + + def set_super_seeder(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_super_seeder(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_super_seeder(self) + finally: + self.dllock.release() + + def set_security(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_security(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_security(self) + finally: + self.dllock.release() + + def set_auto_kick(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_auto_kick(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_auto_kick(self) + finally: + self.dllock.release() + + def set_double_check_writes(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_double_check_writes(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_double_check_writes(self) + finally: + self.dllock.release() + + def set_triple_check_writes(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_triple_check_writes(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_triple_check_writes(self) + finally: + self.dllock.release() + + def set_lock_files(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_lock_files(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_lock_files(self) + finally: + self.dllock.release() + + def set_lock_while_reading(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_lock_while_reading(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_lock_while_reading(self) + finally: + self.dllock.release() + + def set_auto_flush(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_auto_flush(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_auto_flush(self) + finally: + self.dllock.release() + + def set_exclude_ips(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_exclude_ips(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_exclude_ips(self) + finally: + self.dllock.release() + + def set_ut_pex_max_addrs_from_peer(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_ut_pex_max_addrs_from_peer(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_ut_pex_max_addrs_from_peer(self) + finally: + self.dllock.release() + + def set_same_nat_try_internal(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_same_nat_try_internal(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_same_nat_try_internal(self) + finally: + self.dllock.release() + + + def set_unchoke_bias_for_internal(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_unchoke_bias_for_internal(self): + self.dllock.acquire() + try: + return DownloadConfigInterface.get_unchoke_bias_for_internal(self) + finally: + self.dllock.release() 
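
Before the LaunchManyCore code below, it is worth making the state-polling contract from DownloadImpl explicit: set_state_callback() schedules network_get_state() once, the resulting DownloadState is delivered to the user callback on a separate thread, and sesscb_get_state_returncallback() reschedules the poll only while the returned delay is greater than 0.0. The sketch below is not part of the patch; it assumes a Download d exposing set_state_callback() as defined above and the (delay, getpeerlist) return convention implied by sesscb_get_state_returncallback().

import sys

def state_callback(ds):
    # ds is a Tribler.Core.DownloadState.DownloadState; get_status() and
    # get_progress() are the same accessors DownloadImpl itself uses when
    # building its persistent state.
    print >> sys.stderr, "dlstate", ds.get_status(), "progress", ds.get_progress()
    return (1.0, False)   # poll again in one second, without the peer list

def monitor(d):
    d.set_state_callback(state_callback, getpeerlist=False)
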
+ diff --git a/tribler-mod/Tribler/Core/APIImplementation/LaunchManyCore.py b/tribler-mod/Tribler/Core/APIImplementation/LaunchManyCore.py new file mode 100644 index 0000000..890a59d --- /dev/null +++ b/tribler-mod/Tribler/Core/APIImplementation/LaunchManyCore.py @@ -0,0 +1,806 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information + +import sys +import os +from time import time +import pickle +import socket +import binascii +from threading import Event,Thread,enumerate +from traceback import print_exc + +from Tribler.Core.BitTornado.RawServer import RawServer +from Tribler.Core.BitTornado.ServerPortHandler import MultiHandler +from Tribler.Core.BitTornado.BT1.track import Tracker +from Tribler.Core.BitTornado.HTTPHandler import HTTPHandler,DummyHTTPHandler + + +from Tribler.Core.simpledefs import * +from Tribler.Core.exceptions import * +from Tribler.Core.Download import Download +from Tribler.Core.DownloadConfig import DownloadStartupConfig +from Tribler.Core.TorrentDef import TorrentDef +from Tribler.Core.NATFirewall.guessip import get_my_wan_ip +from Tribler.Core.NATFirewall.UPnPThread import UPnPThread +from Tribler.Core.Overlay.SecureOverlay import SecureOverlay +from Tribler.Core.Overlay.OverlayThreadingBridge import OverlayThreadingBridge +from Tribler.Core.Overlay.OverlayApps import OverlayApps +from Tribler.Core.NATFirewall.DialbackMsgHandler import DialbackMsgHandler +from Tribler.Core.DecentralizedTracking import mainlineDHT +from Tribler.Core.DecentralizedTracking.rsconvert import RawServerConverter +from Tribler.Core.DecentralizedTracking.mainlineDHTChecker import mainlineDHTChecker +from Tribler.Core.CacheDB.sqlitecachedb import SQLiteCacheDB +import Tribler.Core.CacheDB.cachedb as cachedb +from Tribler.Core.CacheDB.SqliteCacheDBHandler import * +from Tribler.Core.CacheDB.SqliteSeedingStatsCacheDB import * +from Tribler.Core.CacheDB.SqliteFriendshipStatsCacheDB import * +from Tribler.Core.RequestPolicy import * +from Tribler.Core.osutils import get_readable_torrent_name +from Tribler.Category.Category import Category +from Tribler.TrackerChecking.TorrentChecking import TorrentChecking + + +SPECIAL_VALUE=481 + +DEBUG = False +PROFILE = False + +# Internal classes +# + +class TriblerLaunchMany(Thread): + + def __init__(self): + """ Called only once (unless we have multiple Sessions) by MainThread """ + Thread.__init__(self) + self.setDaemon(True) + self.setName("Network"+self.getName()) + + def register(self,session,sesslock): + self.session = session + self.sesslock = sesslock + + self.downloads = {} + config = session.sessconfig # Should be safe at startup + + self.locally_guessed_ext_ip = self.guess_ext_ip_from_local_info() + self.upnp_ext_ip = None + self.dialback_ext_ip = None + self.yourip_ext_ip = None + + + # Orig + self.sessdoneflag = Event() + + # Following two attributes set/get by network thread ONLY + self.hashcheck_queue = [] + self.sdownloadtohashcheck = None + + # Following 2 attributes set/get by UPnPThread + self.upnp_thread = None + self.upnp_type = config['upnp_nat_access'] + self.nat_detect = config['nat_detect'] + + self.rawserver = RawServer(self.sessdoneflag, + config['timeout_check_interval'], + config['timeout'], + ipv6_enable = config['ipv6_enabled'], + failfunc = self.rawserver_fatalerrorfunc, + errorfunc = self.rawserver_nonfatalerrorfunc) + self.rawserver.add_task(self.rawserver_keepalive,1) + + self.listen_port = self.rawserver.find_and_bind(0, + config['minport'], config['maxport'], 
config['bind'], + reuse = True, + ipv6_socket_style = config['ipv6_binds_v4'], + randomizer = config['random_port']) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tlm: Got listen port", self.listen_port + + self.overlay_bridge = OverlayThreadingBridge.getInstance() + self.multihandler = MultiHandler(self.rawserver, self.sessdoneflag) + self.shutdownstarttime = None + + # do_cache -> do_overlay -> (do_buddycast, do_download_help) + if config['megacache']: + # init cache db + if config['nickname'] == '__default_name__': + config['nickname'] = socket.gethostname() + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'tlm: Reading Session state from',config['state_dir'] + cachedb.init(config, self.rawserver_fatalerrorfunc) + + # initialize SeedingStats database + cachedb.init_seeding_stats(config, self.rawserver_fatalerrorfunc) + + # initialize Friendship statistics database + cachedb.init_friendship_stats(config, self.rawserver_fatalerrorfunc) + + # initialize VideoPlayback statistics database + cachedb.init_videoplayback_stats(config, self.rawserver_fatalerrorfunc) + + self.my_db = MyDBHandler.getInstance() + self.peer_db = PeerDBHandler.getInstance() + # Register observer to update connection opened/closed to peer_db_handler + self.peer_db.registerConnectionUpdater(self.session) + self.torrent_db = TorrentDBHandler.getInstance() + torrent_collecting_dir = os.path.abspath(config['torrent_collecting_dir']) + self.torrent_db.register(Category.getInstance(),torrent_collecting_dir) + self.mypref_db = MyPreferenceDBHandler.getInstance() + self.pref_db = PreferenceDBHandler.getInstance() + self.superpeer_db = SuperPeerDBHandler.getInstance() + self.superpeer_db.loadSuperPeers(config) + self.friend_db = FriendDBHandler.getInstance() + self.bartercast_db = BarterCastDBHandler.getInstance() + self.bartercast_db.registerSession(self.session) + self.modcast_db = ModerationCastDBHandler.getInstance() + self.modcast_db.registerSession(self.session) + self.votecast_db = VoteCastDBHandler.getInstance() + self.votecast_db.registerSession(self.session) + self.search_db = SearchDBHandler.getInstance() + self.term_db = TermDBHandler.getInstance() + + # Crawling + self.crawler_db = CrawlerDBHandler.getInstance() + self.crawler_db.loadCrawlers(config) + self.seedingstats_db = SeedingStatsDBHandler.getInstance() + self.seedingstatssettings_db = SeedingStatsSettingsDBHandler.getInstance() + self.friendship_statistics_db = FriendshipStatisticsDBHandler().getInstance() + else: + config['overlay'] = 0 # turn overlay off + config['torrent_checking'] = 0 + self.my_db = None + self.peer_db = None + self.torrent_db = None + self.mypref_db = None + self.pref_db = None + self.superpeer_db = None + self.crawler_db = None + self.seedingstats_db = None + self.seedingstatssettings_db = None + self.friendship_statistics_db = None + self.friend_db = None + self.bartercast_db = None + self.mm = None + + if config['overlay']: + self.secure_overlay = SecureOverlay.getInstance() + self.secure_overlay.register(self, config['overlay_max_message_length']) + + # Set policy for which peer requests (dl_helper, rquery) to answer and which to ignore + + self.overlay_apps = OverlayApps.getInstance() + # Default policy, override with Session.set_overlay_request_policy() + policy = FriendsCoopDLOtherRQueryQuotumCrawlerAllowAllRequestPolicy(self.session) + + # For the new DB layer we need to run all overlay apps in a + # separate thread instead of the NetworkThread as before. 
+ self.overlay_bridge.register_bridge(self.secure_overlay,self.overlay_apps) + + self.overlay_apps.register(self.overlay_bridge,self.session,self,config,policy) + # It's important we don't start listening to the network until + # all higher protocol-handling layers are properly configured. + self.overlay_bridge.start_listening() + else: + self.secure_overlay = None + self.overlay_apps = None + config['buddycast'] = 0 + config['download_help'] = 0 + config['socnet'] = 0 + config['rquery'] = 0 + + # Minimal to allow yourip external-IP address detection + some_dialback_handler = DialbackMsgHandler.getInstance() + some_dialback_handler.register_yourip(self) + + + if config['megacache'] or config['overlay']: + # Arno: THINK! whoever added this should at least have made the + # config files configurable via SessionConfigInterface. + # + # TODO: see if we can move this out of the core. We could make the + # category a parameter to TorrentDB.addExternalTorrent(), but that + # will not work directly for MetadataHandler, which is part of the + # core. + + # Some author: First Category instantiation requires install_dir, so do it now + Category.getInstance(config['install_dir']) + + # Internal tracker + self.internaltracker = None + if config['internaltracker']: + self.internaltracker = Tracker(config, self.rawserver) + self.httphandler = HTTPHandler(self.internaltracker.get, config['tracker_min_time_between_log_flushes']) + else: + self.httphandler = DummyHTTPHandler() + self.multihandler.set_httphandler(self.httphandler) + + if config['mainline_dht']: + # Start up mainline DHT + # Arno: do this in a try block, as khashmir gives a very funky + # error when started from a .dmg (not from cmd line) on Mac. In + # particular it complains that it cannot find the 'hex' encoding + # method when hstr.encode('hex') is called, and hstr is a string?! + # + try: + rsconvert = RawServerConverter(self.rawserver) + # '' = host, TODO: when local bind set + mainlineDHT.init('',self.listen_port,config['state_dir'],rawserver=rsconvert) + except: + print_exc() + + + # add task for tracker checking + if config['torrent_checking']: + + if config['mainline_dht']: + # Create torrent-liveliness checker based on DHT + c = mainlineDHTChecker.getInstance() + c.register(mainlineDHT.dht) + + self.torrent_checking_period = config['torrent_checking_period'] + #self.torrent_checking_period = 5 + self.rawserver.add_task(self.run_torrent_check, self.torrent_checking_period) + + + def add(self,tdef,dscfg,pstate=None,initialdlstatus=None): + """ Called by any thread """ + self.sesslock.acquire() + try: + if not tdef.is_finalized(): + raise ValueError("TorrentDef not finalized") + + d = Download(self.session,tdef) + infohash = tdef.get_infohash() + + # Check if running or saved on disk + if infohash in self.downloads: + raise DuplicateDownloadException() + elif pstate is None and not tdef.get_live(): # not already resuming + pstate = self.load_download_pstate_noexc(infohash) + if pstate is not None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tlm: add: pstate is",dlstatus_strings[pstate['dlstate']['status']],pstate['dlstate']['progress'] + + # Store in list of Downloads, always. 
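+ # Index the new Download by its infohash; d.setup() then hands it its config, any saved resume state and the callbacks invoked by the network thread.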
+ infohash = d.get_def().get_infohash() + self.downloads[infohash] = d + d.setup(dscfg,pstate,initialdlstatus,self.network_engine_wrapper_created_callback,self.network_vod_event_callback) + + if self.torrent_db != None and self.mypref_db != None: + raw_filename = tdef.get_name_as_unicode() + infohash = tdef.get_infohash() + save_name = get_readable_torrent_name(infohash, raw_filename) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'tlm: add', save_name, self.session.sessconfig + torrent_dir = self.session.sessconfig['torrent_collecting_dir'] + save_path = os.path.join(torrent_dir, save_name) + if not os.path.exists(save_path): # save the torrent to the common torrent dir + tdef.save(save_path) + + # hack, make sure these torrents are always good so they show up + # in TorrentDBHandler.getTorrents() + extra_info = {'status':'good'} + self.torrent_db.addExternalTorrent(save_path, source='',extra_info=extra_info) + dest_path = d.get_dest_dir() + # TODO: if user renamed the dest_path for single-file-torrent + data = {'destination_path':dest_path} + self.mypref_db.addMyPreference(infohash, data) + # BuddyCast is now notified of this new Download in our + # preferences via the Notifier mechanism. See BC.sesscb_ntfy_myprefs() + return d + finally: + self.sesslock.release() + + + def network_engine_wrapper_created_callback(self,d,sd,exc,pstate): + """ Called by network thread """ + if exc is None: + # Always need to call the hashcheck func, even if we're restarting + # a download that was seeding, this is just how the BT engine works. + # We've provided the BT engine with its resumedata, so this should + # be fast. + # + try: + if sd is not None: + self.queue_for_hashcheck(sd) + if pstate is None and not d.get_def().get_live(): + # Checkpoint at startup + (infohash,pstate) = d.network_checkpoint() + self.save_download_pstate(infohash,pstate) + else: + raise TriblerException("tlm: network_engine_wrapper_created_callback: sd is None!") + except Exception,e: + # There was a bug in queue_for_hashcheck that is now fixed. + # Leave this in place to catch unexpected errors. 
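+ # Log the traceback and record the failure on the Download object via set_error().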
+ print_exc() + d.set_error(e) + + + def remove(self,d,removecontent=False): + """ Called by any thread """ + self.sesslock.acquire() + try: + d.stop_remove(removestate=True,removecontent=removecontent) + infohash = d.get_def().get_infohash() + del self.downloads[infohash] + finally: + self.sesslock.release() + + def get_downloads(self): + """ Called by any thread """ + self.sesslock.acquire() + try: + return self.downloads.values() #copy, is mutable + finally: + self.sesslock.release() + + def rawserver_fatalerrorfunc(self,e): + """ Called by network thread """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TriblerLaunchMany: RawServer fatal error func called",e + print_exc() + + def rawserver_nonfatalerrorfunc(self,e): + """ Called by network thread """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TriblerLaunchmany: RawServer non fatal error func called",e + print_exc() + # Could log this somewhere, or phase it out + + def _run(self): + """ Called only once by network thread """ + + try: + try: + self.start_upnp() + self.multihandler.listen_forever() + except: + print_exc() + finally: + if self.internaltracker is not None: + self.internaltracker.save_state() + + self.stop_upnp() + self.rawserver.shutdown() + + def rawserver_keepalive(self): + """ Hack to prevent rawserver sleeping in select() for a long time, not + processing any tasks on its queue at startup time + + Called by network thread """ + self.rawserver.add_task(self.rawserver_keepalive,1) + + # + # TODO: called by TorrentMaker when new torrent added to itracker dir + # Make it such that when Session.add_torrent() is called and the internal + # tracker is used that we write a metainfo to itracker dir and call this. + # + def tracker_rescan_dir(self): + if self.internaltracker is not None: + self.internaltracker.parse_allowed(source='Session') + + # + # Torrent hash checking + # + def queue_for_hashcheck(self,sd): + """ Schedule a SingleDownload for integrity check of on-disk data + + Called by network thread """ + if hash: + self.hashcheck_queue.append(sd) + # Check smallest torrents first + self.hashcheck_queue.sort(singledownload_size_cmp) + + if not self.sdownloadtohashcheck: + self.dequeue_and_start_hashcheck() + + def dequeue_and_start_hashcheck(self): + """ Start integriy check for first SingleDownload in queue + + Called by network thread """ + self.sdownloadtohashcheck = self.hashcheck_queue.pop(0) + self.sdownloadtohashcheck.perform_hashcheck(self.hashcheck_done) + + def hashcheck_done(self,success=True): + """ Integrity check for first SingleDownload in queue done + + Called by network thread """ + if success: + self.sdownloadtohashcheck.hashcheck_done() + if self.hashcheck_queue: + self.dequeue_and_start_hashcheck() + else: + self.sdownloadtohashcheck = None + + # + # State retrieval + # + def set_download_states_callback(self,usercallback,getpeerlist,when=0.0): + """ Called by any thread """ + network_set_download_states_callback_lambda = lambda:self.network_set_download_states_callback(usercallback,getpeerlist) + self.rawserver.add_task(network_set_download_states_callback_lambda,when) + + def network_set_download_states_callback(self,usercallback,getpeerlist): + """ Called by network thread """ + self.sesslock.acquire() + try: + # Even if the list of Downloads changes in the mean time this is + # no problem. 
For removals, dllist will still hold a pointer to the + # Download, and additions are no problem (just won't be included + # in list of states returned via callback. + # + dllist = self.downloads.values() + finally: + self.sesslock.release() + + dslist = [] + for d in dllist: + ds = d.network_get_state(None,getpeerlist,sessioncalling=True) + dslist.append(ds) + + # Invoke the usercallback function via a new thread. + # After the callback is invoked, the return values will be passed to + # the returncallback for post-callback processing. + self.session.uch.perform_getstate_usercallback(usercallback,dslist,self.sesscb_set_download_states_returncallback) + + def sesscb_set_download_states_returncallback(self,usercallback,when,newgetpeerlist): + """ Called by SessionCallbackThread """ + if when > 0.0: + # reschedule + self.set_download_states_callback(usercallback,newgetpeerlist,when=when) + + # + # Persistence methods + # + def load_checkpoint(self,initialdlstatus=None): + """ Called by any thread """ + self.sesslock.acquire() + try: + dir = self.session.get_downloads_pstate_dir() + filelist = os.listdir(dir) + for basename in filelist: + # Make this go on when a torrent fails to start + filename = os.path.join(dir,basename) + self.resume_download(filename,initialdlstatus) + finally: + self.sesslock.release() + + + def load_download_pstate_noexc(self,infohash): + """ Called by any thread, assume sesslock already held """ + try: + dir = self.session.get_downloads_pstate_dir() + basename = binascii.hexlify(infohash)+'.pickle' + filename = os.path.join(dir,basename) + return self.load_download_pstate(filename) + except Exception,e: + # TODO: remove saved checkpoint? + #self.rawserver_nonfatalerrorfunc(e) + return None + + def resume_download(self,filename,initialdlstatus=None): + try: + # TODO: filter for file not found explicitly? + pstate = self.load_download_pstate(filename) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tlm: load_checkpoint: pstate is",dlstatus_strings[pstate['dlstate']['status']],pstate['dlstate']['progress'] + if pstate['engineresumedata'] is None: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tlm: load_checkpoint: resumedata None" + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tlm: load_checkpoint: resumedata len",len(pstate['engineresumedata']) + + tdef = TorrentDef.load_from_dict(pstate['metainfo']) + + # Activate + dscfg = DownloadStartupConfig(dlconfig=pstate['dlconfig']) + self.add(tdef,dscfg,pstate,initialdlstatus) + except Exception,e: + # TODO: remove saved checkpoint? + self.rawserver_nonfatalerrorfunc(e) + + + def checkpoint(self,stop=False,checkpoint=True,gracetime=2.0): + """ Called by any thread, assume sesslock already held """ + # Even if the list of Downloads changes in the mean time this is + # no problem. For removals, dllist will still hold a pointer to the + # Download, and additions are no problem (just won't be included + # in list of states returned via callback. 
+ # + dllist = self.downloads.values() + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tlm: checkpointing",len(dllist) + + network_checkpoint_callback_lambda = lambda:self.network_checkpoint_callback(dllist,stop,checkpoint,gracetime) + self.rawserver.add_task(network_checkpoint_callback_lambda,0.0) + + + def network_checkpoint_callback(self,dllist,stop,checkpoint,gracetime): + """ Called by network thread """ + if checkpoint: + for d in dllist: + # Tell all downloads to stop, and save their persistent state + # in a infohash -> pstate dict which is then passed to the user + # for storage. + # + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tlm: network checkpointing:",`d.get_def().get_name()` + if stop: + (infohash,pstate) = d.network_stop(False,False) + else: + (infohash,pstate) = d.network_checkpoint() + + try: + self.save_download_pstate(infohash,pstate) + except Exception,e: + self.rawserver_nonfatalerrorfunc(e) + + if stop: + # Some grace time for early shutdown tasks + if self.shutdownstarttime is not None: + now = time() + diff = now - self.shutdownstarttime + if diff < gracetime: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tlm: shutdown: delaying for early shutdown tasks",gracetime-diff + delay = gracetime-diff + network_shutdown_callback_lambda = lambda:self.network_shutdown() + self.rawserver.add_task(network_shutdown_callback_lambda,delay) + return + + self.network_shutdown() + + + def early_shutdown(self): + """ Called as soon as Session shutdown is initiated. Used to start + shutdown tasks that takes some time and that can run in parallel + to checkpointing, etc. + """ + if self.overlay_apps is not None: + self.shutdownstarttime = time() + self.overlay_bridge.add_task(self.overlay_apps.early_shutdown,0) + + + def network_shutdown(self): + try: + # Detect if megacache is enabled + if self.peer_db is not None: + db = SQLiteCacheDB.getInstance() + db.commit() + + mainlineDHT.deinit() + + ts = enumerate() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tlm: Number of threads still running",len(ts) + for t in ts: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tlm: Thread still running",t.getName(),"daemon",t.isDaemon() + except: + print_exc() + + # Stop network thread + self.sessdoneflag.set() + + def save_download_pstate(self,infohash,pstate): + """ Called by network thread """ + basename = binascii.hexlify(infohash)+'.pickle' + filename = os.path.join(self.session.get_downloads_pstate_dir(),basename) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tlm: network checkpointing: to file",filename + f = open(filename,"wb") + pickle.dump(pstate,f) + f.close() + + + def load_download_pstate(self,filename): + """ Called by any thread """ + f = open(filename,"rb") + pstate = pickle.load(f) + f.close() + return pstate + + # + # External IP address methods + # + def guess_ext_ip_from_local_info(self): + """ Called at creation time """ + ip = get_my_wan_ip() + if ip is None: + host = socket.gethostbyname_ex(socket.gethostname()) + ipaddrlist = host[2] + for ip in ipaddrlist: + return ip + return '127.0.0.1' + else: + return ip + + def run(self): + if PROFILE: + fname = "profile-%s" % self.getName() + import cProfile + cProfile.runctx( "self._run()", globals(), locals(), filename=fname ) + import pstats + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","profile: data for %s" % self.getName() + 
pstats.Stats(fname,stream=sys.stderr).sort_stats("cumulative").print_stats(20) + else: + self._run() + + def start_upnp(self): + """ Arno: as the UPnP discovery and calls to the firewall can be slow, + do it in a separate thread. When it fails, it should report popup + a dialog to inform and help the user. Or report an error in textmode. + + Must save type here, to handle case where user changes the type + In that case we still need to delete the port mapping using the old mechanism + + Called by network thread """ + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tlm: start_upnp()" + self.set_activity(NTFY_ACT_UPNP) + self.upnp_thread = UPnPThread(self.upnp_type,self.locally_guessed_ext_ip,self.listen_port,self.upnp_failed_callback,self.upnp_got_ext_ip_callback) + self.upnp_thread.start() + + def stop_upnp(self): + """ Called by network thread """ + if self.upnp_type > 0: + self.upnp_thread.shutdown() + + def upnp_failed_callback(self,upnp_type,listenport,error_type,exc=None,listenproto='TCP'): + """ Called by UPnP thread TODO: determine how to pass to API user + In principle this is a non fatal error. But it is one we wish to + show to the user """ + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","UPnP mode "+str(upnp_type)+" request to firewall failed with error "+str(error_type)+" Try setting a different mode in Preferences. Listen port was "+str(listenport)+", protocol"+listenproto,exc + + def upnp_got_ext_ip_callback(self,ip): + """ Called by UPnP thread """ + self.sesslock.acquire() + self.upnp_ext_ip = ip + self.sesslock.release() + + def dialback_got_ext_ip_callback(self,ip): + """ Called by network thread """ + self.sesslock.acquire() + self.dialback_ext_ip = ip + self.sesslock.release() + + def yourip_got_ext_ip_callback(self,ip): + """ Called by network thread """ + self.sesslock.acquire() + self.yourip_ext_ip = ip + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tlm: yourip_got_ext_ip_callback: others think my IP address is",ip + self.sesslock.release() + + + def get_ext_ip(self,unknowniflocal=False): + """ Called by any thread """ + self.sesslock.acquire() + try: + if self.dialback_ext_ip is not None: + # more reliable + return self.dialback_ext_ip # string immutable + elif self.upnp_ext_ip is not None: + # good reliability, if known + return self.upnp_ext_ip + elif self.yourip_ext_ip is not None: + # majority vote, could be rigged + return self.yourip_ext_ip + else: + # slighly wild guess + if unknowniflocal: + return None + else: + return self.locally_guessed_ext_ip + finally: + self.sesslock.release() + + + def get_int_ip(self): + """ Called by any thread """ + self.sesslock.acquire() + try: + return self.locally_guessed_ext_ip + finally: + self.sesslock.release() + + + # + # Events from core meant for API user + # + def dialback_reachable_callback(self): + """ Called by overlay+network thread """ + self.session.uch.notify(NTFY_REACHABLE, NTFY_INSERT, None, '') + + + def set_activity(self,type, str = '', arg2=None): + """ Called by overlay + network thread """ + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tlm: set_activity",type,str,arg2 + self.session.uch.notify(NTFY_ACTIVITIES, NTFY_INSERT, type, str, arg2) + + + def network_vod_event_callback(self,videoinfo,event,params): + """ Called by network thread """ + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tlm: network_vod_event_callback: event %s, params %s" % (event,params) + 
+ + # Call Session threadpool to call user's callback + videoinfo['usercallback'](event,params) + + + def update_torrent_checking_period(self): + # dynamically change the interval: update at least once per day + if self.overlay_apps and self.overlay_apps.metadata_handler: + ntorrents = self.overlay_apps.metadata_handler.num_torrents + if ntorrents > 0: + self.torrent_checking_period = min(max(86400/ntorrents, 15), 300) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "torrent_checking_period", self.torrent_checking_period + #self.torrent_checking_period = 1 ### DEBUG, remove it before release!! + + def run_torrent_check(self): + """ Called by network thread """ + + self.update_torrent_checking_period() + self.rawserver.add_task(self.run_torrent_check, self.torrent_checking_period) + # print "torrent_checking start" + try: + t = TorrentChecking() + t.start() + except Exception, e: + print_exc() + self.rawserver_nonfatalerrorfunc(e) + + def get_coopdl_role_object(self,infohash,role): + """ Called by network thread """ + role_object = None + self.sesslock.acquire() + try: + if infohash in self.downloads: + d = self.downloads[infohash] + role_object = d.get_coopdl_role_object(role) + finally: + self.sesslock.release() + return role_object + + + def h4xor_reset_init_conn_counter(self): + self.rawserver.add_task(self.network_h4xor_reset,0) + + def network_h4xor_reset(self): + from Tribler.Core.BitTornado.BT1.Encrypter import incompletecounter + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tlm: h4x0r Resetting outgoing TCP connection rate limiter",incompletecounter.c,"===" + incompletecounter.c = 0 + + +def singledownload_size_cmp(x,y): + """ Method that compares 2 SingleDownload objects based on the size of the + content of the BT1Download (if any) contained in them. 
+ """ + if x is None and y is None: + return 0 + elif x is None: + return 1 + elif y is None: + return -1 + else: + a = x.get_bt1download() + b = y.get_bt1download() + if a is None and b is None: + return 0 + elif a is None: + return 1 + elif b is None: + return -1 + else: + if a.get_datalength() == b.get_datalength(): + return 0 + elif a.get_datalength() < b.get_datalength(): + return -1 + else: + return 1 + diff --git a/tribler-mod/Tribler/Core/APIImplementation/LaunchManyCore.py.bak b/tribler-mod/Tribler/Core/APIImplementation/LaunchManyCore.py.bak new file mode 100644 index 0000000..ddac256 --- /dev/null +++ b/tribler-mod/Tribler/Core/APIImplementation/LaunchManyCore.py.bak @@ -0,0 +1,805 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information + +import sys +import os +from time import time +import pickle +import socket +import binascii +from threading import Event,Thread,enumerate +from traceback import print_exc + +from Tribler.Core.BitTornado.RawServer import RawServer +from Tribler.Core.BitTornado.ServerPortHandler import MultiHandler +from Tribler.Core.BitTornado.BT1.track import Tracker +from Tribler.Core.BitTornado.HTTPHandler import HTTPHandler,DummyHTTPHandler + + +from Tribler.Core.simpledefs import * +from Tribler.Core.exceptions import * +from Tribler.Core.Download import Download +from Tribler.Core.DownloadConfig import DownloadStartupConfig +from Tribler.Core.TorrentDef import TorrentDef +from Tribler.Core.NATFirewall.guessip import get_my_wan_ip +from Tribler.Core.NATFirewall.UPnPThread import UPnPThread +from Tribler.Core.Overlay.SecureOverlay import SecureOverlay +from Tribler.Core.Overlay.OverlayThreadingBridge import OverlayThreadingBridge +from Tribler.Core.Overlay.OverlayApps import OverlayApps +from Tribler.Core.NATFirewall.DialbackMsgHandler import DialbackMsgHandler +from Tribler.Core.DecentralizedTracking import mainlineDHT +from Tribler.Core.DecentralizedTracking.rsconvert import RawServerConverter +from Tribler.Core.DecentralizedTracking.mainlineDHTChecker import mainlineDHTChecker +from Tribler.Core.CacheDB.sqlitecachedb import SQLiteCacheDB +import Tribler.Core.CacheDB.cachedb as cachedb +from Tribler.Core.CacheDB.SqliteCacheDBHandler import * +from Tribler.Core.CacheDB.SqliteSeedingStatsCacheDB import * +from Tribler.Core.CacheDB.SqliteFriendshipStatsCacheDB import * +from Tribler.Core.RequestPolicy import * +from Tribler.Core.osutils import get_readable_torrent_name +from Tribler.Category.Category import Category +from Tribler.TrackerChecking.TorrentChecking import TorrentChecking + + +SPECIAL_VALUE=481 + +DEBUG = False +PROFILE = False + +# Internal classes +# + +class TriblerLaunchMany(Thread): + + def __init__(self): + """ Called only once (unless we have multiple Sessions) by MainThread """ + Thread.__init__(self) + self.setDaemon(True) + self.setName("Network"+self.getName()) + + def register(self,session,sesslock): + self.session = session + self.sesslock = sesslock + + self.downloads = {} + config = session.sessconfig # Should be safe at startup + + self.locally_guessed_ext_ip = self.guess_ext_ip_from_local_info() + self.upnp_ext_ip = None + self.dialback_ext_ip = None + self.yourip_ext_ip = None + + + # Orig + self.sessdoneflag = Event() + + # Following two attributes set/get by network thread ONLY + self.hashcheck_queue = [] + self.sdownloadtohashcheck = None + + # Following 2 attributes set/get by UPnPThread + self.upnp_thread = None + self.upnp_type = config['upnp_nat_access'] + self.nat_detect = config['nat_detect'] + + 
self.rawserver = RawServer(self.sessdoneflag, + config['timeout_check_interval'], + config['timeout'], + ipv6_enable = config['ipv6_enabled'], + failfunc = self.rawserver_fatalerrorfunc, + errorfunc = self.rawserver_nonfatalerrorfunc) + self.rawserver.add_task(self.rawserver_keepalive,1) + + self.listen_port = self.rawserver.find_and_bind(0, + config['minport'], config['maxport'], config['bind'], + reuse = True, + ipv6_socket_style = config['ipv6_binds_v4'], + randomizer = config['random_port']) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tlm: Got listen port", self.listen_port + + self.overlay_bridge = OverlayThreadingBridge.getInstance() + self.multihandler = MultiHandler(self.rawserver, self.sessdoneflag) + self.shutdownstarttime = None + + # do_cache -> do_overlay -> (do_buddycast, do_download_help) + if config['megacache']: + # init cache db + if config['nickname'] == '__default_name__': + config['nickname'] = socket.gethostname() + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'tlm: Reading Session state from',config['state_dir'] + cachedb.init(config, self.rawserver_fatalerrorfunc) + + # initialize SeedingStats database + cachedb.init_seeding_stats(config, self.rawserver_fatalerrorfunc) + + # initialize Friendship statistics database + cachedb.init_friendship_stats(config, self.rawserver_fatalerrorfunc) + + # initialize VideoPlayback statistics database + cachedb.init_videoplayback_stats(config, self.rawserver_fatalerrorfunc) + + self.my_db = MyDBHandler.getInstance() + self.peer_db = PeerDBHandler.getInstance() + # Register observer to update connection opened/closed to peer_db_handler + self.peer_db.registerConnectionUpdater(self.session) + self.torrent_db = TorrentDBHandler.getInstance() + torrent_collecting_dir = os.path.abspath(config['torrent_collecting_dir']) + self.torrent_db.register(Category.getInstance(),torrent_collecting_dir) + self.mypref_db = MyPreferenceDBHandler.getInstance() + self.pref_db = PreferenceDBHandler.getInstance() + self.superpeer_db = SuperPeerDBHandler.getInstance() + self.superpeer_db.loadSuperPeers(config) + self.friend_db = FriendDBHandler.getInstance() + self.bartercast_db = BarterCastDBHandler.getInstance() + self.bartercast_db.registerSession(self.session) + self.modcast_db = ModerationCastDBHandler.getInstance() + self.modcast_db.registerSession(self.session) + self.votecast_db = VoteCastDBHandler.getInstance() + self.votecast_db.registerSession(self.session) + self.search_db = SearchDBHandler.getInstance() + self.term_db = TermDBHandler.getInstance() + + # Crawling + self.crawler_db = CrawlerDBHandler.getInstance() + self.crawler_db.loadCrawlers(config) + self.seedingstats_db = SeedingStatsDBHandler.getInstance() + self.seedingstatssettings_db = SeedingStatsSettingsDBHandler.getInstance() + self.friendship_statistics_db = FriendshipStatisticsDBHandler().getInstance() + else: + config['overlay'] = 0 # turn overlay off + config['torrent_checking'] = 0 + self.my_db = None + self.peer_db = None + self.torrent_db = None + self.mypref_db = None + self.pref_db = None + self.superpeer_db = None + self.crawler_db = None + self.seedingstats_db = None + self.seedingstatssettings_db = None + self.friendship_statistics_db = None + self.friend_db = None + self.bartercast_db = None + self.mm = None + + if config['overlay']: + self.secure_overlay = SecureOverlay.getInstance() + self.secure_overlay.register(self, config['overlay_max_message_length']) + + # Set policy for which peer 
requests (dl_helper, rquery) to answer and which to ignore + + self.overlay_apps = OverlayApps.getInstance() + # Default policy, override with Session.set_overlay_request_policy() + policy = FriendsCoopDLOtherRQueryQuotumCrawlerAllowAllRequestPolicy(self.session) + + # For the new DB layer we need to run all overlay apps in a + # separate thread instead of the NetworkThread as before. + self.overlay_bridge.register_bridge(self.secure_overlay,self.overlay_apps) + + self.overlay_apps.register(self.overlay_bridge,self.session,self,config,policy) + # It's important we don't start listening to the network until + # all higher protocol-handling layers are properly configured. + self.overlay_bridge.start_listening() + else: + self.secure_overlay = None + self.overlay_apps = None + config['buddycast'] = 0 + config['download_help'] = 0 + config['socnet'] = 0 + config['rquery'] = 0 + + # Minimal to allow yourip external-IP address detection + some_dialback_handler = DialbackMsgHandler.getInstance() + some_dialback_handler.register_yourip(self) + + + if config['megacache'] or config['overlay']: + # Arno: THINK! whoever added this should at least have made the + # config files configurable via SessionConfigInterface. + # + # TODO: see if we can move this out of the core. We could make the + # category a parameter to TorrentDB.addExternalTorrent(), but that + # will not work directly for MetadataHandler, which is part of the + # core. + + # Some author: First Category instantiation requires install_dir, so do it now + Category.getInstance(config['install_dir']) + + # Internal tracker + self.internaltracker = None + if config['internaltracker']: + self.internaltracker = Tracker(config, self.rawserver) + self.httphandler = HTTPHandler(self.internaltracker.get, config['tracker_min_time_between_log_flushes']) + else: + self.httphandler = DummyHTTPHandler() + self.multihandler.set_httphandler(self.httphandler) + + if config['mainline_dht']: + # Start up mainline DHT + # Arno: do this in a try block, as khashmir gives a very funky + # error when started from a .dmg (not from cmd line) on Mac. In + # particular it complains that it cannot find the 'hex' encoding + # method when hstr.encode('hex') is called, and hstr is a string?! 
+ # + try: + rsconvert = RawServerConverter(self.rawserver) + # '' = host, TODO: when local bind set + mainlineDHT.init('',self.listen_port,config['state_dir'],rawserver=rsconvert) + except: + print_exc() + + + # add task for tracker checking + if config['torrent_checking']: + + if config['mainline_dht']: + # Create torrent-liveliness checker based on DHT + c = mainlineDHTChecker.getInstance() + c.register(mainlineDHT.dht) + + self.torrent_checking_period = config['torrent_checking_period'] + #self.torrent_checking_period = 5 + self.rawserver.add_task(self.run_torrent_check, self.torrent_checking_period) + + + def add(self,tdef,dscfg,pstate=None,initialdlstatus=None): + """ Called by any thread """ + self.sesslock.acquire() + try: + if not tdef.is_finalized(): + raise ValueError("TorrentDef not finalized") + + d = Download(self.session,tdef) + infohash = tdef.get_infohash() + + # Check if running or saved on disk + if infohash in self.downloads: + raise DuplicateDownloadException() + elif pstate is None and not tdef.get_live(): # not already resuming + pstate = self.load_download_pstate_noexc(infohash) + if pstate is not None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tlm: add: pstate is",dlstatus_strings[pstate['dlstate']['status']],pstate['dlstate']['progress'] + + # Store in list of Downloads, always. + infohash = d.get_def().get_infohash() + self.downloads[infohash] = d + d.setup(dscfg,pstate,initialdlstatus,self.network_engine_wrapper_created_callback,self.network_vod_event_callback) + + if self.torrent_db != None and self.mypref_db != None: + raw_filename = tdef.get_name_as_unicode() + infohash = tdef.get_infohash() + save_name = get_readable_torrent_name(infohash, raw_filename) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'tlm: add', save_name, self.session.sessconfig + torrent_dir = self.session.sessconfig['torrent_collecting_dir'] + save_path = os.path.join(torrent_dir, save_name) + if not os.path.exists(save_path): # save the torrent to the common torrent dir + tdef.save(save_path) + + # hack, make sure these torrents are always good so they show up + # in TorrentDBHandler.getTorrents() + extra_info = {'status':'good'} + self.torrent_db.addExternalTorrent(save_path, source='',extra_info=extra_info) + dest_path = d.get_dest_dir() + # TODO: if user renamed the dest_path for single-file-torrent + data = {'destination_path':dest_path} + self.mypref_db.addMyPreference(infohash, data) + # BuddyCast is now notified of this new Download in our + # preferences via the Notifier mechanism. See BC.sesscb_ntfy_myprefs() + return d + finally: + self.sesslock.release() + + + def network_engine_wrapper_created_callback(self,d,sd,exc,pstate): + """ Called by network thread """ + if exc is None: + # Always need to call the hashcheck func, even if we're restarting + # a download that was seeding, this is just how the BT engine works. + # We've provided the BT engine with its resumedata, so this should + # be fast. + # + try: + if sd is not None: + self.queue_for_hashcheck(sd) + if pstate is None and not d.get_def().get_live(): + # Checkpoint at startup + (infohash,pstate) = d.network_checkpoint() + self.save_download_pstate(infohash,pstate) + else: + raise TriblerException("tlm: network_engine_wrapper_created_callback: sd is None!") + except Exception,e: + # There was a bug in queue_for_hashcheck that is now fixed. + # Leave this in place to catch unexpected errors. 
+ print_exc() + d.set_error(e) + + + def remove(self,d,removecontent=False): + """ Called by any thread """ + self.sesslock.acquire() + try: + d.stop_remove(removestate=True,removecontent=removecontent) + infohash = d.get_def().get_infohash() + del self.downloads[infohash] + finally: + self.sesslock.release() + + def get_downloads(self): + """ Called by any thread """ + self.sesslock.acquire() + try: + return self.downloads.values() #copy, is mutable + finally: + self.sesslock.release() + + def rawserver_fatalerrorfunc(self,e): + """ Called by network thread """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TriblerLaunchMany: RawServer fatal error func called",e + print_exc() + + def rawserver_nonfatalerrorfunc(self,e): + """ Called by network thread """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TriblerLaunchmany: RawServer non fatal error func called",e + print_exc() + # Could log this somewhere, or phase it out + + def _run(self): + """ Called only once by network thread """ + + try: + try: + self.start_upnp() + self.multihandler.listen_forever() + except: + print_exc() + finally: + if self.internaltracker is not None: + self.internaltracker.save_state() + + self.stop_upnp() + self.rawserver.shutdown() + + def rawserver_keepalive(self): + """ Hack to prevent rawserver sleeping in select() for a long time, not + processing any tasks on its queue at startup time + + Called by network thread """ + self.rawserver.add_task(self.rawserver_keepalive,1) + + # + # TODO: called by TorrentMaker when new torrent added to itracker dir + # Make it such that when Session.add_torrent() is called and the internal + # tracker is used that we write a metainfo to itracker dir and call this. + # + def tracker_rescan_dir(self): + if self.internaltracker is not None: + self.internaltracker.parse_allowed(source='Session') + + # + # Torrent hash checking + # + def queue_for_hashcheck(self,sd): + """ Schedule a SingleDownload for integrity check of on-disk data + + Called by network thread """ + if hash: + self.hashcheck_queue.append(sd) + # Check smallest torrents first + self.hashcheck_queue.sort(singledownload_size_cmp) + + if not self.sdownloadtohashcheck: + self.dequeue_and_start_hashcheck() + + def dequeue_and_start_hashcheck(self): + """ Start integriy check for first SingleDownload in queue + + Called by network thread """ + self.sdownloadtohashcheck = self.hashcheck_queue.pop(0) + self.sdownloadtohashcheck.perform_hashcheck(self.hashcheck_done) + + def hashcheck_done(self,success=True): + """ Integrity check for first SingleDownload in queue done + + Called by network thread """ + if success: + self.sdownloadtohashcheck.hashcheck_done() + if self.hashcheck_queue: + self.dequeue_and_start_hashcheck() + else: + self.sdownloadtohashcheck = None + + # + # State retrieval + # + def set_download_states_callback(self,usercallback,getpeerlist,when=0.0): + """ Called by any thread """ + network_set_download_states_callback_lambda = lambda:self.network_set_download_states_callback(usercallback,getpeerlist) + self.rawserver.add_task(network_set_download_states_callback_lambda,when) + + def network_set_download_states_callback(self,usercallback,getpeerlist): + """ Called by network thread """ + self.sesslock.acquire() + try: + # Even if the list of Downloads changes in the mean time this is + # no problem. 
For removals, dllist will still hold a pointer to the + # Download, and additions are no problem (just won't be included + # in list of states returned via callback. + # + dllist = self.downloads.values() + finally: + self.sesslock.release() + + dslist = [] + for d in dllist: + ds = d.network_get_state(None,getpeerlist,sessioncalling=True) + dslist.append(ds) + + # Invoke the usercallback function via a new thread. + # After the callback is invoked, the return values will be passed to + # the returncallback for post-callback processing. + self.session.uch.perform_getstate_usercallback(usercallback,dslist,self.sesscb_set_download_states_returncallback) + + def sesscb_set_download_states_returncallback(self,usercallback,when,newgetpeerlist): + """ Called by SessionCallbackThread """ + if when > 0.0: + # reschedule + self.set_download_states_callback(usercallback,newgetpeerlist,when=when) + + # + # Persistence methods + # + def load_checkpoint(self,initialdlstatus=None): + """ Called by any thread """ + self.sesslock.acquire() + try: + dir = self.session.get_downloads_pstate_dir() + filelist = os.listdir(dir) + for basename in filelist: + # Make this go on when a torrent fails to start + filename = os.path.join(dir,basename) + self.resume_download(filename,initialdlstatus) + finally: + self.sesslock.release() + + + def load_download_pstate_noexc(self,infohash): + """ Called by any thread, assume sesslock already held """ + try: + dir = self.session.get_downloads_pstate_dir() + basename = binascii.hexlify(infohash)+'.pickle' + filename = os.path.join(dir,basename) + return self.load_download_pstate(filename) + except Exception,e: + # TODO: remove saved checkpoint? + #self.rawserver_nonfatalerrorfunc(e) + return None + + def resume_download(self,filename,initialdlstatus=None): + try: + # TODO: filter for file not found explicitly? + pstate = self.load_download_pstate(filename) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tlm: load_checkpoint: pstate is",dlstatus_strings[pstate['dlstate']['status']],pstate['dlstate']['progress'] + if pstate['engineresumedata'] is None: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tlm: load_checkpoint: resumedata None" + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tlm: load_checkpoint: resumedata len",len(pstate['engineresumedata']) + + tdef = TorrentDef.load_from_dict(pstate['metainfo']) + + # Activate + dscfg = DownloadStartupConfig(dlconfig=pstate['dlconfig']) + self.add(tdef,dscfg,pstate,initialdlstatus) + except Exception,e: + # TODO: remove saved checkpoint? + self.rawserver_nonfatalerrorfunc(e) + + + def checkpoint(self,stop=False,checkpoint=True,gracetime=2.0): + """ Called by any thread, assume sesslock already held """ + # Even if the list of Downloads changes in the mean time this is + # no problem. For removals, dllist will still hold a pointer to the + # Download, and additions are no problem (just won't be included + # in list of states returned via callback. 
+ # + dllist = self.downloads.values() + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tlm: checkpointing",len(dllist) + + network_checkpoint_callback_lambda = lambda:self.network_checkpoint_callback(dllist,stop,checkpoint,gracetime) + self.rawserver.add_task(network_checkpoint_callback_lambda,0.0) + + + def network_checkpoint_callback(self,dllist,stop,checkpoint,gracetime): + """ Called by network thread """ + if checkpoint: + for d in dllist: + # Tell all downloads to stop, and save their persistent state + # in a infohash -> pstate dict which is then passed to the user + # for storage. + # + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tlm: network checkpointing:",`d.get_def().get_name()` + if stop: + (infohash,pstate) = d.network_stop(False,False) + else: + (infohash,pstate) = d.network_checkpoint() + + try: + self.save_download_pstate(infohash,pstate) + except Exception,e: + self.rawserver_nonfatalerrorfunc(e) + + if stop: + # Some grace time for early shutdown tasks + if self.shutdownstarttime is not None: + now = time() + diff = now - self.shutdownstarttime + if diff < gracetime: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tlm: shutdown: delaying for early shutdown tasks",gracetime-diff + delay = gracetime-diff + network_shutdown_callback_lambda = lambda:self.network_shutdown() + self.rawserver.add_task(network_shutdown_callback_lambda,delay) + return + + self.network_shutdown() + + + def early_shutdown(self): + """ Called as soon as Session shutdown is initiated. Used to start + shutdown tasks that takes some time and that can run in parallel + to checkpointing, etc. + """ + if self.overlay_apps is not None: + self.shutdownstarttime = time() + self.overlay_bridge.add_task(self.overlay_apps.early_shutdown,0) + + + def network_shutdown(self): + try: + # Detect if megacache is enabled + if self.peer_db is not None: + db = SQLiteCacheDB.getInstance() + db.commit() + + mainlineDHT.deinit() + + ts = enumerate() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tlm: Number of threads still running",len(ts) + for t in ts: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tlm: Thread still running",t.getName(),"daemon",t.isDaemon() + except: + print_exc() + + # Stop network thread + self.sessdoneflag.set() + + def save_download_pstate(self,infohash,pstate): + """ Called by network thread """ + basename = binascii.hexlify(infohash)+'.pickle' + filename = os.path.join(self.session.get_downloads_pstate_dir(),basename) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tlm: network checkpointing: to file",filename + f = open(filename,"wb") + pickle.dump(pstate,f) + f.close() + + + def load_download_pstate(self,filename): + """ Called by any thread """ + f = open(filename,"rb") + pstate = pickle.load(f) + f.close() + return pstate + + # + # External IP address methods + # + def guess_ext_ip_from_local_info(self): + """ Called at creation time """ + ip = get_my_wan_ip() + if ip is None: + host = socket.gethostbyname_ex(socket.gethostname()) + ipaddrlist = host[2] + for ip in ipaddrlist: + return ip + return '127.0.0.1' + else: + return ip + + def run(self): + if PROFILE: + fname = "profile-%s" % self.getName() + import cProfile + cProfile.runctx( "self._run()", globals(), locals(), filename=fname ) + import pstats + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","profile: data for %s" % self.getName() + 
pstats.Stats(fname,stream=sys.stderr).sort_stats("cumulative").print_stats(20) + else: + self._run() + + def start_upnp(self): + """ Arno: as the UPnP discovery and calls to the firewall can be slow, + do it in a separate thread. When it fails, it should report popup + a dialog to inform and help the user. Or report an error in textmode. + + Must save type here, to handle case where user changes the type + In that case we still need to delete the port mapping using the old mechanism + + Called by network thread """ + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tlm: start_upnp()" + self.set_activity(NTFY_ACT_UPNP) + self.upnp_thread = UPnPThread(self.upnp_type,self.locally_guessed_ext_ip,self.listen_port,self.upnp_failed_callback,self.upnp_got_ext_ip_callback) + self.upnp_thread.start() + + def stop_upnp(self): + """ Called by network thread """ + if self.upnp_type > 0: + self.upnp_thread.shutdown() + + def upnp_failed_callback(self,upnp_type,listenport,error_type,exc=None,listenproto='TCP'): + """ Called by UPnP thread TODO: determine how to pass to API user + In principle this is a non fatal error. But it is one we wish to + show to the user """ + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","UPnP mode "+str(upnp_type)+" request to firewall failed with error "+str(error_type)+" Try setting a different mode in Preferences. Listen port was "+str(listenport)+", protocol"+listenproto,exc + + def upnp_got_ext_ip_callback(self,ip): + """ Called by UPnP thread """ + self.sesslock.acquire() + self.upnp_ext_ip = ip + self.sesslock.release() + + def dialback_got_ext_ip_callback(self,ip): + """ Called by network thread """ + self.sesslock.acquire() + self.dialback_ext_ip = ip + self.sesslock.release() + + def yourip_got_ext_ip_callback(self,ip): + """ Called by network thread """ + self.sesslock.acquire() + self.yourip_ext_ip = ip + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tlm: yourip_got_ext_ip_callback: others think my IP address is",ip + self.sesslock.release() + + + def get_ext_ip(self,unknowniflocal=False): + """ Called by any thread """ + self.sesslock.acquire() + try: + if self.dialback_ext_ip is not None: + # more reliable + return self.dialback_ext_ip # string immutable + elif self.upnp_ext_ip is not None: + # good reliability, if known + return self.upnp_ext_ip + elif self.yourip_ext_ip is not None: + # majority vote, could be rigged + return self.yourip_ext_ip + else: + # slighly wild guess + if unknowniflocal: + return None + else: + return self.locally_guessed_ext_ip + finally: + self.sesslock.release() + + + def get_int_ip(self): + """ Called by any thread """ + self.sesslock.acquire() + try: + return self.locally_guessed_ext_ip + finally: + self.sesslock.release() + + + # + # Events from core meant for API user + # + def dialback_reachable_callback(self): + """ Called by overlay+network thread """ + self.session.uch.notify(NTFY_REACHABLE, NTFY_INSERT, None, '') + + + def set_activity(self,type, str = '', arg2=None): + """ Called by overlay + network thread """ + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tlm: set_activity",type,str,arg2 + self.session.uch.notify(NTFY_ACTIVITIES, NTFY_INSERT, type, str, arg2) + + + def network_vod_event_callback(self,videoinfo,event,params): + """ Called by network thread """ + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tlm: network_vod_event_callback: event %s, params %s" % (event,params) + 
+ + # Call Session threadpool to call user's callback + videoinfo['usercallback'](event,params) + + + def update_torrent_checking_period(self): + # dynamically change the interval: update at least once per day + if self.overlay_apps and self.overlay_apps.metadata_handler: + ntorrents = self.overlay_apps.metadata_handler.num_torrents + if ntorrents > 0: + self.torrent_checking_period = min(max(86400/ntorrents, 15), 300) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "torrent_checking_period", self.torrent_checking_period + #self.torrent_checking_period = 1 ### DEBUG, remove it before release!! + + def run_torrent_check(self): + """ Called by network thread """ + + self.update_torrent_checking_period() + self.rawserver.add_task(self.run_torrent_check, self.torrent_checking_period) + # print "torrent_checking start" + try: + t = TorrentChecking() + t.start() + except Exception, e: + print_exc() + self.rawserver_nonfatalerrorfunc(e) + + def get_coopdl_role_object(self,infohash,role): + """ Called by network thread """ + role_object = None + self.sesslock.acquire() + try: + if infohash in self.downloads: + d = self.downloads[infohash] + role_object = d.get_coopdl_role_object(role) + finally: + self.sesslock.release() + return role_object + + + def h4xor_reset_init_conn_counter(self): + self.rawserver.add_task(self.network_h4xor_reset,0) + + def network_h4xor_reset(self): + from Tribler.Core.BitTornado.BT1.Encrypter import incompletecounter + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tlm: h4x0r Resetting outgoing TCP connection rate limiter",incompletecounter.c,"===" + incompletecounter.c = 0 + + +def singledownload_size_cmp(x,y): + """ Method that compares 2 SingleDownload objects based on the size of the + content of the BT1Download (if any) contained in them. + """ + if x is None and y is None: + return 0 + elif x is None: + return 1 + elif y is None: + return -1 + else: + a = x.get_bt1download() + b = y.get_bt1download() + if a is None and b is None: + return 0 + elif a is None: + return 1 + elif b is None: + return -1 + else: + if a.get_datalength() == b.get_datalength(): + return 0 + elif a.get_datalength() < b.get_datalength(): + return -1 + else: + return 1 + diff --git a/tribler-mod/Tribler/Core/APIImplementation/SessionRuntimeConfig.py b/tribler-mod/Tribler/Core/APIImplementation/SessionRuntimeConfig.py new file mode 100644 index 0000000..0a77a94 --- /dev/null +++ b/tribler-mod/Tribler/Core/APIImplementation/SessionRuntimeConfig.py @@ -0,0 +1,866 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information + +import sys +from traceback import print_exc + +from Tribler.Core.exceptions import * + +from Tribler.Core.SessionConfig import SessionConfigInterface +from Tribler.Core.Overlay.OverlayThreadingBridge import OverlayThreadingBridge +from Tribler.Core.Overlay.MetadataHandler import MetadataHandler +from Tribler.Core.BuddyCast.buddycast import BuddyCastFactory + +class SessionRuntimeConfig(SessionConfigInterface): + """ + Implements the Tribler.Core.API.SessionConfigInterface + + Use these to change the session config at runtime. 
+ """ + def set_state_dir(self,statedir): + raise OperationNotPossibleAtRuntimeException() + + def get_state_dir(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_state_dir(self) + finally: + self.sesslock.release() + + def set_install_dir(self,statedir): + raise OperationNotPossibleAtRuntimeException() + + def get_install_dir(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_install_dir(self) + finally: + self.sesslock.release() + + def set_permid_keypair_filename(self,keypair): + raise OperationNotPossibleAtRuntimeException() + + def get_permid_keypair_filename(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_permid_keypair_filename(self) + finally: + self.sesslock.release() + + def set_listen_port(self,port): + raise OperationNotPossibleAtRuntimeException() + + def get_listen_port(self): + # To protect self.sessconfig + self.sesslock.acquire() + try: + return SessionConfigInterface.get_listen_port(self) + finally: + self.sesslock.release() + + def get_video_analyser_path(self): + # To protect self.sessconfig + self.sesslock.acquire() + try: + return SessionConfigInterface.get_video_analyser_path(self) + finally: + self.sesslock.release() + + def set_tracker_ip(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_ip(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_ip(self) + finally: + self.sesslock.release() + + def set_bind_to_addresses(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_bind_to_addresses(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_bind_to_addresses(self) + finally: + self.sesslock.release() + + def set_upnp_mode(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_upnp_mode(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_upnp_mode(self) + finally: + self.sesslock.release() + + def set_autoclose_timeout(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_autoclose_timeout(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_autoclose_timeout(self) + finally: + self.sesslock.release() + + def set_autoclose_check_interval(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_autoclose_check_interval(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_autoclose_check_interval(self) + finally: + self.sesslock.release() + + def set_megacache(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_megacache(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_megacache(self) + finally: + self.sesslock.release() + + def set_overlay(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_overlay(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_overlay(self) + finally: + self.sesslock.release() + + def set_buddycast(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_buddycast(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_buddycast(self) + finally: + self.sesslock.release() + + def set_start_recommender(self,value): + self.sesslock.acquire() + try: + SessionConfigInterface.set_start_recommender(self,value) + olbridge = OverlayThreadingBridge.getInstance() + task = lambda:self.olthread_set_start_recommender(value) + olbridge.add_task(task,0) + finally: + self.sesslock.release() + + def 
olthread_set_start_recommender(self,value): + bcfac = BuddyCastFactory.getInstance() + if value: + bcfac.restartBuddyCast() + else: + bcfac.pauseBuddyCast() + + def get_start_recommender(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_start_recommender(self) + finally: + self.sesslock.release() + + def set_download_help(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_download_help(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_download_help(self) + finally: + self.sesslock.release() + + def set_torrent_collecting(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_torrent_collecting(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_torrent_collecting(self) + finally: + self.sesslock.release() + + + def set_torrent_collecting_dir(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_torrent_collecting_dir(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_torrent_collecting_dir(self) + finally: + self.sesslock.release() + + + def set_superpeer(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_superpeer(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_superpeer(self) + finally: + self.sesslock.release() + + def set_overlay_log(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_overlay_log(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_overlay_log(self) + finally: + self.sesslock.release() + + def set_buddycast_interval(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_buddycast_interval(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_buddycast_interval(self) + finally: + self.sesslock.release() + + def set_torrent_collecting_max_torrents(self,value): + self.sesslock.acquire() + try: + SessionConfigInterface.set_torrent_collecting_max_torrents(self,value) + olbridge = OverlayThreadingBridge.getInstance() + task = lambda:self.olthread_set_torrent_collecting_max_torrents(value) + olbridge.add_task(task,0) + finally: + self.sesslock.release() + + def olthread_set_torrent_collecting_max_torrents(self,value): + mh = MetadataHandler.getInstance() + mh.set_overflow(value) + mh.delayed_check_overflow(2) + + + def get_torrent_collecting_max_torrents(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_torrent_collecting_max_torrents(self) + finally: + self.sesslock.release() + + def set_buddycast_max_peers(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_buddycast_max_peers(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_buddycast_max_peers(self) + finally: + self.sesslock.release() + + def set_torrent_collecting_rate(self,value): + self.sesslock.acquire() + try: + SessionConfigInterface.set_torrent_collecting_rate(self,value) + olbridge = OverlayThreadingBridge.getInstance() + task = lambda:self.olthread_set_torrent_collecting_rate(value) + olbridge.add_task(task,0) + finally: + self.sesslock.release() + + def olthread_set_torrent_collecting_rate(self,value): + mh = MetadataHandler.getInstance() + mh.set_rate(value) + + def get_torrent_collecting_rate(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_torrent_collecting_rate(self) + finally: + self.sesslock.release() + + def set_torrent_checking(self,value): + raise OperationNotPossibleAtRuntimeException() + + def 
get_torrent_checking(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_torrent_checking(self) + finally: + self.sesslock.release() + + def set_torrent_checking_period(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_torrent_checking_period(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_torrent_checking_period(self) + finally: + self.sesslock.release() + + def set_dialback(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_dialback(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_dialback(self) + finally: + self.sesslock.release() + + def set_social_networking(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_social_networking(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_social_networking(self) + finally: + self.sesslock.release() + + def set_remote_query(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_remote_query(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_remote_query(self) + finally: + self.sesslock.release() + + def set_stop_collecting_threshold(self,value): + self.sesslock.acquire() + try: + SessionConfigInterface.set_stop_collecting_threshold(self,value) + olbridge = OverlayThreadingBridge.getInstance() + task = lambda:self.olthread_set_stop_collecting_threshold(value) + olbridge.add_task(task,0) + finally: + self.sesslock.release() + + def olthread_set_stop_collecting_threshold(self,value): + mh = MetadataHandler.getInstance() + mh.set_min_free_space(value) + mh.delayed_check_free_space(2) + + def get_stop_collecting_threshold(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_stop_collecting_threshold(self) + finally: + self.sesslock.release() + + def set_internal_tracker(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_internal_tracker(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_internal_tracker(self) + finally: + self.sesslock.release() + + def set_mainline_dht(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_mainline_dht(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_mainline_dht(self) + finally: + self.sesslock.release() + + def set_nickname(self,value): + self.sesslock.acquire() + try: + return SessionConfigInterface.set_nickname(self, value) + finally: + self.sesslock.release() + + def get_nickname(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_nickname(self) + finally: + self.sesslock.release() + + def set_mugshot(self,value, mime='image/jpeg'): + self.sesslock.acquire() + try: + return SessionConfigInterface.set_mugshot(self, value, mime) + finally: + self.sesslock.release() + + def get_mugshot(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_mugshot(self) + finally: + self.sesslock.release() + + + def set_tracker_dfile(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_dfile(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_dfile(self) + finally: + self.sesslock.release() + + def set_tracker_dfile_format(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_dfile_format(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_dfile_format(self) + finally: + self.sesslock.release() + + def set_tracker_socket_timeout(self,value): 
+ raise OperationNotPossibleAtRuntimeException() + + def get_tracker_socket_timeout(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_socket_timeout(self) + finally: + self.sesslock.release() + + def set_tracker_save_dfile_interval(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_save_dfile_interval(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_save_dfile_interval(self) + finally: + self.sesslock.release() + + def set_tracker_timeout_downloaders_interval(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_timeout_downloaders_interval(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_timeout_downloaders_interval(self) + finally: + self.sesslock.release() + + def set_tracker_reannounce_interval(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_reannounce_interval(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_reannounce_interval(self) + finally: + self.sesslock.release() + + def set_tracker_response_size(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_response_size(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_response_size(self) + finally: + self.sesslock.release() + + def set_tracker_timeout_check_interval(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_timeout_check_interval(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_timeout_check_interval(self) + finally: + self.sesslock.release() + + def set_tracker_nat_check(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_nat_check(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_nat_check(self) + finally: + self.sesslock.release() + + def set_tracker_log_nat_checks(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_log_nat_checks(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_log_nat_checks(self) + finally: + self.sesslock.release() + + def set_tracker_min_time_between_log_flushes(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_min_time_between_log_flushes(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_min_time_between_log_flushes(self) + finally: + self.sesslock.release() + + def set_tracker_min_time_between_cache_refreshes(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_min_time_between_cache_refreshes(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_min_time_between_cache_refreshes(self) + finally: + self.sesslock.release() + + def set_tracker_allowed_dir(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_allowed_dir(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_allowed_dir(self) + finally: + self.sesslock.release() + + def set_tracker_allowed_list(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_allowed_list(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_allowed_list(self) + finally: + self.sesslock.release() + + def set_tracker_allowed_controls(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_allowed_controls(self): + 
self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_allowed_controls(self) + finally: + self.sesslock.release() + + def set_tracker_multitracker_enabled(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_multitracker_enabled(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_multitracker_enabled(self) + finally: + self.sesslock.release() + + def set_tracker_multitracker_allowed(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_multitracker_allowed(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_multitracker_allowed(self) + finally: + self.sesslock.release() + + def set_tracker_multitracker_reannounce_interval(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_multitracker_reannounce_interval(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_multitracker_reannounce_interval(self) + finally: + self.sesslock.release() + + def set_tracker_multitracker_maxpeers(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_multitracker_maxpeers(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_multitracker_maxpeers(self) + finally: + self.sesslock.release() + + def set_tracker_aggregate_forward(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_aggregate_forward(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_aggregate_forward(self) + finally: + self.sesslock.release() + + def set_tracker_aggregator(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_aggregator(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_aggregator(self) + finally: + self.sesslock.release() + + def set_tracker_hupmonitor(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_hupmonitor(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_hupmonitor(self) + finally: + self.sesslock.release() + + def set_tracker_multitracker_http_timeout(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_multitracker_http_timeout(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_multitracker_http_timeout(self) + finally: + self.sesslock.release() + + def set_tracker_parse_dir_interval(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_parse_dir_interval(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_parse_dir_interval(self) + finally: + self.sesslock.release() + + def set_tracker_show_infopage(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_show_infopage(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_show_infopage(self) + finally: + self.sesslock.release() + + def set_tracker_infopage_redirect(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_infopage_redirect(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_infopage_redirect(self) + finally: + self.sesslock.release() + + def set_tracker_show_names(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_show_names(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_show_names(self) + finally: + 
self.sesslock.release() + + def set_tracker_favicon(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_favicon(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_favicon(self) + finally: + self.sesslock.release() + + def set_tracker_allowed_ips(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_allowed_ips(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_allowed_ips(self) + finally: + self.sesslock.release() + + def set_tracker_banned_ips(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_banned_ips(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_banned_ips(self) + finally: + self.sesslock.release() + + def set_tracker_only_local_override_ip(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_only_local_override_ip(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_only_local_override_ip(self) + finally: + self.sesslock.release() + + def set_tracker_logfile(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_logfile(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_logfile(self) + finally: + self.sesslock.release() + + def set_tracker_allow_get(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_allow_get(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_allow_get(self) + finally: + self.sesslock.release() + + def set_tracker_keep_dead(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_keep_dead(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_keep_dead(self) + finally: + self.sesslock.release() + + def set_tracker_scrape_allowed(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_scrape_allowed(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_scrape_allowed(self) + finally: + self.sesslock.release() + + def set_overlay_max_message_length(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_overlay_max_message_length(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_overlay_max_message_length(self) + finally: + self.sesslock.release() + + def set_download_help_dir(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_download_help_dir(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_download_help_dir(self) + finally: + self.sesslock.release() + + def set_bartercast(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_bartercast(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_bartercast(self) + finally: + self.sesslock.release() + + def set_superpeer_file(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_superpeer_file(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_superpeer_file(self) + finally: + self.sesslock.release() + + def set_buddycast_collecting_solution(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_buddycast_collecting_solution(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_buddycast_collecting_solution(self) + finally: + self.sesslock.release() + + def set_peer_icon_path(self,value): + raise 
OperationNotPossibleAtRuntimeException() + + def get_peer_icon_path(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_peer_icon_path(self) + finally: + self.sesslock.release() + + # + # NAT Puncturing servers information setting/retrieval + # + def set_nat_detect(self,value): + raise OperationNotPossibleAtRuntimeException() + + def set_puncturing_private_port(self, puncturing_private_port): + raise OperationNotPossibleAtRuntimeException() + + def set_stun_servers(self, stun_servers): + raise OperationNotPossibleAtRuntimeException() + + def set_pingback_servers(self, pingback_servers): + raise OperationNotPossibleAtRuntimeException() + + def set_puncturing_coordinators(self, puncturing_coordinators): + raise OperationNotPossibleAtRuntimeException() + + def get_nat_detect(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_nat_detect(self) + finally: + self.sesslock.release() + + def get_puncturing_internal_port(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_puncturing_internal_port(self) + finally: + self.sesslock.release() + + def get_stun_servers(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_stun_servers(self) + finally: + self.sesslock.release() + + def get_pingback_servers(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_pingback_servers(self) + finally: + self.sesslock.release() + diff --git a/tribler-mod/Tribler/Core/APIImplementation/SessionRuntimeConfig.py.bak b/tribler-mod/Tribler/Core/APIImplementation/SessionRuntimeConfig.py.bak new file mode 100644 index 0000000..5d22cc6 --- /dev/null +++ b/tribler-mod/Tribler/Core/APIImplementation/SessionRuntimeConfig.py.bak @@ -0,0 +1,865 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information + +import sys +from traceback import print_exc + +from Tribler.Core.exceptions import * + +from Tribler.Core.SessionConfig import SessionConfigInterface +from Tribler.Core.Overlay.OverlayThreadingBridge import OverlayThreadingBridge +from Tribler.Core.Overlay.MetadataHandler import MetadataHandler +from Tribler.Core.BuddyCast.buddycast import BuddyCastFactory + +class SessionRuntimeConfig(SessionConfigInterface): + """ + Implements the Tribler.Core.API.SessionConfigInterface + + Use these to change the session config at runtime. 
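# Every method in SessionRuntimeConfig follows one of three templates: getters
# take self.sesslock, delegate to SessionConfigInterface and release the lock
# in a finally clause; setters for options that are fixed once the Session is
# running raise OperationNotPossibleAtRuntimeException; the few runtime-settable
# options (start_recommender, torrent_collecting_max_torrents,
# torrent_collecting_rate, stop_collecting_threshold, nickname, mugshot) also
# hand the new value to the overlay thread via OverlayThreadingBridge.add_task().
# A condensed, self-contained sketch of that pattern; every name below is
# invented for illustration and is not part of the Tribler API:

import threading

class OperationNotPossibleAtRuntimeError(Exception):
    # stand-in for Tribler.Core.exceptions.OperationNotPossibleAtRuntimeException
    pass

class ImmediateBridge:
    # stand-in for OverlayThreadingBridge: runs the task right away
    def add_task(self, task, delay):
        task()

class ExampleRuntimeConfig:
    def __init__(self):
        self.sesslock = threading.Lock()
        self.sessconfig = {'listen_port': 7762, 'torrent_collecting_rate': 5}
        self.olbridge = ImmediateBridge()
        self.applied_rate = None

    def get_listen_port(self):
        self.sesslock.acquire()
        try:
            return self.sessconfig['listen_port']
        finally:
            self.sesslock.release()

    def set_listen_port(self, port):
        # fixed for the lifetime of the session
        raise OperationNotPossibleAtRuntimeError()

    def set_torrent_collecting_rate(self, value):
        self.sesslock.acquire()
        try:
            self.sessconfig['torrent_collecting_rate'] = value
            # push the change to the overlay thread, as the real class does
            self.olbridge.add_task(lambda: self.olthread_set_rate(value), 0)
        finally:
            self.sesslock.release()

    def olthread_set_rate(self, value):
        # the real olthread_* helpers call e.g. MetadataHandler.set_rate(value)
        self.applied_rate = value

config = ExampleRuntimeConfig()
config.set_torrent_collecting_rate(10)
assert config.get_listen_port() == 7762 and config.applied_rate == 10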
+ """ + def set_state_dir(self,statedir): + raise OperationNotPossibleAtRuntimeException() + + def get_state_dir(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_state_dir(self) + finally: + self.sesslock.release() + + def set_install_dir(self,statedir): + raise OperationNotPossibleAtRuntimeException() + + def get_install_dir(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_install_dir(self) + finally: + self.sesslock.release() + + def set_permid_keypair_filename(self,keypair): + raise OperationNotPossibleAtRuntimeException() + + def get_permid_keypair_filename(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_permid_keypair_filename(self) + finally: + self.sesslock.release() + + def set_listen_port(self,port): + raise OperationNotPossibleAtRuntimeException() + + def get_listen_port(self): + # To protect self.sessconfig + self.sesslock.acquire() + try: + return SessionConfigInterface.get_listen_port(self) + finally: + self.sesslock.release() + + def get_video_analyser_path(self): + # To protect self.sessconfig + self.sesslock.acquire() + try: + return SessionConfigInterface.get_video_analyser_path(self) + finally: + self.sesslock.release() + + def set_tracker_ip(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_ip(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_ip(self) + finally: + self.sesslock.release() + + def set_bind_to_addresses(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_bind_to_addresses(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_bind_to_addresses(self) + finally: + self.sesslock.release() + + def set_upnp_mode(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_upnp_mode(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_upnp_mode(self) + finally: + self.sesslock.release() + + def set_autoclose_timeout(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_autoclose_timeout(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_autoclose_timeout(self) + finally: + self.sesslock.release() + + def set_autoclose_check_interval(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_autoclose_check_interval(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_autoclose_check_interval(self) + finally: + self.sesslock.release() + + def set_megacache(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_megacache(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_megacache(self) + finally: + self.sesslock.release() + + def set_overlay(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_overlay(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_overlay(self) + finally: + self.sesslock.release() + + def set_buddycast(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_buddycast(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_buddycast(self) + finally: + self.sesslock.release() + + def set_start_recommender(self,value): + self.sesslock.acquire() + try: + SessionConfigInterface.set_start_recommender(self,value) + olbridge = OverlayThreadingBridge.getInstance() + task = lambda:self.olthread_set_start_recommender(value) + olbridge.add_task(task,0) + finally: + self.sesslock.release() + + def 
olthread_set_start_recommender(self,value): + bcfac = BuddyCastFactory.getInstance() + if value: + bcfac.restartBuddyCast() + else: + bcfac.pauseBuddyCast() + + def get_start_recommender(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_start_recommender(self) + finally: + self.sesslock.release() + + def set_download_help(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_download_help(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_download_help(self) + finally: + self.sesslock.release() + + def set_torrent_collecting(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_torrent_collecting(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_torrent_collecting(self) + finally: + self.sesslock.release() + + + def set_torrent_collecting_dir(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_torrent_collecting_dir(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_torrent_collecting_dir(self) + finally: + self.sesslock.release() + + + def set_superpeer(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_superpeer(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_superpeer(self) + finally: + self.sesslock.release() + + def set_overlay_log(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_overlay_log(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_overlay_log(self) + finally: + self.sesslock.release() + + def set_buddycast_interval(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_buddycast_interval(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_buddycast_interval(self) + finally: + self.sesslock.release() + + def set_torrent_collecting_max_torrents(self,value): + self.sesslock.acquire() + try: + SessionConfigInterface.set_torrent_collecting_max_torrents(self,value) + olbridge = OverlayThreadingBridge.getInstance() + task = lambda:self.olthread_set_torrent_collecting_max_torrents(value) + olbridge.add_task(task,0) + finally: + self.sesslock.release() + + def olthread_set_torrent_collecting_max_torrents(self,value): + mh = MetadataHandler.getInstance() + mh.set_overflow(value) + mh.delayed_check_overflow(2) + + + def get_torrent_collecting_max_torrents(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_torrent_collecting_max_torrents(self) + finally: + self.sesslock.release() + + def set_buddycast_max_peers(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_buddycast_max_peers(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_buddycast_max_peers(self) + finally: + self.sesslock.release() + + def set_torrent_collecting_rate(self,value): + self.sesslock.acquire() + try: + SessionConfigInterface.set_torrent_collecting_rate(self,value) + olbridge = OverlayThreadingBridge.getInstance() + task = lambda:self.olthread_set_torrent_collecting_rate(value) + olbridge.add_task(task,0) + finally: + self.sesslock.release() + + def olthread_set_torrent_collecting_rate(self,value): + mh = MetadataHandler.getInstance() + mh.set_rate(value) + + def get_torrent_collecting_rate(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_torrent_collecting_rate(self) + finally: + self.sesslock.release() + + def set_torrent_checking(self,value): + raise OperationNotPossibleAtRuntimeException() + + def 
get_torrent_checking(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_torrent_checking(self) + finally: + self.sesslock.release() + + def set_torrent_checking_period(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_torrent_checking_period(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_torrent_checking_period(self) + finally: + self.sesslock.release() + + def set_dialback(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_dialback(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_dialback(self) + finally: + self.sesslock.release() + + def set_social_networking(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_social_networking(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_social_networking(self) + finally: + self.sesslock.release() + + def set_remote_query(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_remote_query(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_remote_query(self) + finally: + self.sesslock.release() + + def set_stop_collecting_threshold(self,value): + self.sesslock.acquire() + try: + SessionConfigInterface.set_stop_collecting_threshold(self,value) + olbridge = OverlayThreadingBridge.getInstance() + task = lambda:self.olthread_set_stop_collecting_threshold(value) + olbridge.add_task(task,0) + finally: + self.sesslock.release() + + def olthread_set_stop_collecting_threshold(self,value): + mh = MetadataHandler.getInstance() + mh.set_min_free_space(value) + mh.delayed_check_free_space(2) + + def get_stop_collecting_threshold(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_stop_collecting_threshold(self) + finally: + self.sesslock.release() + + def set_internal_tracker(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_internal_tracker(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_internal_tracker(self) + finally: + self.sesslock.release() + + def set_mainline_dht(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_mainline_dht(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_mainline_dht(self) + finally: + self.sesslock.release() + + def set_nickname(self,value): + self.sesslock.acquire() + try: + return SessionConfigInterface.set_nickname(self, value) + finally: + self.sesslock.release() + + def get_nickname(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_nickname(self) + finally: + self.sesslock.release() + + def set_mugshot(self,value, mime='image/jpeg'): + self.sesslock.acquire() + try: + return SessionConfigInterface.set_mugshot(self, value, mime) + finally: + self.sesslock.release() + + def get_mugshot(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_mugshot(self) + finally: + self.sesslock.release() + + + def set_tracker_dfile(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_dfile(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_dfile(self) + finally: + self.sesslock.release() + + def set_tracker_dfile_format(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_dfile_format(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_dfile_format(self) + finally: + self.sesslock.release() + + def set_tracker_socket_timeout(self,value): 
+ raise OperationNotPossibleAtRuntimeException() + + def get_tracker_socket_timeout(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_socket_timeout(self) + finally: + self.sesslock.release() + + def set_tracker_save_dfile_interval(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_save_dfile_interval(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_save_dfile_interval(self) + finally: + self.sesslock.release() + + def set_tracker_timeout_downloaders_interval(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_timeout_downloaders_interval(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_timeout_downloaders_interval(self) + finally: + self.sesslock.release() + + def set_tracker_reannounce_interval(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_reannounce_interval(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_reannounce_interval(self) + finally: + self.sesslock.release() + + def set_tracker_response_size(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_response_size(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_response_size(self) + finally: + self.sesslock.release() + + def set_tracker_timeout_check_interval(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_timeout_check_interval(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_timeout_check_interval(self) + finally: + self.sesslock.release() + + def set_tracker_nat_check(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_nat_check(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_nat_check(self) + finally: + self.sesslock.release() + + def set_tracker_log_nat_checks(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_log_nat_checks(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_log_nat_checks(self) + finally: + self.sesslock.release() + + def set_tracker_min_time_between_log_flushes(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_min_time_between_log_flushes(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_min_time_between_log_flushes(self) + finally: + self.sesslock.release() + + def set_tracker_min_time_between_cache_refreshes(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_min_time_between_cache_refreshes(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_min_time_between_cache_refreshes(self) + finally: + self.sesslock.release() + + def set_tracker_allowed_dir(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_allowed_dir(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_allowed_dir(self) + finally: + self.sesslock.release() + + def set_tracker_allowed_list(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_allowed_list(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_allowed_list(self) + finally: + self.sesslock.release() + + def set_tracker_allowed_controls(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_allowed_controls(self): + 
self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_allowed_controls(self) + finally: + self.sesslock.release() + + def set_tracker_multitracker_enabled(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_multitracker_enabled(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_multitracker_enabled(self) + finally: + self.sesslock.release() + + def set_tracker_multitracker_allowed(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_multitracker_allowed(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_multitracker_allowed(self) + finally: + self.sesslock.release() + + def set_tracker_multitracker_reannounce_interval(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_multitracker_reannounce_interval(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_multitracker_reannounce_interval(self) + finally: + self.sesslock.release() + + def set_tracker_multitracker_maxpeers(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_multitracker_maxpeers(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_multitracker_maxpeers(self) + finally: + self.sesslock.release() + + def set_tracker_aggregate_forward(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_aggregate_forward(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_aggregate_forward(self) + finally: + self.sesslock.release() + + def set_tracker_aggregator(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_aggregator(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_aggregator(self) + finally: + self.sesslock.release() + + def set_tracker_hupmonitor(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_hupmonitor(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_hupmonitor(self) + finally: + self.sesslock.release() + + def set_tracker_multitracker_http_timeout(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_multitracker_http_timeout(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_multitracker_http_timeout(self) + finally: + self.sesslock.release() + + def set_tracker_parse_dir_interval(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_parse_dir_interval(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_parse_dir_interval(self) + finally: + self.sesslock.release() + + def set_tracker_show_infopage(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_show_infopage(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_show_infopage(self) + finally: + self.sesslock.release() + + def set_tracker_infopage_redirect(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_infopage_redirect(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_infopage_redirect(self) + finally: + self.sesslock.release() + + def set_tracker_show_names(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_show_names(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_show_names(self) + finally: + 
self.sesslock.release() + + def set_tracker_favicon(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_favicon(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_favicon(self) + finally: + self.sesslock.release() + + def set_tracker_allowed_ips(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_allowed_ips(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_allowed_ips(self) + finally: + self.sesslock.release() + + def set_tracker_banned_ips(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_banned_ips(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_banned_ips(self) + finally: + self.sesslock.release() + + def set_tracker_only_local_override_ip(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_only_local_override_ip(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_only_local_override_ip(self) + finally: + self.sesslock.release() + + def set_tracker_logfile(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_logfile(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_logfile(self) + finally: + self.sesslock.release() + + def set_tracker_allow_get(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_allow_get(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_allow_get(self) + finally: + self.sesslock.release() + + def set_tracker_keep_dead(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_keep_dead(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_keep_dead(self) + finally: + self.sesslock.release() + + def set_tracker_scrape_allowed(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_tracker_scrape_allowed(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_tracker_scrape_allowed(self) + finally: + self.sesslock.release() + + def set_overlay_max_message_length(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_overlay_max_message_length(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_overlay_max_message_length(self) + finally: + self.sesslock.release() + + def set_download_help_dir(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_download_help_dir(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_download_help_dir(self) + finally: + self.sesslock.release() + + def set_bartercast(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_bartercast(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_bartercast(self) + finally: + self.sesslock.release() + + def set_superpeer_file(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_superpeer_file(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_superpeer_file(self) + finally: + self.sesslock.release() + + def set_buddycast_collecting_solution(self,value): + raise OperationNotPossibleAtRuntimeException() + + def get_buddycast_collecting_solution(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_buddycast_collecting_solution(self) + finally: + self.sesslock.release() + + def set_peer_icon_path(self,value): + raise 
OperationNotPossibleAtRuntimeException() + + def get_peer_icon_path(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_peer_icon_path(self) + finally: + self.sesslock.release() + + # + # NAT Puncturing servers information setting/retrieval + # + def set_nat_detect(self,value): + raise OperationNotPossibleAtRuntimeException() + + def set_puncturing_private_port(self, puncturing_private_port): + raise OperationNotPossibleAtRuntimeException() + + def set_stun_servers(self, stun_servers): + raise OperationNotPossibleAtRuntimeException() + + def set_pingback_servers(self, pingback_servers): + raise OperationNotPossibleAtRuntimeException() + + def set_puncturing_coordinators(self, puncturing_coordinators): + raise OperationNotPossibleAtRuntimeException() + + def get_nat_detect(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_nat_detect(self) + finally: + self.sesslock.release() + + def get_puncturing_internal_port(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_puncturing_internal_port(self) + finally: + self.sesslock.release() + + def get_stun_servers(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_stun_servers(self) + finally: + self.sesslock.release() + + def get_pingback_servers(self): + self.sesslock.acquire() + try: + return SessionConfigInterface.get_pingback_servers(self) + finally: + self.sesslock.release() + diff --git a/tribler-mod/Tribler/Core/APIImplementation/SingleDownload.py b/tribler-mod/Tribler/Core/APIImplementation/SingleDownload.py new file mode 100644 index 0000000..95b81ec --- /dev/null +++ b/tribler-mod/Tribler/Core/APIImplementation/SingleDownload.py @@ -0,0 +1,303 @@ +from time import localtime, strftime + + + +# Written by Arno Bakker +# see LICENSE.txt for license information + +import sys +import os +import time +import copy +import sha +import pickle +import socket +import binascii +from types import StringType,ListType,IntType +from traceback import print_exc,print_stack +from threading import Event + +from Tribler.Core.simpledefs import * +from Tribler.Core.exceptions import * +from Tribler.Core.BitTornado.__init__ import createPeerID +from Tribler.Core.BitTornado.download_bt1 import BT1Download +from Tribler.Core.BitTornado.bencode import bencode,bdecode +from Tribler.Core.Video.VideoStatus import VideoStatus + + +SPECIAL_VALUE = 481 + +DEBUG = True #False + + +# Transfers' status info is output into status_file. 
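# Note on the status logging added in this modified SingleDownload.py: the
# module-level status_file opened just below is shared by every SingleDownload
# created in the process, and save_as() closes it right after its first debug
# write, so a second download started with DEBUG enabled would hit
# "I/O operation on closed file" on its own "print >>status_file" line.  A
# more robust variant of the same idea, as a sketch only (same file name,
# logger name invented for illustration):
#
#     import logging
#     logging.basicConfig(filename="status_msg_single_download.log",
#                         level=logging.INFO,
#                         format="%(asctime)s %(message)s")
#     status_log = logging.getLogger("tribler.singledownload")
#     # ... and inside save_as():
#     #     status_log.info("save_as(%r, %d, %r, %s)", name, length, saveas, isdir)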
+FILE_NAME = "status_msg_single_download.log" #+ strftime("_%d_%b_%Y_%H_%M_%S", localtime()) + ".log" +status_file = open(FILE_NAME, 'w'); + +class SingleDownload: + """ This class is accessed solely by the network thread """ + + def __init__(self,infohash,metainfo,kvconfig,multihandler,get_extip_func,listenport,videoanalyserpath,vodfileindex,set_error_func,pstate,lmvodeventcallback,lmhashcheckcompletecallback): + + self.dow = None + self.set_error_func = set_error_func + self.videoinfo = None + self.videostatus = None + self.lmvodeventcallback = lmvodeventcallback + self.lmhashcheckcompletecallback = lmhashcheckcompletecallback + self.logmsgs = [] + self._hashcheckfunc = None + self._getstatsfunc = None + try: + self.dldoneflag = Event() + self.dlrawserver = multihandler.newRawServer(infohash,self.dldoneflag) + self.lmvodeventcallback = lmvodeventcallback + + if pstate is not None: + self.hashcheckfrac = pstate['dlstate']['progress'] + else: + self.hashcheckfrac = 0.0 + + self.peerid = createPeerID() + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SingleDownload: __init__: My peer ID is",`peerid` + + self.dow = BT1Download(self.hashcheckprogressfunc, + self.finishedfunc, + self.fatalerrorfunc, + self.nonfatalerrorfunc, + self.logerrorfunc, + self.dldoneflag, + kvconfig, + metainfo, + infohash, + self.peerid, + self.dlrawserver, + get_extip_func, + listenport, + videoanalyserpath + ) + + file = self.dow.saveAs(self.save_as) + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SingleDownload: dow.saveAs returned",file + + # Set local filename in vodfileindex + if vodfileindex is not None: + index = vodfileindex['index'] + if index == -1: + index = 0 + vodfileindex['outpath'] = self.dow.get_dest(index) + self.videoinfo = vodfileindex + if 'live' in metainfo['info']: + authparams = metainfo['info']['live'] + else: + authparams = None + self.videostatus = VideoStatus(metainfo['info']['piece length'],self.dow.files,vodfileindex,authparams) + self.videoinfo['status'] = self.videostatus + self.dow.set_videoinfo(vodfileindex,self.videostatus) + + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SingleDownload: setting vodfileindex",vodfileindex + + if pstate is None: + resumedata = None + else: + # Restarting download + resumedata=pstate['engineresumedata'] + self._hashcheckfunc = self.dow.initFiles(resumedata=resumedata) + + + except Exception,e: + self.fatalerrorfunc(e) + + def get_bt1download(self): + return self.dow + + def save_as(self,name,length,saveas,isdir): + """ Return the local filename to which to save the file 'name' in the torrent """ + if DEBUG: + print >>status_file, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SingleDownload: save_as(",`name`,length,`saveas`,isdir,")" + status_file.close() + try: + if not os.access(saveas,os.F_OK): + os.mkdir(saveas) + path = os.path.join(saveas,name) + if isdir and not os.path.isdir(path): + os.mkdir(path) + return path + except Exception,e: + self.fatalerrorfunc(e) + + def perform_hashcheck(self,complete_callback): + """ Called by any thread """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SingleDownload: perform_hashcheck()" # ,self.videoinfo + try: + """ Schedules actually hashcheck on network thread """ + self._getstatsfunc = SPECIAL_VALUE # signal we're hashchecking + # Already set, should be same + self.lmhashcheckcompletecallback = complete_callback + self._hashcheckfunc(self.lmhashcheckcompletecallback) + 
except Exception,e: + self.fatalerrorfunc(e) + + def hashcheck_done(self): + """ Called by LaunchMany when hashcheck complete and the Download can be + resumed + + Called by network thread + """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SingleDownload: hashcheck_done()" + try: + self.dow.startEngine(vodeventfunc = self.lmvodeventcallback) + self._getstatsfunc = self.dow.startStats() # not possible earlier + self.dow.startRerequester() + self.dlrawserver.start_listening(self.dow.getPortHandler()) + except Exception,e: + self.fatalerrorfunc(e) + + + # DownloadConfigInterface methods + def set_max_speed(self,direct,speed,callback): + if self.dow is not None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SingleDownload: set_max_speed",`self.dow.response['info']['name']`,direct,speed + if direct == UPLOAD: + self.dow.setUploadRate(speed,networkcalling=True) + else: + self.dow.setDownloadRate(speed,networkcalling=True) + if callback is not None: + callback(direct,speed) + + def set_max_conns_to_initiate(self,nconns,callback): + if self.dow is not None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SingleDownload: set_max_conns_to_initiate",`self.dow.response['info']['name']`,direct,speed + self.dow.setInitiate(nconns,networkcalling=True) + if callback is not None: + callback(nconns) + + + def set_max_conns(self,nconns,callback): + if self.dow is not None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SingleDownload: set_max_conns",`self.dow.response['info']['name']`,direct,speed + self.dow.setMaxConns(nconns,networkcalling=True) + if callback is not None: + callback(nconns) + + + # + # For DownloadState + # + def get_stats(self,getpeerlist): + logmsgs = self.logmsgs[:] # copy + coopdl_helpers = [] + coopdl_coordinator = None + if self.dow is not None: + if not self.dow.helper is None: + coopdl_coordinator = self.dow.helper.get_coordinator_permid() + if self.dow.coordinator is not None: + # No coordinator when you're a helper + peerreclist = self.dow.coordinator.network_get_asked_helpers_copy() + for peerrec in peerreclist: + coopdl_helpers.append(peerrec['permid']) + if self._getstatsfunc is None: + return (DLSTATUS_WAITING4HASHCHECK,None,logmsgs,coopdl_helpers,coopdl_coordinator) + elif self._getstatsfunc == SPECIAL_VALUE: + stats = {} + stats['frac'] = self.hashcheckfrac + return (DLSTATUS_HASHCHECKING,stats,logmsgs,coopdl_helpers,coopdl_coordinator) + else: + return (None,self._getstatsfunc(getpeerlist=getpeerlist),logmsgs,coopdl_helpers,coopdl_coordinator) + + def get_infohash(self): + return self.infohash + + # + # Persistent State + # + def checkpoint(self): + if self.dow is not None: + return self.dow.checkpoint() + else: + return None + + def shutdown(self): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SingleDownload: shutdown" + resumedata = None + if self.dow is not None: + self.dldoneflag.set() + self.dlrawserver.shutdown() + resumedata = self.dow.shutdown() + self.dow = None + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SingleDownload: stopped dow" + + if self._getstatsfunc is None or self._getstatsfunc == SPECIAL_VALUE: + # Hashchecking or waiting for while being shutdown, signal LaunchMany + # so it can schedule a new one. 
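# _getstatsfunc above doubles as a small state machine for DownloadState:
# None means the download is still waiting for its hash check
# (DLSTATUS_WAITING4HASHCHECK), the sentinel SPECIAL_VALUE (481) means the
# hash check is in progress (DLSTATUS_HASHCHECKING, with stats['frac'] taken
# from hashcheckfrac), and after hashcheck_done() it holds the stats callable
# returned by dow.startStats().  A caller would read get_stats() roughly as:
#
#     status, stats, logmsgs, helpers, coordinator = sd.get_stats(getpeerlist=False)
#     if status == DLSTATUS_WAITING4HASHCHECK:
#         pass                        # no stats available yet
#     elif status == DLSTATUS_HASHCHECKING:
#         frac = stats['frac']        # hash-check progress, 0.0 .. 1.0
#     else:
#         pass                        # status is None; stats is the dict from startStats()
#
# Two issues are visible in the code above, as the file reads here:
#   * set_max_conns_to_initiate() and set_max_conns() print the undefined
#     names direct and speed in their DEBUG lines, so enabling DEBUG raises
#     a NameError there; the prints presumably meant nconns.
#   * get_infohash() returns self.infohash, but __init__() never assigns it;
#     storing the infohash argument on self in __init__() would make the
#     method work as intended.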
+ self.lmhashcheckcompletecallback(success=False) + return resumedata + + # + # Cooperative download + # + def ask_coopdl_helpers(self,peerreclist): + if self.dow is not None: + self.dow.coordinator.network_request_help(peerreclist) + + def stop_coopdl_helpers(self,peerreclist): + if self.dow is not None: + self.dow.coordinator.network_stop_help(peerreclist,force=True) + + def get_coopdl_role_object(self,role): + # Used by Coordinator/HelperMessageHandler indirectly + if self.dow is not None: + if role == COOPDL_ROLE_COORDINATOR: + return self.dow.coordinator + else: + return self.dow.helper + else: + return None + + # + # Internal methods + # + def hashcheckprogressfunc(self,activity = '', fractionDone = 0.0): + """ Allegedly only used by StorageWrapper during hashchecking """ + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SingleDownload::statusfunc called",activity,fractionDone + self.hashcheckfrac = fractionDone + + def finishedfunc(self): + """ Download is complete """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SingleDownload::finishedfunc called: Download is complete *******************************" + pass + + def fatalerrorfunc(self,data): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SingleDownload::fatalerrorfunc called",data + if type(data) == StringType: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","LEGACY CORE FATAL ERROR",data + print_stack() + self.set_error_func(TriblerLegacyException(data)) + else: + print_exc() + self.set_error_func(data) + self.shutdown() + + def nonfatalerrorfunc(self,e): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SingleDownload::nonfatalerrorfunc called",e + # Could log this somewhere, or phase it out (only used in Rerequester) + + def logerrorfunc(self,msg): + t = time.time() + self.logmsgs.append((t,msg)) + + # Keep max 10 log entries, API user should save them if he wants + # complete history + if len(self.logmsgs) > 10: + self.logmsgs.pop(0) + diff --git a/tribler-mod/Tribler/Core/APIImplementation/SingleDownload.py.bak b/tribler-mod/Tribler/Core/APIImplementation/SingleDownload.py.bak new file mode 100644 index 0000000..276b1d2 --- /dev/null +++ b/tribler-mod/Tribler/Core/APIImplementation/SingleDownload.py.bak @@ -0,0 +1,294 @@ + +# Written by Arno Bakker +# see LICENSE.txt for license information + +import sys +import os +import time +import copy +import sha +import pickle +import socket +import binascii +from types import StringType,ListType,IntType +from traceback import print_exc,print_stack +from threading import Event + +from Tribler.Core.simpledefs import * +from Tribler.Core.exceptions import * +from Tribler.Core.BitTornado.__init__ import createPeerID +from Tribler.Core.BitTornado.download_bt1 import BT1Download +from Tribler.Core.BitTornado.bencode import bencode,bdecode +from Tribler.Core.Video.VideoStatus import VideoStatus + + +SPECIAL_VALUE = 481 + +DEBUG = True #False + +class SingleDownload: + """ This class is accessed solely by the network thread """ + + def __init__(self,infohash,metainfo,kvconfig,multihandler,get_extip_func,listenport,videoanalyserpath,vodfileindex,set_error_func,pstate,lmvodeventcallback,lmhashcheckcompletecallback): + + self.dow = None + self.set_error_func = set_error_func + self.videoinfo = None + self.videostatus = None + self.lmvodeventcallback = lmvodeventcallback + self.lmhashcheckcompletecallback = lmhashcheckcompletecallback + self.logmsgs = [] + self._hashcheckfunc 
= None + self._getstatsfunc = None + try: + self.dldoneflag = Event() + self.dlrawserver = multihandler.newRawServer(infohash,self.dldoneflag) + self.lmvodeventcallback = lmvodeventcallback + + if pstate is not None: + self.hashcheckfrac = pstate['dlstate']['progress'] + else: + self.hashcheckfrac = 0.0 + + self.peerid = createPeerID() + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SingleDownload: __init__: My peer ID is",`peerid` + + self.dow = BT1Download(self.hashcheckprogressfunc, + self.finishedfunc, + self.fatalerrorfunc, + self.nonfatalerrorfunc, + self.logerrorfunc, + self.dldoneflag, + kvconfig, + metainfo, + infohash, + self.peerid, + self.dlrawserver, + get_extip_func, + listenport, + videoanalyserpath + ) + + file = self.dow.saveAs(self.save_as) + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SingleDownload: dow.saveAs returned",file + + # Set local filename in vodfileindex + if vodfileindex is not None: + index = vodfileindex['index'] + if index == -1: + index = 0 + vodfileindex['outpath'] = self.dow.get_dest(index) + self.videoinfo = vodfileindex + if 'live' in metainfo['info']: + authparams = metainfo['info']['live'] + else: + authparams = None + self.videostatus = VideoStatus(metainfo['info']['piece length'],self.dow.files,vodfileindex,authparams) + self.videoinfo['status'] = self.videostatus + self.dow.set_videoinfo(vodfileindex,self.videostatus) + + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SingleDownload: setting vodfileindex",vodfileindex + + if pstate is None: + resumedata = None + else: + # Restarting download + resumedata=pstate['engineresumedata'] + self._hashcheckfunc = self.dow.initFiles(resumedata=resumedata) + + + except Exception,e: + self.fatalerrorfunc(e) + + def get_bt1download(self): + return self.dow + + def save_as(self,name,length,saveas,isdir): + """ Return the local filename to which to save the file 'name' in the torrent """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SingleDownload: save_as(",`name`,length,`saveas`,isdir,")" + try: + if not os.access(saveas,os.F_OK): + os.mkdir(saveas) + path = os.path.join(saveas,name) + if isdir and not os.path.isdir(path): + os.mkdir(path) + return path + except Exception,e: + self.fatalerrorfunc(e) + + def perform_hashcheck(self,complete_callback): + """ Called by any thread """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SingleDownload: perform_hashcheck()" # ,self.videoinfo + try: + """ Schedules actually hashcheck on network thread """ + self._getstatsfunc = SPECIAL_VALUE # signal we're hashchecking + # Already set, should be same + self.lmhashcheckcompletecallback = complete_callback + self._hashcheckfunc(self.lmhashcheckcompletecallback) + except Exception,e: + self.fatalerrorfunc(e) + + def hashcheck_done(self): + """ Called by LaunchMany when hashcheck complete and the Download can be + resumed + + Called by network thread + """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SingleDownload: hashcheck_done()" + try: + self.dow.startEngine(vodeventfunc = self.lmvodeventcallback) + self._getstatsfunc = self.dow.startStats() # not possible earlier + self.dow.startRerequester() + self.dlrawserver.start_listening(self.dow.getPortHandler()) + except Exception,e: + self.fatalerrorfunc(e) + + + # DownloadConfigInterface methods + def set_max_speed(self,direct,speed,callback): + if self.dow is not None: + 
if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SingleDownload: set_max_speed",`self.dow.response['info']['name']`,direct,speed + if direct == UPLOAD: + self.dow.setUploadRate(speed,networkcalling=True) + else: + self.dow.setDownloadRate(speed,networkcalling=True) + if callback is not None: + callback(direct,speed) + + def set_max_conns_to_initiate(self,nconns,callback): + if self.dow is not None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SingleDownload: set_max_conns_to_initiate",`self.dow.response['info']['name']`,direct,speed + self.dow.setInitiate(nconns,networkcalling=True) + if callback is not None: + callback(nconns) + + + def set_max_conns(self,nconns,callback): + if self.dow is not None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SingleDownload: set_max_conns",`self.dow.response['info']['name']`,direct,speed + self.dow.setMaxConns(nconns,networkcalling=True) + if callback is not None: + callback(nconns) + + + # + # For DownloadState + # + def get_stats(self,getpeerlist): + logmsgs = self.logmsgs[:] # copy + coopdl_helpers = [] + coopdl_coordinator = None + if self.dow is not None: + if not self.dow.helper is None: + coopdl_coordinator = self.dow.helper.get_coordinator_permid() + if self.dow.coordinator is not None: + # No coordinator when you're a helper + peerreclist = self.dow.coordinator.network_get_asked_helpers_copy() + for peerrec in peerreclist: + coopdl_helpers.append(peerrec['permid']) + if self._getstatsfunc is None: + return (DLSTATUS_WAITING4HASHCHECK,None,logmsgs,coopdl_helpers,coopdl_coordinator) + elif self._getstatsfunc == SPECIAL_VALUE: + stats = {} + stats['frac'] = self.hashcheckfrac + return (DLSTATUS_HASHCHECKING,stats,logmsgs,coopdl_helpers,coopdl_coordinator) + else: + return (None,self._getstatsfunc(getpeerlist=getpeerlist),logmsgs,coopdl_helpers,coopdl_coordinator) + + def get_infohash(self): + return self.infohash + + # + # Persistent State + # + def checkpoint(self): + if self.dow is not None: + return self.dow.checkpoint() + else: + return None + + def shutdown(self): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SingleDownload: shutdown" + resumedata = None + if self.dow is not None: + self.dldoneflag.set() + self.dlrawserver.shutdown() + resumedata = self.dow.shutdown() + self.dow = None + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SingleDownload: stopped dow" + + if self._getstatsfunc is None or self._getstatsfunc == SPECIAL_VALUE: + # Hashchecking or waiting for while being shutdown, signal LaunchMany + # so it can schedule a new one. 
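+ # This mirrors get_stats() above: _getstatsfunc is None while still waiting
+ # for the hashcheck (DLSTATUS_WAITING4HASHCHECK) and SPECIAL_VALUE while it
+ # is running (DLSTATUS_HASHCHECKING); in both cases the engine never started,
+ # so report an unsuccessful hashcheck to LaunchMany.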
+ self.lmhashcheckcompletecallback(success=False) + return resumedata + + # + # Cooperative download + # + def ask_coopdl_helpers(self,peerreclist): + if self.dow is not None: + self.dow.coordinator.network_request_help(peerreclist) + + def stop_coopdl_helpers(self,peerreclist): + if self.dow is not None: + self.dow.coordinator.network_stop_help(peerreclist,force=True) + + def get_coopdl_role_object(self,role): + # Used by Coordinator/HelperMessageHandler indirectly + if self.dow is not None: + if role == COOPDL_ROLE_COORDINATOR: + return self.dow.coordinator + else: + return self.dow.helper + else: + return None + + # + # Internal methods + # + def hashcheckprogressfunc(self,activity = '', fractionDone = 0.0): + """ Allegedly only used by StorageWrapper during hashchecking """ + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SingleDownload::statusfunc called",activity,fractionDone + self.hashcheckfrac = fractionDone + + def finishedfunc(self): + """ Download is complete """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SingleDownload::finishedfunc called: Download is complete *******************************" + pass + + def fatalerrorfunc(self,data): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SingleDownload::fatalerrorfunc called",data + if type(data) == StringType: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","LEGACY CORE FATAL ERROR",data + print_stack() + self.set_error_func(TriblerLegacyException(data)) + else: + print_exc() + self.set_error_func(data) + self.shutdown() + + def nonfatalerrorfunc(self,e): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SingleDownload::nonfatalerrorfunc called",e + # Could log this somewhere, or phase it out (only used in Rerequester) + + def logerrorfunc(self,msg): + t = time.time() + self.logmsgs.append((t,msg)) + + # Keep max 10 log entries, API user should save them if he wants + # complete history + if len(self.logmsgs) > 10: + self.logmsgs.pop(0) + diff --git a/tribler-mod/Tribler/Core/APIImplementation/ThreadPool.py b/tribler-mod/Tribler/Core/APIImplementation/ThreadPool.py new file mode 100644 index 0000000..a28432e --- /dev/null +++ b/tribler-mod/Tribler/Core/APIImplementation/ThreadPool.py @@ -0,0 +1,181 @@ +from time import localtime, strftime +# Written by Jelle Roozenburg +# see LICENSE.txt for license information + +import sys +from traceback import print_exc +import threading +from time import sleep + +# Ensure booleans exist (not needed for Python 2.2.1 or higher) +try: + True +except NameError: + False = 0 + True = not False + +class ThreadPool: + + """Flexible thread pool class. Creates a pool of threads, then + accepts tasks that will be dispatched to the next available + thread.""" + + def __init__(self, numThreads): + + """Initialize the thread pool with numThreads workers.""" + + self.__threads = [] + self.__resizeLock = threading.Condition(threading.Lock()) + self.__taskLock = threading.Condition(threading.Lock()) + self.__tasks = [] + self.__isJoining = False + self.setThreadCount(numThreads) + + def setThreadCount(self, newNumThreads): + + """ External method to set the current pool size. Acquires + the resizing lock, then calls the internal version to do real + work.""" + + # Can't change the thread count if we're shutting down the pool! 
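+ # Usage sketch for this class (hypothetical caller, not part of this module):
+ #
+ #     pool = ThreadPool(2)                       # two worker threads
+ #     pool.queueTask(work, args=(1, 2))          # worker calls work(1, 2)
+ #     pool.queueTask(work, args=(3, 4), taskCallback=done)
+ #     pool.joinAll(waitForTasks=True)            # drain queue, stop workers
+ #
+ # Note that when taskCallback is given, run() below invokes the task as
+ # work(args) -- i.e. with the args tuple as a single argument -- and passes
+ # the return value to the callback.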
+ if self.__isJoining: + return False + + self.__resizeLock.acquire() + try: + self.__setThreadCountNolock(newNumThreads) + finally: + self.__resizeLock.release() + return True + + def __setThreadCountNolock(self, newNumThreads): + + """Set the current pool size, spawning or terminating threads + if necessary. Internal use only; assumes the resizing lock is + held.""" + + # If we need to grow the pool, do so + while newNumThreads > len(self.__threads): + newThread = ThreadPoolThread(self) + self.__threads.append(newThread) + newThread.start() + # If we need to shrink the pool, do so + while newNumThreads < len(self.__threads): + self.__threads[0].goAway() + del self.__threads[0] + + def getThreadCount(self): + + """Return the number of threads in the pool.""" + + self.__resizeLock.acquire() + try: + return len(self.__threads) + finally: + self.__resizeLock.release() + + def queueTask(self, task, args=(), taskCallback=None): + + """Insert a task into the queue. task must be callable; + args and taskCallback can be None.""" + + if self.__isJoining == True: + return False + if not callable(task): + return False + + self.__taskLock.acquire() + try: + self.__tasks.append((task, args, taskCallback)) + return True + finally: + self.__taskLock.release() + + def getNextTask(self): + + """ Retrieve the next task from the task queue. For use + only by ThreadPoolThread objects contained in the pool.""" + + self.__taskLock.acquire() + try: + if self.__tasks == []: + return (None, None, None) + else: + return self.__tasks.pop(0) + finally: + self.__taskLock.release() + + def joinAll(self, waitForTasks = True, waitForThreads = True): + + """ Clear the task queue and terminate all pooled threads, + optionally allowing the tasks and threads to finish.""" + + # Mark the pool as joining to prevent any more task queueing + self.__isJoining = True + + # Wait for tasks to finish + if waitForTasks: + while self.__tasks != []: + sleep(.1) + + # Tell all the threads to quit + self.__resizeLock.acquire() + try: + self.__setThreadCountNolock(0) + self.__isJoining = True + + # Wait until all threads have exited + if waitForThreads: + for t in self.__threads: + t.goAway() + t.join() + del t + + # Reset the pool for potential reuse + self.__isJoining = False + finally: + self.__resizeLock.release() + + + +class ThreadPoolThread(threading.Thread): + + """ Pooled thread class. """ + + threadSleepTime = 0.1 + + def __init__(self, pool): + + """ Initialize the thread and remember the pool. """ + + threading.Thread.__init__(self) + self.setName('SessionPool'+self.getName()) + self.setDaemon(True) + self.__pool = pool + self.__isDying = False + + def run(self): + + """ Until told to quit, retrieve the next task and execute + it, calling the callback if any. 
""" + + while self.__isDying == False: + cmd, args, callback = self.__pool.getNextTask() + # If there's nothing to do, just sleep a bit + if cmd is None: + try: + sleep(ThreadPoolThread.threadSleepTime) + except AttributeError: # raised during interpreter shutdown + break + elif callback is None: + cmd(*args) + else: + callback(cmd(args)) + + + + def goAway(self): + + """ Exit the run loop next time through.""" + + self.__isDying = True \ No newline at end of file diff --git a/tribler-mod/Tribler/Core/APIImplementation/ThreadPool.py.bak b/tribler-mod/Tribler/Core/APIImplementation/ThreadPool.py.bak new file mode 100644 index 0000000..e7c0680 --- /dev/null +++ b/tribler-mod/Tribler/Core/APIImplementation/ThreadPool.py.bak @@ -0,0 +1,180 @@ +# Written by Jelle Roozenburg +# see LICENSE.txt for license information + +import sys +from traceback import print_exc +import threading +from time import sleep + +# Ensure booleans exist (not needed for Python 2.2.1 or higher) +try: + True +except NameError: + False = 0 + True = not False + +class ThreadPool: + + """Flexible thread pool class. Creates a pool of threads, then + accepts tasks that will be dispatched to the next available + thread.""" + + def __init__(self, numThreads): + + """Initialize the thread pool with numThreads workers.""" + + self.__threads = [] + self.__resizeLock = threading.Condition(threading.Lock()) + self.__taskLock = threading.Condition(threading.Lock()) + self.__tasks = [] + self.__isJoining = False + self.setThreadCount(numThreads) + + def setThreadCount(self, newNumThreads): + + """ External method to set the current pool size. Acquires + the resizing lock, then calls the internal version to do real + work.""" + + # Can't change the thread count if we're shutting down the pool! + if self.__isJoining: + return False + + self.__resizeLock.acquire() + try: + self.__setThreadCountNolock(newNumThreads) + finally: + self.__resizeLock.release() + return True + + def __setThreadCountNolock(self, newNumThreads): + + """Set the current pool size, spawning or terminating threads + if necessary. Internal use only; assumes the resizing lock is + held.""" + + # If we need to grow the pool, do so + while newNumThreads > len(self.__threads): + newThread = ThreadPoolThread(self) + self.__threads.append(newThread) + newThread.start() + # If we need to shrink the pool, do so + while newNumThreads < len(self.__threads): + self.__threads[0].goAway() + del self.__threads[0] + + def getThreadCount(self): + + """Return the number of threads in the pool.""" + + self.__resizeLock.acquire() + try: + return len(self.__threads) + finally: + self.__resizeLock.release() + + def queueTask(self, task, args=(), taskCallback=None): + + """Insert a task into the queue. task must be callable; + args and taskCallback can be None.""" + + if self.__isJoining == True: + return False + if not callable(task): + return False + + self.__taskLock.acquire() + try: + self.__tasks.append((task, args, taskCallback)) + return True + finally: + self.__taskLock.release() + + def getNextTask(self): + + """ Retrieve the next task from the task queue. 
For use + only by ThreadPoolThread objects contained in the pool.""" + + self.__taskLock.acquire() + try: + if self.__tasks == []: + return (None, None, None) + else: + return self.__tasks.pop(0) + finally: + self.__taskLock.release() + + def joinAll(self, waitForTasks = True, waitForThreads = True): + + """ Clear the task queue and terminate all pooled threads, + optionally allowing the tasks and threads to finish.""" + + # Mark the pool as joining to prevent any more task queueing + self.__isJoining = True + + # Wait for tasks to finish + if waitForTasks: + while self.__tasks != []: + sleep(.1) + + # Tell all the threads to quit + self.__resizeLock.acquire() + try: + self.__setThreadCountNolock(0) + self.__isJoining = True + + # Wait until all threads have exited + if waitForThreads: + for t in self.__threads: + t.goAway() + t.join() + del t + + # Reset the pool for potential reuse + self.__isJoining = False + finally: + self.__resizeLock.release() + + + +class ThreadPoolThread(threading.Thread): + + """ Pooled thread class. """ + + threadSleepTime = 0.1 + + def __init__(self, pool): + + """ Initialize the thread and remember the pool. """ + + threading.Thread.__init__(self) + self.setName('SessionPool'+self.getName()) + self.setDaemon(True) + self.__pool = pool + self.__isDying = False + + def run(self): + + """ Until told to quit, retrieve the next task and execute + it, calling the callback if any. """ + + while self.__isDying == False: + cmd, args, callback = self.__pool.getNextTask() + # If there's nothing to do, just sleep a bit + if cmd is None: + try: + sleep(ThreadPoolThread.threadSleepTime) + except AttributeError: # raised during interpreter shutdown + break + elif callback is None: + cmd(*args) + else: + callback(cmd(args)) + + + + def goAway(self): + + """ Exit the run loop next time through.""" + + self.__isDying = True \ No newline at end of file diff --git a/tribler-mod/Tribler/Core/APIImplementation/UserCallbackHandler.py b/tribler-mod/Tribler/Core/APIImplementation/UserCallbackHandler.py new file mode 100644 index 0000000..dd8a170 --- /dev/null +++ b/tribler-mod/Tribler/Core/APIImplementation/UserCallbackHandler.py @@ -0,0 +1,130 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information + +import sys +import os +import shutil +import binascii +from threading import currentThread +from traceback import print_exc + +from Tribler.Core.simpledefs import * +from Tribler.Core.APIImplementation.ThreadPool import ThreadPool +from Tribler.Core.CacheDB.Notifier import Notifier + +DEBUG = False + +class UserCallbackHandler: + + def __init__(self,session): + self.session = session + self.sesslock = session.sesslock + self.sessconfig = session.sessconfig + + # Notifier for callbacks to API user + self.threadpool = ThreadPool(2) + self.notifier = Notifier.getInstance(self.threadpool) + + def shutdown(self): + # stop threadpool + self.threadpool.joinAll() + + def perform_vod_usercallback(self,d,usercallback,event,params): + """ Called by network thread """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Session: perform_vod_usercallback()",`d.get_def().get_name_as_unicode()` + def session_vod_usercallback_target(): + try: + usercallback(d,event,params) + except: + print_exc() + self.perform_usercallback(session_vod_usercallback_target) + + def perform_getstate_usercallback(self,usercallback,data,returncallback): + """ Called by network thread """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y 
%H:%M:%S", localtime())," ","Session: perform_getstate_usercallback()" + def session_getstate_usercallback_target(): + try: + (when,getpeerlist) = usercallback(data) + returncallback(usercallback,when,getpeerlist) + except: + print_exc() + self.perform_usercallback(session_getstate_usercallback_target) + + + def perform_removestate_callback(self,infohash,contentdest,removecontent): + """ Called by network thread """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Session: perform_removestate_callback()" + def session_removestate_callback_target(): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Session: session_removestate_callback_target called",currentThread().getName() + try: + self.sesscb_removestate(infohash,contentdest,removecontent) + except: + print_exc() + self.perform_usercallback(session_removestate_callback_target) + + def perform_usercallback(self,target): + self.sesslock.acquire() + try: + # TODO: thread pool, etc. + self.threadpool.queueTask(target) + + finally: + self.sesslock.release() + + def sesscb_removestate(self,infohash,contentdest,removecontent): + """ See DownloadImpl.setup(). + Called by SessionCallbackThread """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Session: sesscb_removestate called",`infohash`,`contentdest`,removecontent + self.sesslock.acquire() + try: + dlpstatedir = os.path.join(self.sessconfig['state_dir'],STATEDIR_DLPSTATE_DIR) + finally: + self.sesslock.release() + + # See if torrent uses internal tracker + try: + self.session.remove_from_internal_tracker_by_infohash(infohash) + except: + # Show must go on + print_exc() + + # Remove checkpoint + hexinfohash = binascii.hexlify(infohash) + try: + basename = hexinfohash+'.pickle' + filename = os.path.join(dlpstatedir,basename) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Session: sesscb_removestate: removing dlcheckpoint entry",filename + if os.access(filename,os.F_OK): + os.remove(filename) + except: + # Show must go on + print_exc() + + # Remove downloaded content from disk + if removecontent: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Session: sesscb_removestate: removing saved content",contentdest + if not os.path.isdir(contentdest): + # single-file torrent + os.remove(contentdest) + else: + # multi-file torrent + shutil.rmtree(contentdest,True) # ignore errors + + + def notify(self, subject, changeType, obj_id, *args): + """ + Notify all interested observers about an event with threads from the pool + """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","ucb: notify called:",subject,changeType,`obj_id`, args + self.notifier.notify(subject,changeType,obj_id,*args) + + diff --git a/tribler-mod/Tribler/Core/APIImplementation/UserCallbackHandler.py.bak b/tribler-mod/Tribler/Core/APIImplementation/UserCallbackHandler.py.bak new file mode 100644 index 0000000..776fa18 --- /dev/null +++ b/tribler-mod/Tribler/Core/APIImplementation/UserCallbackHandler.py.bak @@ -0,0 +1,129 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information + +import sys +import os +import shutil +import binascii +from threading import currentThread +from traceback import print_exc + +from Tribler.Core.simpledefs import * +from Tribler.Core.APIImplementation.ThreadPool import ThreadPool +from Tribler.Core.CacheDB.Notifier import Notifier + +DEBUG = False + +class UserCallbackHandler: + + def 
__init__(self,session): + self.session = session + self.sesslock = session.sesslock + self.sessconfig = session.sessconfig + + # Notifier for callbacks to API user + self.threadpool = ThreadPool(2) + self.notifier = Notifier.getInstance(self.threadpool) + + def shutdown(self): + # stop threadpool + self.threadpool.joinAll() + + def perform_vod_usercallback(self,d,usercallback,event,params): + """ Called by network thread """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Session: perform_vod_usercallback()",`d.get_def().get_name_as_unicode()` + def session_vod_usercallback_target(): + try: + usercallback(d,event,params) + except: + print_exc() + self.perform_usercallback(session_vod_usercallback_target) + + def perform_getstate_usercallback(self,usercallback,data,returncallback): + """ Called by network thread """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Session: perform_getstate_usercallback()" + def session_getstate_usercallback_target(): + try: + (when,getpeerlist) = usercallback(data) + returncallback(usercallback,when,getpeerlist) + except: + print_exc() + self.perform_usercallback(session_getstate_usercallback_target) + + + def perform_removestate_callback(self,infohash,contentdest,removecontent): + """ Called by network thread """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Session: perform_removestate_callback()" + def session_removestate_callback_target(): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Session: session_removestate_callback_target called",currentThread().getName() + try: + self.sesscb_removestate(infohash,contentdest,removecontent) + except: + print_exc() + self.perform_usercallback(session_removestate_callback_target) + + def perform_usercallback(self,target): + self.sesslock.acquire() + try: + # TODO: thread pool, etc. + self.threadpool.queueTask(target) + + finally: + self.sesslock.release() + + def sesscb_removestate(self,infohash,contentdest,removecontent): + """ See DownloadImpl.setup(). 
+ Called by SessionCallbackThread """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Session: sesscb_removestate called",`infohash`,`contentdest`,removecontent + self.sesslock.acquire() + try: + dlpstatedir = os.path.join(self.sessconfig['state_dir'],STATEDIR_DLPSTATE_DIR) + finally: + self.sesslock.release() + + # See if torrent uses internal tracker + try: + self.session.remove_from_internal_tracker_by_infohash(infohash) + except: + # Show must go on + print_exc() + + # Remove checkpoint + hexinfohash = binascii.hexlify(infohash) + try: + basename = hexinfohash+'.pickle' + filename = os.path.join(dlpstatedir,basename) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Session: sesscb_removestate: removing dlcheckpoint entry",filename + if os.access(filename,os.F_OK): + os.remove(filename) + except: + # Show must go on + print_exc() + + # Remove downloaded content from disk + if removecontent: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Session: sesscb_removestate: removing saved content",contentdest + if not os.path.isdir(contentdest): + # single-file torrent + os.remove(contentdest) + else: + # multi-file torrent + shutil.rmtree(contentdest,True) # ignore errors + + + def notify(self, subject, changeType, obj_id, *args): + """ + Notify all interested observers about an event with threads from the pool + """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","ucb: notify called:",subject,changeType,`obj_id`, args + self.notifier.notify(subject,changeType,obj_id,*args) + + diff --git a/tribler-mod/Tribler/Core/APIImplementation/__init__.py b/tribler-mod/Tribler/Core/APIImplementation/__init__.py new file mode 100644 index 0000000..b7ef832 --- /dev/null +++ b/tribler-mod/Tribler/Core/APIImplementation/__init__.py @@ -0,0 +1,3 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information diff --git a/tribler-mod/Tribler/Core/APIImplementation/__init__.py.bak b/tribler-mod/Tribler/Core/APIImplementation/__init__.py.bak new file mode 100644 index 0000000..395f8fb --- /dev/null +++ b/tribler-mod/Tribler/Core/APIImplementation/__init__.py.bak @@ -0,0 +1,2 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information diff --git a/tribler-mod/Tribler/Core/APIImplementation/maketorrent.py b/tribler-mod/Tribler/Core/APIImplementation/maketorrent.py new file mode 100644 index 0000000..a290679 --- /dev/null +++ b/tribler-mod/Tribler/Core/APIImplementation/maketorrent.py @@ -0,0 +1,581 @@ +from time import localtime, strftime +# Written by Arno Bakker, Bram Cohen +# multitracker extensions by John Hoffman +# modified for Merkle hashes and digital signatures by Arno Bakker +# see LICENSE.txt for license information + +import sys +import os +import md5 +import zlib + +from sha import sha +from copy import copy +from time import time +from traceback import print_exc +from types import LongType + +from Tribler.Core.BitTornado.bencode import bencode +from Tribler.Core.BitTornado.BT1.btformats import check_info +from Tribler.Core.Merkle.merkle import MerkleTree +from Tribler.Core.Overlay.permid import create_torrent_signature +from Tribler.Core.Utilities.unicode import str2unicode,bin2unicode +from Tribler.Core.APIImplementation.miscutils import parse_playtime_to_secs,offset2piece +from Tribler.Core.osutils import fix_filebasename +from Tribler.Core.defaults import tdefdictdefaults + + +ignore = [] # Arno: was ['core', 
'CVS'] + +DEBUG = False + +def make_torrent_file(input, userabortflag = None, userprogresscallback = lambda x: None): + """ Create a torrent file from the supplied input. + + Returns a (infohash,metainfo) pair, or (None,None) on userabort. """ + + (info,piece_length) = makeinfo(input,userabortflag,userprogresscallback) + if userabortflag is not None and userabortflag.isSet(): + return (None,None) + if info is None: + return (None,None) + + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","mktorrent: makeinfo returned",`info` + + check_info(info) + metainfo = {'info': info, 'encoding': input['encoding'], 'creation date': long(time())} + + # http://www.bittorrent.org/DHT_protocol.html says both announce and nodes + # are not allowed, but some torrents (Azureus?) apparently violate this. + if input['nodes'] is None and input['announce'] is None: + raise ValueError('No tracker set') + + for key in ['announce','announce-list','nodes','comment','created by','httpseeds']: + if input[key] is not None and len(input[key]) > 0: + metainfo[key] = input[key] + if key == 'comment': + metainfo['comment.utf-8'] = uniconvert(input['comment'],'utf-8') + + # Assuming 1 file, Azureus format no support multi-file torrent with diff + # bitrates + bitrate = None + for file in input['files']: + if file['playtime'] is not None: + secs = parse_playtime_to_secs(file['playtime']) + bitrate = file['length']/secs + break + if input.get('bps') is not None: + bitrate = input['bps'] + break + + if bitrate is not None or input['thumb'] is not None: + mdict = {} + mdict['Publisher'] = 'Tribler' + if input['comment'] is None: + descr = '' + else: + descr = input['comment'] + mdict['Description'] = descr + + if bitrate is not None: + mdict['Progressive'] = 1 + mdict['Speed Bps'] = bitrate + else: + mdict['Progressive'] = 0 + + mdict['Title'] = metainfo['info']['name'] + mdict['Creation Date'] = long(time()) + # Azureus client source code doesn't tell what this is, so just put in random value from real torrent + mdict['Content Hash'] = 'PT3GQCPW4NPT6WRKKT25IQD4MU5HM4UY' + mdict['Revision Date'] = long(time()) + if input['thumb'] is not None: + mdict['Thumbnail'] = input['thumb'] + cdict = {} + cdict['Content'] = mdict + metainfo['azureus_properties'] = cdict + + if input['torrentsigkeypairfilename'] is not None: + create_torrent_signature(metainfo,input['torrentsigkeypairfilename']) + + infohash = sha(bencode(info)).digest() + return (infohash,metainfo) + + +def uniconvertl(l, e): + """ Convert a pathlist to a list of strings encoded in encoding "e" using + uniconvert. """ + r = [] + try: + for s in l: + r.append(uniconvert(s, e)) + except UnicodeError: + raise UnicodeError('bad filename: '+os.path.join(l)) + return r + +def uniconvert(s, enc): + """ Convert 's' to a string containing a Unicode sequence encoded using + encoding "enc". If 's' is not a Unicode object, we first try to convert + it to one, guessing the encoding if necessary. """ + if not isinstance(s, unicode): + try: + s = str2unicode(s) + except UnicodeError: + raise UnicodeError('bad filename: '+s) + return s.encode(enc) + + +def makeinfo(input,userabortflag,userprogresscallback): + """ Calculate hashes and create torrent file's 'info' part """ + encoding = input['encoding'] + + pieces = [] + sh = sha() + done = 0L + fs = [] + totalsize = 0L + totalhashed = 0L + + # 1. 
Determine which files should go into the torrent (=expand any dirs + # specified by user in input['files'] + subs = [] + for file in input['files']: + inpath = file['inpath'] + outpath = file['outpath'] + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","makeinfo: inpath",inpath,"outpath",outpath + + if os.path.isdir(inpath): + dirsubs = subfiles(inpath) + subs.extend(dirsubs) + else: + if outpath is None: + subs.append(([os.path.basename(inpath)],inpath)) + else: + subs.append((filename2pathlist(outpath,skipfirst=True),inpath)) + + subs.sort() + + # 2. Calc total size + newsubs = [] + for p, f in subs: + if 'live' in input: + size = input['files'][0]['length'] + else: + size = os.path.getsize(f) + totalsize += size + newsubs.append((p,f,size)) + subs = newsubs + + # 3. Calc piece length from totalsize if not set + if input['piece length'] == 0: + if input['createmerkletorrent']: + # used to be 15=32K, but this works better with slow python + piece_len_exp = 18 + else: + if totalsize > 8L*1024*1024*1024: # > 8 gig = + piece_len_exp = 21 # 2 meg pieces + elif totalsize > 2*1024*1024*1024: # > 2 gig = + piece_len_exp = 20 # 1 meg pieces + elif totalsize > 512*1024*1024: # > 512M = + piece_len_exp = 19 # 512K pieces + elif totalsize > 64*1024*1024: # > 64M = + piece_len_exp = 18 # 256K pieces + elif totalsize > 16*1024*1024: # > 16M = + piece_len_exp = 17 # 128K pieces + elif totalsize > 4*1024*1024: # > 4M = + piece_len_exp = 16 # 64K pieces + else: # < 4M = + piece_len_exp = 15 # 32K pieces + piece_length = 2 ** piece_len_exp + else: + piece_length = input['piece length'] + + # 4. Read files and calc hashes, if not live + if 'live' not in input: + for p, f, size in subs: + pos = 0L + + h = open(f, 'rb') + + if input['makehash_md5']: + hash_md5 = md5.new() + if input['makehash_sha1']: + hash_sha1 = sha() + if input['makehash_crc32']: + hash_crc32 = zlib.crc32('') + + while pos < size: + a = min(size - pos, piece_length - done) + + # See if the user cancelled + if userabortflag is not None and userabortflag.isSet(): + return (None,None) + + readpiece = h.read(a) + + # See if the user cancelled + if userabortflag is not None and userabortflag.isSet(): + return (None,None) + + sh.update(readpiece) + + if input['makehash_md5']: + # Update MD5 + hash_md5.update(readpiece) + + if input['makehash_crc32']: + # Update CRC32 + hash_crc32 = zlib.crc32(readpiece, hash_crc32) + + if input['makehash_sha1']: + # Update SHA1 + hash_sha1.update(readpiece) + + done += a + pos += a + totalhashed += a + + if done == piece_length: + pieces.append(sh.digest()) + done = 0 + sh = sha() + + if userprogresscallback is not None: + userprogresscallback(float(totalhashed) / float(totalsize)) + + newdict = {'length': num2num(size), + 'path': uniconvertl(p,encoding), + 'path.utf-8': uniconvertl(p, 'utf-8') } + + # Find and add playtime + for file in input['files']: + if file['inpath'] == f: + if file['playtime'] is not None: + newdict['playtime'] = file['playtime'] + break + + if input['makehash_md5']: + newdict['md5sum'] = hash_md5.hexdigest() + if input['makehash_crc32']: + newdict['crc32'] = "%08X" % hash_crc32 + if input['makehash_sha1']: + newdict['sha1'] = hash_sha1.digest() + + fs.append(newdict) + + h.close() + + if done > 0: + pieces.append(sh.digest()) + + # 5. 
Create info dict + if len(subs) == 1: + flkey = 'length' + flval = num2num(totalsize) + name = subs[0][0][0] + else: + flkey = 'files' + flval = fs + + outpath = input['files'][0]['outpath'] + l = filename2pathlist(outpath) + name = l[0] + + infodict = { 'piece length':num2num(piece_length), flkey: flval, + 'name': uniconvert(name,encoding), + 'name.utf-8': uniconvert(name,'utf-8')} + + if 'live' not in input: + + if input['createmerkletorrent']: + merkletree = MerkleTree(piece_length,totalsize,None,pieces) + root_hash = merkletree.get_root_hash() + infodict.update( {'root hash': root_hash } ) + else: + infodict.update( {'pieces': ''.join(pieces) } ) + else: + # With source auth, live is a dict + infodict['live'] = input['live'] + + if len(subs) == 1: + # Find and add playtime + for file in input['files']: + if file['inpath'] == f: + if file['playtime'] is not None: + infodict['playtime'] = file['playtime'] + + return (infodict,piece_length) + + +def subfiles(d): + """ Return list of (pathlist,local filename) tuples for all the files in + directory 'd' """ + r = [] + stack = [([], d)] + while stack: + p, n = stack.pop() + if os.path.isdir(n): + for s in os.listdir(n): + if s not in ignore and s[:1] != '.': + stack.append((copy(p) + [s], os.path.join(n, s))) + else: + r.append((p, n)) + return r + + +def filename2pathlist(path,skipfirst=False): + """ Convert a filename to a 'path' entry suitable for a multi-file torrent + file """ + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","mktorrent: filename2pathlist:",path,skipfirst + + h = path + l = [] + while True: + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","mktorrent: filename2pathlist: splitting",h + + (h,t) = os.path.split(h) + if h == '' and t == '': + break + if h == '' and skipfirst: + continue + if t != '': # handle case where path ends in / (=path separator) + l.append(t) + + l.reverse() + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","mktorrent: filename2pathlist: returning",l + + return l + + +def pathlist2filename(pathlist): + """ Convert a multi-file torrent file 'path' entry to a filename. """ + fullpath = '' + for elem in pathlist: + fullpath = os.path.join(fullpath,elem) + return fullpath + +def pathlist2savefilename(pathlist,encoding): + fullpath = u'' + for elem in pathlist: + u = bin2unicode(elem,encoding) + b = fix_filebasename(u) + fullpath = os.path.join(fullpath,b) + return fullpath + +def torrentfilerec2savefilename(filerec,length=None): + if length is None: + length = len(filerec['path']) + if 'path.utf-8' in filerec: + key = 'path.utf-8' + encoding = 'utf-8' + else: + key = 'path' + encoding = None + + return pathlist2savefilename(filerec[key][:length],encoding) + +def savefilenames2finaldest(fn1,fn2): + """ Returns the join of two savefilenames, possibly shortened + to adhere to OS specific limits. + """ + j = os.path.join(fn1,fn2) + if sys.platform == 'win32': + # Windows has a maximum path length of 260 + # http://msdn2.microsoft.com/en-us/library/aa365247.aspx + j = j[:259] # 260 don't work. 
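+ # (MAX_PATH is 260 including the terminating NUL, hence 259 usable characters.)
+ # Example (illustrative values only):
+ #     savefilenames2finaldest('C:\\Downloads', 'Show\\ep01.avi')
+ #     -> 'C:\\Downloads\\Show\\ep01.avi', truncated to 259 chars on win32.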
+ return j + + +def num2num(num): + """ Converts long to int if small enough to fit """ + if type(num) == LongType and num < sys.maxint: + return int(num) + else: + return num + +def get_torrentfilerec_from_metainfo(filename,metainfo): + info = metainfo['info'] + if filename is None: + return info + + if filename is not None and 'files' in info: + for i in range(len(info['files'])): + x = info['files'][i] + + intorrentpath = pathlist2filename(x['path']) + if intorrentpath == filename: + return x + + raise ValueError("File not found in torrent") + else: + raise ValueError("File not found in single-file torrent") + +def get_bitrate_from_metainfo(file,metainfo): + info = metainfo['info'] + if file is None: + bitrate = None + try: + playtime = None + if info.has_key('playtime'): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentDef: get_bitrate: Bitrate in info field" + playtime = parse_playtime_to_secs(info['playtime']) + elif 'playtime' in metainfo: # HACK: encode playtime in non-info part of existing torrent + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentDef: get_bitrate: Bitrate in metainfo" + playtime = parse_playtime_to_secs(metainfo['playtime']) + elif 'azureus_properties' in metainfo: + azprop = metainfo['azureus_properties'] + if 'Content' in azprop: + content = metainfo['azureus_properties']['Content'] + if 'Speed Bps' in content: + bitrate = float(content['Speed Bps']) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentDef: get_bitrate: Bitrate in Azureus metainfo",bitrate + if playtime is not None: + bitrate = info['length']/playtime + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentDef: get_bitrate: Found bitrate",bitrate + except: + print_exc() + + return bitrate + + if file is not None and 'files' in info: + for i in range(len(info['files'])): + x = info['files'][i] + + intorrentpath = '' + for elem in x['path']: + intorrentpath = os.path.join(intorrentpath,elem) + bitrate = None + try: + playtime = None + if x.has_key('playtime'): + playtime = parse_playtime_to_secs(x['playtime']) + elif 'playtime' in metainfo: # HACK: encode playtime in non-info part of existing torrent + playtime = parse_playtime_to_secs(metainfo['playtime']) + elif 'azureus_properties' in metainfo: + azprop = metainfo['azureus_properties'] + if 'Content' in azprop: + content = metainfo['azureus_properties']['Content'] + if 'Speed Bps' in content: + bitrate = float(content['Speed Bps']) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentDef: get_bitrate: Bitrate in Azureus metainfo",bitrate + + if playtime is not None: + bitrate = x['length']/playtime + except: + print_exc() + + if intorrentpath == file: + return bitrate + + raise ValueError("File not found in torrent") + else: + raise ValueError("File not found in single-file torrent: "+file) + + +def get_length_filepieceranges_from_metainfo(metainfo,selectedfiles): + + if 'files' not in metainfo['info']: + # single-file torrent + return (metainfo['info']['length'],None) + else: + # multi-file torrent + files = metainfo['info']['files'] + piecesize = metainfo['info']['piece length'] + + total = 0L + filepieceranges = [] + for i in xrange(len(files)): + path = files[i]['path'] + length = files[i]['length'] + filename = pathlist2filename(path) + + if length > 0 and (not selectedfiles or (selectedfiles and filename in selectedfiles)): + range = (offset2piece(total,piecesize), offset2piece(total + 
length,piecesize),filename) + filepieceranges.append(range) + total += length + return (total,filepieceranges) + + +def copy_metainfo_to_input(metainfo,input): + + for key in tdefdictdefaults.keys(): + if key in metainfo: + input[key] = metainfo[key] + + infokeys = ['name','piece length','live'] + for key in infokeys: + if key in metainfo['info']: + input[key] = metainfo['info'][key] + + # Note: don't know inpath, set to outpath + if 'length' in metainfo['info']: + outpath = metainfo['info']['name'] + if 'playtime' in metainfo['info']: + playtime = metainfo['info']['playtime'] + else: + playtime = None + length = metainfo['info']['length'] + d = {'inpath':outpath,'outpath':outpath,'playtime':playtime,'length':length} + input['files'].append(d) + else: # multi-file torrent + files = metainfo['info']['files'] + for file in files: + outpath = pathlist2filename(file['path']) + if 'playtime' in file: + playtime = file['playtime'] + else: + playtime = None + length = file['length'] + d = {'inpath':outpath,'outpath':outpath,'playtime':playtime,'length':length} + input['files'].append(d) + + if 'azureus_properties' in metainfo: + azprop = metainfo['azureus_properties'] + if 'Content' in azprop: + content = metainfo['azureus_properties']['Content'] + if 'Thumbnail' in content: + input['thumb'] = content['Thumbnail'] + + if 'live' in metainfo['info']: + input['live'] = metainfo['info']['live'] + + +def get_files(metainfo,exts): + + videofiles = [] + if 'files' in metainfo['info']: + # Multi-file torrent + files = metainfo['info']['files'] + for file in files: + + p = file['path'] + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentDef: get_files: file is",p + filename = '' + for elem in p: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentDef: get_files: elem is",elem + filename = os.path.join(filename,elem) + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentDef: get_files: composed filename is",filename + (prefix,ext) = os.path.splitext(filename) + if ext != '' and ext[0] == '.': + ext = ext[1:] + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentDef: get_files: ext",ext + if exts is None or ext in exts: + videofiles.append(filename) + else: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentDef: get_files: Single-torrent file" + + filename = metainfo['info']['name'] # don't think we need fixed name here + (prefix,ext) = os.path.splitext(filename) + if ext != '' and ext[0] == '.': + ext = ext[1:] + if exts is None or ext in exts: + videofiles.append(filename) + return videofiles + diff --git a/tribler-mod/Tribler/Core/APIImplementation/maketorrent.py.bak b/tribler-mod/Tribler/Core/APIImplementation/maketorrent.py.bak new file mode 100644 index 0000000..1179292 --- /dev/null +++ b/tribler-mod/Tribler/Core/APIImplementation/maketorrent.py.bak @@ -0,0 +1,580 @@ +# Written by Arno Bakker, Bram Cohen +# multitracker extensions by John Hoffman +# modified for Merkle hashes and digital signatures by Arno Bakker +# see LICENSE.txt for license information + +import sys +import os +import md5 +import zlib + +from sha import sha +from copy import copy +from time import time +from traceback import print_exc +from types import LongType + +from Tribler.Core.BitTornado.bencode import bencode +from Tribler.Core.BitTornado.BT1.btformats import check_info +from Tribler.Core.Merkle.merkle import MerkleTree +from Tribler.Core.Overlay.permid import create_torrent_signature +from 
Tribler.Core.Utilities.unicode import str2unicode,bin2unicode +from Tribler.Core.APIImplementation.miscutils import parse_playtime_to_secs,offset2piece +from Tribler.Core.osutils import fix_filebasename +from Tribler.Core.defaults import tdefdictdefaults + + +ignore = [] # Arno: was ['core', 'CVS'] + +DEBUG = False + +def make_torrent_file(input, userabortflag = None, userprogresscallback = lambda x: None): + """ Create a torrent file from the supplied input. + + Returns a (infohash,metainfo) pair, or (None,None) on userabort. """ + + (info,piece_length) = makeinfo(input,userabortflag,userprogresscallback) + if userabortflag is not None and userabortflag.isSet(): + return (None,None) + if info is None: + return (None,None) + + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","mktorrent: makeinfo returned",`info` + + check_info(info) + metainfo = {'info': info, 'encoding': input['encoding'], 'creation date': long(time())} + + # http://www.bittorrent.org/DHT_protocol.html says both announce and nodes + # are not allowed, but some torrents (Azureus?) apparently violate this. + if input['nodes'] is None and input['announce'] is None: + raise ValueError('No tracker set') + + for key in ['announce','announce-list','nodes','comment','created by','httpseeds']: + if input[key] is not None and len(input[key]) > 0: + metainfo[key] = input[key] + if key == 'comment': + metainfo['comment.utf-8'] = uniconvert(input['comment'],'utf-8') + + # Assuming 1 file, Azureus format no support multi-file torrent with diff + # bitrates + bitrate = None + for file in input['files']: + if file['playtime'] is not None: + secs = parse_playtime_to_secs(file['playtime']) + bitrate = file['length']/secs + break + if input.get('bps') is not None: + bitrate = input['bps'] + break + + if bitrate is not None or input['thumb'] is not None: + mdict = {} + mdict['Publisher'] = 'Tribler' + if input['comment'] is None: + descr = '' + else: + descr = input['comment'] + mdict['Description'] = descr + + if bitrate is not None: + mdict['Progressive'] = 1 + mdict['Speed Bps'] = bitrate + else: + mdict['Progressive'] = 0 + + mdict['Title'] = metainfo['info']['name'] + mdict['Creation Date'] = long(time()) + # Azureus client source code doesn't tell what this is, so just put in random value from real torrent + mdict['Content Hash'] = 'PT3GQCPW4NPT6WRKKT25IQD4MU5HM4UY' + mdict['Revision Date'] = long(time()) + if input['thumb'] is not None: + mdict['Thumbnail'] = input['thumb'] + cdict = {} + cdict['Content'] = mdict + metainfo['azureus_properties'] = cdict + + if input['torrentsigkeypairfilename'] is not None: + create_torrent_signature(metainfo,input['torrentsigkeypairfilename']) + + infohash = sha(bencode(info)).digest() + return (infohash,metainfo) + + +def uniconvertl(l, e): + """ Convert a pathlist to a list of strings encoded in encoding "e" using + uniconvert. """ + r = [] + try: + for s in l: + r.append(uniconvert(s, e)) + except UnicodeError: + raise UnicodeError('bad filename: '+os.path.join(l)) + return r + +def uniconvert(s, enc): + """ Convert 's' to a string containing a Unicode sequence encoded using + encoding "enc". If 's' is not a Unicode object, we first try to convert + it to one, guessing the encoding if necessary. 
""" + if not isinstance(s, unicode): + try: + s = str2unicode(s) + except UnicodeError: + raise UnicodeError('bad filename: '+s) + return s.encode(enc) + + +def makeinfo(input,userabortflag,userprogresscallback): + """ Calculate hashes and create torrent file's 'info' part """ + encoding = input['encoding'] + + pieces = [] + sh = sha() + done = 0L + fs = [] + totalsize = 0L + totalhashed = 0L + + # 1. Determine which files should go into the torrent (=expand any dirs + # specified by user in input['files'] + subs = [] + for file in input['files']: + inpath = file['inpath'] + outpath = file['outpath'] + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","makeinfo: inpath",inpath,"outpath",outpath + + if os.path.isdir(inpath): + dirsubs = subfiles(inpath) + subs.extend(dirsubs) + else: + if outpath is None: + subs.append(([os.path.basename(inpath)],inpath)) + else: + subs.append((filename2pathlist(outpath,skipfirst=True),inpath)) + + subs.sort() + + # 2. Calc total size + newsubs = [] + for p, f in subs: + if 'live' in input: + size = input['files'][0]['length'] + else: + size = os.path.getsize(f) + totalsize += size + newsubs.append((p,f,size)) + subs = newsubs + + # 3. Calc piece length from totalsize if not set + if input['piece length'] == 0: + if input['createmerkletorrent']: + # used to be 15=32K, but this works better with slow python + piece_len_exp = 18 + else: + if totalsize > 8L*1024*1024*1024: # > 8 gig = + piece_len_exp = 21 # 2 meg pieces + elif totalsize > 2*1024*1024*1024: # > 2 gig = + piece_len_exp = 20 # 1 meg pieces + elif totalsize > 512*1024*1024: # > 512M = + piece_len_exp = 19 # 512K pieces + elif totalsize > 64*1024*1024: # > 64M = + piece_len_exp = 18 # 256K pieces + elif totalsize > 16*1024*1024: # > 16M = + piece_len_exp = 17 # 128K pieces + elif totalsize > 4*1024*1024: # > 4M = + piece_len_exp = 16 # 64K pieces + else: # < 4M = + piece_len_exp = 15 # 32K pieces + piece_length = 2 ** piece_len_exp + else: + piece_length = input['piece length'] + + # 4. 
Read files and calc hashes, if not live + if 'live' not in input: + for p, f, size in subs: + pos = 0L + + h = open(f, 'rb') + + if input['makehash_md5']: + hash_md5 = md5.new() + if input['makehash_sha1']: + hash_sha1 = sha() + if input['makehash_crc32']: + hash_crc32 = zlib.crc32('') + + while pos < size: + a = min(size - pos, piece_length - done) + + # See if the user cancelled + if userabortflag is not None and userabortflag.isSet(): + return (None,None) + + readpiece = h.read(a) + + # See if the user cancelled + if userabortflag is not None and userabortflag.isSet(): + return (None,None) + + sh.update(readpiece) + + if input['makehash_md5']: + # Update MD5 + hash_md5.update(readpiece) + + if input['makehash_crc32']: + # Update CRC32 + hash_crc32 = zlib.crc32(readpiece, hash_crc32) + + if input['makehash_sha1']: + # Update SHA1 + hash_sha1.update(readpiece) + + done += a + pos += a + totalhashed += a + + if done == piece_length: + pieces.append(sh.digest()) + done = 0 + sh = sha() + + if userprogresscallback is not None: + userprogresscallback(float(totalhashed) / float(totalsize)) + + newdict = {'length': num2num(size), + 'path': uniconvertl(p,encoding), + 'path.utf-8': uniconvertl(p, 'utf-8') } + + # Find and add playtime + for file in input['files']: + if file['inpath'] == f: + if file['playtime'] is not None: + newdict['playtime'] = file['playtime'] + break + + if input['makehash_md5']: + newdict['md5sum'] = hash_md5.hexdigest() + if input['makehash_crc32']: + newdict['crc32'] = "%08X" % hash_crc32 + if input['makehash_sha1']: + newdict['sha1'] = hash_sha1.digest() + + fs.append(newdict) + + h.close() + + if done > 0: + pieces.append(sh.digest()) + + # 5. Create info dict + if len(subs) == 1: + flkey = 'length' + flval = num2num(totalsize) + name = subs[0][0][0] + else: + flkey = 'files' + flval = fs + + outpath = input['files'][0]['outpath'] + l = filename2pathlist(outpath) + name = l[0] + + infodict = { 'piece length':num2num(piece_length), flkey: flval, + 'name': uniconvert(name,encoding), + 'name.utf-8': uniconvert(name,'utf-8')} + + if 'live' not in input: + + if input['createmerkletorrent']: + merkletree = MerkleTree(piece_length,totalsize,None,pieces) + root_hash = merkletree.get_root_hash() + infodict.update( {'root hash': root_hash } ) + else: + infodict.update( {'pieces': ''.join(pieces) } ) + else: + # With source auth, live is a dict + infodict['live'] = input['live'] + + if len(subs) == 1: + # Find and add playtime + for file in input['files']: + if file['inpath'] == f: + if file['playtime'] is not None: + infodict['playtime'] = file['playtime'] + + return (infodict,piece_length) + + +def subfiles(d): + """ Return list of (pathlist,local filename) tuples for all the files in + directory 'd' """ + r = [] + stack = [([], d)] + while stack: + p, n = stack.pop() + if os.path.isdir(n): + for s in os.listdir(n): + if s not in ignore and s[:1] != '.': + stack.append((copy(p) + [s], os.path.join(n, s))) + else: + r.append((p, n)) + return r + + +def filename2pathlist(path,skipfirst=False): + """ Convert a filename to a 'path' entry suitable for a multi-file torrent + file """ + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","mktorrent: filename2pathlist:",path,skipfirst + + h = path + l = [] + while True: + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","mktorrent: filename2pathlist: splitting",h + + (h,t) = os.path.split(h) + if h == '' and t == '': + break + if h == '' and skipfirst: + continue + if t != 
'': # handle case where path ends in / (=path separator) + l.append(t) + + l.reverse() + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","mktorrent: filename2pathlist: returning",l + + return l + + +def pathlist2filename(pathlist): + """ Convert a multi-file torrent file 'path' entry to a filename. """ + fullpath = '' + for elem in pathlist: + fullpath = os.path.join(fullpath,elem) + return fullpath + +def pathlist2savefilename(pathlist,encoding): + fullpath = u'' + for elem in pathlist: + u = bin2unicode(elem,encoding) + b = fix_filebasename(u) + fullpath = os.path.join(fullpath,b) + return fullpath + +def torrentfilerec2savefilename(filerec,length=None): + if length is None: + length = len(filerec['path']) + if 'path.utf-8' in filerec: + key = 'path.utf-8' + encoding = 'utf-8' + else: + key = 'path' + encoding = None + + return pathlist2savefilename(filerec[key][:length],encoding) + +def savefilenames2finaldest(fn1,fn2): + """ Returns the join of two savefilenames, possibly shortened + to adhere to OS specific limits. + """ + j = os.path.join(fn1,fn2) + if sys.platform == 'win32': + # Windows has a maximum path length of 260 + # http://msdn2.microsoft.com/en-us/library/aa365247.aspx + j = j[:259] # 260 don't work. + return j + + +def num2num(num): + """ Converts long to int if small enough to fit """ + if type(num) == LongType and num < sys.maxint: + return int(num) + else: + return num + +def get_torrentfilerec_from_metainfo(filename,metainfo): + info = metainfo['info'] + if filename is None: + return info + + if filename is not None and 'files' in info: + for i in range(len(info['files'])): + x = info['files'][i] + + intorrentpath = pathlist2filename(x['path']) + if intorrentpath == filename: + return x + + raise ValueError("File not found in torrent") + else: + raise ValueError("File not found in single-file torrent") + +def get_bitrate_from_metainfo(file,metainfo): + info = metainfo['info'] + if file is None: + bitrate = None + try: + playtime = None + if info.has_key('playtime'): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentDef: get_bitrate: Bitrate in info field" + playtime = parse_playtime_to_secs(info['playtime']) + elif 'playtime' in metainfo: # HACK: encode playtime in non-info part of existing torrent + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentDef: get_bitrate: Bitrate in metainfo" + playtime = parse_playtime_to_secs(metainfo['playtime']) + elif 'azureus_properties' in metainfo: + azprop = metainfo['azureus_properties'] + if 'Content' in azprop: + content = metainfo['azureus_properties']['Content'] + if 'Speed Bps' in content: + bitrate = float(content['Speed Bps']) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentDef: get_bitrate: Bitrate in Azureus metainfo",bitrate + if playtime is not None: + bitrate = info['length']/playtime + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentDef: get_bitrate: Found bitrate",bitrate + except: + print_exc() + + return bitrate + + if file is not None and 'files' in info: + for i in range(len(info['files'])): + x = info['files'][i] + + intorrentpath = '' + for elem in x['path']: + intorrentpath = os.path.join(intorrentpath,elem) + bitrate = None + try: + playtime = None + if x.has_key('playtime'): + playtime = parse_playtime_to_secs(x['playtime']) + elif 'playtime' in metainfo: # HACK: encode playtime in non-info part of existing torrent + playtime = 
parse_playtime_to_secs(metainfo['playtime']) + elif 'azureus_properties' in metainfo: + azprop = metainfo['azureus_properties'] + if 'Content' in azprop: + content = metainfo['azureus_properties']['Content'] + if 'Speed Bps' in content: + bitrate = float(content['Speed Bps']) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentDef: get_bitrate: Bitrate in Azureus metainfo",bitrate + + if playtime is not None: + bitrate = x['length']/playtime + except: + print_exc() + + if intorrentpath == file: + return bitrate + + raise ValueError("File not found in torrent") + else: + raise ValueError("File not found in single-file torrent: "+file) + + +def get_length_filepieceranges_from_metainfo(metainfo,selectedfiles): + + if 'files' not in metainfo['info']: + # single-file torrent + return (metainfo['info']['length'],None) + else: + # multi-file torrent + files = metainfo['info']['files'] + piecesize = metainfo['info']['piece length'] + + total = 0L + filepieceranges = [] + for i in xrange(len(files)): + path = files[i]['path'] + length = files[i]['length'] + filename = pathlist2filename(path) + + if length > 0 and (not selectedfiles or (selectedfiles and filename in selectedfiles)): + range = (offset2piece(total,piecesize), offset2piece(total + length,piecesize),filename) + filepieceranges.append(range) + total += length + return (total,filepieceranges) + + +def copy_metainfo_to_input(metainfo,input): + + for key in tdefdictdefaults.keys(): + if key in metainfo: + input[key] = metainfo[key] + + infokeys = ['name','piece length','live'] + for key in infokeys: + if key in metainfo['info']: + input[key] = metainfo['info'][key] + + # Note: don't know inpath, set to outpath + if 'length' in metainfo['info']: + outpath = metainfo['info']['name'] + if 'playtime' in metainfo['info']: + playtime = metainfo['info']['playtime'] + else: + playtime = None + length = metainfo['info']['length'] + d = {'inpath':outpath,'outpath':outpath,'playtime':playtime,'length':length} + input['files'].append(d) + else: # multi-file torrent + files = metainfo['info']['files'] + for file in files: + outpath = pathlist2filename(file['path']) + if 'playtime' in file: + playtime = file['playtime'] + else: + playtime = None + length = file['length'] + d = {'inpath':outpath,'outpath':outpath,'playtime':playtime,'length':length} + input['files'].append(d) + + if 'azureus_properties' in metainfo: + azprop = metainfo['azureus_properties'] + if 'Content' in azprop: + content = metainfo['azureus_properties']['Content'] + if 'Thumbnail' in content: + input['thumb'] = content['Thumbnail'] + + if 'live' in metainfo['info']: + input['live'] = metainfo['info']['live'] + + +def get_files(metainfo,exts): + + videofiles = [] + if 'files' in metainfo['info']: + # Multi-file torrent + files = metainfo['info']['files'] + for file in files: + + p = file['path'] + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentDef: get_files: file is",p + filename = '' + for elem in p: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentDef: get_files: elem is",elem + filename = os.path.join(filename,elem) + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentDef: get_files: composed filename is",filename + (prefix,ext) = os.path.splitext(filename) + if ext != '' and ext[0] == '.': + ext = ext[1:] + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentDef: get_files: ext",ext + if exts is None or ext in exts: + videofiles.append(filename) + 
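+ # Illustrative call (hypothetical values): get_files(metainfo, ['avi', 'mpg'])
+ # returns e.g. ['Show/ep01.avi', ...] joined with the local path separator;
+ # passing exts=None returns every file listed in the torrent.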
else: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentDef: get_files: Single-torrent file" + + filename = metainfo['info']['name'] # don't think we need fixed name here + (prefix,ext) = os.path.splitext(filename) + if ext != '' and ext[0] == '.': + ext = ext[1:] + if exts is None or ext in exts: + videofiles.append(filename) + return videofiles + diff --git a/tribler-mod/Tribler/Core/APIImplementation/miscutils.py b/tribler-mod/Tribler/Core/APIImplementation/miscutils.py new file mode 100644 index 0000000..8f94e95 --- /dev/null +++ b/tribler-mod/Tribler/Core/APIImplementation/miscutils.py @@ -0,0 +1,43 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information + +import sys +import re +from threading import Timer + +DEBUG = False + +def parse_playtime_to_secs(hhmmss): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","miscutils: Playtime is",hhmmss + r = re.compile("([0-9]+):*") + occ = r.findall(hhmmss) + t = None + if len(occ) > 0: + if len(occ) == 3: + # hours as well + t = int(occ[0])*3600 + int(occ[1])*60 + int(occ[2]) + elif len(occ) == 2: + # minutes and seconds + t = int(occ[0])*60 + int(occ[1]) + elif len(occ) == 1: + # seconds + t = int(occ[0]) + return t + + +def offset2piece(offset,piecesize): + + p = offset / piecesize + if offset % piecesize > 0: + p += 1 + return p + + + +def NamedTimer(*args,**kwargs): + t = Timer(*args,**kwargs) + t.setDaemon(True) + t.setName("NamedTimer"+t.getName()) + return t diff --git a/tribler-mod/Tribler/Core/APIImplementation/miscutils.py.bak b/tribler-mod/Tribler/Core/APIImplementation/miscutils.py.bak new file mode 100644 index 0000000..34b194b --- /dev/null +++ b/tribler-mod/Tribler/Core/APIImplementation/miscutils.py.bak @@ -0,0 +1,42 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information + +import sys +import re +from threading import Timer + +DEBUG = False + +def parse_playtime_to_secs(hhmmss): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","miscutils: Playtime is",hhmmss + r = re.compile("([0-9]+):*") + occ = r.findall(hhmmss) + t = None + if len(occ) > 0: + if len(occ) == 3: + # hours as well + t = int(occ[0])*3600 + int(occ[1])*60 + int(occ[2]) + elif len(occ) == 2: + # minutes and seconds + t = int(occ[0])*60 + int(occ[1]) + elif len(occ) == 1: + # seconds + t = int(occ[0]) + return t + + +def offset2piece(offset,piecesize): + + p = offset / piecesize + if offset % piecesize > 0: + p += 1 + return p + + + +def NamedTimer(*args,**kwargs): + t = Timer(*args,**kwargs) + t.setDaemon(True) + t.setName("NamedTimer"+t.getName()) + return t diff --git a/tribler-mod/Tribler/Core/Base.py b/tribler-mod/Tribler/Core/Base.py new file mode 100644 index 0000000..3a2d6b9 --- /dev/null +++ b/tribler-mod/Tribler/Core/Base.py @@ -0,0 +1,31 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information +""" Base classes for the Core API """ + +from Tribler.Core.exceptions import * + +DEBUG = False + +# +# Tribler API base classes +# +class Serializable: + """ + Interface to signal that the object is pickleable. + """ + def __init__(self): + pass + +class Copyable: + """ + Interface for copying an instance (or rather signaling that it can be + copied) + """ + def copy(self): + """ + Copies the instance. 
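# --- Illustrative usage sketch; not part of the original patch. ---------------
# Exercises the two miscutils helpers added above. Assumes the Tribler package
# from this patch is on PYTHONPATH; the expected values follow directly from the
# code of parse_playtime_to_secs() and offset2piece() shown above.
from Tribler.Core.APIImplementation.miscutils import parse_playtime_to_secs, offset2piece

assert parse_playtime_to_secs("1:02:03") == 3723   # hh:mm:ss
assert parse_playtime_to_secs("02:03") == 123      # mm:ss
assert parse_playtime_to_secs("45") == 45          # seconds only

PIECE = 262144                                     # example 256 KiB piece size
assert offset2piece(0, PIECE) == 0                 # exact piece boundary
assert offset2piece(1, PIECE) == 1                 # partial piece rounds up
assert offset2piece(PIECE, PIECE) == 1
assert offset2piece(PIECE + 1, PIECE) == 2
# ------------------------------------------------------------------------------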
+ @param self an unbound instance of the class + @return Returns a copy of "self" + """ + raise NotYetImplementedException() diff --git a/tribler-mod/Tribler/Core/Base.py.bak b/tribler-mod/Tribler/Core/Base.py.bak new file mode 100644 index 0000000..9f11ab2 --- /dev/null +++ b/tribler-mod/Tribler/Core/Base.py.bak @@ -0,0 +1,30 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information +""" Base classes for the Core API """ + +from Tribler.Core.exceptions import * + +DEBUG = False + +# +# Tribler API base classes +# +class Serializable: + """ + Interface to signal that the object is pickleable. + """ + def __init__(self): + pass + +class Copyable: + """ + Interface for copying an instance (or rather signaling that it can be + copied) + """ + def copy(self): + """ + Copies the instance. + @param self an unbound instance of the class + @return Returns a copy of "self" + """ + raise NotYetImplementedException() diff --git a/tribler-mod/Tribler/Core/BitTornado/BT1/Choker.py b/tribler-mod/Tribler/Core/BitTornado/BT1/Choker.py new file mode 100644 index 0000000..7b48ffb --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/BT1/Choker.py @@ -0,0 +1,248 @@ +from time import localtime, strftime + +# Written by Bram Cohen, Pawel Garbacki, Boxun Zhang +# see LICENSE.txt for license information + +from random import randrange, shuffle +import sys + +from Tribler.Core.BitTornado.clock import clock + +try: + True +except: + True = 1 + False = 0 + +DEBUG = True #False + +class Choker: + def __init__(self, config, schedule, picker, seeding_selector, done = lambda: False): + self.config = config + self.round_robin_period = config['round_robin_period'] + self.schedule = schedule + self.picker = picker + self.connections = [] + self.last_preferred = 0 + self.last_round_robin = clock() + self.done = done + self.super_seed = False + self.paused = False + schedule(self._round_robin, 5) + + # SelectiveSeeding + self.seeding_manager = None + + + def set_round_robin_period(self, x): + self.round_robin_period = x + + def _round_robin(self): + self.schedule(self._round_robin, 5) + if self.super_seed: + cons = range(len(self.connections)) + to_close = [] + count = self.config['min_uploads']-self.last_preferred + if count > 0: # optimization + shuffle(cons) + for c in cons: + # SelectiveSeeding + if self.seeding_manager is None or self.seeding_manager.is_conn_eligible(c): + + i = self.picker.next_have(self.connections[c], count > 0) + if i is None: + continue + if i < 0: + to_close.append(self.connections[c]) + continue + self.connections[c].send_have(i) + count -= 1 + else: + # Drop non-eligible connections + to_close.append(self.connections[c]) + for c in to_close: + c.close() + if self.last_round_robin + self.round_robin_period < clock(): + self.last_round_robin = clock() + for i in xrange(1, len(self.connections)): + c = self.connections[i] + + # SelectiveSeeding + if self.seeding_manager is None or self.seeding_manager.is_conn_eligible(c): + u = c.get_upload() + if u.is_choked() and u.is_interested(): + self.connections = self.connections[i:] + self.connections[:i] + break + self._rechoke() + + def _rechoke(self): + # 2fast + helper = self.picker.helper + if helper is not None and helper.coordinator is None and helper.is_complete(): + for c in self.connections: + if not c.connection.is_coordinator_con(): + u = c.get_upload() + u.choke() + return + + if self.paused: + for c in self.connections: + c.get_upload().choke() + return + + # NETWORK AWARE + if 'unchoke_bias_for_internal' in self.config: + 
checkinternalbias = self.config['unchoke_bias_for_internal'] + else: + checkinternalbias = 0 + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","choker: _rechoke: checkinternalbias",checkinternalbias + + # 0. Construct candidate list + preferred = [] + maxuploads = self.config['max_uploads'] + if maxuploads > 1: + + # 1. Get some regular candidates + for c in self.connections: + + # g2g: unchoke some g2g peers later + if c.use_g2g: + continue + + # SelectiveSeeding + if self.seeding_manager is None or self.seeding_manager.is_conn_eligible(c): + u = c.get_upload() + if not u.is_interested(): + continue + if self.done(): + r = u.get_rate() + else: + d = c.get_download() + r = d.get_rate() + if r < 1000 or d.is_snubbed(): + continue + + # NETWORK AWARENESS + if checkinternalbias and c.na_get_address_distance() == 0: + r += checkinternalbias + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","choker: _rechoke: BIASING",c.get_ip(),c.get_port() + + preferred.append((-r, c)) + + self.last_preferred = len(preferred) + preferred.sort() + del preferred[maxuploads-1:] + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","choker: _rechoke: NORMAL UNCHOKE",preferred + preferred = [x[1] for x in preferred] + + # 2. Get some g2g candidates + g2g_preferred = [] + for c in self.connections: + if not c.use_g2g: + continue + + # SelectiveSeeding + if self.seeding_manager is None or self.seeding_manager.is_conn_eligible(c): + + u = c.get_upload() + if not u.is_interested(): + continue + + r = c.g2g_score() + if checkinternalbias and c.na_get_address_distance() == 0: + r[0] += checkinternalbias + r[1] += checkinternalbias + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","choker: _rechoke: G2G BIASING",c.get_ip(),c.get_port() + + g2g_preferred.append((-r[0], -r[1], c)) + + g2g_preferred.sort() + del g2g_preferred[maxuploads-1:] + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","choker: _rechoke: G2G UNCHOKE",g2g_preferred + g2g_preferred = [x[2] for x in g2g_preferred] + + preferred += g2g_preferred + + + # + count = len(preferred) + hit = False + to_unchoke = [] + + # 3. The live source must always unchoke its auxiliary seeders + # LIVESOURCE + if 'live_aux_seeders' in self.config: + + for hostport in self.config['live_aux_seeders']: + for c in self.connections: + if c.get_ip() == hostport[0]: + u = c.get_upload() + to_unchoke.append(u) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Choker: _rechoke: LIVE: Permanently unchoking aux seed",hostport + + # 4. Select from candidate lists, aux seeders always selected + for c in self.connections: + u = c.get_upload() + if c in preferred: + to_unchoke.append(u) + else: + # TODO: apply service policies to optimistic slot + if count < maxuploads or not hit: + to_unchoke.append(u) + if u.is_interested(): + count += 1 + if DEBUG and not hit: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","choker: OPTIMISTIC UNCHOKE",c + hit = True + else: + if not c.connection.is_coordinator_con() and not c.connection.is_helper_con(): + u.choke() + elif u.is_choked(): + to_unchoke.append(u) + + # 5. 
Unchoke selected candidates + for u in to_unchoke: + u.unchoke() + + + def connection_made(self, connection, p = None): + if p is None: + p = randrange(-2, len(self.connections) + 1) + self.connections.insert(max(p, 0), connection) + self.picker.got_peer(connection) + self._rechoke() + + def connection_lost(self, connection): + """ connection is a Connecter.Connection """ + self.connections.remove(connection) + self.picker.lost_peer(connection) + if connection.get_upload().is_interested() and not connection.get_upload().is_choked(): + self._rechoke() + + def interested(self, connection): + if not connection.get_upload().is_choked(): + self._rechoke() + + def not_interested(self, connection): + if not connection.get_upload().is_choked(): + self._rechoke() + + def set_super_seed(self): + while self.connections: # close all connections + self.connections[0].close() + self.picker.set_superseed() + self.super_seed = True + + def pause(self, flag): + self.paused = flag + self._rechoke() + + # SelectiveSeeding + def set_seeding_manager(self, manager): + # When seeding starts, a non-trivial seeding manager will be set + self.seeding_manager = manager diff --git a/tribler-mod/Tribler/Core/BitTornado/BT1/Choker.py.bak b/tribler-mod/Tribler/Core/BitTornado/BT1/Choker.py.bak new file mode 100644 index 0000000..99b08a3 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/BT1/Choker.py.bak @@ -0,0 +1,247 @@ + +# Written by Bram Cohen, Pawel Garbacki, Boxun Zhang +# see LICENSE.txt for license information + +from random import randrange, shuffle +import sys + +from Tribler.Core.BitTornado.clock import clock + +try: + True +except: + True = 1 + False = 0 + +DEBUG = True #False + +class Choker: + def __init__(self, config, schedule, picker, seeding_selector, done = lambda: False): + self.config = config + self.round_robin_period = config['round_robin_period'] + self.schedule = schedule + self.picker = picker + self.connections = [] + self.last_preferred = 0 + self.last_round_robin = clock() + self.done = done + self.super_seed = False + self.paused = False + schedule(self._round_robin, 5) + + # SelectiveSeeding + self.seeding_manager = None + + + def set_round_robin_period(self, x): + self.round_robin_period = x + + def _round_robin(self): + self.schedule(self._round_robin, 5) + if self.super_seed: + cons = range(len(self.connections)) + to_close = [] + count = self.config['min_uploads']-self.last_preferred + if count > 0: # optimization + shuffle(cons) + for c in cons: + # SelectiveSeeding + if self.seeding_manager is None or self.seeding_manager.is_conn_eligible(c): + + i = self.picker.next_have(self.connections[c], count > 0) + if i is None: + continue + if i < 0: + to_close.append(self.connections[c]) + continue + self.connections[c].send_have(i) + count -= 1 + else: + # Drop non-eligible connections + to_close.append(self.connections[c]) + for c in to_close: + c.close() + if self.last_round_robin + self.round_robin_period < clock(): + self.last_round_robin = clock() + for i in xrange(1, len(self.connections)): + c = self.connections[i] + + # SelectiveSeeding + if self.seeding_manager is None or self.seeding_manager.is_conn_eligible(c): + u = c.get_upload() + if u.is_choked() and u.is_interested(): + self.connections = self.connections[i:] + self.connections[:i] + break + self._rechoke() + + def _rechoke(self): + # 2fast + helper = self.picker.helper + if helper is not None and helper.coordinator is None and helper.is_complete(): + for c in self.connections: + if not 
c.connection.is_coordinator_con(): + u = c.get_upload() + u.choke() + return + + if self.paused: + for c in self.connections: + c.get_upload().choke() + return + + # NETWORK AWARE + if 'unchoke_bias_for_internal' in self.config: + checkinternalbias = self.config['unchoke_bias_for_internal'] + else: + checkinternalbias = 0 + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","choker: _rechoke: checkinternalbias",checkinternalbias + + # 0. Construct candidate list + preferred = [] + maxuploads = self.config['max_uploads'] + if maxuploads > 1: + + # 1. Get some regular candidates + for c in self.connections: + + # g2g: unchoke some g2g peers later + if c.use_g2g: + continue + + # SelectiveSeeding + if self.seeding_manager is None or self.seeding_manager.is_conn_eligible(c): + u = c.get_upload() + if not u.is_interested(): + continue + if self.done(): + r = u.get_rate() + else: + d = c.get_download() + r = d.get_rate() + if r < 1000 or d.is_snubbed(): + continue + + # NETWORK AWARENESS + if checkinternalbias and c.na_get_address_distance() == 0: + r += checkinternalbias + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","choker: _rechoke: BIASING",c.get_ip(),c.get_port() + + preferred.append((-r, c)) + + self.last_preferred = len(preferred) + preferred.sort() + del preferred[maxuploads-1:] + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","choker: _rechoke: NORMAL UNCHOKE",preferred + preferred = [x[1] for x in preferred] + + # 2. Get some g2g candidates + g2g_preferred = [] + for c in self.connections: + if not c.use_g2g: + continue + + # SelectiveSeeding + if self.seeding_manager is None or self.seeding_manager.is_conn_eligible(c): + + u = c.get_upload() + if not u.is_interested(): + continue + + r = c.g2g_score() + if checkinternalbias and c.na_get_address_distance() == 0: + r[0] += checkinternalbias + r[1] += checkinternalbias + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","choker: _rechoke: G2G BIASING",c.get_ip(),c.get_port() + + g2g_preferred.append((-r[0], -r[1], c)) + + g2g_preferred.sort() + del g2g_preferred[maxuploads-1:] + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","choker: _rechoke: G2G UNCHOKE",g2g_preferred + g2g_preferred = [x[2] for x in g2g_preferred] + + preferred += g2g_preferred + + + # + count = len(preferred) + hit = False + to_unchoke = [] + + # 3. The live source must always unchoke its auxiliary seeders + # LIVESOURCE + if 'live_aux_seeders' in self.config: + + for hostport in self.config['live_aux_seeders']: + for c in self.connections: + if c.get_ip() == hostport[0]: + u = c.get_upload() + to_unchoke.append(u) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Choker: _rechoke: LIVE: Permanently unchoking aux seed",hostport + + # 4. Select from candidate lists, aux seeders always selected + for c in self.connections: + u = c.get_upload() + if c in preferred: + to_unchoke.append(u) + else: + # TODO: apply service policies to optimistic slot + if count < maxuploads or not hit: + to_unchoke.append(u) + if u.is_interested(): + count += 1 + if DEBUG and not hit: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","choker: OPTIMISTIC UNCHOKE",c + hit = True + else: + if not c.connection.is_coordinator_con() and not c.connection.is_helper_con(): + u.choke() + elif u.is_choked(): + to_unchoke.append(u) + + # 5. 
Unchoke selected candidates + for u in to_unchoke: + u.unchoke() + + + def connection_made(self, connection, p = None): + if p is None: + p = randrange(-2, len(self.connections) + 1) + self.connections.insert(max(p, 0), connection) + self.picker.got_peer(connection) + self._rechoke() + + def connection_lost(self, connection): + """ connection is a Connecter.Connection """ + self.connections.remove(connection) + self.picker.lost_peer(connection) + if connection.get_upload().is_interested() and not connection.get_upload().is_choked(): + self._rechoke() + + def interested(self, connection): + if not connection.get_upload().is_choked(): + self._rechoke() + + def not_interested(self, connection): + if not connection.get_upload().is_choked(): + self._rechoke() + + def set_super_seed(self): + while self.connections: # close all connections + self.connections[0].close() + self.picker.set_superseed() + self.super_seed = True + + def pause(self, flag): + self.paused = flag + self._rechoke() + + # SelectiveSeeding + def set_seeding_manager(self, manager): + # When seeding starts, a non-trivial seeding manager will be set + self.seeding_manager = manager diff --git a/tribler-mod/Tribler/Core/BitTornado/BT1/Connecter.py b/tribler-mod/Tribler/Core/BitTornado/BT1/Connecter.py new file mode 100644 index 0000000..1ce32fd --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/BT1/Connecter.py @@ -0,0 +1,1251 @@ +from time import localtime, strftime + +# Written by Bram Cohen, Pawel Garbacki and Arno Bakker +# see LICENSE.txt for license information + +import time +import sys +from types import DictType,IntType,StringType +from random import shuffle +from traceback import print_exc +from math import ceil +import socket +import urlparse + +from Tribler.Core.BitTornado.bitfield import Bitfield +from Tribler.Core.BitTornado.clock import clock +from Tribler.Core.BitTornado.bencode import bencode,bdecode +from Tribler.Core.BitTornado.__init__ import version_short,decodePeerID,TRIBLER_PEERID_LETTER +from Tribler.Core.BitTornado.BT1.convert import tobinary,toint +from Tribler.Core.Overlay.OverlayThreadingBridge import OverlayThreadingBridge + +from MessageID import * + +from Tribler.Core.CacheDB.CacheDBHandler import PeerDBHandler, BarterCastDBHandler +from Tribler.Core.Overlay.SecureOverlay import SecureOverlay +from Tribler.Core.DecentralizedTracking.ut_pex import * +from Tribler.Core.BitTornado.BT1.track import compact_ip,decompact_ip + +from Tribler.Core.BitTornado.CurrentRateMeasure import Measure + +try: + True +except: + True = 1 + False = 0 + +KICK_OLD_CLIENTS=False +DEBUG = True #False +DEBUG_NORMAL_MSGS = True #False +DEBUG_UT_PEX = False +DEBUG_MESSAGE_HANDLING = False + +UNAUTH_PERMID_PERIOD = 3600 + +""" +Arno: 2007-02-16: +uTorrent and Bram's BitTorrent now support an extension to the protocol, +documented on http://www.rasterbar.com/products/libtorrent/extension_protocol.html + +The problem is that the bit they use in the options field of the BT handshake +is the same as we use to indicate a peer supports the overlay-swarm connection. +The new clients will send an EXTEND message with ID 20 after the handshake to +inform the otherside what new messages it supports. + +As a result, Tribler <= 3.5.0 clients won't be confused, but can't talk to these +new clients either or vice versa. The new client will think we understand the +message, send it. But because we don't know that message ID, we will close +the connection. 
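# --- Illustrative sketch; not part of the original patch. ---------------------
# Wire framing for the EXTEND handshake described in the comment above: a normal
# BT message (4-byte big-endian length prefix) whose type byte is 20, the BEP-10
# EXTEND id quoted in the text, followed by extended-message id 0 (handshake) and
# a bencoded dict. Tr_OVERLAYSWARM=253 is the value the text above names; the
# listen-port key is an example. bencode is the BitTornado module this file imports.
import struct
from Tribler.Core.BitTornado.bencode import bencode

def build_extend_handshake_sketch(listen_port):
    payload = bencode({'m': {'Tr_OVERLAYSWARM': 253}, 'p': listen_port})
    body = chr(20) + chr(0) + payload              # EXTEND id + handshake id + dict
    return struct.pack('>i', len(body)) + body     # length-prefixed BT message

# e.g. build_extend_handshake_sketch(6881)
# ------------------------------------------------------------------------------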
Our attempts to establish a new overlay connection with the new +client will gracefully fail, as the new client will not know of infohash=00000... +and close the connection. + +We solve this conflict by adding support for the EXTEND message. We are now be +able to receive it, and send our own. Our message will contain one method name, +i.e. Tr_OVERLAYSWARM=253. Processing is now as follows: + +* If bit 43 is set and the peerID is from an old Tribler (<=3.5.0) + peer, we initiate an overlay-swarm connection. +* If bit 43 is set and the peer's EXTEND hs message contains method Tr_OVERLAYSWARM, + it's a new Tribler peer, and we initiate an overlay-swarm connection. +* If bit 43 is set, and the EXTEND hs message does not contain Tr_OVERLAYSWARM + it's not a Tribler client and we do not initiate an overlay-swarm + connection. + +N.B. The EXTEND message is poorly designed, it lacks protocol versioning +support which is present in the Azureus Extended Messaging Protocol +and our overlay-swarm protocol. + +""" +EXTEND_MSG_HANDSHAKE_ID = chr(0) +EXTEND_MSG_OVERLAYSWARM = 'Tr_OVERLAYSWARM' +EXTEND_MSG_G2G_V1 = 'Tr_G2G' +EXTEND_MSG_G2G_V2 = 'Tr_G2G_v2' + +CURRENT_LIVE_VERSION=1 +EXTEND_MSG_LIVE_PREFIX = 'Tr_LIVE_v' +LIVE_FAKE_MESSAGE_ID = chr(254) + + + +G2G_CALLBACK_INTERVAL = 4 + +def show(s): + text = [] + for i in xrange(len(s)): + text.append(ord(s[i])) + return text + + +class Connection: + def __init__(self, connection, connecter): + self.connection = connection + self.connecter = connecter + self.got_anything = False + self.next_upload = None + self.outqueue = [] + self.partial_message = None + self.download = None + self.upload = None + self.send_choke_queued = False + self.just_unchoked = None + self.unauth_permid = None + self.looked_for_permid = UNAUTH_PERMID_PERIOD-3 + self.closed = False + self.extend_hs_dict = {} # what extended messages does this peer support + self.initiated_overlay = False + + # G2G + self.use_g2g = False # set to true if both sides use G2G, indicated by self.connector.use_g2g + self.g2g_version = None + self.perc_sent = {} + # batch G2G_XFER information and periodically send it out. + self.last_perc_sent = {} + + config = self.connecter.config + self.forward_speeds = [0] * 2 + self.forward_speeds[0] = Measure(config['max_rate_period'], config['upload_rate_fudge']) + self.forward_speeds[1] = Measure(config['max_rate_period'], config['upload_rate_fudge']) + + # BarterCast counters + self.total_downloaded = 0 + self.total_uploaded = 0 + + self.ut_pex_first_flag = True # first time we sent a ut_pex to this peer? 
+ + self.na_candidate_ext_ip = None + + + def get_myip(self, real=False): + return self.connection.get_myip(real) + + def get_myport(self, real=False): + return self.connection.get_myport(real) + + def get_ip(self, real=False): + return self.connection.get_ip(real) + + def get_port(self, real=False): + return self.connection.get_port(real) + + def get_id(self): + return self.connection.get_id() + + def get_readable_id(self): + return self.connection.get_readable_id() + + def close(self): + if DEBUG: + print >>sys.strderr, 'connection closed' + if self.get_ip() == self.connecter.tracker_ip: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: close: live: WAAH closing SOURCE" + + self.connection.close() + self.closed = True + + + def is_closed(self): + return self.closed + + def is_locally_initiated(self): + return self.connection.is_locally_initiated() + + def send_interested(self): + self._send_message(INTERESTED) + + def send_not_interested(self): + self._send_message(NOT_INTERESTED) + + def send_choke(self): + if self.partial_message: + self.send_choke_queued = True + else: + self._send_message(CHOKE) + self.upload.choke_sent() + self.just_unchoked = 0 + + def send_unchoke(self): + if self.send_choke_queued: + self.send_choke_queued = False + if DEBUG_NORMAL_MSGS: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'CHOKE SUPPRESSED' + else: + self._send_message(UNCHOKE) + if (self.partial_message or self.just_unchoked is None + or not self.upload.interested or self.download.active_requests): + self.just_unchoked = 0 + else: + self.just_unchoked = clock() + + def send_request(self, index, begin, length): + self._send_message(REQUEST + tobinary(index) + + tobinary(begin) + tobinary(length)) + if DEBUG_NORMAL_MSGS: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","sending REQUEST to",self.get_ip() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'sent request: '+str(index)+': '+str(begin)+'-'+str(begin+length) + + def send_cancel(self, index, begin, length): + self._send_message(CANCEL + tobinary(index) + + tobinary(begin) + tobinary(length)) + if DEBUG_NORMAL_MSGS: + print >>sys.strderr, 'sent cancel: '+str(index)+': '+str(begin)+'-'+str(begin+length) + + def send_bitfield(self, bitfield): + self._send_message(BITFIELD + bitfield) + + def send_have(self, index): + self._send_message(HAVE + tobinary(index)) + + def send_keepalive(self): + self._send_message('') + + def _send_message(self, s): + s = tobinary(len(s))+s + if self.partial_message: + self.outqueue.append(s) + else: + self.connection.send_message_raw(s) + + def send_partial(self, bytes): + if self.connection.closed: + return 0 + if self.partial_message is None: + s = self.upload.get_upload_chunk() + if s is None: + return 0 + # Merkle: send hashlist along with piece in HASHPIECE message + index, begin, hashlist, piece = s + + if self.use_g2g: + # ----- G2G: record who we send this to + self.g2g_sent_piece_part( self, index, begin, hashlist, piece ) + + # ---- G2G: we are uploading len(piece) data of piece #index + for c in self.connecter.connections.itervalues(): + if not c.use_g2g: + continue + + # include sending to self, because it should not be excluded from the statistics + + c.queue_g2g_piece_xfer( index, begin, piece ) + + if self.connecter.merkle_torrent: + bhashlist = bencode(hashlist) + self.partial_message = ''.join(( + tobinary(1+4+4+4+len(bhashlist)+len(piece)), HASHPIECE, + tobinary(index), tobinary(begin), 
tobinary(len(bhashlist)), bhashlist, piece.tostring() )) + else: + self.partial_message = ''.join(( + tobinary(len(piece) + 9), PIECE, + tobinary(index), tobinary(begin), piece.tostring())) + if DEBUG_NORMAL_MSGS: + print >>sys.strderr, 'sending chunk: '+str(index)+': '+str(begin)+'-'+str(begin+len(piece)) + + if bytes < len(self.partial_message): + self.connection.send_message_raw(self.partial_message[:bytes]) + self.partial_message = self.partial_message[bytes:] + return bytes + + q = [self.partial_message] + self.partial_message = None + if self.send_choke_queued: + self.send_choke_queued = False + self.outqueue.append(tobinary(1)+CHOKE) + self.upload.choke_sent() + self.just_unchoked = 0 + q.extend(self.outqueue) + self.outqueue = [] + q = ''.join(q) + self.connection.send_message_raw(q) + return len(q) + + def get_upload(self): + return self.upload + + def get_download(self): + return self.download + + def set_download(self, download): + self.download = download + + def backlogged(self): + return not self.connection.is_flushed() + + def got_request(self, i, p, l): + self.upload.got_request(i, p, l) + if self.just_unchoked: + self.connecter.ratelimiter.ping(clock() - self.just_unchoked) + self.just_unchoked = 0 + + # + # Extension protocol support + # + def supports_extend_msg(self,msg_name): + if 'm' in self.extend_hs_dict: + return msg_name in self.extend_hs_dict['m'] + else: + return False + + def got_extend_handshake(self,d): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Got EXTEND handshake:",d + if 'm' in d: + if type(d['m']) != DictType: + raise ValueError('Key m does not map to a dict') + m = d['m'] + for key,val in m.iteritems(): + if type(val) != IntType: + raise ValueError('Message ID in m-dict not int') + + if not 'm' in self.extend_hs_dict: + self.extend_hs_dict['m'] = {} + # Note: we store the dict without converting the msg IDs to bytes. 
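# --- Illustrative sketch; not part of the original patch. ---------------------
# Shape of the 'm' dict that got_extend_handshake() above accepts: extension
# names mapped to the (integer) message id the peer wants us to use for them.
# Only 'Tr_OVERLAYSWARM' is named in this file; the 'ut_pex' entry and both id
# values below are examples.
def check_extend_m_dict_sketch(d):
    """Raise ValueError in the same places the handler above does."""
    m = d.get('m', {})
    if not isinstance(m, dict):
        raise ValueError('Key m does not map to a dict')
    for val in m.values():
        if not isinstance(val, (int, long)):
            raise ValueError('Message ID in m-dict not int')
    return m

assert check_extend_m_dict_sketch({'m': {'Tr_OVERLAYSWARM': 253, 'ut_pex': 1}}) \
       == {'Tr_OVERLAYSWARM': 253, 'ut_pex': 1}
# ------------------------------------------------------------------------------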
+ self.extend_hs_dict['m'].update(d['m']) + if self.connecter.overlay_enabled and EXTEND_MSG_OVERLAYSWARM in self.extend_hs_dict['m']: + # This peer understands our overlay swarm extension + if self.connection.locally_initiated: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Peer supports Tr_OVERLAYSWARM, attempt connection" + self.connect_overlay() + if self.connecter.use_g2g and (EXTEND_MSG_G2G_V1 in self.extend_hs_dict['m'] or EXTEND_MSG_G2G_V2 in self.extend_hs_dict['m']): + # Both us and the peer want to use G2G + if self.connection.locally_initiated: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Peer supports Tr_G2G" + + self.use_g2g = True + if EXTEND_MSG_G2G_V2 in self.extend_hs_dict['m']: + self.g2g_version = EXTEND_MSG_G2G_V2 + else: + self.g2g_version = EXTEND_MSG_G2G_V1 + + # LIVEHACK + if KICK_OLD_CLIENTS: + peerhaslivekey = False + for key in self.extend_hs_dict['m']: + if key.startswith(EXTEND_MSG_LIVE_PREFIX): + peerhaslivekey = True + livever = int(key[len(EXTEND_MSG_LIVE_PREFIX):]) + if livever < CURRENT_LIVE_VERSION: + raise ValueError("Too old LIVE VERSION "+livever) + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Connecter: live: Keeping connection to up-to-date peer v",livever,self.get_ip() + + if not peerhaslivekey: + if self.get_ip() == self.connecter.tracker_ip: + # Keep connection to tracker / source + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Connecter: live: Keeping connection to SOURCE",self.connecter.tracker_ip + else: + raise ValueError("Kicking old LIVE peer "+self.get_ip()) + + # 'p' is peer's listen port, 'v' is peer's version, all optional + # 'e' is used by uTorrent to show it prefers encryption (whatever that means) + # See http://www.bittorrent.org/beps/bep_0010.html + for key in ['p','e', 'yourip','ipv4','ipv6','reqq']: + if key in d: + self.extend_hs_dict[key] = d[key] + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: got_extend_hs: keys",d.keys() + + # If he tells us our IP, record this and see if we get a majority vote on it + if 'yourip' in d: + try: + yourip = decompact_ip(d['yourip']) + from Tribler.Core.NATFirewall.DialbackMsgHandler import DialbackMsgHandler + dmh = DialbackMsgHandler.getInstance() + dmh.network_btengine_extend_yourip(yourip) + + if 'same_nat_try_internal' in self.connecter.config and self.connecter.config['same_nat_try_internal']: + if 'ipv4' in d: + self.na_check_for_same_nat(yourip) + except: + print_exc() + + + def his_extend_msg_name_to_id(self,ext_name): + """ returns the message id (byte) for the given message name or None """ + val = self.extend_hs_dict['m'].get(ext_name) + if val is None: + return val + else: + return chr(val) + + def get_extend_encryption(self): + return self.extend_hs_dict.get('e',0) + + def get_extend_listenport(self): + return self.extend_hs_dict.get('p') + + def send_extend_handshake(self): + + # NETWORK AWARE + hisip = self.connection.get_ip(real=True) + ipv4 = None + if self.connecter.config.get('same_nat_try_internal',0): + [client,version] = decodePeerID(self.connection.id) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: send_extend_hs: Peer is client",client + if client == TRIBLER_PEERID_LETTER: + # If we're connecting to a Tribler peer, show our internal IP address + # as 'ipv4'. 
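# --- Illustrative sketch; not part of the original patch. ---------------------
# The 'yourip'/'ipv4' values exchanged in send_extend_handshake() just below are
# IPv4 addresses in 4-byte network order ("compact" form). compact_ip and
# decompact_ip imported from track.py are assumed to implement this packing.
import socket

def compact_ip_sketch(ip):
    return socket.inet_aton(ip)        # '10.0.0.5' -> '\x0a\x00\x00\x05'

def decompact_ip_sketch(data):
    return socket.inet_ntoa(data)      # inverse of the above

assert decompact_ip_sketch(compact_ip_sketch('10.0.0.5')) == '10.0.0.5'
# ------------------------------------------------------------------------------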
+ ipv4 = self.get_ip(real=True) + + # See: http://www.bittorrent.org/beps/bep_0010.html + d = {} + d['m'] = self.connecter.EXTEND_HANDSHAKE_M_DICT + d['p'] = self.connecter.mylistenport + ver = version_short.replace('-',' ',1) + d['v'] = ver + d['e'] = 0 # Apparently this means we don't like uTorrent encryption + d['yourip'] = compact_ip(hisip) + if ipv4 is not None: + # Only send IPv4 when necessary, we prefer this peer to use this addr. + d['ipv4'] = compact_ip(ipv4) + + self._send_message(EXTEND + EXTEND_MSG_HANDSHAKE_ID + bencode(d)) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'connecter: sent extend: id=0+',d,"yourip",hisip,"ipv4",ipv4 + + # + # ut_pex support + # + def got_ut_pex(self,d): + if DEBUG_UT_PEX: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Got uTorrent PEX:",d + (added_peers,dropped_peers) = check_ut_pex(d) + + # DoS protection: we're accepting IP addresses from + # an untrusted source, so be a bit careful + mx = self.connecter.ut_pex_max_addrs_from_peer + if DEBUG_UT_PEX: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Got",len(added_peers),"peers via uTorrent PEX, using max",mx + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Got",added_peers + # Take random sample of mx peers + shuffle(added_peers) + sample_added_peers_with_id = [] + + # Put the sample in the format desired by Encoder.start_connections() + for dns in added_peers[:mx]: + peer_with_id = (dns, 0) + sample_added_peers_with_id.append(peer_with_id) + if len(sample_added_peers_with_id) > 0: + if DEBUG_UT_PEX: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Starting ut_pex conns to",len(sample_added_peers_with_id) + self.connection.Encoder.start_connections(sample_added_peers_with_id) + + def send_extend_ut_pex(self,payload): + msg = EXTEND+self.his_extend_msg_name_to_id(EXTEND_MSG_UTORRENT_PEX)+payload + self._send_message(msg) + + def first_ut_pex(self): + if self.ut_pex_first_flag: + self.ut_pex_first_flag = False + return True + else: + return False + + + # + # Give-2-Get + # + def g2g_sent_piece_part( self, c, index, begin, hashlist, piece ): + """ Keeps a record of the fact that we sent piece index[begin:begin+chunk]. """ + + wegaveperc = float(len(piece))/float(self.connecter.piece_size) + if index in self.perc_sent: + self.perc_sent[index] = self.perc_sent[index] + wegaveperc + else: + self.perc_sent[index] = wegaveperc + + + def queue_g2g_piece_xfer(self,index,begin,piece): + """ Queue the fact that we sent piece index[begin:begin+chunk] for + tranmission to peers + """ + if self.g2g_version == EXTEND_MSG_G2G_V1: + self.send_g2g_piece_xfer_v1(index,begin,piece) + return + + perc = float(len(piece))/float(self.connecter.piece_size) + if index in self.last_perc_sent: + self.last_perc_sent[index] = self.last_perc_sent[index] + perc + else: + self.last_perc_sent[index] = perc + + def dequeue_g2g_piece_xfer(self): + """ Send queued information about pieces we sent to peers. Called + periodically. 
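# --- Illustrative sketch; not part of the original patch. ---------------------
# The DoS guard in got_ut_pex() above: take at most ut_pex_max_addrs_from_peer
# addresses from the untrusted PEX message, as a random sample, and hand them to
# Encoder.start_connections() as (dns, id) pairs with the placeholder id 0.
from random import shuffle

def sample_pex_peers_sketch(added_peers, max_addrs):
    peers = list(added_peers)
    shuffle(peers)
    return [(dns, 0) for dns in peers[:max_addrs]]

sample = sample_pex_peers_sketch([('1.2.3.4', 6881), ('5.6.7.8', 51413)], 16)
assert len(sample) <= 16 and sample[0][1] == 0
# ------------------------------------------------------------------------------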
+ """ + psf = float(self.connecter.piece_size) + ppdict = {} + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: g2g dq: orig",self.last_perc_sent + + for index,perc in self.last_perc_sent.iteritems(): + # due to rerequests due to slow pieces the sum can be above 1.0 + capperc = min(1.0,perc) + percb = chr(int((100.0 * capperc))) + # bencode can't deal with int keys + ppdict[str(index)] = percb + self.last_perc_sent = {} + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: g2g dq: dest",ppdict + + if len(ppdict) > 0: + self.send_g2g_piece_xfer_v2(ppdict) + + def send_g2g_piece_xfer_v1(self,index,begin,piece): + """ Send fact that we sent piece index[begin:begin+chunk] to a peer + to all peers (G2G V1). + """ + self._send_message(self.his_extend_msg_name_to_id(EXTEND_MSG_G2G_V1) + tobinary(index) + tobinary(begin) + tobinary(len(piece))) + + def send_g2g_piece_xfer_v2(self,ppdict): + """ Send list of facts that we sent pieces to all peers (G2G V2). """ + blist = bencode(ppdict) + self._send_message(EXTEND + self.his_extend_msg_name_to_id(EXTEND_MSG_G2G_V2) + blist) + + def got_g2g_piece_xfer_v1(self,index,begin,length): + """ Got a G2G_PIECE_XFER message in V1 format. """ + hegaveperc = float(length)/float(self.connecter.piece_size) + self.g2g_peer_forwarded_piece_part(index,hegaveperc) + + def got_g2g_piece_xfer_v2(self,ppdict): + """ Got a G2G_PIECE_XFER message in V2 format. """ + for indexstr,hegavepercb in ppdict.iteritems(): + index = int(indexstr) + hegaveperc = float(ord(hegavepercb))/100.0 + self.g2g_peer_forwarded_piece_part(index,hegaveperc) + + def g2g_peer_forwarded_piece_part(self,index,hegaveperc): + """ Processes this peer forwarding piece i[begin:end] to a grandchild. """ + # Reward for forwarding data in general + length = ceil(hegaveperc * float(self.connecter.piece_size)) + self.forward_speeds[1].update_rate(length) + + if index not in self.perc_sent: + # piece came from disk + return + + # Extra reward if its data we sent + wegaveperc = self.perc_sent[index] + overlapperc = wegaveperc * hegaveperc + overlap = ceil(overlapperc * float(self.connecter.piece_size)) + if overlap > 0: + self.forward_speeds[0].update_rate( overlap ) + + def g2g_score( self ): + return [x.get_rate() for x in self.forward_speeds] + + + # + # SecureOverlay support + # + def connect_overlay(self): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Initiating overlay connection" + if not self.initiated_overlay: + self.initiated_overlay = True + so = SecureOverlay.getInstance() + so.connect_dns(self.connection.dns,self.network_connect_dns_callback) + + def network_connect_dns_callback(self,exc,dns,permid,selversion): + # WARNING: WILL BE CALLED BY NetworkThread + if exc is not None: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: peer",dns,"said he supported overlay swarm, but we can't connect to him",exc + + + # + # NETWORK AWARE + # + def na_check_for_same_nat(self,yourip): + """ See if peer is local, e.g. behind same NAT, same AS or something. + If so, try to optimize: + - Same NAT -> reconnect to use internal network + """ + hisip = self.connection.get_ip(real=True) + if hisip == yourip: + # Do we share the same NAT? 
+ myextip = self.connecter.get_extip_func(unknowniflocal=True) + myintip = self.get_ip(real=True) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: na_check_for_same_nat: his",hisip,"myext",myextip,"myint",myintip + + if hisip != myintip or hisip == '127.0.0.1': # to allow testing + # He can't fake his source addr, so we're not running on the + # same machine, + + # He may be quicker to determine we should have a local + # conn, so prepare for his connection in advance. + # + if myextip is None: + # I don't known my external IP and he's not on the same + # machine as me. yourip could be our real external IP, test. + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: na_check_same_nat: Don't know my ext ip, try to loopback to",yourip,"to see if that's me" + self.na_start_loopback_connection(yourip) + elif hisip == myextip: + # Same NAT. He can't fake his source addr. + # Attempt local network connection + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: na_check_same_nat: Yes, trying to connect via internal" + self.na_start_internal_connection() + else: + # hisip != myextip + # He claims we share the same IP, but I think my ext IP + # is something different. Either he is lying or I'm + # mistaken, test + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: na_check_same_nat: Maybe, me thinks not, try to loopback to",yourip + self.na_start_loopback_connection(yourip) + + + def na_start_loopback_connection(self,yourip): + """ Peer claims my external IP is "yourip". Try to connect back to myself """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: na_start_loopback: Checking if my ext ip is",yourip + self.na_candidate_ext_ip = yourip + + dns = (yourip,self.connecter.mylistenport) + self.connection.Encoder.start_connection(dns,0,forcenew=True) + + def na_got_loopback(self,econnection): + """ Got a connection with my peer ID. Check that this is indeed me looping + back to myself. No man-in-the-middle attacks protection. This is complex + if we're also connecting to ourselves because of a stale tracker + registration. Window of opportunity is small. + """ + himismeip = econnection.get_ip(real=True) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: conn: na_got_loopback:",himismeip,self.na_candidate_ext_ip + if self.na_candidate_ext_ip == himismeip: + self.na_start_internal_connection() + + + def na_start_internal_connection(self): + """ Reconnect to peer using internal network """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: na_start_internal_connection" + + # Doesn't really matter who initiates. Letting other side do it makes + # testing easier. 
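# --- Illustrative sketch; not part of the original patch. ---------------------
# The decision made by na_check_for_same_nat()/na_start_*() above, reduced to a
# pure function. hisip is the peer's real source address, yourip is what the peer
# claims our external address is, myextip/myintip are our own view of our
# external and internal IP; the return values name the actions taken above.
def same_nat_action_sketch(hisip, yourip, myextip, myintip):
    if hisip != yourip:
        return 'nothing'            # peer does not claim to share our address
    if hisip == myintip and hisip != '127.0.0.1':
        return 'nothing'            # same machine, nothing to optimize
    if myextip is None:
        return 'loopback_test'      # unknown external IP: verify the claim first
    if hisip == myextip:
        return 'connect_internal'   # same NAT: reconnect over the internal network
    return 'loopback_test'          # conflicting views: verify before acting

assert same_nat_action_sketch('130.37.0.1', '130.37.0.1',
                              '130.37.0.1', '10.0.0.2') == 'connect_internal'
# ------------------------------------------------------------------------------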
+ if not self.is_locally_initiated(): + + hisip = decompact_ip(self.extend_hs_dict['ipv4']) + hisport = self.extend_hs_dict['p'] + + # For testing, see Tribler/Test/test_na_extend_hs.py + if hisip == '224.4.8.1' and hisport == 4810: + hisip = '127.0.0.1' + hisport = 4811 + + self.connection.na_want_internal_conn_from = hisip + + hisdns = (hisip,hisport) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: na_start_internal_connection to",hisdns + self.connection.Encoder.start_connection(hisdns,0) + + def na_get_address_distance(self): + return self.connection.na_get_address_distance() + + +class Connecter: +# 2fastbt_ + def __init__(self, make_upload, downloader, choker, numpieces, piece_size, + totalup, config, ratelimiter, merkle_torrent, sched = None, + coordinator = None, helper = None, get_extip_func = lambda: None, mylistenport = None, use_g2g = False, infohash=None, tracker=None): + self.downloader = downloader + self.make_upload = make_upload + self.choker = choker + self.numpieces = numpieces + self.piece_size = piece_size + self.config = config + self.ratelimiter = ratelimiter + self.rate_capped = False + self.sched = sched + self.totalup = totalup + self.rate_capped = False + self.connections = {} + self.external_connection_made = 0 + self.merkle_torrent = merkle_torrent + self.use_g2g = use_g2g + # 2fastbt_ + self.coordinator = coordinator + self.helper = helper + self.round = 0 + self.get_extip_func = get_extip_func + self.mylistenport = mylistenport + self.infohash = infohash + self.tracker = tracker + try: + (scheme, netloc, path, pars, query, _fragment) = urlparse.urlparse(self.tracker) + host = netloc.split(':')[0] + self.tracker_ip = socket.getaddrinfo(host,None)[0][4][0] + except: + print_exc() + self.tracker_ip = None + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Connecter: live: source/tracker is",self.tracker_ip + + self.overlay_enabled = 0 + if self.config['overlay']: + self.overlay_enabled = True + + if DEBUG: + if self.overlay_enabled: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Enabling overlay" + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Disabling overlay" + + if DEBUG: + if self.overlay_enabled: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Enabling overlay" + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Disabling overlay" + + self.ut_pex_enabled = 0 + if 'ut_pex_max_addrs_from_peer' in self.config: + self.ut_pex_max_addrs_from_peer = self.config['ut_pex_max_addrs_from_peer'] + self.ut_pex_enabled = self.ut_pex_max_addrs_from_peer > 0 + self.ut_pex_previous_conns = [] # last value of 'added' field for all peers + + if DEBUG_UT_PEX: + if self.ut_pex_enabled: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Enabling uTorrent PEX",self.ut_pex_max_addrs_from_peer + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Disabling uTorrent PEX" + + # The set of messages we support. Note that the msg ID is an int not a byte in + # this dict. + self.EXTEND_HANDSHAKE_M_DICT = {} + + if self.overlay_enabled: + # Say in the EXTEND handshake we support the overlay-swarm ext. + d = {EXTEND_MSG_OVERLAYSWARM:ord(CHALLENGE)} + self.EXTEND_HANDSHAKE_M_DICT.update(d) + if self.ut_pex_enabled: + # Say in the EXTEND handshake we support uTorrent's peer exchange ext. 
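# --- Illustrative sketch; not part of the original patch. ---------------------
# How Connecter.__init__ above derives tracker_ip from the announce URL (later
# used to recognise the live source/tracker connection and never kick it).
import socket
import urlparse

def resolve_tracker_ip_sketch(tracker_url):
    try:
        netloc = urlparse.urlparse(tracker_url)[1]   # e.g. 'tracker.example.org:6969'
        host = netloc.split(':')[0]
        return socket.getaddrinfo(host, None)[0][4][0]
    except Exception:
        return None                                  # mirrors the bare except above

# e.g. resolve_tracker_ip_sketch('http://tracker.example.org:6969/announce')
# ------------------------------------------------------------------------------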
+ d = {EXTEND_MSG_UTORRENT_PEX:ord(EXTEND_MSG_UTORRENT_PEX_ID)} + self.EXTEND_HANDSHAKE_M_DICT.update(d) + self.sched(self.ut_pex_callback,6) + if self.use_g2g: + # Say in the EXTEND handshake we want to do G2G. + d = {EXTEND_MSG_G2G_V2:ord(G2G_PIECE_XFER)} + self.EXTEND_HANDSHAKE_M_DICT.update(d) + self.sched(self.g2g_callback,G2G_CALLBACK_INTERVAL) + + # LIVEHACK + livekey = EXTEND_MSG_LIVE_PREFIX+str(CURRENT_LIVE_VERSION) + d = {livekey:ord(LIVE_FAKE_MESSAGE_ID)} + self.EXTEND_HANDSHAKE_M_DICT.update(d) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Connecter: EXTEND: my dict",self.EXTEND_HANDSHAKE_M_DICT + + # BarterCast + if config['overlay']: + self.overlay_bridge = OverlayThreadingBridge.getInstance() + else: + self.overlay_bridge = None + + def how_many_connections(self): + return len(self.connections) + + def connection_made(self, connection): + c = Connection(connection, self) + self.connections[connection] = c + + if connection.supports_extend_messages(): + # The peer either supports our overlay-swarm extension or + # the utorrent extended protocol. + + [client,version] = decodePeerID(connection.id) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Peer is client",client,"version",version,c.get_ip() + + if self.overlay_enabled and client == TRIBLER_PEERID_LETTER and version <= '3.5.0' and connection.locally_initiated: + # Old Tribler, establish overlay connection< + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Peer is previous Tribler version, attempt overlay connection" + c.connect_overlay() + elif self.ut_pex_enabled: + # EXTEND handshake must be sent just after BT handshake, + # before BITFIELD even + c.send_extend_handshake() + + #TODO: overlay swarm also needs upload and download to control transferring rate + c.upload = self.make_upload(c, self.ratelimiter, self.totalup) + c.download = self.downloader.make_download(c) + self.choker.connection_made(c) + return c + + def connection_lost(self, connection): + c = self.connections[connection] + + ###################################### + # BarterCast + if self.overlay_bridge is not None: + ip = c.get_ip(False) + port = c.get_port(False) + down_kb = int(c.total_downloaded / 1024) + up_kb = int(c.total_uploaded / 1024) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bartercast: attempting database update, adding olthread" + + olthread_bartercast_conn_lost_lambda = lambda:olthread_bartercast_conn_lost(ip,port,down_kb,up_kb) + self.overlay_bridge.add_task(olthread_bartercast_conn_lost_lambda,0) + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bartercast: no overlay bridge found" + + ######################### + + if DEBUG: + if c.get_ip() == self.tracker_ip: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: connection_lost: live: WAAH2 closing SOURCE" + + del self.connections[connection] + if c.download: + c.download.disconnected() + self.choker.connection_lost(c) + + def connection_flushed(self, connection): + conn = self.connections[connection] + if conn.next_upload is None and (conn.partial_message is not None + or conn.upload.buffer): + self.ratelimiter.queue(conn) + + def got_piece(self, i): + for co in self.connections.values(): + co.send_have(i) + + def our_extend_msg_id_to_name(self,ext_id): + """ find the name for the given message id (byte) """ + for key,val in 
self.EXTEND_HANDSHAKE_M_DICT.iteritems(): + if val == ord(ext_id): + return key + return None + + def get_ut_pex_conns(self): + conns = [] + for conn in self.connections.values(): + if conn.get_extend_listenport() is not None: + conns.append(conn) + return conns + + def get_ut_pex_previous_conns(self): + return self.ut_pex_previous_conns + + def set_ut_pex_previous_conns(self,conns): + self.ut_pex_previous_conns = conns + + def ut_pex_callback(self): + """ Periocially send info about the peers you know to the other peers """ + if DEBUG_UT_PEX: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Periodic ut_pex update" + + currconns = self.get_ut_pex_conns() + (addedconns,droppedconns) = ut_pex_get_conns_diff(currconns,self.get_ut_pex_previous_conns()) + self.set_ut_pex_previous_conns(currconns) + if DEBUG_UT_PEX: + for conn in addedconns: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: ut_pex: Added",conn.get_ip(),conn.get_extend_listenport() + for conn in droppedconns: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: ut_pex: Dropped",conn.get_ip(),conn.get_extend_listenport() + + for c in currconns: + if c.supports_extend_msg(EXTEND_MSG_UTORRENT_PEX): + try: + if DEBUG_UT_PEX: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: ut_pex: Creating msg for",c.get_ip(),c.get_extend_listenport() + if c.first_ut_pex(): + aconns = currconns + dconns = [] + else: + aconns = addedconns + dconns = droppedconns + payload = create_ut_pex(aconns,dconns,c) + c.send_extend_ut_pex(payload) + except: + print_exc() + self.sched(self.ut_pex_callback,60) + + def g2g_callback(self): + try: + self.sched(self.g2g_callback,G2G_CALLBACK_INTERVAL) + for c in self.connections.itervalues(): + if not c.use_g2g: + continue + + c.dequeue_g2g_piece_xfer() + except: + print_exc() + + + # NETWORK AWARE + def na_got_loopback(self,econnection): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: na_got_loopback: Got connection from",econnection.get_ip(),econnection.get_port() + for c in self.connections.itervalues(): + ret = c.na_got_loopback(econnection) + if ret is not None: + return ret + return False + + def na_got_internal_connection(self,origconn,newconn): + """ This is called only at the initiator side of the internal conn. + Doesn't matter, only one is enough to close the original connection. 
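# --- Illustrative sketch; not part of the original patch. ---------------------
# ut_pex_callback() above periodically compares the current ut_pex-capable
# connections with the previous snapshot; the added/dropped sets drive the PEX
# payload. ut_pex_get_conns_diff lives in Tribler.Core.DecentralizedTracking.ut_pex
# and is not shown in this hunk; conceptually it amounts to a set difference:
def conns_diff_sketch(currconns, prevconns):
    curr, prev = set(currconns), set(prevconns)
    return list(curr - prev), list(prev - curr)    # (added, dropped)

# e.g. conns_diff_sketch(['peerA', 'peerB'], ['peerB', 'peerC'])
#      -> (['peerA'], ['peerC'])
# ------------------------------------------------------------------------------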
+ """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: na_got_internal: From",newconn.get_ip(),newconn.get_port() + + origconn.close() + + def got_message(self, connection, message): + # connection: Encrypter.Connection; c: Connecter.Connection + c = self.connections[connection] + t = message[0] + # EXTEND handshake will be sent just after BT handshake, + # before BITFIELD even + + if DEBUG_MESSAGE_HANDLING: + st = time.time() + + if False: #connection.get_ip().startswith("192"): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Got",getMessageName(t),connection.get_ip() + + if DEBUG_NORMAL_MSGS: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Got",getMessageName(t),connection.get_ip() + + if t == EXTEND: + self.got_extend_message(connection,c,message,self.ut_pex_enabled) + return + if t == BITFIELD and c.got_anything: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on BITFIELD" + connection.close() + return + c.got_anything = True + if (t in [CHOKE, UNCHOKE, INTERESTED, NOT_INTERESTED] and + len(message) != 1): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad (UN)CHOKE/(NOT_)INTERESTED",t + connection.close() + return + if t == CHOKE: + if DEBUG_NORMAL_MSGS: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Got CHOKE from",connection.get_ip() + c.download.got_choke() + elif t == UNCHOKE: + if DEBUG_NORMAL_MSGS: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Got UNCHOKE from",connection.get_ip() + c.download.got_unchoke() + elif t == INTERESTED: + if DEBUG_NORMAL_MSGS: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Got INTERESTED from",connection.get_ip() + if c.upload is not None: + c.upload.got_interested() + elif t == NOT_INTERESTED: + c.upload.got_not_interested() + elif t == HAVE: + if len(message) != 5: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad HAVE: msg len" + connection.close() + return + i = toint(message[1:]) + if i >= self.numpieces: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad HAVE: index out of range" + connection.close() + return + if DEBUG_NORMAL_MSGS: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Got HAVE(",i,") from",connection.get_ip() + c.download.got_have(i) + elif t == BITFIELD: + if DEBUG_NORMAL_MSGS: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Got BITFIELD from",connection.get_ip() + try: + b = Bitfield(self.numpieces, message[1:]) + except ValueError: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad BITFIELD" + connection.close() + return + if c.download is not None: + c.download.got_have_bitfield(b) + elif t == REQUEST: + if len(message) != 13: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad REQUEST: msg len" + connection.close() + return + i = toint(message[1:5]) + if i >= self.numpieces: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad REQUEST: index out of range" + connection.close() + return + if DEBUG_NORMAL_MSGS: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Got REQUEST(",i,") from",connection.get_ip() + c.got_request(i, 
toint(message[5:9]), toint(message[9:])) + elif t == CANCEL: + if len(message) != 13: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad CANCEL: msg len" + connection.close() + return + i = toint(message[1:5]) + if i >= self.numpieces: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad CANCEL: index out of range" + connection.close() + return + c.upload.got_cancel(i, toint(message[5:9]), + toint(message[9:])) + elif t == PIECE: + if len(message) <= 9: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad PIECE: msg len" + connection.close() + return + i = toint(message[1:5]) + if i >= self.numpieces: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad PIECE: msg len" + connection.close() + return + if DEBUG_NORMAL_MSGS: # or connection.get_ip().startswith("192"): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Got PIECE(",i,") from",connection.get_ip() + #if connection.get_ip().startswith("192"): + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","@", + try: + if c.download.got_piece(i, toint(message[5:9]), [], message[9:]): + self.got_piece(i) + except Exception,e: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad PIECE: exception",str(e) + print_exc() + connection.close() + return + + elif t == HASHPIECE: + # Merkle: Handle pieces with hashes + try: + if len(message) <= 13: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad HASHPIECE: msg len" + connection.close() + return + i = toint(message[1:5]) + if i >= self.numpieces: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad HASHPIECE: index out of range" + connection.close() + return + begin = toint(message[5:9]) + len_hashlist = toint(message[9:13]) + bhashlist = message[13:13+len_hashlist] + hashlist = bdecode(bhashlist) + if not isinstance(hashlist, list): + raise AssertionError, "hashlist not list" + for oh in hashlist: + if not isinstance(oh,list) or \ + not (len(oh) == 2) or \ + not isinstance(oh[0],int) or \ + not isinstance(oh[1],str) or \ + not ((len(oh[1])==20)): \ + raise AssertionError, "hashlist entry invalid" + piece = message[13+len_hashlist:] + + if DEBUG_NORMAL_MSGS: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Got HASHPIECE",i,begin + + if c.download.got_piece(i, begin, hashlist, piece): + self.got_piece(i) + except Exception,e: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad HASHPIECE: exception",str(e) + print_exc() + connection.close() + return + elif t == G2G_PIECE_XFER: + # EXTEND_MSG_G2G_V1 only, V2 is proper EXTEND msg + if len(message) <= 12: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad G2G_PIECE_XFER: msg len" + connection.close() + return + if not c.use_g2g: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on receiving G2G_PIECE_XFER over non-g2g connection" + connection.close() + return + + index = toint(message[1:5]) + begin = toint(message[5:9]) + length = toint(message[9:13]) + c.got_g2g_piece_xfer_v1(index,begin,length) + else: + connection.close() + + if DEBUG_MESSAGE_HANDLING: + et = time.time() + diff = et - st + if diff > 0.1: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", 
localtime())," ","connecter: $$$$$$$$$$$$",getMessageName(t),"took",diff + + + def got_extend_message(self,connection,c,message,ut_pex_enabled): + # connection: Encrypter.Connection; c: Connecter.Connection + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Got EXTEND message, len",len(message) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: his handshake",c.extend_hs_dict,c.get_ip() + + try: + if len(message) < 4: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad EXTEND: msg len" + connection.close() + return + ext_id = message[1] + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Got EXTEND message, id",ord(ext_id) + if ext_id == EXTEND_MSG_HANDSHAKE_ID: + # Message is Handshake + d = bdecode(message[2:]) + if type(d) == DictType: + c.got_extend_handshake(d) + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad EXTEND: payload of handshake is not a bencoded dict" + connection.close() + return + else: + # Message is regular message e.g ut_pex + ext_msg_name = self.our_extend_msg_id_to_name(ext_id) + if ext_msg_name is None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad EXTEND: peer sent ID we didn't define in handshake" + connection.close() + return + elif ext_msg_name == EXTEND_MSG_OVERLAYSWARM: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Not closing EXTEND+CHALLENGE: peer didn't read our spec right, be liberal" + elif ext_msg_name == EXTEND_MSG_UTORRENT_PEX and ut_pex_enabled: + d = bdecode(message[2:]) + if type(d) == DictType: + c.got_ut_pex(d) + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad EXTEND: payload of ut_pex is not a bencoded dict" + connection.close() + return + elif ext_msg_name == EXTEND_MSG_G2G_V2 and self.use_g2g: + ppdict = bdecode(message[2:]) + if type(ppdict) != DictType: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad EXTEND+G2G: payload not dict" + connection.close() + return + for k,v in ppdict.iteritems(): + if type(k) != StringType or type(v) != StringType: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad EXTEND+G2G: key,value not of type int,char" + connection.close() + return + try: + int(k) + except: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad EXTEND+G2G: key not int" + connection.close() + return + if ord(v) > 100: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad EXTEND+G2G: value too big",ppdict,v,ord(v) + connection.close() + return + + c.got_g2g_piece_xfer_v2(ppdict) + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad EXTEND: peer sent ID that maps to name we don't support",ext_msg_name,`ext_id`,ord(ext_id) + connection.close() + return + return + except Exception,e: + if not DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad EXTEND: exception:",str(e),`message[2:]` + print_exc() + connection.close() + return + + +def olthread_bartercast_conn_lost(ip,port,down_kb,up_kb): + """ Called by OverlayThread to store information about the peer to + whom the connection was just closed in the (slow) databases. 
""" + + peerdb = PeerDBHandler.getInstance() + bartercastdb = BarterCastDBHandler.getInstance() + + if bartercastdb: + + permid = peerdb.getPermIDByIP(ip) + my_permid = bartercastdb.my_permid + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bartercast: (Connecter): Up %d down %d peer %s:%s (PermID = %s)" % (up_kb, down_kb, ip, port, `permid`) + + # Save exchanged KBs in BarterCastDB + changed = False + if permid is not None: + #name = bartercastdb.getName(permid) + + if down_kb > 0: + new_value = bartercastdb.incrementItem((my_permid, permid), 'downloaded', down_kb, commit=False) + changed = True + + if up_kb > 0: + new_value = bartercastdb.incrementItem((my_permid, permid), 'uploaded', up_kb, commit=False) + changed = True + + # For the record: save KBs exchanged with non-tribler peers + else: + if down_kb > 0: + new_value = bartercastdb.incrementItem((my_permid, 'non-tribler'), 'downloaded', down_kb, commit=False) + changed = True + + if up_kb > 0: + new_value = bartercastdb.incrementItem((my_permid, 'non-tribler'), 'uploaded', up_kb, commit=False) + changed = True + + if changed: + bartercastdb.commit() + + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "BARTERCAST: No bartercastdb instance" diff --git a/tribler-mod/Tribler/Core/BitTornado/BT1/Connecter.py.bak b/tribler-mod/Tribler/Core/BitTornado/BT1/Connecter.py.bak new file mode 100644 index 0000000..40357dc --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/BT1/Connecter.py.bak @@ -0,0 +1,1251 @@ +from time import localtime, strftime + +# Written by Bram Cohen, Pawel Garbacki and Arno Bakker +# see LICENSE.txt for license information + +import time +import sys +from types import DictType,IntType,StringType +from random import shuffle +from traceback import print_exc +from math import ceil +import socket +import urlparse + +from Tribler.Core.BitTornado.bitfield import Bitfield +from Tribler.Core.BitTornado.clock import clock +from Tribler.Core.BitTornado.bencode import bencode,bdecode +from Tribler.Core.BitTornado.__init__ import version_short,decodePeerID,TRIBLER_PEERID_LETTER +from Tribler.Core.BitTornado.BT1.convert import tobinary,toint +from Tribler.Core.Overlay.OverlayThreadingBridge import OverlayThreadingBridge + +from MessageID import * + +from Tribler.Core.CacheDB.CacheDBHandler import PeerDBHandler, BarterCastDBHandler +from Tribler.Core.Overlay.SecureOverlay import SecureOverlay +from Tribler.Core.DecentralizedTracking.ut_pex import * +from Tribler.Core.BitTornado.BT1.track import compact_ip,decompact_ip + +from Tribler.Core.BitTornado.CurrentRateMeasure import Measure + +try: + True +except: + True = 1 + False = 0 + +KICK_OLD_CLIENTS=False +DEBUG = True #False +DEBUG_NORMAL_MSGS = False +DEBUG_UT_PEX = False +DEBUG_MESSAGE_HANDLING = False + +UNAUTH_PERMID_PERIOD = 3600 + +""" +Arno: 2007-02-16: +uTorrent and Bram's BitTorrent now support an extension to the protocol, +documented on http://www.rasterbar.com/products/libtorrent/extension_protocol.html + +The problem is that the bit they use in the options field of the BT handshake +is the same as we use to indicate a peer supports the overlay-swarm connection. +The new clients will send an EXTEND message with ID 20 after the handshake to +inform the otherside what new messages it supports. + +As a result, Tribler <= 3.5.0 clients won't be confused, but can't talk to these +new clients either or vice versa. The new client will think we understand the +message, send it. 
But because we don't know that message ID, we will close +the connection. Our attempts to establish a new overlay connection with the new +client will gracefully fail, as the new client will not know of infohash=00000... +and close the connection. + +We solve this conflict by adding support for the EXTEND message. We are now be +able to receive it, and send our own. Our message will contain one method name, +i.e. Tr_OVERLAYSWARM=253. Processing is now as follows: + +* If bit 43 is set and the peerID is from an old Tribler (<=3.5.0) + peer, we initiate an overlay-swarm connection. +* If bit 43 is set and the peer's EXTEND hs message contains method Tr_OVERLAYSWARM, + it's a new Tribler peer, and we initiate an overlay-swarm connection. +* If bit 43 is set, and the EXTEND hs message does not contain Tr_OVERLAYSWARM + it's not a Tribler client and we do not initiate an overlay-swarm + connection. + +N.B. The EXTEND message is poorly designed, it lacks protocol versioning +support which is present in the Azureus Extended Messaging Protocol +and our overlay-swarm protocol. + +""" +EXTEND_MSG_HANDSHAKE_ID = chr(0) +EXTEND_MSG_OVERLAYSWARM = 'Tr_OVERLAYSWARM' +EXTEND_MSG_G2G_V1 = 'Tr_G2G' +EXTEND_MSG_G2G_V2 = 'Tr_G2G_v2' + +CURRENT_LIVE_VERSION=1 +EXTEND_MSG_LIVE_PREFIX = 'Tr_LIVE_v' +LIVE_FAKE_MESSAGE_ID = chr(254) + + + +G2G_CALLBACK_INTERVAL = 4 + +def show(s): + text = [] + for i in xrange(len(s)): + text.append(ord(s[i])) + return text + + +class Connection: + def __init__(self, connection, connecter): + self.connection = connection + self.connecter = connecter + self.got_anything = False + self.next_upload = None + self.outqueue = [] + self.partial_message = None + self.download = None + self.upload = None + self.send_choke_queued = False + self.just_unchoked = None + self.unauth_permid = None + self.looked_for_permid = UNAUTH_PERMID_PERIOD-3 + self.closed = False + self.extend_hs_dict = {} # what extended messages does this peer support + self.initiated_overlay = False + + # G2G + self.use_g2g = False # set to true if both sides use G2G, indicated by self.connector.use_g2g + self.g2g_version = None + self.perc_sent = {} + # batch G2G_XFER information and periodically send it out. + self.last_perc_sent = {} + + config = self.connecter.config + self.forward_speeds = [0] * 2 + self.forward_speeds[0] = Measure(config['max_rate_period'], config['upload_rate_fudge']) + self.forward_speeds[1] = Measure(config['max_rate_period'], config['upload_rate_fudge']) + + # BarterCast counters + self.total_downloaded = 0 + self.total_uploaded = 0 + + self.ut_pex_first_flag = True # first time we sent a ut_pex to this peer? 
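+        # NETWORK AWARE: candidate external IP for ourselves as reported by this peer ('yourip' in its EXTEND handshake); na_start_loopback_connection() records it and na_got_loopback() verifies it by connecting back to ourselves.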
+ + self.na_candidate_ext_ip = None + + + def get_myip(self, real=False): + return self.connection.get_myip(real) + + def get_myport(self, real=False): + return self.connection.get_myport(real) + + def get_ip(self, real=False): + return self.connection.get_ip(real) + + def get_port(self, real=False): + return self.connection.get_port(real) + + def get_id(self): + return self.connection.get_id() + + def get_readable_id(self): + return self.connection.get_readable_id() + + def close(self): + if DEBUG: + print >>sys.strderr, 'connection closed' + if self.get_ip() == self.connecter.tracker_ip: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: close: live: WAAH closing SOURCE" + + self.connection.close() + self.closed = True + + + def is_closed(self): + return self.closed + + def is_locally_initiated(self): + return self.connection.is_locally_initiated() + + def send_interested(self): + self._send_message(INTERESTED) + + def send_not_interested(self): + self._send_message(NOT_INTERESTED) + + def send_choke(self): + if self.partial_message: + self.send_choke_queued = True + else: + self._send_message(CHOKE) + self.upload.choke_sent() + self.just_unchoked = 0 + + def send_unchoke(self): + if self.send_choke_queued: + self.send_choke_queued = False + if DEBUG_NORMAL_MSGS: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'CHOKE SUPPRESSED' + else: + self._send_message(UNCHOKE) + if (self.partial_message or self.just_unchoked is None + or not self.upload.interested or self.download.active_requests): + self.just_unchoked = 0 + else: + self.just_unchoked = clock() + + def send_request(self, index, begin, length): + self._send_message(REQUEST + tobinary(index) + + tobinary(begin) + tobinary(length)) + if DEBUG_NORMAL_MSGS: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","sending REQUEST to",self.get_ip() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'sent request: '+str(index)+': '+str(begin)+'-'+str(begin+length) + + def send_cancel(self, index, begin, length): + self._send_message(CANCEL + tobinary(index) + + tobinary(begin) + tobinary(length)) + if DEBUG_NORMAL_MSGS: + print >>sys.strderr, 'sent cancel: '+str(index)+': '+str(begin)+'-'+str(begin+length) + + def send_bitfield(self, bitfield): + self._send_message(BITFIELD + bitfield) + + def send_have(self, index): + self._send_message(HAVE + tobinary(index)) + + def send_keepalive(self): + self._send_message('') + + def _send_message(self, s): + s = tobinary(len(s))+s + if self.partial_message: + self.outqueue.append(s) + else: + self.connection.send_message_raw(s) + + def send_partial(self, bytes): + if self.connection.closed: + return 0 + if self.partial_message is None: + s = self.upload.get_upload_chunk() + if s is None: + return 0 + # Merkle: send hashlist along with piece in HASHPIECE message + index, begin, hashlist, piece = s + + if self.use_g2g: + # ----- G2G: record who we send this to + self.g2g_sent_piece_part( self, index, begin, hashlist, piece ) + + # ---- G2G: we are uploading len(piece) data of piece #index + for c in self.connecter.connections.itervalues(): + if not c.use_g2g: + continue + + # include sending to self, because it should not be excluded from the statistics + + c.queue_g2g_piece_xfer( index, begin, piece ) + + if self.connecter.merkle_torrent: + bhashlist = bencode(hashlist) + self.partial_message = ''.join(( + tobinary(1+4+4+4+len(bhashlist)+len(piece)), HASHPIECE, + tobinary(index), tobinary(begin), 
tobinary(len(bhashlist)), bhashlist, piece.tostring() )) + else: + self.partial_message = ''.join(( + tobinary(len(piece) + 9), PIECE, + tobinary(index), tobinary(begin), piece.tostring())) + if DEBUG_NORMAL_MSGS: + print >>sys.strderr, 'sending chunk: '+str(index)+': '+str(begin)+'-'+str(begin+len(piece)) + + if bytes < len(self.partial_message): + self.connection.send_message_raw(self.partial_message[:bytes]) + self.partial_message = self.partial_message[bytes:] + return bytes + + q = [self.partial_message] + self.partial_message = None + if self.send_choke_queued: + self.send_choke_queued = False + self.outqueue.append(tobinary(1)+CHOKE) + self.upload.choke_sent() + self.just_unchoked = 0 + q.extend(self.outqueue) + self.outqueue = [] + q = ''.join(q) + self.connection.send_message_raw(q) + return len(q) + + def get_upload(self): + return self.upload + + def get_download(self): + return self.download + + def set_download(self, download): + self.download = download + + def backlogged(self): + return not self.connection.is_flushed() + + def got_request(self, i, p, l): + self.upload.got_request(i, p, l) + if self.just_unchoked: + self.connecter.ratelimiter.ping(clock() - self.just_unchoked) + self.just_unchoked = 0 + + # + # Extension protocol support + # + def supports_extend_msg(self,msg_name): + if 'm' in self.extend_hs_dict: + return msg_name in self.extend_hs_dict['m'] + else: + return False + + def got_extend_handshake(self,d): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Got EXTEND handshake:",d + if 'm' in d: + if type(d['m']) != DictType: + raise ValueError('Key m does not map to a dict') + m = d['m'] + for key,val in m.iteritems(): + if type(val) != IntType: + raise ValueError('Message ID in m-dict not int') + + if not 'm' in self.extend_hs_dict: + self.extend_hs_dict['m'] = {} + # Note: we store the dict without converting the msg IDs to bytes. 
+ self.extend_hs_dict['m'].update(d['m']) + if self.connecter.overlay_enabled and EXTEND_MSG_OVERLAYSWARM in self.extend_hs_dict['m']: + # This peer understands our overlay swarm extension + if self.connection.locally_initiated: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Peer supports Tr_OVERLAYSWARM, attempt connection" + self.connect_overlay() + if self.connecter.use_g2g and (EXTEND_MSG_G2G_V1 in self.extend_hs_dict['m'] or EXTEND_MSG_G2G_V2 in self.extend_hs_dict['m']): + # Both us and the peer want to use G2G + if self.connection.locally_initiated: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Peer supports Tr_G2G" + + self.use_g2g = True + if EXTEND_MSG_G2G_V2 in self.extend_hs_dict['m']: + self.g2g_version = EXTEND_MSG_G2G_V2 + else: + self.g2g_version = EXTEND_MSG_G2G_V1 + + # LIVEHACK + if KICK_OLD_CLIENTS: + peerhaslivekey = False + for key in self.extend_hs_dict['m']: + if key.startswith(EXTEND_MSG_LIVE_PREFIX): + peerhaslivekey = True + livever = int(key[len(EXTEND_MSG_LIVE_PREFIX):]) + if livever < CURRENT_LIVE_VERSION: + raise ValueError("Too old LIVE VERSION "+livever) + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Connecter: live: Keeping connection to up-to-date peer v",livever,self.get_ip() + + if not peerhaslivekey: + if self.get_ip() == self.connecter.tracker_ip: + # Keep connection to tracker / source + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Connecter: live: Keeping connection to SOURCE",self.connecter.tracker_ip + else: + raise ValueError("Kicking old LIVE peer "+self.get_ip()) + + # 'p' is peer's listen port, 'v' is peer's version, all optional + # 'e' is used by uTorrent to show it prefers encryption (whatever that means) + # See http://www.bittorrent.org/beps/bep_0010.html + for key in ['p','e', 'yourip','ipv4','ipv6','reqq']: + if key in d: + self.extend_hs_dict[key] = d[key] + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: got_extend_hs: keys",d.keys() + + # If he tells us our IP, record this and see if we get a majority vote on it + if 'yourip' in d: + try: + yourip = decompact_ip(d['yourip']) + from Tribler.Core.NATFirewall.DialbackMsgHandler import DialbackMsgHandler + dmh = DialbackMsgHandler.getInstance() + dmh.network_btengine_extend_yourip(yourip) + + if 'same_nat_try_internal' in self.connecter.config and self.connecter.config['same_nat_try_internal']: + if 'ipv4' in d: + self.na_check_for_same_nat(yourip) + except: + print_exc() + + + def his_extend_msg_name_to_id(self,ext_name): + """ returns the message id (byte) for the given message name or None """ + val = self.extend_hs_dict['m'].get(ext_name) + if val is None: + return val + else: + return chr(val) + + def get_extend_encryption(self): + return self.extend_hs_dict.get('e',0) + + def get_extend_listenport(self): + return self.extend_hs_dict.get('p') + + def send_extend_handshake(self): + + # NETWORK AWARE + hisip = self.connection.get_ip(real=True) + ipv4 = None + if self.connecter.config.get('same_nat_try_internal',0): + [client,version] = decodePeerID(self.connection.id) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: send_extend_hs: Peer is client",client + if client == TRIBLER_PEERID_LETTER: + # If we're connecting to a Tribler peer, show our internal IP address + # as 'ipv4'. 
+ ipv4 = self.get_ip(real=True) + + # See: http://www.bittorrent.org/beps/bep_0010.html + d = {} + d['m'] = self.connecter.EXTEND_HANDSHAKE_M_DICT + d['p'] = self.connecter.mylistenport + ver = version_short.replace('-',' ',1) + d['v'] = ver + d['e'] = 0 # Apparently this means we don't like uTorrent encryption + d['yourip'] = compact_ip(hisip) + if ipv4 is not None: + # Only send IPv4 when necessary, we prefer this peer to use this addr. + d['ipv4'] = compact_ip(ipv4) + + self._send_message(EXTEND + EXTEND_MSG_HANDSHAKE_ID + bencode(d)) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'connecter: sent extend: id=0+',d,"yourip",hisip,"ipv4",ipv4 + + # + # ut_pex support + # + def got_ut_pex(self,d): + if DEBUG_UT_PEX: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Got uTorrent PEX:",d + (added_peers,dropped_peers) = check_ut_pex(d) + + # DoS protection: we're accepting IP addresses from + # an untrusted source, so be a bit careful + mx = self.connecter.ut_pex_max_addrs_from_peer + if DEBUG_UT_PEX: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Got",len(added_peers),"peers via uTorrent PEX, using max",mx + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Got",added_peers + # Take random sample of mx peers + shuffle(added_peers) + sample_added_peers_with_id = [] + + # Put the sample in the format desired by Encoder.start_connections() + for dns in added_peers[:mx]: + peer_with_id = (dns, 0) + sample_added_peers_with_id.append(peer_with_id) + if len(sample_added_peers_with_id) > 0: + if DEBUG_UT_PEX: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Starting ut_pex conns to",len(sample_added_peers_with_id) + self.connection.Encoder.start_connections(sample_added_peers_with_id) + + def send_extend_ut_pex(self,payload): + msg = EXTEND+self.his_extend_msg_name_to_id(EXTEND_MSG_UTORRENT_PEX)+payload + self._send_message(msg) + + def first_ut_pex(self): + if self.ut_pex_first_flag: + self.ut_pex_first_flag = False + return True + else: + return False + + + # + # Give-2-Get + # + def g2g_sent_piece_part( self, c, index, begin, hashlist, piece ): + """ Keeps a record of the fact that we sent piece index[begin:begin+chunk]. """ + + wegaveperc = float(len(piece))/float(self.connecter.piece_size) + if index in self.perc_sent: + self.perc_sent[index] = self.perc_sent[index] + wegaveperc + else: + self.perc_sent[index] = wegaveperc + + + def queue_g2g_piece_xfer(self,index,begin,piece): + """ Queue the fact that we sent piece index[begin:begin+chunk] for + tranmission to peers + """ + if self.g2g_version == EXTEND_MSG_G2G_V1: + self.send_g2g_piece_xfer_v1(index,begin,piece) + return + + perc = float(len(piece))/float(self.connecter.piece_size) + if index in self.last_perc_sent: + self.last_perc_sent[index] = self.last_perc_sent[index] + perc + else: + self.last_perc_sent[index] = perc + + def dequeue_g2g_piece_xfer(self): + """ Send queued information about pieces we sent to peers. Called + periodically. 
+ """ + psf = float(self.connecter.piece_size) + ppdict = {} + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: g2g dq: orig",self.last_perc_sent + + for index,perc in self.last_perc_sent.iteritems(): + # due to rerequests due to slow pieces the sum can be above 1.0 + capperc = min(1.0,perc) + percb = chr(int((100.0 * capperc))) + # bencode can't deal with int keys + ppdict[str(index)] = percb + self.last_perc_sent = {} + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: g2g dq: dest",ppdict + + if len(ppdict) > 0: + self.send_g2g_piece_xfer_v2(ppdict) + + def send_g2g_piece_xfer_v1(self,index,begin,piece): + """ Send fact that we sent piece index[begin:begin+chunk] to a peer + to all peers (G2G V1). + """ + self._send_message(self.his_extend_msg_name_to_id(EXTEND_MSG_G2G_V1) + tobinary(index) + tobinary(begin) + tobinary(len(piece))) + + def send_g2g_piece_xfer_v2(self,ppdict): + """ Send list of facts that we sent pieces to all peers (G2G V2). """ + blist = bencode(ppdict) + self._send_message(EXTEND + self.his_extend_msg_name_to_id(EXTEND_MSG_G2G_V2) + blist) + + def got_g2g_piece_xfer_v1(self,index,begin,length): + """ Got a G2G_PIECE_XFER message in V1 format. """ + hegaveperc = float(length)/float(self.connecter.piece_size) + self.g2g_peer_forwarded_piece_part(index,hegaveperc) + + def got_g2g_piece_xfer_v2(self,ppdict): + """ Got a G2G_PIECE_XFER message in V2 format. """ + for indexstr,hegavepercb in ppdict.iteritems(): + index = int(indexstr) + hegaveperc = float(ord(hegavepercb))/100.0 + self.g2g_peer_forwarded_piece_part(index,hegaveperc) + + def g2g_peer_forwarded_piece_part(self,index,hegaveperc): + """ Processes this peer forwarding piece i[begin:end] to a grandchild. """ + # Reward for forwarding data in general + length = ceil(hegaveperc * float(self.connecter.piece_size)) + self.forward_speeds[1].update_rate(length) + + if index not in self.perc_sent: + # piece came from disk + return + + # Extra reward if its data we sent + wegaveperc = self.perc_sent[index] + overlapperc = wegaveperc * hegaveperc + overlap = ceil(overlapperc * float(self.connecter.piece_size)) + if overlap > 0: + self.forward_speeds[0].update_rate( overlap ) + + def g2g_score( self ): + return [x.get_rate() for x in self.forward_speeds] + + + # + # SecureOverlay support + # + def connect_overlay(self): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Initiating overlay connection" + if not self.initiated_overlay: + self.initiated_overlay = True + so = SecureOverlay.getInstance() + so.connect_dns(self.connection.dns,self.network_connect_dns_callback) + + def network_connect_dns_callback(self,exc,dns,permid,selversion): + # WARNING: WILL BE CALLED BY NetworkThread + if exc is not None: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: peer",dns,"said he supported overlay swarm, but we can't connect to him",exc + + + # + # NETWORK AWARE + # + def na_check_for_same_nat(self,yourip): + """ See if peer is local, e.g. behind same NAT, same AS or something. + If so, try to optimize: + - Same NAT -> reconnect to use internal network + """ + hisip = self.connection.get_ip(real=True) + if hisip == yourip: + # Do we share the same NAT? 
+ myextip = self.connecter.get_extip_func(unknowniflocal=True) + myintip = self.get_ip(real=True) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: na_check_for_same_nat: his",hisip,"myext",myextip,"myint",myintip + + if hisip != myintip or hisip == '127.0.0.1': # to allow testing + # He can't fake his source addr, so we're not running on the + # same machine, + + # He may be quicker to determine we should have a local + # conn, so prepare for his connection in advance. + # + if myextip is None: + # I don't known my external IP and he's not on the same + # machine as me. yourip could be our real external IP, test. + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: na_check_same_nat: Don't know my ext ip, try to loopback to",yourip,"to see if that's me" + self.na_start_loopback_connection(yourip) + elif hisip == myextip: + # Same NAT. He can't fake his source addr. + # Attempt local network connection + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: na_check_same_nat: Yes, trying to connect via internal" + self.na_start_internal_connection() + else: + # hisip != myextip + # He claims we share the same IP, but I think my ext IP + # is something different. Either he is lying or I'm + # mistaken, test + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: na_check_same_nat: Maybe, me thinks not, try to loopback to",yourip + self.na_start_loopback_connection(yourip) + + + def na_start_loopback_connection(self,yourip): + """ Peer claims my external IP is "yourip". Try to connect back to myself """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: na_start_loopback: Checking if my ext ip is",yourip + self.na_candidate_ext_ip = yourip + + dns = (yourip,self.connecter.mylistenport) + self.connection.Encoder.start_connection(dns,0,forcenew=True) + + def na_got_loopback(self,econnection): + """ Got a connection with my peer ID. Check that this is indeed me looping + back to myself. No man-in-the-middle attacks protection. This is complex + if we're also connecting to ourselves because of a stale tracker + registration. Window of opportunity is small. + """ + himismeip = econnection.get_ip(real=True) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: conn: na_got_loopback:",himismeip,self.na_candidate_ext_ip + if self.na_candidate_ext_ip == himismeip: + self.na_start_internal_connection() + + + def na_start_internal_connection(self): + """ Reconnect to peer using internal network """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: na_start_internal_connection" + + # Doesn't really matter who initiates. Letting other side do it makes + # testing easier. 
+ if not self.is_locally_initiated(): + + hisip = decompact_ip(self.extend_hs_dict['ipv4']) + hisport = self.extend_hs_dict['p'] + + # For testing, see Tribler/Test/test_na_extend_hs.py + if hisip == '224.4.8.1' and hisport == 4810: + hisip = '127.0.0.1' + hisport = 4811 + + self.connection.na_want_internal_conn_from = hisip + + hisdns = (hisip,hisport) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: na_start_internal_connection to",hisdns + self.connection.Encoder.start_connection(hisdns,0) + + def na_get_address_distance(self): + return self.connection.na_get_address_distance() + + +class Connecter: +# 2fastbt_ + def __init__(self, make_upload, downloader, choker, numpieces, piece_size, + totalup, config, ratelimiter, merkle_torrent, sched = None, + coordinator = None, helper = None, get_extip_func = lambda: None, mylistenport = None, use_g2g = False, infohash=None, tracker=None): + self.downloader = downloader + self.make_upload = make_upload + self.choker = choker + self.numpieces = numpieces + self.piece_size = piece_size + self.config = config + self.ratelimiter = ratelimiter + self.rate_capped = False + self.sched = sched + self.totalup = totalup + self.rate_capped = False + self.connections = {} + self.external_connection_made = 0 + self.merkle_torrent = merkle_torrent + self.use_g2g = use_g2g + # 2fastbt_ + self.coordinator = coordinator + self.helper = helper + self.round = 0 + self.get_extip_func = get_extip_func + self.mylistenport = mylistenport + self.infohash = infohash + self.tracker = tracker + try: + (scheme, netloc, path, pars, query, _fragment) = urlparse.urlparse(self.tracker) + host = netloc.split(':')[0] + self.tracker_ip = socket.getaddrinfo(host,None)[0][4][0] + except: + print_exc() + self.tracker_ip = None + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Connecter: live: source/tracker is",self.tracker_ip + + self.overlay_enabled = 0 + if self.config['overlay']: + self.overlay_enabled = True + + if DEBUG: + if self.overlay_enabled: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Enabling overlay" + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Disabling overlay" + + if DEBUG: + if self.overlay_enabled: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Enabling overlay" + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Disabling overlay" + + self.ut_pex_enabled = 0 + if 'ut_pex_max_addrs_from_peer' in self.config: + self.ut_pex_max_addrs_from_peer = self.config['ut_pex_max_addrs_from_peer'] + self.ut_pex_enabled = self.ut_pex_max_addrs_from_peer > 0 + self.ut_pex_previous_conns = [] # last value of 'added' field for all peers + + if DEBUG_UT_PEX: + if self.ut_pex_enabled: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Enabling uTorrent PEX",self.ut_pex_max_addrs_from_peer + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Disabling uTorrent PEX" + + # The set of messages we support. Note that the msg ID is an int not a byte in + # this dict. + self.EXTEND_HANDSHAKE_M_DICT = {} + + if self.overlay_enabled: + # Say in the EXTEND handshake we support the overlay-swarm ext. + d = {EXTEND_MSG_OVERLAYSWARM:ord(CHALLENGE)} + self.EXTEND_HANDSHAKE_M_DICT.update(d) + if self.ut_pex_enabled: + # Say in the EXTEND handshake we support uTorrent's peer exchange ext. 
+ d = {EXTEND_MSG_UTORRENT_PEX:ord(EXTEND_MSG_UTORRENT_PEX_ID)} + self.EXTEND_HANDSHAKE_M_DICT.update(d) + self.sched(self.ut_pex_callback,6) + if self.use_g2g: + # Say in the EXTEND handshake we want to do G2G. + d = {EXTEND_MSG_G2G_V2:ord(G2G_PIECE_XFER)} + self.EXTEND_HANDSHAKE_M_DICT.update(d) + self.sched(self.g2g_callback,G2G_CALLBACK_INTERVAL) + + # LIVEHACK + livekey = EXTEND_MSG_LIVE_PREFIX+str(CURRENT_LIVE_VERSION) + d = {livekey:ord(LIVE_FAKE_MESSAGE_ID)} + self.EXTEND_HANDSHAKE_M_DICT.update(d) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Connecter: EXTEND: my dict",self.EXTEND_HANDSHAKE_M_DICT + + # BarterCast + if config['overlay']: + self.overlay_bridge = OverlayThreadingBridge.getInstance() + else: + self.overlay_bridge = None + + def how_many_connections(self): + return len(self.connections) + + def connection_made(self, connection): + c = Connection(connection, self) + self.connections[connection] = c + + if connection.supports_extend_messages(): + # The peer either supports our overlay-swarm extension or + # the utorrent extended protocol. + + [client,version] = decodePeerID(connection.id) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Peer is client",client,"version",version,c.get_ip() + + if self.overlay_enabled and client == TRIBLER_PEERID_LETTER and version <= '3.5.0' and connection.locally_initiated: + # Old Tribler, establish overlay connection< + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Peer is previous Tribler version, attempt overlay connection" + c.connect_overlay() + elif self.ut_pex_enabled: + # EXTEND handshake must be sent just after BT handshake, + # before BITFIELD even + c.send_extend_handshake() + + #TODO: overlay swarm also needs upload and download to control transferring rate + c.upload = self.make_upload(c, self.ratelimiter, self.totalup) + c.download = self.downloader.make_download(c) + self.choker.connection_made(c) + return c + + def connection_lost(self, connection): + c = self.connections[connection] + + ###################################### + # BarterCast + if self.overlay_bridge is not None: + ip = c.get_ip(False) + port = c.get_port(False) + down_kb = int(c.total_downloaded / 1024) + up_kb = int(c.total_uploaded / 1024) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bartercast: attempting database update, adding olthread" + + olthread_bartercast_conn_lost_lambda = lambda:olthread_bartercast_conn_lost(ip,port,down_kb,up_kb) + self.overlay_bridge.add_task(olthread_bartercast_conn_lost_lambda,0) + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bartercast: no overlay bridge found" + + ######################### + + if DEBUG: + if c.get_ip() == self.tracker_ip: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: connection_lost: live: WAAH2 closing SOURCE" + + del self.connections[connection] + if c.download: + c.download.disconnected() + self.choker.connection_lost(c) + + def connection_flushed(self, connection): + conn = self.connections[connection] + if conn.next_upload is None and (conn.partial_message is not None + or conn.upload.buffer): + self.ratelimiter.queue(conn) + + def got_piece(self, i): + for co in self.connections.values(): + co.send_have(i) + + def our_extend_msg_id_to_name(self,ext_id): + """ find the name for the given message id (byte) """ + for key,val in 
self.EXTEND_HANDSHAKE_M_DICT.iteritems(): + if val == ord(ext_id): + return key + return None + + def get_ut_pex_conns(self): + conns = [] + for conn in self.connections.values(): + if conn.get_extend_listenport() is not None: + conns.append(conn) + return conns + + def get_ut_pex_previous_conns(self): + return self.ut_pex_previous_conns + + def set_ut_pex_previous_conns(self,conns): + self.ut_pex_previous_conns = conns + + def ut_pex_callback(self): + """ Periocially send info about the peers you know to the other peers """ + if DEBUG_UT_PEX: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Periodic ut_pex update" + + currconns = self.get_ut_pex_conns() + (addedconns,droppedconns) = ut_pex_get_conns_diff(currconns,self.get_ut_pex_previous_conns()) + self.set_ut_pex_previous_conns(currconns) + if DEBUG_UT_PEX: + for conn in addedconns: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: ut_pex: Added",conn.get_ip(),conn.get_extend_listenport() + for conn in droppedconns: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: ut_pex: Dropped",conn.get_ip(),conn.get_extend_listenport() + + for c in currconns: + if c.supports_extend_msg(EXTEND_MSG_UTORRENT_PEX): + try: + if DEBUG_UT_PEX: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: ut_pex: Creating msg for",c.get_ip(),c.get_extend_listenport() + if c.first_ut_pex(): + aconns = currconns + dconns = [] + else: + aconns = addedconns + dconns = droppedconns + payload = create_ut_pex(aconns,dconns,c) + c.send_extend_ut_pex(payload) + except: + print_exc() + self.sched(self.ut_pex_callback,60) + + def g2g_callback(self): + try: + self.sched(self.g2g_callback,G2G_CALLBACK_INTERVAL) + for c in self.connections.itervalues(): + if not c.use_g2g: + continue + + c.dequeue_g2g_piece_xfer() + except: + print_exc() + + + # NETWORK AWARE + def na_got_loopback(self,econnection): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: na_got_loopback: Got connection from",econnection.get_ip(),econnection.get_port() + for c in self.connections.itervalues(): + ret = c.na_got_loopback(econnection) + if ret is not None: + return ret + return False + + def na_got_internal_connection(self,origconn,newconn): + """ This is called only at the initiator side of the internal conn. + Doesn't matter, only one is enough to close the original connection. 
+ """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: na_got_internal: From",newconn.get_ip(),newconn.get_port() + + origconn.close() + + def got_message(self, connection, message): + # connection: Encrypter.Connection; c: Connecter.Connection + c = self.connections[connection] + t = message[0] + # EXTEND handshake will be sent just after BT handshake, + # before BITFIELD even + + if DEBUG_MESSAGE_HANDLING: + st = time.time() + + if False: #connection.get_ip().startswith("192"): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Got",getMessageName(t),connection.get_ip() + + if DEBUG_NORMAL_MSGS: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Got",getMessageName(t),connection.get_ip() + + if t == EXTEND: + self.got_extend_message(connection,c,message,self.ut_pex_enabled) + return + if t == BITFIELD and c.got_anything: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on BITFIELD" + connection.close() + return + c.got_anything = True + if (t in [CHOKE, UNCHOKE, INTERESTED, NOT_INTERESTED] and + len(message) != 1): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad (UN)CHOKE/(NOT_)INTERESTED",t + connection.close() + return + if t == CHOKE: + if DEBUG_NORMAL_MSGS: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Got CHOKE from",connection.get_ip() + c.download.got_choke() + elif t == UNCHOKE: + if DEBUG_NORMAL_MSGS: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Got UNCHOKE from",connection.get_ip() + c.download.got_unchoke() + elif t == INTERESTED: + if DEBUG_NORMAL_MSGS: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Got INTERESTED from",connection.get_ip() + if c.upload is not None: + c.upload.got_interested() + elif t == NOT_INTERESTED: + c.upload.got_not_interested() + elif t == HAVE: + if len(message) != 5: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad HAVE: msg len" + connection.close() + return + i = toint(message[1:]) + if i >= self.numpieces: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad HAVE: index out of range" + connection.close() + return + if DEBUG_NORMAL_MSGS: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Got HAVE(",i,") from",connection.get_ip() + c.download.got_have(i) + elif t == BITFIELD: + if DEBUG_NORMAL_MSGS: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Got BITFIELD from",connection.get_ip() + try: + b = Bitfield(self.numpieces, message[1:]) + except ValueError: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad BITFIELD" + connection.close() + return + if c.download is not None: + c.download.got_have_bitfield(b) + elif t == REQUEST: + if len(message) != 13: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad REQUEST: msg len" + connection.close() + return + i = toint(message[1:5]) + if i >= self.numpieces: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad REQUEST: index out of range" + connection.close() + return + if DEBUG_NORMAL_MSGS: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Got REQUEST(",i,") from",connection.get_ip() + c.got_request(i, 
toint(message[5:9]), toint(message[9:])) + elif t == CANCEL: + if len(message) != 13: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad CANCEL: msg len" + connection.close() + return + i = toint(message[1:5]) + if i >= self.numpieces: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad CANCEL: index out of range" + connection.close() + return + c.upload.got_cancel(i, toint(message[5:9]), + toint(message[9:])) + elif t == PIECE: + if len(message) <= 9: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad PIECE: msg len" + connection.close() + return + i = toint(message[1:5]) + if i >= self.numpieces: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad PIECE: msg len" + connection.close() + return + if DEBUG_NORMAL_MSGS: # or connection.get_ip().startswith("192"): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Got PIECE(",i,") from",connection.get_ip() + #if connection.get_ip().startswith("192"): + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","@", + try: + if c.download.got_piece(i, toint(message[5:9]), [], message[9:]): + self.got_piece(i) + except Exception,e: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad PIECE: exception",str(e) + print_exc() + connection.close() + return + + elif t == HASHPIECE: + # Merkle: Handle pieces with hashes + try: + if len(message) <= 13: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad HASHPIECE: msg len" + connection.close() + return + i = toint(message[1:5]) + if i >= self.numpieces: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad HASHPIECE: index out of range" + connection.close() + return + begin = toint(message[5:9]) + len_hashlist = toint(message[9:13]) + bhashlist = message[13:13+len_hashlist] + hashlist = bdecode(bhashlist) + if not isinstance(hashlist, list): + raise AssertionError, "hashlist not list" + for oh in hashlist: + if not isinstance(oh,list) or \ + not (len(oh) == 2) or \ + not isinstance(oh[0],int) or \ + not isinstance(oh[1],str) or \ + not ((len(oh[1])==20)): \ + raise AssertionError, "hashlist entry invalid" + piece = message[13+len_hashlist:] + + if DEBUG_NORMAL_MSGS: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Got HASHPIECE",i,begin + + if c.download.got_piece(i, begin, hashlist, piece): + self.got_piece(i) + except Exception,e: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad HASHPIECE: exception",str(e) + print_exc() + connection.close() + return + elif t == G2G_PIECE_XFER: + # EXTEND_MSG_G2G_V1 only, V2 is proper EXTEND msg + if len(message) <= 12: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad G2G_PIECE_XFER: msg len" + connection.close() + return + if not c.use_g2g: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on receiving G2G_PIECE_XFER over non-g2g connection" + connection.close() + return + + index = toint(message[1:5]) + begin = toint(message[5:9]) + length = toint(message[9:13]) + c.got_g2g_piece_xfer_v1(index,begin,length) + else: + connection.close() + + if DEBUG_MESSAGE_HANDLING: + et = time.time() + diff = et - st + if diff > 0.1: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", 
localtime())," ","connecter: $$$$$$$$$$$$",getMessageName(t),"took",diff + + + def got_extend_message(self,connection,c,message,ut_pex_enabled): + # connection: Encrypter.Connection; c: Connecter.Connection + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Got EXTEND message, len",len(message) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: his handshake",c.extend_hs_dict,c.get_ip() + + try: + if len(message) < 4: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad EXTEND: msg len" + connection.close() + return + ext_id = message[1] + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","connecter: Got EXTEND message, id",ord(ext_id) + if ext_id == EXTEND_MSG_HANDSHAKE_ID: + # Message is Handshake + d = bdecode(message[2:]) + if type(d) == DictType: + c.got_extend_handshake(d) + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad EXTEND: payload of handshake is not a bencoded dict" + connection.close() + return + else: + # Message is regular message e.g ut_pex + ext_msg_name = self.our_extend_msg_id_to_name(ext_id) + if ext_msg_name is None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad EXTEND: peer sent ID we didn't define in handshake" + connection.close() + return + elif ext_msg_name == EXTEND_MSG_OVERLAYSWARM: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Not closing EXTEND+CHALLENGE: peer didn't read our spec right, be liberal" + elif ext_msg_name == EXTEND_MSG_UTORRENT_PEX and ut_pex_enabled: + d = bdecode(message[2:]) + if type(d) == DictType: + c.got_ut_pex(d) + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad EXTEND: payload of ut_pex is not a bencoded dict" + connection.close() + return + elif ext_msg_name == EXTEND_MSG_G2G_V2 and self.use_g2g: + ppdict = bdecode(message[2:]) + if type(ppdict) != DictType: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad EXTEND+G2G: payload not dict" + connection.close() + return + for k,v in ppdict.iteritems(): + if type(k) != StringType or type(v) != StringType: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad EXTEND+G2G: key,value not of type int,char" + connection.close() + return + try: + int(k) + except: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad EXTEND+G2G: key not int" + connection.close() + return + if ord(v) > 100: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad EXTEND+G2G: value too big",ppdict,v,ord(v) + connection.close() + return + + c.got_g2g_piece_xfer_v2(ppdict) + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad EXTEND: peer sent ID that maps to name we don't support",ext_msg_name,`ext_id`,ord(ext_id) + connection.close() + return + return + except Exception,e: + if not DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Close on bad EXTEND: exception:",str(e),`message[2:]` + print_exc() + connection.close() + return + + +def olthread_bartercast_conn_lost(ip,port,down_kb,up_kb): + """ Called by OverlayThread to store information about the peer to + whom the connection was just closed in the (slow) databases. 
""" + + peerdb = PeerDBHandler.getInstance() + bartercastdb = BarterCastDBHandler.getInstance() + + if bartercastdb: + + permid = peerdb.getPermIDByIP(ip) + my_permid = bartercastdb.my_permid + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bartercast: (Connecter): Up %d down %d peer %s:%s (PermID = %s)" % (up_kb, down_kb, ip, port, `permid`) + + # Save exchanged KBs in BarterCastDB + changed = False + if permid is not None: + #name = bartercastdb.getName(permid) + + if down_kb > 0: + new_value = bartercastdb.incrementItem((my_permid, permid), 'downloaded', down_kb, commit=False) + changed = True + + if up_kb > 0: + new_value = bartercastdb.incrementItem((my_permid, permid), 'uploaded', up_kb, commit=False) + changed = True + + # For the record: save KBs exchanged with non-tribler peers + else: + if down_kb > 0: + new_value = bartercastdb.incrementItem((my_permid, 'non-tribler'), 'downloaded', down_kb, commit=False) + changed = True + + if up_kb > 0: + new_value = bartercastdb.incrementItem((my_permid, 'non-tribler'), 'uploaded', up_kb, commit=False) + changed = True + + if changed: + bartercastdb.commit() + + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "BARTERCAST: No bartercastdb instance" diff --git a/tribler-mod/Tribler/Core/BitTornado/BT1/Downloader.py b/tribler-mod/Tribler/Core/BitTornado/BT1/Downloader.py new file mode 100644 index 0000000..f85abfc --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/BT1/Downloader.py @@ -0,0 +1,957 @@ +from time import localtime, strftime + +# Written by Bram Cohen and Pawel Garbacki +# see LICENSE.txt for license information + +from Tribler.Core.BitTornado.CurrentRateMeasure import Measure +from Tribler.Core.BitTornado.bitfield import Bitfield +from random import shuffle +from Tribler.Core.BitTornado.clock import clock +# 2fastbt_ +from Tribler.Core.CoopDownload.Helper import SingleDownloadHelperInterface + +import sys + +try: + True +except: + True = 1 + False = 0 + +DEBUG = True #False +EXPIRE_TIME = 60 * 60 + +# only define the following functions in __debug__. And only import +# them in this case. They are to expensive to have, and have no +# purpose, outside debug mode. +if __debug__: + _ident_letters = {} + _ident_letter_pool = None + def get_ident_letter(download): + if not download.ip in _ident_letters: + global _ident_letter_pool + if not _ident_letter_pool: + _ident_letter_pool = [chr(c) for c in range(ord("a"), ord("z")+1)] + [chr(c) for c in range(ord("A"), ord("Z")+1)] + _ident_letters[download.ip] = _ident_letter_pool.pop(0) + return _ident_letters[download.ip] + + def print_chunks(downloader, pieces, before=(), after=(), compact=True): + """ + Print a line summery indicating completed/outstanding/non-requested chunks + + When COMPACT is True one character will represent one piece. + # --> downloaded + - --> no outstanding requests + 1-9 --> the number of outstanding requests (max 9) + + When COMPACT is False one character will requests one chunk. 
+ # --> downloaded + - --> no outstanding requests + a-z --> requested at peer with that character (also capitals, duplicates may occur) + 1-9 --> requested multiple times (at n peers) + """ + if pieces: + do_I_have = downloader.storage.do_I_have + do_I_have_requests = downloader.storage.do_I_have_requests + inactive_requests = downloader.storage.inactive_requests + piece_size = downloader.storage.piece_length + chunk_size = downloader.storage.request_size + chunks_per_piece = int(piece_size / chunk_size) + + if compact: + request_map = {} + for download in downloader.downloads: + for piece, begin, length in download.active_requests: + if not piece in request_map: + request_map[piece] = 0 + request_map[piece] += 1 + + def print_chunks_helper(piece_id): + if do_I_have(piece_id): return "#" + if do_I_have_requests(piece_id): return "-" + if piece_id in request_map: return str(min(9, request_map[piece_id])) + return "?" + + else: + request_map = {} + for download in downloader.downloads: + + for piece, begin, length in download.active_requests: + if not piece in request_map: + request_map[piece] = ["-"] * chunks_per_piece + index = int(begin/chunk_size) + if request_map[piece][index] == "-": + request_map[piece][index] = get_ident_letter(download) + elif type(request_map[piece][index]) is str: + request_map[piece][index] = 2 + else: + request_map[piece][index] += 1 + request_map[piece][int(begin/chunk_size)] = get_ident_letter(download) + + def print_chunks_helper(piece_id): + if do_I_have(piece_id): return "#" * chunks_per_piece +# if do_I_have_requests(piece_id): return "-" * chunks_per_piece + if piece_id in request_map: + if piece_id in inactive_requests and type(inactive_requests[piece_id]) is list: + for begin, length in inactive_requests[piece_id]: + request_map[piece_id][int(begin/chunk_size)] = " " + return "".join([str(c) for c in request_map[piece_id]]) + return "-" * chunks_per_piece + + if before: + s_before = before[0] + else: + s_before = "" + + if after: + s_after = after[-1] + else: + s_after = "" + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Outstanding %s:%d:%d:%s [%s|%s|%s]" % (s_before, pieces[0], pieces[-1], s_after, "".join(map(print_chunks_helper, before)), "".join(map(print_chunks_helper, pieces)), "".join(map(print_chunks_helper, after))) + + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Outstanding 0:0 []" +else: + def print_chunks(downloader, pieces, before=(), after=(), compact=True): + pass + +class PerIPStats: + def __init__(self, ip): + self.numgood = 0 + self.bad = {} + self.numconnections = 0 + self.lastdownload = None + self.peerid = None + +class BadDataGuard: + def __init__(self, download): + self.download = download + self.ip = download.ip + self.downloader = download.downloader + self.stats = self.downloader.perip[self.ip] + self.lastindex = None + + def failed(self, index, bump = False): + self.stats.bad.setdefault(index, 0) + self.downloader.gotbaddata[self.ip] = 1 + self.stats.bad[index] += 1 + if len(self.stats.bad) > 1: + if self.download is not None: + self.downloader.try_kick(self.download) + elif self.stats.numconnections == 1 and self.stats.lastdownload is not None: + self.downloader.try_kick(self.stats.lastdownload) + if len(self.stats.bad) >= 3 and len(self.stats.bad) > int(self.stats.numgood/30): + self.downloader.try_ban(self.ip) + elif bump: + self.downloader.picker.bump(index) + + def good(self, index): + # lastindex is a hack to only increase numgood by one for each good
piece, however many chunks come from the connection(s) from this IP + if index != self.lastindex: + self.stats.numgood += 1 + self.lastindex = index + +# 2fastbt_ +class SingleDownload(SingleDownloadHelperInterface): +# _2fastbt + def __init__(self, downloader, connection): +# 2fastbt_ + SingleDownloadHelperInterface.__init__(self) +# _2fastbt + self.downloader = downloader + self.connection = connection + self.choked = True + self.interested = False + self.active_requests = [] + self.measure = Measure(downloader.max_rate_period) + self.peermeasure = Measure(downloader.max_rate_period) + self.have = Bitfield(downloader.numpieces) + self.last = -1000 + self.last2 = -1000 + self.example_interest = None + self.backlog = 2 + self.ip = connection.get_ip() + self.guard = BadDataGuard(self) +# 2fastbt_ + self.helper = downloader.picker.helper +# _2fastbt + + # boudewijn: VOD needs a download measurement that is not + # averaged over a 'long' period. downloader.max_rate_period is + # (by default) 20 seconds because this matches the unchoke + # policy. + self.short_term_measure = Measure(5) + + # boudewijn: each download maintains a counter for the number + # of high priority piece requests that did not get any + # responce within x seconds. + self.bad_performance_counter = 0 + + def _backlog(self, just_unchoked): + self.backlog = int(min( + 2+int(4*self.measure.get_rate()/self.downloader.chunksize), + (2*just_unchoked)+self.downloader.queue_limit() )) + if self.backlog > 50: + self.backlog = int(max(50, self.backlog * 0.075)) + return self.backlog + + def disconnected(self): + self.downloader.lost_peer(self) + + """ JD: obsoleted -- moved to picker.lost_peer + + if self.have.complete(): + self.downloader.picker.lost_seed() + else: + for i in xrange(len(self.have)): + if self.have[i]: + self.downloader.picker.lost_have(i) + """ + + if self.have.complete() and self.downloader.storage.is_endgame(): + self.downloader.add_disconnected_seed(self.connection.get_readable_id()) + self._letgo() + self.guard.download = None + + def _letgo(self): + if self.downloader.queued_out.has_key(self): + del self.downloader.queued_out[self] + if not self.active_requests: + return + if self.downloader.endgamemode: + self.active_requests = [] + return + lost = {} + for index, begin, length in self.active_requests: + self.downloader.storage.request_lost(index, begin, length) + lost[index] = 1 + lost = lost.keys() + self.active_requests = [] + if self.downloader.paused: + return + ds = [d for d in self.downloader.downloads if not d.choked] + shuffle(ds) + for d in ds: + d._request_more() + for d in self.downloader.downloads: + if d.choked and not d.interested: + for l in lost: + if d.have[l] and self.downloader.storage.do_I_have_requests(l): + d.send_interested() + break + + def got_choke(self): + if not self.choked: + self.choked = True + self._letgo() + + def got_unchoke(self): + if self.choked: + self.choked = False + if self.interested: + self._request_more(new_unchoke = True) + self.last2 = clock() + + def is_choked(self): + return self.choked + + def is_interested(self): + return self.interested + + def send_interested(self): + if not self.interested: + self.interested = True + self.connection.send_interested() + + def send_not_interested(self): + if self.interested: + self.interested = False + self.connection.send_not_interested() + + def got_piece(self, index, begin, hashlist, piece): + """ + Returns True if the piece is complete. + Note that in this case a -piece- means a chunk! 
+ """ + + if self.bad_performance_counter: + self.bad_performance_counter -= 1 + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "decreased bad_performance_counter to", self.bad_performance_counter + + length = len(piece) + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'Downloader: got piece of length %d' % length + try: + self.active_requests.remove((index, begin, length)) + except ValueError: + self.downloader.discarded += length + return False + if self.downloader.endgamemode: + self.downloader.all_requests.remove((index, begin, length)) + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Downloader: got_piece: removed one request from all_requests", len(self.downloader.all_requests), "remaining" + + self.last = clock() + self.last2 = clock() + self.measure.update_rate(length) + self.short_term_measure.update_rate(length) + self.downloader.measurefunc(length) + if not self.downloader.storage.piece_came_in(index, begin, hashlist, piece, self.guard): + self.downloader.piece_flunked(index) + return False + + # boudewijn: we need more accurate (if possibly invalid) + # measurements on current download speed + self.downloader.picker.got_piece(index, begin, length) + + if self.downloader.storage.do_I_have(index): + self.downloader.picker.complete(index) + + if self.downloader.endgamemode: + for d in self.downloader.downloads: + if d is not self: + if d.interested: + if d.choked: + assert not d.active_requests + d.fix_download_endgame() + else: + try: + d.active_requests.remove((index, begin, length)) + except ValueError: + continue + d.connection.send_cancel(index, begin, length) + d.fix_download_endgame() + else: + assert not d.active_requests + self._request_more() + self.downloader.check_complete(index) + + # BarterCast counter + self.connection.total_downloaded += length + + return self.downloader.storage.do_I_have(index) + +# 2fastbt_ + def helper_forces_unchoke(self): + self.choked = False +# _2fastbt + + def _request_more(self, new_unchoke = False, slowpieces = []): +# 2fastbt_ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Downloader: _request_more()" + if self.is_frozen_by_helper(): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Downloader: _request_more: blocked, returning" + return +# _2fastbt + if self.choked: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Downloader: _request_more: choked, returning" + return +# 2fastbt_ + # do not download from coordinator + if self.connection.connection.is_coordinator_con(): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Downloader: _request_more: coordinator conn" + return +# _2fastbt + if self.downloader.endgamemode: + self.fix_download_endgame(new_unchoke) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Downloader: _request_more: endgame mode, returning" + return + if self.downloader.paused: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Downloader: _request_more: paused, returning" + return + if len(self.active_requests) >= self._backlog(new_unchoke): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Downloader: more req than unchoke (active req: %d >= backlog: %d)" % (len(self.active_requests), self._backlog(new_unchoke)) + # Jelle: Schedule _request more to be called in some time. 
Otherwise requesting and receiving packages + # may stop, if they arrive to quickly + if self.downloader.download_rate: + wait_period = self.downloader.chunksize / self.downloader.download_rate / 2.0 + + # Boudewijn: when wait_period is 0.0 this will cause + # the the _request_more method to be scheduled + # multiple times (recursively), causing severe cpu + # problems. + # + # Therefore, only schedule _request_more to be called + # if the call will be made in the future. The minimal + # wait_period should be tweaked. + if wait_period > 1.0: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Downloader: waiting for %f s to call _request_more again" % wait_period + self.downloader.scheduler(self._request_more, wait_period) + + if not (self.active_requests or self.backlog): + self.downloader.queued_out[self] = 1 + return + + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Downloader: _request_more: len act",len(self.active_requests),"back",self.backlog + + lost_interests = [] + while len(self.active_requests) < self.backlog: + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Downloader: Looking for interesting piece" + #st = time.time() + interest = self.downloader.picker.next(self.have, + self.downloader.storage.do_I_have_requests, + self, + self.downloader.too_many_partials(), + self.connection.connection.is_helper_con(), + slowpieces = slowpieces, connection = self.connection) + #et = time.time() + #diff = et-st + diff=-1 + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Downloader: _request_more: next() returned",interest,"took %.5f" % (diff) + if interest is None: + break + self.example_interest = interest + self.send_interested() + loop = True + while len(self.active_requests) < self.backlog and loop: + + begin, length = self.downloader.storage.new_request(interest) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Downloader: new_request",interest,begin,length,"to",self.connection.connection.get_ip(),self.connection.connection.get_port() + + self.downloader.picker.requested(interest, begin, length) + self.active_requests.append((interest, begin, length)) + self.connection.send_request(interest, begin, length) + self.downloader.chunk_requested(length) + if not self.downloader.storage.do_I_have_requests(interest): + loop = False + lost_interests.append(interest) + if not self.active_requests: + self.send_not_interested() + if lost_interests: + for d in self.downloader.downloads: + if d.active_requests or not d.interested: + continue + if d.example_interest is not None and self.downloader.storage.do_I_have_requests(d.example_interest): + continue + for lost in lost_interests: + if d.have[lost]: + break + else: + continue +# 2fastbt_ + #st = time.time() + interest = self.downloader.picker.next(d.have, + self.downloader.storage.do_I_have_requests, + self, # Arno, 2008-05-22; self -> d? 
Original Pawel code + self.downloader.too_many_partials(), + self.connection.connection.is_helper_con(), willrequest=False,connection=self.connection) + #et = time.time() + #diff = et-st + diff=-1 + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Downloader: _request_more: next()2 returned",interest,"took %.5f" % (diff) +# _2fastbt + if interest is None: + d.send_not_interested() + else: + d.example_interest = interest + + # Arno: LIVEWRAP: no endgame + if not self.downloader.endgamemode and \ + self.downloader.storage.is_endgame() and \ + not (self.downloader.picker.videostatus and self.downloader.picker.videostatus.live_streaming): + self.downloader.start_endgame() + + + def fix_download_endgame(self, new_unchoke = False): +# 2fastbt_ + # do not download from coordinator + if self.downloader.paused or self.connection.connection.is_coordinator_con(): + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Downloader: fix_download_endgame: paused", self.downloader.paused, "or is_coordinator_con", self.connection.connection.is_coordinator_con() + return +# _2fastbt + + if len(self.active_requests) >= self._backlog(new_unchoke): + if not (self.active_requests or self.backlog) and not self.choked: + self.downloader.queued_out[self] = 1 + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Downloader: fix_download_endgame: returned" + return +# 2fastbt_ + want = [a for a in self.downloader.all_requests if self.have[a[0]] and a not in self.active_requests and (self.helper is None or self.connection.connection.is_helper_con() or not self.helper.is_ignored(a[0]))] +# _2fastbt + if not (self.active_requests or want): + self.send_not_interested() + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Downloader: fix_download_endgame: not interested" + return + if want: + self.send_interested() + if self.choked: + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Downloader: fix_download_endgame: choked" + return + shuffle(want) + del want[self.backlog - len(self.active_requests):] + self.active_requests.extend(want) + for piece, begin, length in want: +# 2fastbt_ + if self.helper is None or self.connection.connection.is_helper_con() or self.helper.reserve_piece(piece,self): + self.connection.send_request(piece, begin, length) + self.downloader.chunk_requested(length) +# _2fastbt + + def got_have(self, index): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Downloader: got_have",index + if index == self.downloader.numpieces-1: + self.downloader.totalmeasure.update_rate(self.downloader.storage.total_length-(self.downloader.numpieces-1)*self.downloader.storage.piece_length) + self.peermeasure.update_rate(self.downloader.storage.total_length-(self.downloader.numpieces-1)*self.downloader.storage.piece_length) + else: + self.downloader.totalmeasure.update_rate(self.downloader.storage.piece_length) + self.peermeasure.update_rate(self.downloader.storage.piece_length) + + # Arno: LIVEWRAP + if not self.downloader.picker.is_valid_piece(index): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Downloader: got_have",index,"is invalid piece" + return # TODO: should we request_more()? 
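The endgame handling in fix_download_endgame() above, together with the cancel loop in got_piece(), follows the usual BitTorrent endgame pattern: once every remaining chunk sits in the shared all_requests pool, each peer that advertises a piece is asked for the pooled chunks it can serve, and the first copy to arrive triggers cancels at every other peer. A minimal, self-contained sketch of that pattern, using hypothetical class and attribute names rather than Tribler's actual API:

    from random import shuffle

    class EndgameSketch(object):
        # Illustrative model only; not the Downloader/SingleDownload classes above.
        def __init__(self, peers):
            self.all_requests = []   # shared pool of (piece, begin, length)
            self.peers = peers       # assumed: .have[piece], .active list, send_request/send_cancel

        def fix_endgame(self, peer, backlog):
            if len(peer.active) >= backlog:
                return
            # offer this peer any pooled chunk it has and is not already fetching
            want = [r for r in self.all_requests
                    if peer.have[r[0]] and r not in peer.active]
            shuffle(want)            # spread duplicate requests across peers
            del want[backlog - len(peer.active):]
            for piece, begin, length in want:
                peer.active.append((piece, begin, length))
                peer.send_request(piece, begin, length)

        def got_chunk(self, source, piece, begin, length):
            # first copy wins: cancel the same chunk everywhere else
            self.all_requests.remove((piece, begin, length))
            for other in self.peers:
                if other is not source and (piece, begin, length) in other.active:
                    other.active.remove((piece, begin, length))
                    other.send_cancel(piece, begin, length)

The shuffle-then-truncate step is what keeps duplicate requests roughly balanced across peers instead of every connection chasing the same few chunks.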
+ + if self.have[index]: + return + self.have[index] = True + self.downloader.picker.got_have(index,self.connection) + if self.have.complete(): + self.downloader.picker.became_seed() + if self.downloader.picker.am_I_complete(): + self.downloader.add_disconnected_seed(self.connection.get_readable_id()) + self.connection.close() + return + if self.downloader.endgamemode: + self.fix_download_endgame() + elif ( not self.downloader.paused + and not self.downloader.picker.is_blocked(index) + and self.downloader.storage.do_I_have_requests(index) ): + if not self.choked: + self._request_more() + else: + self.send_interested() + + def _check_interests(self): + if self.interested or self.downloader.paused: + return + for i in xrange(len(self.have)): + if ( self.have[i] and not self.downloader.picker.is_blocked(i) + and ( self.downloader.endgamemode + or self.downloader.storage.do_I_have_requests(i) ) ): + self.send_interested() + return + + def got_have_bitfield(self, have): + + if self.downloader.picker.am_I_complete() and have.complete(): + # Arno: If we're both seeds + if self.downloader.super_seeding: + self.connection.send_bitfield(have.tostring()) # be nice, show you're a seed too + self.connection.close() + self.downloader.add_disconnected_seed(self.connection.get_readable_id()) + return + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Downloader: got_have_bitfield: VVV#############################################################################################VVVVVVVVVVVVVVVVVVVVVVVVV valid",self.downloader.picker.get_valid_range_iterator(),"len",self.downloader.numpieces + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Downloader: got_have_bitfield: input",`have.toboollist()` + if have.complete(): + # Arno: He is seed + self.downloader.picker.got_seed() + else: + # Arno: LIVEWRAP: filter out valid pieces + # TODO: may be slow with 32K pieces. 
+ validhave = Bitfield(self.downloader.numpieces) + for i in self.downloader.picker.get_valid_range_iterator(): + if have[i]: + validhave[i] = True + self.downloader.picker.got_have(i,self.connection) + have = validhave + # Store filtered bitfield + self.have = have + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Downloader: got_have_bitfield: valid",`have.toboollist()` + + if self.downloader.endgamemode and not self.downloader.paused: + for piece, begin, length in self.downloader.all_requests: + if self.have[piece]: + self.send_interested() + break + return + self._check_interests() + + def get_rate(self): + return self.measure.get_rate() + + def get_short_term_rate(self): + return self.short_term_measure.get_rate() + + def is_snubbed(self): +# 2fastbt_ + if not self.choked and clock() - self.last2 > self.downloader.snub_time and \ + not self.connection.connection.is_helper_con() and \ + not self.connection.connection.is_coordinator_con(): +# _2fastbt + for index, begin, length in self.active_requests: + self.connection.send_cancel(index, begin, length) + self.got_choke() # treat it just like a choke + return clock() - self.last > self.downloader.snub_time + + def peer_is_complete(self): + return self.have.complete() + +class Downloader: + def __init__(self, storage, picker, backlog, max_rate_period, + numpieces, chunksize, measurefunc, snub_time, + kickbans_ok, kickfunc, banfunc, scheduler = None): + self.storage = storage + self.picker = picker + self.backlog = backlog + self.max_rate_period = max_rate_period + self.measurefunc = measurefunc + self.totalmeasure = Measure(max_rate_period*storage.piece_length/storage.request_size) + self.numpieces = numpieces + self.chunksize = chunksize + self.snub_time = snub_time + self.kickfunc = kickfunc + self.banfunc = banfunc + self.disconnectedseeds = {} + self.downloads = [] + self.perip = {} + self.gotbaddata = {} + self.kicked = {} + self.banned = {} + self.kickbans_ok = kickbans_ok + self.kickbans_halted = False + self.super_seeding = False + self.endgamemode = False + self.endgame_queued_pieces = [] + self.all_requests = [] + self.discarded = 0L + self.download_rate = 0 +# self.download_rate = 25000 # 25K/s test rate + self.bytes_requested = 0 + self.last_time = clock() + self.queued_out = {} + self.requeueing = False + self.paused = False + self.scheduler = scheduler + + # check periodicaly + self.scheduler(self.periodic_check, 1) + + def periodic_check(self): + self.picker.check_outstanding_requests(self.downloads) + + ds = [d for d in self.downloads if not d.choked] + shuffle(ds) + for d in ds: + d._request_more() + + self.scheduler(self.periodic_check, 1) + + def set_download_rate(self, rate): + self.download_rate = rate * 1000 + self.bytes_requested = 0 + + def queue_limit(self): + if not self.download_rate: + return 10e10 # that's a big queue! 
+ t = clock() + self.bytes_requested -= (t - self.last_time) * self.download_rate + self.last_time = t + if not self.requeueing and self.queued_out and self.bytes_requested < 0: + self.requeueing = True + q = self.queued_out.keys() + shuffle(q) + self.queued_out = {} + for d in q: + d._request_more() + self.requeueing = False + if -self.bytes_requested > 5*self.download_rate: + self.bytes_requested = -5*self.download_rate + ql = max(int(-self.bytes_requested/self.chunksize), 0) + # if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'Downloader: download_rate: %s, bytes_requested: %s, chunk: %s -> queue limit: %d' % \ + # (self.download_rate, self.bytes_requested, self.chunksize, ql) + return ql + + def chunk_requested(self, size): + self.bytes_requested += size + + external_data_received = chunk_requested + + def make_download(self, connection): + ip = connection.get_ip() + if self.perip.has_key(ip): + perip = self.perip[ip] + else: + perip = self.perip.setdefault(ip, PerIPStats(ip)) + perip.peerid = connection.get_readable_id() + perip.numconnections += 1 + d = SingleDownload(self, connection) + perip.lastdownload = d + self.downloads.append(d) + return d + + def piece_flunked(self, index): + if self.paused: + return + if self.endgamemode: + if self.downloads: + while self.storage.do_I_have_requests(index): + nb, nl = self.storage.new_request(index) + self.all_requests.append((index, nb, nl)) + for d in self.downloads: + d.fix_download_endgame() + return + self._reset_endgame() + return + ds = [d for d in self.downloads if not d.choked] + shuffle(ds) + for d in ds: + d._request_more() + ds = [d for d in self.downloads if not d.interested and d.have[index]] + for d in ds: + d.example_interest = index + d.send_interested() + + def has_downloaders(self): + return len(self.downloads) + + def lost_peer(self, download): + ip = download.ip + self.perip[ip].numconnections -= 1 + if self.perip[ip].lastdownload == download: + self.perip[ip].lastdownload = None + self.downloads.remove(download) + if self.endgamemode and not self.downloads: # all peers gone + self._reset_endgame() + + def _reset_endgame(self): + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Downloader: _reset_endgame" + self.storage.reset_endgame(self.all_requests) + self.endgamemode = False + self.all_requests = [] + self.endgame_queued_pieces = [] + + def add_disconnected_seed(self, id): +# if not self.disconnectedseeds.has_key(id): +# self.picker.seed_seen_recently() + self.disconnectedseeds[id]=clock() + +# def expire_disconnected_seeds(self): + + def num_disconnected_seeds(self): + # first expire old ones + expired = [] + for id, t in self.disconnectedseeds.items(): + if clock() - t > EXPIRE_TIME: #Expire old seeds after so long + expired.append(id) + for id in expired: +# self.picker.seed_disappeared() + del self.disconnectedseeds[id] + return len(self.disconnectedseeds) + # if this isn't called by a stats-gathering function + # it should be scheduled to run every minute or two. 
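set_download_rate(), chunk_requested() and queue_limit() above together behave like a simple token bucket: each request adds its size to bytes_requested, elapsed wall-clock time drains it at download_rate bytes per second (with at most five seconds of banked credit), and the remaining deficit divided by the chunk size is how many more chunks may be queued. A stripped-down model of that accounting, with hypothetical names and time.time() standing in for the BitTornado clock:

    import time

    class RateBudget(object):
        # Illustrative sketch of Downloader.queue_limit()'s budgeting, not the class above.
        def __init__(self, rate_kbytes_per_sec, chunksize):
            self.rate = rate_kbytes_per_sec * 1000   # bytes per second; 0 means unlimited
            self.chunksize = chunksize
            self.outstanding = 0.0                   # bytes requested but not yet "earned back"
            self.last = time.time()

        def chunk_requested(self, size):
            self.outstanding += size

        def queue_limit(self):
            if not self.rate:
                return 10 ** 10                      # effectively unbounded queue
            now = time.time()
            self.outstanding -= (now - self.last) * self.rate
            self.last = now
            # never bank more than five seconds' worth of credit
            if -self.outstanding > 5 * self.rate:
                self.outstanding = -5.0 * self.rate
            return max(int(-self.outstanding / self.chunksize), 0)

For example, at a 100 kB/s cap with 16 KiB chunks, one idle second earns back about 100000 / 16384, i.e. roughly six more chunks of queue room.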
+ + def _check_kicks_ok(self): + if len(self.gotbaddata) > 10: + self.kickbans_ok = False + self.kickbans_halted = True + return self.kickbans_ok and len(self.downloads) > 2 + + def try_kick(self, download): + if self._check_kicks_ok(): + download.guard.download = None + ip = download.ip + id = download.connection.get_readable_id() + self.kicked[ip] = id + self.perip[ip].peerid = id + self.kickfunc(download.connection) + + def try_ban(self, ip): + if self._check_kicks_ok(): + self.banfunc(ip) + self.banned[ip] = self.perip[ip].peerid + if self.kicked.has_key(ip): + del self.kicked[ip] + + def set_super_seed(self): + self.super_seeding = True + + def check_complete(self, index): + if self.endgamemode and not self.all_requests: + self.endgamemode = False + if self.endgame_queued_pieces and not self.endgamemode: + self.requeue_piece_download() + if self.picker.am_I_complete(): + assert not self.all_requests + assert not self.endgamemode + for d in [i for i in self.downloads if i.have.complete()]: + d.connection.send_have(index) # be nice, tell the other seed you completed + self.add_disconnected_seed(d.connection.get_readable_id()) + d.connection.close() + return True + return False + + def too_many_partials(self): + return len(self.storage.dirty) > (len(self.downloads)/2) + + def cancel_requests(self, requests, allowrerequest=True): + + # todo: remove duplicates + slowpieces = [piece_id for piece_id, _, _ in requests] + + if self.endgamemode: + if self.endgame_queued_pieces: + for piece_id, _, _ in requests: + if not self.storage.do_I_have(piece_id): + try: + self.endgame_queued_pieces.remove(piece_id) + except: + pass + + # remove the items in requests from self.all_requests + if not allowrerequest: + self.all_requests = [request for request in self.all_requests if not request in requests] + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Downloader: cancel_requests: all_requests", len(self.all_requests), "remaining" + + for download in self.downloads: + hit = False + for request in download.active_requests: + if request in requests: + hit = True + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Downloader:cancel_requests: canceling", request, "on", download.ip + download.connection.send_cancel(*request) + if not self.endgamemode: + self.storage.request_lost(*request) + if hit: + download.active_requests = [request for request in download.active_requests if not request in requests] + # Arno: VOD: all these peers were slow for their individually + # assigned pieces. These pieces have high priority, so don't + # retrieve any of theses pieces from these slow peers, just + # give them something further in the future. + if allowrerequest: + download._request_more() + else: + # Arno: ALT is to just kick peer. 
Good option if we have lots (See Encryper.to_connect() queue + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Downloader: Kicking slow peer",d.ip + #d.connection.close() # bye bye, zwaai zwaai + download._request_more(slowpieces=slowpieces) + + if not self.endgamemode and download.choked: + download._check_interests() + + def cancel_piece_download(self, pieces, allowrerequest=True): + if self.endgamemode: + if self.endgame_queued_pieces: + for piece in pieces: + try: + self.endgame_queued_pieces.remove(piece) + except: + pass + + if allowrerequest: + for index, nb, nl in self.all_requests: + if index in pieces: + self.storage.request_lost(index, nb, nl) + + else: + new_all_requests = [] + for index, nb, nl in self.all_requests: + if index in pieces: + self.storage.request_lost(index, nb, nl) + else: + new_all_requests.append((index, nb, nl)) + self.all_requests = new_all_requests + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Downloader: cancel_piece_download: all_requests", len(self.all_requests), "remaining" + + for d in self.downloads: + hit = False + for index, nb, nl in d.active_requests: + if index in pieces: + hit = True + d.connection.send_cancel(index, nb, nl) + if not self.endgamemode: + self.storage.request_lost(index, nb, nl) + if hit: + d.active_requests = [ r for r in d.active_requests + if r[0] not in pieces ] + # Arno: VOD: all these peers were slow for their individually + # assigned pieces. These pieces have high priority, so don't + # retrieve any of theses pieces from these slow peers, just + # give them something further in the future. + if not allowrerequest: + # Arno: ALT is to just kick peer. Good option if we have lots (See Encryper.to_connect() queue + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Downloader: Kicking slow peer",d.ip + #d.connection.close() # bye bye, zwaai zwaai + d._request_more(slowpieces=pieces) + else: + d._request_more() + if not self.endgamemode and d.choked: + d._check_interests() + + def requeue_piece_download(self, pieces = []): + if self.endgame_queued_pieces: + for piece in pieces: + if not piece in self.endgame_queued_pieces: + self.endgame_queued_pieces.append(piece) + pieces = self.endgame_queued_pieces + if self.endgamemode: + if self.all_requests: + self.endgame_queued_pieces = pieces + return + self.endgamemode = False + self.endgame_queued_pieces = None + + ds = [d for d in self.downloads] + shuffle(ds) + for d in ds: + if d.choked: + d._check_interests() + else: + d._request_more() + + def start_endgame(self): + assert not self.endgamemode + self.endgamemode = True + assert not self.all_requests + for d in self.downloads: + if d.active_requests: + assert d.interested and not d.choked + for request in d.active_requests: + assert not request in self.all_requests + self.all_requests.append(request) + for d in self.downloads: + d.fix_download_endgame() + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Downloader: start_endgame: we have", len(self.all_requests), "requests remaining" + + def pause(self, flag): + self.paused = flag + if flag: + for d in self.downloads: + for index, begin, length in d.active_requests: + d.connection.send_cancel(index, begin, length) + d._letgo() + d.send_not_interested() + if self.endgamemode: + self._reset_endgame() + else: + shuffle(self.downloads) + for d in self.downloads: + d._check_interests() + if d.interested and not d.choked: + d._request_more() + + def live_invalidate(self,piece): # 
Arno: LIVEWRAP + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Downloader: live_invalidate",piece + for d in self.downloads: + d.have[piece] = False + self.storage.live_invalidate(piece) + diff --git a/tribler-mod/Tribler/Core/BitTornado/BT1/Downloader.py.bak b/tribler-mod/Tribler/Core/BitTornado/BT1/Downloader.py.bak new file mode 100644 index 0000000..a8b2698 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/BT1/Downloader.py.bak @@ -0,0 +1,956 @@ + +# Written by Bram Cohen and Pawel Garbacki +# see LICENSE.txt for license information + +from Tribler.Core.BitTornado.CurrentRateMeasure import Measure +from Tribler.Core.BitTornado.bitfield import Bitfield +from random import shuffle +from Tribler.Core.BitTornado.clock import clock +# 2fastbt_ +from Tribler.Core.CoopDownload.Helper import SingleDownloadHelperInterface + +import sys + +try: + True +except: + True = 1 + False = 0 + +DEBUG = True #False +EXPIRE_TIME = 60 * 60 + +# only define the following functions in __debug__. And only import +# them in this case. They are to expensive to have, and have no +# purpose, outside debug mode. +if __debug__: + _ident_letters = {} + _ident_letter_pool = None + def get_ident_letter(download): + if not download.ip in _ident_letters: + global _ident_letter_pool + if not _ident_letter_pool: + _ident_letter_pool = [chr(c) for c in range(ord("a"), ord("z")+1)] + [chr(c) for c in range(ord("A"), ord("Z")+1)] + _ident_letters[download.ip] = _ident_letter_pool.pop(0) + return _ident_letters[download.ip] + + def print_chunks(downloader, pieces, before=(), after=(), compact=True): + """ + Print a line summery indicating completed/outstanding/non-requested chunks + + When COMPACT is True one character will represent one piece. + # --> downloaded + - --> no outstanding requests + 1-9 --> the number of outstanding requests (max 9) + + When COMPACT is False one character will requests one chunk. + # --> downloaded + - --> no outstanding requests + a-z --> requested at peer with that character (also capitals, duplicates may occur) + 1-9 --> requested multipile times (at n peers) + """ + if pieces: + do_I_have = downloader.storage.do_I_have + do_I_have_requests = downloader.storage.do_I_have_requests + inactive_requests = downloader.storage.inactive_requests + piece_size = downloader.storage.piece_length + chunk_size = downloader.storage.request_size + chunks_per_piece = int(piece_size / chunk_size) + + if compact: + request_map = {} + for download in downloader.downloads: + for piece, begin, length in download.active_requests: + if not piece in request_map: + request_map[piece] = 0 + request_map[piece] += 1 + + def print_chunks_helper(piece_id): + if do_I_have(piece_id): return "#" + if do_I_have_requests(piece_id): return "-" + if piece_id in request_map: return str(min(9, request_map[piece_id])) + return "?" 
+ + else: + request_map = {} + for download in downloader.downloads: + + for piece, begin, length in download.active_requests: + if not piece in request_map: + request_map[piece] = ["-"] * chunks_per_piece + index = int(begin/chunk_size) + if request_map[piece][index] == "-": + request_map[piece][index] = get_ident_letter(download) + elif type(request_map[piece][index]) is str: + request_map[piece][index] = 2 + else: + request_map[piece][index] += 1 + request_map[piece][int(begin/chunk_size)] = get_ident_letter(download) + + def print_chunks_helper(piece_id): + if do_I_have(piece_id): return "#" * chunks_per_piece +# if do_I_have_requests(piece_id): return "-" * chunks_per_piece + if piece_id in request_map: + if piece_id in inactive_requests and type(inactive_requests[piece_id]) is list: + for begin, length in inactive_requests[piece_id]: + request_map[piece_id][int(begin/chunk_size)] = " " + return "".join([str(c) for c in request_map[piece_id]]) + return "-" * chunks_per_piece + + if before: + s_before = before[0] + else: + s_before = "" + + if after: + s_after = after[-1] + else: + s_after = "" + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Outstanding %s:%d:%d:%s [%s|%s|%s]" % (s_before, pieces[0], pieces[-1], s_after, "".join(map(print_chunks_helper, before)), "".join(map(print_chunks_helper, pieces)), "".join(map(print_chunks_helper, after))) + + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Outstanding 0:0 []" +else: + def print_chunks(downloader, pieces, before=(), after=(), compact=True): + pass + +class PerIPStats: + def __init__(self, ip): + self.numgood = 0 + self.bad = {} + self.numconnections = 0 + self.lastdownload = None + self.peerid = None + +class BadDataGuard: + def __init__(self, download): + self.download = download + self.ip = download.ip + self.downloader = download.downloader + self.stats = self.downloader.perip[self.ip] + self.lastindex = None + + def failed(self, index, bump = False): + self.stats.bad.setdefault(index, 0) + self.downloader.gotbaddata[self.ip] = 1 + self.stats.bad[index] += 1 + if len(self.stats.bad) > 1: + if self.download is not None: + self.downloader.try_kick(self.download) + elif self.stats.numconnections == 1 and self.stats.lastdownload is not None: + self.downloader.try_kick(self.stats.lastdownload) + if len(self.stats.bad) >= 3 and len(self.stats.bad) > int(self.stats.numgood/30): + self.downloader.try_ban(self.ip) + elif bump: + self.downloader.picker.bump(index) + + def good(self, index): + # lastindex is a hack to only increase numgood by one for each good + # piece, however many chunks come from the connection(s) from this IP + if index != self.lastindex: + self.stats.numgood += 1 + self.lastindex = index + +# 2fastbt_ +class SingleDownload(SingleDownloadHelperInterface): +# _2fastbt + def __init__(self, downloader, connection): +# 2fastbt_ + SingleDownloadHelperInterface.__init__(self) +# _2fastbt + self.downloader = downloader + self.connection = connection + self.choked = True + self.interested = False + self.active_requests = [] + self.measure = Measure(downloader.max_rate_period) + self.peermeasure = Measure(downloader.max_rate_period) + self.have = Bitfield(downloader.numpieces) + self.last = -1000 + self.last2 = -1000 + self.example_interest = None + self.backlog = 2 + self.ip = connection.get_ip() + self.guard = BadDataGuard(self) +# 2fastbt_ + self.helper = downloader.picker.helper +# _2fastbt + + # boudewijn: VOD needs a download measurement that is not + # averaged 
over a 'long' period. downloader.max_rate_period is + # (by default) 20 seconds because this matches the unchoke + # policy. + self.short_term_measure = Measure(5) + + # boudewijn: each download maintains a counter for the number + # of high priority piece requests that did not get any + # responce within x seconds. + self.bad_performance_counter = 0 + + def _backlog(self, just_unchoked): + self.backlog = int(min( + 2+int(4*self.measure.get_rate()/self.downloader.chunksize), + (2*just_unchoked)+self.downloader.queue_limit() )) + if self.backlog > 50: + self.backlog = int(max(50, self.backlog * 0.075)) + return self.backlog + + def disconnected(self): + self.downloader.lost_peer(self) + + """ JD: obsoleted -- moved to picker.lost_peer + + if self.have.complete(): + self.downloader.picker.lost_seed() + else: + for i in xrange(len(self.have)): + if self.have[i]: + self.downloader.picker.lost_have(i) + """ + + if self.have.complete() and self.downloader.storage.is_endgame(): + self.downloader.add_disconnected_seed(self.connection.get_readable_id()) + self._letgo() + self.guard.download = None + + def _letgo(self): + if self.downloader.queued_out.has_key(self): + del self.downloader.queued_out[self] + if not self.active_requests: + return + if self.downloader.endgamemode: + self.active_requests = [] + return + lost = {} + for index, begin, length in self.active_requests: + self.downloader.storage.request_lost(index, begin, length) + lost[index] = 1 + lost = lost.keys() + self.active_requests = [] + if self.downloader.paused: + return + ds = [d for d in self.downloader.downloads if not d.choked] + shuffle(ds) + for d in ds: + d._request_more() + for d in self.downloader.downloads: + if d.choked and not d.interested: + for l in lost: + if d.have[l] and self.downloader.storage.do_I_have_requests(l): + d.send_interested() + break + + def got_choke(self): + if not self.choked: + self.choked = True + self._letgo() + + def got_unchoke(self): + if self.choked: + self.choked = False + if self.interested: + self._request_more(new_unchoke = True) + self.last2 = clock() + + def is_choked(self): + return self.choked + + def is_interested(self): + return self.interested + + def send_interested(self): + if not self.interested: + self.interested = True + self.connection.send_interested() + + def send_not_interested(self): + if self.interested: + self.interested = False + self.connection.send_not_interested() + + def got_piece(self, index, begin, hashlist, piece): + """ + Returns True if the piece is complete. + Note that in this case a -piece- means a chunk! 
+ """ + + if self.bad_performance_counter: + self.bad_performance_counter -= 1 + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "decreased bad_performance_counter to", self.bad_performance_counter + + length = len(piece) + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'Downloader: got piece of length %d' % length + try: + self.active_requests.remove((index, begin, length)) + except ValueError: + self.downloader.discarded += length + return False + if self.downloader.endgamemode: + self.downloader.all_requests.remove((index, begin, length)) + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Downloader: got_piece: removed one request from all_requests", len(self.downloader.all_requests), "remaining" + + self.last = clock() + self.last2 = clock() + self.measure.update_rate(length) + self.short_term_measure.update_rate(length) + self.downloader.measurefunc(length) + if not self.downloader.storage.piece_came_in(index, begin, hashlist, piece, self.guard): + self.downloader.piece_flunked(index) + return False + + # boudewijn: we need more accurate (if possibly invalid) + # measurements on current download speed + self.downloader.picker.got_piece(index, begin, length) + + if self.downloader.storage.do_I_have(index): + self.downloader.picker.complete(index) + + if self.downloader.endgamemode: + for d in self.downloader.downloads: + if d is not self: + if d.interested: + if d.choked: + assert not d.active_requests + d.fix_download_endgame() + else: + try: + d.active_requests.remove((index, begin, length)) + except ValueError: + continue + d.connection.send_cancel(index, begin, length) + d.fix_download_endgame() + else: + assert not d.active_requests + self._request_more() + self.downloader.check_complete(index) + + # BarterCast counter + self.connection.total_downloaded += length + + return self.downloader.storage.do_I_have(index) + +# 2fastbt_ + def helper_forces_unchoke(self): + self.choked = False +# _2fastbt + + def _request_more(self, new_unchoke = False, slowpieces = []): +# 2fastbt_ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Downloader: _request_more()" + if self.is_frozen_by_helper(): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Downloader: _request_more: blocked, returning" + return +# _2fastbt + if self.choked: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Downloader: _request_more: choked, returning" + return +# 2fastbt_ + # do not download from coordinator + if self.connection.connection.is_coordinator_con(): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Downloader: _request_more: coordinator conn" + return +# _2fastbt + if self.downloader.endgamemode: + self.fix_download_endgame(new_unchoke) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Downloader: _request_more: endgame mode, returning" + return + if self.downloader.paused: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Downloader: _request_more: paused, returning" + return + if len(self.active_requests) >= self._backlog(new_unchoke): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Downloader: more req than unchoke (active req: %d >= backlog: %d)" % (len(self.active_requests), self._backlog(new_unchoke)) + # Jelle: Schedule _request more to be called in some time. 
Otherwise requesting and receiving packages + # may stop, if they arrive to quickly + if self.downloader.download_rate: + wait_period = self.downloader.chunksize / self.downloader.download_rate / 2.0 + + # Boudewijn: when wait_period is 0.0 this will cause + # the the _request_more method to be scheduled + # multiple times (recursively), causing severe cpu + # problems. + # + # Therefore, only schedule _request_more to be called + # if the call will be made in the future. The minimal + # wait_period should be tweaked. + if wait_period > 1.0: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Downloader: waiting for %f s to call _request_more again" % wait_period + self.downloader.scheduler(self._request_more, wait_period) + + if not (self.active_requests or self.backlog): + self.downloader.queued_out[self] = 1 + return + + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Downloader: _request_more: len act",len(self.active_requests),"back",self.backlog + + lost_interests = [] + while len(self.active_requests) < self.backlog: + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Downloader: Looking for interesting piece" + #st = time.time() + interest = self.downloader.picker.next(self.have, + self.downloader.storage.do_I_have_requests, + self, + self.downloader.too_many_partials(), + self.connection.connection.is_helper_con(), + slowpieces = slowpieces, connection = self.connection) + #et = time.time() + #diff = et-st + diff=-1 + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Downloader: _request_more: next() returned",interest,"took %.5f" % (diff) + if interest is None: + break + self.example_interest = interest + self.send_interested() + loop = True + while len(self.active_requests) < self.backlog and loop: + + begin, length = self.downloader.storage.new_request(interest) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Downloader: new_request",interest,begin,length,"to",self.connection.connection.get_ip(),self.connection.connection.get_port() + + self.downloader.picker.requested(interest, begin, length) + self.active_requests.append((interest, begin, length)) + self.connection.send_request(interest, begin, length) + self.downloader.chunk_requested(length) + if not self.downloader.storage.do_I_have_requests(interest): + loop = False + lost_interests.append(interest) + if not self.active_requests: + self.send_not_interested() + if lost_interests: + for d in self.downloader.downloads: + if d.active_requests or not d.interested: + continue + if d.example_interest is not None and self.downloader.storage.do_I_have_requests(d.example_interest): + continue + for lost in lost_interests: + if d.have[lost]: + break + else: + continue +# 2fastbt_ + #st = time.time() + interest = self.downloader.picker.next(d.have, + self.downloader.storage.do_I_have_requests, + self, # Arno, 2008-05-22; self -> d? 
Original Pawel code + self.downloader.too_many_partials(), + self.connection.connection.is_helper_con(), willrequest=False,connection=self.connection) + #et = time.time() + #diff = et-st + diff=-1 + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Downloader: _request_more: next()2 returned",interest,"took %.5f" % (diff) +# _2fastbt + if interest is None: + d.send_not_interested() + else: + d.example_interest = interest + + # Arno: LIVEWRAP: no endgame + if not self.downloader.endgamemode and \ + self.downloader.storage.is_endgame() and \ + not (self.downloader.picker.videostatus and self.downloader.picker.videostatus.live_streaming): + self.downloader.start_endgame() + + + def fix_download_endgame(self, new_unchoke = False): +# 2fastbt_ + # do not download from coordinator + if self.downloader.paused or self.connection.connection.is_coordinator_con(): + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Downloader: fix_download_endgame: paused", self.downloader.paused, "or is_coordinator_con", self.connection.connection.is_coordinator_con() + return +# _2fastbt + + if len(self.active_requests) >= self._backlog(new_unchoke): + if not (self.active_requests or self.backlog) and not self.choked: + self.downloader.queued_out[self] = 1 + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Downloader: fix_download_endgame: returned" + return +# 2fastbt_ + want = [a for a in self.downloader.all_requests if self.have[a[0]] and a not in self.active_requests and (self.helper is None or self.connection.connection.is_helper_con() or not self.helper.is_ignored(a[0]))] +# _2fastbt + if not (self.active_requests or want): + self.send_not_interested() + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Downloader: fix_download_endgame: not interested" + return + if want: + self.send_interested() + if self.choked: + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Downloader: fix_download_endgame: choked" + return + shuffle(want) + del want[self.backlog - len(self.active_requests):] + self.active_requests.extend(want) + for piece, begin, length in want: +# 2fastbt_ + if self.helper is None or self.connection.connection.is_helper_con() or self.helper.reserve_piece(piece,self): + self.connection.send_request(piece, begin, length) + self.downloader.chunk_requested(length) +# _2fastbt + + def got_have(self, index): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Downloader: got_have",index + if index == self.downloader.numpieces-1: + self.downloader.totalmeasure.update_rate(self.downloader.storage.total_length-(self.downloader.numpieces-1)*self.downloader.storage.piece_length) + self.peermeasure.update_rate(self.downloader.storage.total_length-(self.downloader.numpieces-1)*self.downloader.storage.piece_length) + else: + self.downloader.totalmeasure.update_rate(self.downloader.storage.piece_length) + self.peermeasure.update_rate(self.downloader.storage.piece_length) + + # Arno: LIVEWRAP + if not self.downloader.picker.is_valid_piece(index): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Downloader: got_have",index,"is invalid piece" + return # TODO: should we request_more()? 
+ + if self.have[index]: + return + self.have[index] = True + self.downloader.picker.got_have(index,self.connection) + if self.have.complete(): + self.downloader.picker.became_seed() + if self.downloader.picker.am_I_complete(): + self.downloader.add_disconnected_seed(self.connection.get_readable_id()) + self.connection.close() + return + if self.downloader.endgamemode: + self.fix_download_endgame() + elif ( not self.downloader.paused + and not self.downloader.picker.is_blocked(index) + and self.downloader.storage.do_I_have_requests(index) ): + if not self.choked: + self._request_more() + else: + self.send_interested() + + def _check_interests(self): + if self.interested or self.downloader.paused: + return + for i in xrange(len(self.have)): + if ( self.have[i] and not self.downloader.picker.is_blocked(i) + and ( self.downloader.endgamemode + or self.downloader.storage.do_I_have_requests(i) ) ): + self.send_interested() + return + + def got_have_bitfield(self, have): + + if self.downloader.picker.am_I_complete() and have.complete(): + # Arno: If we're both seeds + if self.downloader.super_seeding: + self.connection.send_bitfield(have.tostring()) # be nice, show you're a seed too + self.connection.close() + self.downloader.add_disconnected_seed(self.connection.get_readable_id()) + return + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Downloader: got_have_bitfield: VVV#############################################################################################VVVVVVVVVVVVVVVVVVVVVVVVV valid",self.downloader.picker.get_valid_range_iterator(),"len",self.downloader.numpieces + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Downloader: got_have_bitfield: input",`have.toboollist()` + if have.complete(): + # Arno: He is seed + self.downloader.picker.got_seed() + else: + # Arno: LIVEWRAP: filter out valid pieces + # TODO: may be slow with 32K pieces. 
+ validhave = Bitfield(self.downloader.numpieces) + for i in self.downloader.picker.get_valid_range_iterator(): + if have[i]: + validhave[i] = True + self.downloader.picker.got_have(i,self.connection) + have = validhave + # Store filtered bitfield + self.have = have + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Downloader: got_have_bitfield: valid",`have.toboollist()` + + if self.downloader.endgamemode and not self.downloader.paused: + for piece, begin, length in self.downloader.all_requests: + if self.have[piece]: + self.send_interested() + break + return + self._check_interests() + + def get_rate(self): + return self.measure.get_rate() + + def get_short_term_rate(self): + return self.short_term_measure.get_rate() + + def is_snubbed(self): +# 2fastbt_ + if not self.choked and clock() - self.last2 > self.downloader.snub_time and \ + not self.connection.connection.is_helper_con() and \ + not self.connection.connection.is_coordinator_con(): +# _2fastbt + for index, begin, length in self.active_requests: + self.connection.send_cancel(index, begin, length) + self.got_choke() # treat it just like a choke + return clock() - self.last > self.downloader.snub_time + + def peer_is_complete(self): + return self.have.complete() + +class Downloader: + def __init__(self, storage, picker, backlog, max_rate_period, + numpieces, chunksize, measurefunc, snub_time, + kickbans_ok, kickfunc, banfunc, scheduler = None): + self.storage = storage + self.picker = picker + self.backlog = backlog + self.max_rate_period = max_rate_period + self.measurefunc = measurefunc + self.totalmeasure = Measure(max_rate_period*storage.piece_length/storage.request_size) + self.numpieces = numpieces + self.chunksize = chunksize + self.snub_time = snub_time + self.kickfunc = kickfunc + self.banfunc = banfunc + self.disconnectedseeds = {} + self.downloads = [] + self.perip = {} + self.gotbaddata = {} + self.kicked = {} + self.banned = {} + self.kickbans_ok = kickbans_ok + self.kickbans_halted = False + self.super_seeding = False + self.endgamemode = False + self.endgame_queued_pieces = [] + self.all_requests = [] + self.discarded = 0L + self.download_rate = 0 +# self.download_rate = 25000 # 25K/s test rate + self.bytes_requested = 0 + self.last_time = clock() + self.queued_out = {} + self.requeueing = False + self.paused = False + self.scheduler = scheduler + + # check periodicaly + self.scheduler(self.periodic_check, 1) + + def periodic_check(self): + self.picker.check_outstanding_requests(self.downloads) + + ds = [d for d in self.downloads if not d.choked] + shuffle(ds) + for d in ds: + d._request_more() + + self.scheduler(self.periodic_check, 1) + + def set_download_rate(self, rate): + self.download_rate = rate * 1000 + self.bytes_requested = 0 + + def queue_limit(self): + if not self.download_rate: + return 10e10 # that's a big queue! 
+ t = clock() + self.bytes_requested -= (t - self.last_time) * self.download_rate + self.last_time = t + if not self.requeueing and self.queued_out and self.bytes_requested < 0: + self.requeueing = True + q = self.queued_out.keys() + shuffle(q) + self.queued_out = {} + for d in q: + d._request_more() + self.requeueing = False + if -self.bytes_requested > 5*self.download_rate: + self.bytes_requested = -5*self.download_rate + ql = max(int(-self.bytes_requested/self.chunksize), 0) + # if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'Downloader: download_rate: %s, bytes_requested: %s, chunk: %s -> queue limit: %d' % \ + # (self.download_rate, self.bytes_requested, self.chunksize, ql) + return ql + + def chunk_requested(self, size): + self.bytes_requested += size + + external_data_received = chunk_requested + + def make_download(self, connection): + ip = connection.get_ip() + if self.perip.has_key(ip): + perip = self.perip[ip] + else: + perip = self.perip.setdefault(ip, PerIPStats(ip)) + perip.peerid = connection.get_readable_id() + perip.numconnections += 1 + d = SingleDownload(self, connection) + perip.lastdownload = d + self.downloads.append(d) + return d + + def piece_flunked(self, index): + if self.paused: + return + if self.endgamemode: + if self.downloads: + while self.storage.do_I_have_requests(index): + nb, nl = self.storage.new_request(index) + self.all_requests.append((index, nb, nl)) + for d in self.downloads: + d.fix_download_endgame() + return + self._reset_endgame() + return + ds = [d for d in self.downloads if not d.choked] + shuffle(ds) + for d in ds: + d._request_more() + ds = [d for d in self.downloads if not d.interested and d.have[index]] + for d in ds: + d.example_interest = index + d.send_interested() + + def has_downloaders(self): + return len(self.downloads) + + def lost_peer(self, download): + ip = download.ip + self.perip[ip].numconnections -= 1 + if self.perip[ip].lastdownload == download: + self.perip[ip].lastdownload = None + self.downloads.remove(download) + if self.endgamemode and not self.downloads: # all peers gone + self._reset_endgame() + + def _reset_endgame(self): + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Downloader: _reset_endgame" + self.storage.reset_endgame(self.all_requests) + self.endgamemode = False + self.all_requests = [] + self.endgame_queued_pieces = [] + + def add_disconnected_seed(self, id): +# if not self.disconnectedseeds.has_key(id): +# self.picker.seed_seen_recently() + self.disconnectedseeds[id]=clock() + +# def expire_disconnected_seeds(self): + + def num_disconnected_seeds(self): + # first expire old ones + expired = [] + for id, t in self.disconnectedseeds.items(): + if clock() - t > EXPIRE_TIME: #Expire old seeds after so long + expired.append(id) + for id in expired: +# self.picker.seed_disappeared() + del self.disconnectedseeds[id] + return len(self.disconnectedseeds) + # if this isn't called by a stats-gathering function + # it should be scheduled to run every minute or two. 
+ + def _check_kicks_ok(self): + if len(self.gotbaddata) > 10: + self.kickbans_ok = False + self.kickbans_halted = True + return self.kickbans_ok and len(self.downloads) > 2 + + def try_kick(self, download): + if self._check_kicks_ok(): + download.guard.download = None + ip = download.ip + id = download.connection.get_readable_id() + self.kicked[ip] = id + self.perip[ip].peerid = id + self.kickfunc(download.connection) + + def try_ban(self, ip): + if self._check_kicks_ok(): + self.banfunc(ip) + self.banned[ip] = self.perip[ip].peerid + if self.kicked.has_key(ip): + del self.kicked[ip] + + def set_super_seed(self): + self.super_seeding = True + + def check_complete(self, index): + if self.endgamemode and not self.all_requests: + self.endgamemode = False + if self.endgame_queued_pieces and not self.endgamemode: + self.requeue_piece_download() + if self.picker.am_I_complete(): + assert not self.all_requests + assert not self.endgamemode + for d in [i for i in self.downloads if i.have.complete()]: + d.connection.send_have(index) # be nice, tell the other seed you completed + self.add_disconnected_seed(d.connection.get_readable_id()) + d.connection.close() + return True + return False + + def too_many_partials(self): + return len(self.storage.dirty) > (len(self.downloads)/2) + + def cancel_requests(self, requests, allowrerequest=True): + + # todo: remove duplicates + slowpieces = [piece_id for piece_id, _, _ in requests] + + if self.endgamemode: + if self.endgame_queued_pieces: + for piece_id, _, _ in requests: + if not self.storage.do_I_have(piece_id): + try: + self.endgame_queued_pieces.remove(piece_id) + except: + pass + + # remove the items in requests from self.all_requests + if not allowrerequest: + self.all_requests = [request for request in self.all_requests if not request in requests] + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Downloader: cancel_requests: all_requests", len(self.all_requests), "remaining" + + for download in self.downloads: + hit = False + for request in download.active_requests: + if request in requests: + hit = True + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Downloader:cancel_requests: canceling", request, "on", download.ip + download.connection.send_cancel(*request) + if not self.endgamemode: + self.storage.request_lost(*request) + if hit: + download.active_requests = [request for request in download.active_requests if not request in requests] + # Arno: VOD: all these peers were slow for their individually + # assigned pieces. These pieces have high priority, so don't + # retrieve any of theses pieces from these slow peers, just + # give them something further in the future. + if allowrerequest: + download._request_more() + else: + # Arno: ALT is to just kick peer. 
Good option if we have lots (See Encryper.to_connect() queue + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Downloader: Kicking slow peer",d.ip + #d.connection.close() # bye bye, zwaai zwaai + download._request_more(slowpieces=slowpieces) + + if not self.endgamemode and download.choked: + download._check_interests() + + def cancel_piece_download(self, pieces, allowrerequest=True): + if self.endgamemode: + if self.endgame_queued_pieces: + for piece in pieces: + try: + self.endgame_queued_pieces.remove(piece) + except: + pass + + if allowrerequest: + for index, nb, nl in self.all_requests: + if index in pieces: + self.storage.request_lost(index, nb, nl) + + else: + new_all_requests = [] + for index, nb, nl in self.all_requests: + if index in pieces: + self.storage.request_lost(index, nb, nl) + else: + new_all_requests.append((index, nb, nl)) + self.all_requests = new_all_requests + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Downloader: cancel_piece_download: all_requests", len(self.all_requests), "remaining" + + for d in self.downloads: + hit = False + for index, nb, nl in d.active_requests: + if index in pieces: + hit = True + d.connection.send_cancel(index, nb, nl) + if not self.endgamemode: + self.storage.request_lost(index, nb, nl) + if hit: + d.active_requests = [ r for r in d.active_requests + if r[0] not in pieces ] + # Arno: VOD: all these peers were slow for their individually + # assigned pieces. These pieces have high priority, so don't + # retrieve any of theses pieces from these slow peers, just + # give them something further in the future. + if not allowrerequest: + # Arno: ALT is to just kick peer. Good option if we have lots (See Encryper.to_connect() queue + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Downloader: Kicking slow peer",d.ip + #d.connection.close() # bye bye, zwaai zwaai + d._request_more(slowpieces=pieces) + else: + d._request_more() + if not self.endgamemode and d.choked: + d._check_interests() + + def requeue_piece_download(self, pieces = []): + if self.endgame_queued_pieces: + for piece in pieces: + if not piece in self.endgame_queued_pieces: + self.endgame_queued_pieces.append(piece) + pieces = self.endgame_queued_pieces + if self.endgamemode: + if self.all_requests: + self.endgame_queued_pieces = pieces + return + self.endgamemode = False + self.endgame_queued_pieces = None + + ds = [d for d in self.downloads] + shuffle(ds) + for d in ds: + if d.choked: + d._check_interests() + else: + d._request_more() + + def start_endgame(self): + assert not self.endgamemode + self.endgamemode = True + assert not self.all_requests + for d in self.downloads: + if d.active_requests: + assert d.interested and not d.choked + for request in d.active_requests: + assert not request in self.all_requests + self.all_requests.append(request) + for d in self.downloads: + d.fix_download_endgame() + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Downloader: start_endgame: we have", len(self.all_requests), "requests remaining" + + def pause(self, flag): + self.paused = flag + if flag: + for d in self.downloads: + for index, begin, length in d.active_requests: + d.connection.send_cancel(index, begin, length) + d._letgo() + d.send_not_interested() + if self.endgamemode: + self._reset_endgame() + else: + shuffle(self.downloads) + for d in self.downloads: + d._check_interests() + if d.interested and not d.choked: + d._request_more() + + def live_invalidate(self,piece): # 
Arno: LIVEWRAP + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Downloader: live_invalidate",piece + for d in self.downloads: + d.have[piece] = False + self.storage.live_invalidate(piece) + diff --git a/tribler-mod/Tribler/Core/BitTornado/BT1/DownloaderFeedback.py b/tribler-mod/Tribler/Core/BitTornado/BT1/DownloaderFeedback.py new file mode 100644 index 0000000..726f794 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/BT1/DownloaderFeedback.py @@ -0,0 +1,196 @@ +from time import localtime, strftime +# Written by Bram Cohen +# see LICENSE.txt for license information + +from threading import Event + +try: + True +except: + True = 1 + False = 0 + +class DownloaderFeedback: + def __init__(self, choker, httpdl, add_task, upfunc, downfunc, + ratemeasure, leftfunc, file_length, finflag, sp, statistics, + statusfunc = None, interval = None, infohash = None, voddownload=None): + self.choker = choker + self.httpdl = httpdl + self.add_task = add_task + self.upfunc = upfunc + self.downfunc = downfunc + self.ratemeasure = ratemeasure + self.leftfunc = leftfunc + self.file_length = file_length + self.finflag = finflag + self.sp = sp + self.statistics = statistics + self.lastids = [] + self.spewdata = None + self.infohash = infohash + self.voddownload = voddownload + self.doneprocessing = Event() + self.doneprocessing.set() + if statusfunc: + self.autodisplay(statusfunc, interval) + + + def _rotate(self): + cs = self.choker.connections + for id in self.lastids: + for i in xrange(len(cs)): + if cs[i].get_id() == id: + return cs[i:] + cs[:i] + return cs + + def spews(self): + l = [] + cs = self._rotate() + self.lastids = [c.get_id() for c in cs] + for c in cs: # c: Connecter.Connection + a = {} + a['id'] = c.get_readable_id() + a['ip'] = c.get_ip() + if c.is_locally_initiated(): + a['port'] = c.get_port() + else: + a['port'] = 0 + try: + a['optimistic'] = (c is self.choker.connections[0]) + except: + a['optimistic'] = False + if c.is_locally_initiated(): + a['direction'] = 'L' + else: + a['direction'] = 'R' + ##a['unauth_permid'] = c.get_unauth_permid() + u = c.get_upload() + a['uprate'] = int(u.measure.get_rate()) + a['uinterested'] = u.is_interested() + a['uchoked'] = u.is_choked() + d = c.get_download() + a['downrate'] = int(d.measure.get_rate()) + a['dinterested'] = d.is_interested() + a['dchoked'] = d.is_choked() + a['snubbed'] = d.is_snubbed() + a['utotal'] = d.connection.upload.measure.get_total() + a['dtotal'] = d.connection.download.measure.get_total() + if d.connection.download.have: + a['completed'] = float(len(d.connection.download.have)-d.connection.download.have.numfalse)/float(len(d.connection.download.have)) + else: + a['completed'] = 1.0 + # The total download speed of the peer as measured from its + # HAVE messages. 
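+                # This approximates how fast the peer is completing the
+                # torrent from all of its sources combined; 'downrate' above
+                # only measures what we receive from this peer directly.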
+ a['speed'] = d.connection.download.peermeasure.get_rate() + a['g2g'] = c.use_g2g + a['g2g_score'] = c.g2g_score() + + l.append(a) + + for dl in self.httpdl.get_downloads(): + if dl.goodseed: + a = {} + a['id'] = 'http seed' + a['ip'] = dl.baseurl + a['optimistic'] = False + a['direction'] = 'L' + a['uprate'] = 0 + a['uinterested'] = False + a['uchoked'] = False + a['downrate'] = int(dl.measure.get_rate()) + a['dinterested'] = True + a['dchoked'] = not dl.active + a['snubbed'] = not dl.active + a['utotal'] = None + a['dtotal'] = dl.measure.get_total() + a['completed'] = 1.0 + a['speed'] = None + + l.append(a) + + return l + + + def gather(self, displayfunc = None, getpeerlist=False): + s = {'stats': self.statistics.update()} + if getpeerlist: + s['spew'] = self.spews() + else: + s['spew'] = None + s['up'] = self.upfunc() + if self.finflag.isSet(): + s['done'] = self.file_length + s['down'] = 0.0 + s['frac'] = 1.0 + s['wanted'] = 0 + s['time'] = 0 + s['vod'] = False + s['vod_prebuf_frac'] = 1.0 + s['vod_playable'] = True + s['vod_playable_after'] = 0.0 + s['vod_stats'] = {} +# if self.voddownload: +# s['vod_duration'] = self.voddownload.get_duration() +# else: +# s['vod_duration'] = None + return s + s['down'] = self.downfunc() + obtained, desired, have = self.leftfunc() + s['done'] = obtained + s['wanted'] = desired + if desired > 0: + s['frac'] = float(obtained)/desired + else: + s['frac'] = 1.0 + if desired == obtained: + s['time'] = 0 + else: + s['time'] = self.ratemeasure.get_time_left(desired-obtained) + + if self.voddownload is not None: + s['vod_prebuf_frac'] = self.voddownload.get_prebuffering_progress() + s['vod_playable'] = self.voddownload.is_playable() + s['vod_playable_after'] = self.voddownload.get_playable_after() + s['vod'] = True + s['vod_stats'] = self.voddownload.get_stats() +# s['vod_duration'] = self.voddownload.get_duration() + else: + s['vod_prebuf_frac'] = 0.0 + s['vod_playable'] = False + s['vod_playable_after'] = float(2 ** 31) + s['vod'] = False + s['vod_stats'] = {} +# s['vod_duration'] = None + return s + + + def display(self, displayfunc): + if not self.doneprocessing.isSet(): + return + self.doneprocessing.clear() + stats = self.gather() + if self.finflag.isSet(): + displayfunc(dpflag = self.doneprocessing, + upRate = stats['up'], + statistics = stats['stats'], spew = stats['spew']) + elif stats['time'] is not None: + displayfunc(dpflag = self.doneprocessing, + fractionDone = stats['frac'], sizeDone = stats['done'], + downRate = stats['down'], upRate = stats['up'], + statistics = stats['stats'], spew = stats['spew'], + timeEst = stats['time']) + else: + displayfunc(dpflag = self.doneprocessing, + fractionDone = stats['frac'], sizeDone = stats['done'], + downRate = stats['down'], upRate = stats['up'], + statistics = stats['stats'], spew = stats['spew']) + + + def autodisplay(self, displayfunc, interval): + self.displayfunc = displayfunc + self.interval = interval + self._autodisplay() + + def _autodisplay(self): + self.add_task(self._autodisplay, self.interval) + self.display(self.displayfunc) diff --git a/tribler-mod/Tribler/Core/BitTornado/BT1/DownloaderFeedback.py.bak b/tribler-mod/Tribler/Core/BitTornado/BT1/DownloaderFeedback.py.bak new file mode 100644 index 0000000..2aef9ea --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/BT1/DownloaderFeedback.py.bak @@ -0,0 +1,195 @@ +# Written by Bram Cohen +# see LICENSE.txt for license information + +from threading import Event + +try: + True +except: + True = 1 + False = 0 + +class DownloaderFeedback: + 
def __init__(self, choker, httpdl, add_task, upfunc, downfunc, + ratemeasure, leftfunc, file_length, finflag, sp, statistics, + statusfunc = None, interval = None, infohash = None, voddownload=None): + self.choker = choker + self.httpdl = httpdl + self.add_task = add_task + self.upfunc = upfunc + self.downfunc = downfunc + self.ratemeasure = ratemeasure + self.leftfunc = leftfunc + self.file_length = file_length + self.finflag = finflag + self.sp = sp + self.statistics = statistics + self.lastids = [] + self.spewdata = None + self.infohash = infohash + self.voddownload = voddownload + self.doneprocessing = Event() + self.doneprocessing.set() + if statusfunc: + self.autodisplay(statusfunc, interval) + + + def _rotate(self): + cs = self.choker.connections + for id in self.lastids: + for i in xrange(len(cs)): + if cs[i].get_id() == id: + return cs[i:] + cs[:i] + return cs + + def spews(self): + l = [] + cs = self._rotate() + self.lastids = [c.get_id() for c in cs] + for c in cs: # c: Connecter.Connection + a = {} + a['id'] = c.get_readable_id() + a['ip'] = c.get_ip() + if c.is_locally_initiated(): + a['port'] = c.get_port() + else: + a['port'] = 0 + try: + a['optimistic'] = (c is self.choker.connections[0]) + except: + a['optimistic'] = False + if c.is_locally_initiated(): + a['direction'] = 'L' + else: + a['direction'] = 'R' + ##a['unauth_permid'] = c.get_unauth_permid() + u = c.get_upload() + a['uprate'] = int(u.measure.get_rate()) + a['uinterested'] = u.is_interested() + a['uchoked'] = u.is_choked() + d = c.get_download() + a['downrate'] = int(d.measure.get_rate()) + a['dinterested'] = d.is_interested() + a['dchoked'] = d.is_choked() + a['snubbed'] = d.is_snubbed() + a['utotal'] = d.connection.upload.measure.get_total() + a['dtotal'] = d.connection.download.measure.get_total() + if d.connection.download.have: + a['completed'] = float(len(d.connection.download.have)-d.connection.download.have.numfalse)/float(len(d.connection.download.have)) + else: + a['completed'] = 1.0 + # The total download speed of the peer as measured from its + # HAVE messages. 
+ a['speed'] = d.connection.download.peermeasure.get_rate() + a['g2g'] = c.use_g2g + a['g2g_score'] = c.g2g_score() + + l.append(a) + + for dl in self.httpdl.get_downloads(): + if dl.goodseed: + a = {} + a['id'] = 'http seed' + a['ip'] = dl.baseurl + a['optimistic'] = False + a['direction'] = 'L' + a['uprate'] = 0 + a['uinterested'] = False + a['uchoked'] = False + a['downrate'] = int(dl.measure.get_rate()) + a['dinterested'] = True + a['dchoked'] = not dl.active + a['snubbed'] = not dl.active + a['utotal'] = None + a['dtotal'] = dl.measure.get_total() + a['completed'] = 1.0 + a['speed'] = None + + l.append(a) + + return l + + + def gather(self, displayfunc = None, getpeerlist=False): + s = {'stats': self.statistics.update()} + if getpeerlist: + s['spew'] = self.spews() + else: + s['spew'] = None + s['up'] = self.upfunc() + if self.finflag.isSet(): + s['done'] = self.file_length + s['down'] = 0.0 + s['frac'] = 1.0 + s['wanted'] = 0 + s['time'] = 0 + s['vod'] = False + s['vod_prebuf_frac'] = 1.0 + s['vod_playable'] = True + s['vod_playable_after'] = 0.0 + s['vod_stats'] = {} +# if self.voddownload: +# s['vod_duration'] = self.voddownload.get_duration() +# else: +# s['vod_duration'] = None + return s + s['down'] = self.downfunc() + obtained, desired, have = self.leftfunc() + s['done'] = obtained + s['wanted'] = desired + if desired > 0: + s['frac'] = float(obtained)/desired + else: + s['frac'] = 1.0 + if desired == obtained: + s['time'] = 0 + else: + s['time'] = self.ratemeasure.get_time_left(desired-obtained) + + if self.voddownload is not None: + s['vod_prebuf_frac'] = self.voddownload.get_prebuffering_progress() + s['vod_playable'] = self.voddownload.is_playable() + s['vod_playable_after'] = self.voddownload.get_playable_after() + s['vod'] = True + s['vod_stats'] = self.voddownload.get_stats() +# s['vod_duration'] = self.voddownload.get_duration() + else: + s['vod_prebuf_frac'] = 0.0 + s['vod_playable'] = False + s['vod_playable_after'] = float(2 ** 31) + s['vod'] = False + s['vod_stats'] = {} +# s['vod_duration'] = None + return s + + + def display(self, displayfunc): + if not self.doneprocessing.isSet(): + return + self.doneprocessing.clear() + stats = self.gather() + if self.finflag.isSet(): + displayfunc(dpflag = self.doneprocessing, + upRate = stats['up'], + statistics = stats['stats'], spew = stats['spew']) + elif stats['time'] is not None: + displayfunc(dpflag = self.doneprocessing, + fractionDone = stats['frac'], sizeDone = stats['done'], + downRate = stats['down'], upRate = stats['up'], + statistics = stats['stats'], spew = stats['spew'], + timeEst = stats['time']) + else: + displayfunc(dpflag = self.doneprocessing, + fractionDone = stats['frac'], sizeDone = stats['done'], + downRate = stats['down'], upRate = stats['up'], + statistics = stats['stats'], spew = stats['spew']) + + + def autodisplay(self, displayfunc, interval): + self.displayfunc = displayfunc + self.interval = interval + self._autodisplay() + + def _autodisplay(self): + self.add_task(self._autodisplay, self.interval) + self.display(self.displayfunc) diff --git a/tribler-mod/Tribler/Core/BitTornado/BT1/Encrypter.py b/tribler-mod/Tribler/Core/BitTornado/BT1/Encrypter.py new file mode 100644 index 0000000..e90dcd9 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/BT1/Encrypter.py @@ -0,0 +1,685 @@ +from time import localtime, strftime +# Written by Bram Cohen and Pawel Garbacki +# see LICENSE.txt for license information + +from cStringIO import StringIO +from binascii import b2a_hex +from socket import error as 
socketerror +from urllib import quote +from struct import unpack +from time import time +from sets import Set + +# 2fastbt_ +from traceback import print_exc +import sys +from Tribler.Core.BitTornado.BT1.MessageID import protocol_name,option_pattern +from Tribler.Core.BitTornado.BT1.convert import toint +# _2fastbt + +try: + True +except: + True = 1 + False = 0 + +DEBUG = False + +if sys.platform == 'win32': + # Arno: On windows XP SP2 there is a limit on "the number of concurrent, + # incomplete outbound TCP connection attempts. When the limit is reached, + # subsequent connection attempts are put in a queue and resolved at a fixed + # rate so that there are only a limited number of connections in the + # incomplete state. During normal operation, when programs are connecting + # to available hosts at valid IP addresses, no limit is imposed on the + # number of connections in the incomplete state. When the number of + # incomplete connections exceeds the limit, for example, as a result of + # programs connecting to IP addresses that are not valid, connection-rate + # limitations are invoked, and this event is logged." + # Source: http://go.microsoft.com/fwlink/events.asp and fill in + # Product: "Windos Operating System" + # Event: 4226 + # Which directs to: + # http://www.microsoft.com/technet/support/ee/transform.aspx?ProdName=Windows%20Operating%20System&ProdVer=5.2&EvtID=4226&EvtSrc=Tcpip&LCID=1033 + # + # The ABC/BitTornado people felt the need to therefore impose a rate limit + # themselves. Normally, I would be against this, because the kernel usually + # does a better job at this than some app programmers. But here it makes + # somewhat sense because it appears that when the Win32 "connection-rate + # limitations" are triggered, this causes socket timeout + # errors. For ABC/BitTornado this should not be a big problem, as none of + # the TCP connections it initiates are really vital that they proceed + # quickly. + # + # For Tribler, we have one very important TCP connection at the moment, + # that is when the VideoPlayer/VLC tries to connect to our HTTP-based + # VideoServer on 127.0.0.1 to play the video. We have actually seen these + # connections timeout when we set MAX_INCOMPLETE to > 10. + # + # So we keep this app-level rate limit mechanism FOR NOW and add a security + # margin. To support our SwarmPlayer that wants quick startup of many + # connections we decrease the autoclosing timeout, such that bad conns + # get removed from this rate-limit admin faster. + # + # Windows die die die. + # + MAX_INCOMPLETE = 8 # safety margin. Even 9 gives video socket timeout +else: + MAX_INCOMPLETE = 32 + +AUTOCLOSE_TIMEOUT = 15 # secs. Setting this to e.g. 7 causes Video HTTP timeouts + +def make_readable(s): + if not s: + return '' + if quote(s).find('%') >= 0: + return b2a_hex(s).upper() + return '"'+s+'"' + +def show(s): + return b2a_hex(s) + +class IncompleteCounter: + def __init__(self): + self.c = 0 + def increment(self): + self.c += 1 + def decrement(self): + #print_stack() + self.c -= 1 + def toomany(self): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","IncompleteCounter: c",self.c + return self.c >= MAX_INCOMPLETE + +# Arno: This is a global counter!!!! 
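+# It is shared by every Connection and Encoder instance in the process, so
+# the MAX_INCOMPLETE limit applies across all swarms at once: it is
+# incremented in Connection.__init__ for each locally initiated connection
+# and decremented again when the handshake completes (read_peer_id) or the
+# connection is severed. _start_connection_from_queue checks toomany() and
+# briefly delays dialing new peers while too many handshakes are pending.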
+incompletecounter = IncompleteCounter() + + +# header, reserved, download id, my id, [length, message] + +class Connection: +# 2fastbt_ + def __init__(self, Encoder, connection, id, ext_handshake = False, + locally_initiated = None, dns = None, coord_con = False): +# _2fastbt + self.Encoder = Encoder + self.connection = connection # SocketHandler.SingleSocket + self.connecter = Encoder.connecter + self.id = id + self.readable_id = make_readable(id) + self.coord_con = coord_con + if locally_initiated is not None: + self.locally_initiated = locally_initiated + elif coord_con: + self.locally_initiated = True + else: + self.locally_initiated = (id != None) +# _2fastbt + self.complete = False + self.keepalive = lambda: None + self.closed = False + self.buffer = StringIO() +# overlay + self.dns = dns + self.support_extend_messages = False + self.connecter_conn = None +# _overlay + self.support_merklehash= False + self.na_want_internal_conn_from = None + self.na_address_distance = None + + if self.locally_initiated: + incompletecounter.increment() +# 2fastbt_ + self.create_time = time() +# _2fastbt + if self.locally_initiated or ext_handshake: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Encoder.Connection: writing protname + options + infohash" + self.connection.write(chr(len(protocol_name)) + protocol_name + + option_pattern + self.Encoder.download_id) + if ext_handshake: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Encoder.Connection: writing my peer-ID" + self.connection.write(self.Encoder.my_id) + self.next_len, self.next_func = 20, self.read_peer_id + else: + self.next_len, self.next_func = 1, self.read_header_len + self.Encoder.raw_server.add_task(self._auto_close, AUTOCLOSE_TIMEOUT) + + def get_ip(self, real=False): + return self.connection.get_ip(real) + + def get_port(self, real=False): + return self.connection.get_port(real) + + def get_myip(self, real=False): + return self.connection.get_myip(real) + + def get_myport(self, real=False): + return self.connection.get_myport(real) + + def get_id(self): + return self.id + + def get_readable_id(self): + return self.readable_id + + def is_locally_initiated(self): + return self.locally_initiated + + def is_flushed(self): + return self.connection.is_flushed() + + def supports_merklehash(self): + return self.support_merklehash + + def supports_extend_messages(self): + return self.support_extend_messages + + def set_options(self, s): +# overlay_ + r = unpack("B", s[5]) + if r[0] & 0x10: # left + 43 bit + self.support_extend_messages = True + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: Peer supports EXTEND" + if r[0] & 0x20: # left + 42 bit + self.support_merklehash= True + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: Peer supports Merkle hashes" +# _overlay + + def read_header_len(self, s): + if ord(s) != len(protocol_name): + return None + return len(protocol_name), self.read_header + + def read_header(self, s): + if s != protocol_name: + return None + return 8, self.read_reserved + + def read_reserved(self, s): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: Reserved bits:", show(s) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: Reserved bits=", show(option_pattern) + self.set_options(s) + return 20, self.read_download_id + + def read_download_id(self, s): + if s != self.Encoder.download_id: + return None + 
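+        # The received infohash matched our swarm. Locally initiated
+        # connections already sent their handshake header in __init__; for
+        # remotely initiated ones we reply with the full header (protocol
+        # name, options, infohash, our peer id) now and count the external
+        # connection. Either way the next 20 bytes expected are the remote
+        # peer id.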
if not self.locally_initiated: + self.Encoder.connecter.external_connection_made += 1 + self.connection.write(chr(len(protocol_name)) + protocol_name + + option_pattern + self.Encoder.download_id + self.Encoder.my_id) + return 20, self.read_peer_id + + def read_peer_id(self, s): +# 2fastbt_ + """ In the scenario of locally initiating: + - I may or may not (normally not) get the remote peerid from a tracker before connecting. + - If I've gotten the remote peerid, set it as self.id, otherwise set self.id as 0. + - I send handshake message without my peerid. + - After I received peer's handshake message, if self.id isn't 0 (i.e., I had the remote peerid), + check the remote peerid, otherwise set self.id as the remote id. If the check is failed, drop the connection. + - Then I send self.Encoder.my_id to the remote peer. + - The remote peer will record self.Encoder.id as my peerid. + - Anyway, self.id should be the same with the remote id if handshake is ok. + + Note self.Encoder.id is a unique id to each swarm I have. + Normally self.id isn't equal to self.Encoder.my_id. + + In the scenario of remotely initiating: + - I don't have remote id + - I received the handshake message to join a swarm. + - Before I read the remote id, I send my handshake with self.Encoder.my_id, my unique id of the swarm. + - I read the remote id and set it as my.id + + before read_peer_id(), self.id = 0 if locally init without remote id + self.id = remote id if locally init with remote id + self.id = None if remotely init + after read_peer_id(), self.id = remote id if locally init + self.id = remote id if remotely init + """ +# _2fastbt + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Encoder.Connection: read_peer_id" + + if not self.id: # remote init or local init without remote peer's id or remote init + self.id = s + self.readable_id = make_readable(s) + else: # locat init with remote id + if s != self.id: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Encoder.Connection: read_peer_id: s != self.id, returning None" + return None + self.complete = self.Encoder.got_id(self) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Encoder.Connection: read_peer_id: complete is",self.complete + + + if not self.complete: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Encoder.Connection: read_peer_id: self not complete!!!, returning None" + return None + if self.locally_initiated: + self.connection.write(self.Encoder.my_id) + incompletecounter.decrement() + # Arno: open new conn from queue if at limit. 
Faster than RawServer task + self.Encoder._start_connection_from_queue(sched=False) + + c = self.Encoder.connecter.connection_made(self) + self.keepalive = c.send_keepalive + return 4, self.read_len + + def read_len(self, s): + l = toint(s) + if l > self.Encoder.max_len: + return None + return l, self.read_message + + def read_message(self, s): + if s != '': + self.connecter.got_message(self, s) + #else: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: got keepalive from",s.getpeername() + return 4, self.read_len + + def read_dead(self, s): + return None + + def _auto_close(self): + if not self.complete and not self.is_coordinator_con(): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: autoclosing ",self.get_myip(),self.get_myport(),"to",self.get_ip(),self.get_port() + self.close() + + def close(self,closeall=False): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: closing connection",self.get_ip() + #print_stack() + if not self.closed: + self.connection.close() + self.sever(closeall=closeall) + + + def sever(self,closeall=False): + self.closed = True + if self.Encoder.connections.has_key(self.connection): + self.Encoder.admin_close(self.connection) + + if self.complete: + self.connecter.connection_lost(self) + elif self.locally_initiated: + incompletecounter.decrement() + # Arno: open new conn from queue if at limit. Faster than RawServer task + if not closeall: + self.Encoder._start_connection_from_queue(sched=False) + + def send_message_raw(self, message): + if not self.closed: + self.connection.write(message) # SingleSocket + + def data_came_in(self, connection, s): + self.Encoder.measurefunc(len(s)) + while 1: + if self.closed: + return + i = self.next_len - self.buffer.tell() + if i > len(s): + self.buffer.write(s) + return + self.buffer.write(s[:i]) + s = s[i:] + m = self.buffer.getvalue() + self.buffer.reset() + self.buffer.truncate() + try: + x = self.next_func(m) + except: + print_exc() + self.next_len, self.next_func = 1, self.read_dead + raise + if x is None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: function failed",self.next_func + self.close() + return + self.next_len, self.next_func = x + + def connection_flushed(self, connection): + if self.complete: + self.connecter.connection_flushed(self) + + def connection_lost(self, connection): + if self.Encoder.connections.has_key(connection): + self.sever() +# 2fastbt_ + def is_coordinator_con(self): + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: is_coordinator_con: coordinator is ",self.Encoder.coordinator_ip + if self.coord_con: + return True + elif self.get_ip() == self.Encoder.coordinator_ip and self.get_ip() != '127.0.0.1': # Arno: for testing + return True + else: + return False + + def is_helper_con(self): + coordinator = self.connecter.coordinator + if coordinator is None: + return False + return coordinator.is_helper_ip(self.get_ip()) +# _2fastbt + + # NETWORK AWARE + def na_set_address_distance(self): + """ Calc address distance. Currently simple: if same /24 then 0 + else 1. 
TODO: IPv6 + """ + hisip = self.get_ip(real=True) + myip = self.get_myip(real=True) + + a = hisip.split(".") + b = myip.split(".") + if a[0] == b[0] and a[1] == b[1] and a[2] == b[2]: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder.connection: na: Found peer on local LAN",self.get_ip() + self.na_address_distance = 0 + else: + self.na_address_distance = 1 + + def na_get_address_distance(self): + return self.na_address_distance + + + + + +class Encoder: + def __init__(self, connecter, raw_server, my_id, max_len, + schedulefunc, keepalive_delay, download_id, + measurefunc, config): + self.raw_server = raw_server + self.connecter = connecter + self.my_id = my_id + self.max_len = max_len + self.schedulefunc = schedulefunc + self.keepalive_delay = keepalive_delay + self.download_id = download_id + self.measurefunc = measurefunc + self.config = config + self.connections = {} + self.banned = {} + self.to_connect = Set() + self.trackertime = 0 + self.paused = False + if self.config['max_connections'] == 0: + self.max_connections = 2 ** 30 + else: + self.max_connections = self.config['max_connections'] + """ + In r529 there was a problem when a single Windows client + would connect to our text-based seeder (i.e. btlaunchmany) + with no other clients present. Apparently both the seeder + and client would connect to eachother simultaneously, but + not end up with a good connection, halting the client. + + Arno, 2006-03-10: Reappears in ~r890, fixed in r892. It + appears to be a problem of writing to a nonblocking socket + before it signalled it is ready for writing, although the + evidence is inconclusive. + + Arno: 2006-12-15: Reappears in r2319. There is some weird + socket problem here. Using Python 2.4.4 doesn't solve it. + The problem I see here is that as soon as we register + at the tracker, the single seeder tries to connect to + us. He succeeds, but after a short while the connection + appears to be closed by him. We then wind up with no + connection at all and have to wait until we recontact + the tracker. + + My workaround is to refuse these initial connections from + the seeder and wait until I've started connecting to peers + based on the info I got from the tracker before accepting + remote connections. + + Arno: 2007-02-16: I think I finally found it. The Tribler + tracker (BitTornado/BT1/track.py) will do a NAT check + (BitTornado/BT1/NATCheck) by default, which consists of + initiating a connection and then closing it after a good + BT handshake was received. + + The solution now is to make sure we check IP and port to + identify existing connections. I already added that 2006-12-15, + so I just removed the restriction on initial connections, + which are superfluous. + """ + self.rerequest = None +# 2fastbt_ + self.toofast_banned = {} + self.coordinator_ip = None +# _2fastbt + schedulefunc(self.send_keepalives, keepalive_delay) + + + def send_keepalives(self): + self.schedulefunc(self.send_keepalives, self.keepalive_delay) + if self.paused: + return + for c in self.connections.values(): + c.keepalive() + + def start_connections(self, dnsidlist): + """ Arno: dnsidlist is a list of tuples (dns,id) where dns is a (ip,port) tuple + and id is apparently always 0. It must be unequal to None at least, + because Encrypter.Connection used the id to see if a connection is + locally initiated?! 
""" + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: adding",len(dnsidlist),"peers to queue, current len",len(self.to_connect) + if not self.to_connect: + self.raw_server.add_task(self._start_connection_from_queue) + self.to_connect.update(dnsidlist) + # make sure addrs from various sources, like tracker, ut_pex and DHT are mixed + # TODO: or not? For Tribler Supported we may want the tracker to + # be more authoritative, such that official seeders found fast. Nah. + + #random.shuffle(self.to_connect) + #Jelle: Since objects are already placed in the Set in pseudo random order, they don't have to + # be shuffled (and a Set cannot be shuffled). + + self.trackertime = int(time()) + + def _start_connection_from_queue(self,sched=True): + try: + if not self.to_connect: + return + + if self.connecter.external_connection_made: + max_initiate = self.config['max_initiate'] + else: + max_initiate = int(self.config['max_initiate']*1.5) + cons = len(self.connections) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: conns",cons,"max conns",self.max_connections,"max init",max_initiate + + if cons >= self.max_connections or cons >= max_initiate: + delay = 60.0 + elif self.paused or incompletecounter.toomany(): + delay = 1.0 + else: + delay = 0.0 + dns, id = self.to_connect.pop() + self.start_connection(dns, id) + if self.to_connect and sched: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: start_from_queue delay",delay + self.raw_server.add_task(self._start_connection_from_queue, delay) + except: + print_exc() + raise + + def start_connection(self, dns, id, coord_con = False, forcenew = False): + """ Locally initiated connection """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: start_connection:",dns + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: start_connection: qlen",len(self.to_connect),"nconns",len(self.connections),"maxi",self.config['max_initiate'],"maxc",self.config['max_connections'] + + if ( self.paused + or len(self.connections) >= self.max_connections + or id == self.my_id + or self.banned.has_key(dns[0]) ) and not forcenew: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: start_connection: we're paused or too busy" + return True + for v in self.connections.values(): # avoid duplicated connection from a single ip + if v is None: + continue + if id and v.id == id and not forcenew: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: start_connection: already connected to peer",`id` + return True + ip = v.get_ip(True) + port = v.get_port(False) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: start_connection: candidate",ip,port,"want",dns[0],dns[1] + + if self.config['security'] and ip != 'unknown' and ip == dns[0] and port == dns[1] and not forcenew: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: start_connection: using existing",ip,"want port",dns[1],"existing port",port,"id",`id` + return True + try: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: start_connection: Setting up new to peer", dns,"id",`id` + c = self.raw_server.start_connection(dns) + con = Connection(self, c, id, dns = dns, coord_con = coord_con) + self.connections[c] = con + c.set_handler(con) + except 
socketerror: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Encoder.connection failed" + return False + return True + + def _start_connection(self, dns, id): + def foo(self=self, dns=dns, id=id): + self.start_connection(dns, id) + + self.schedulefunc(foo, 0) + + def got_id(self, connection): + """ check if the connection can be accepted """ + + if connection.id == self.my_id: + # NETWORK AWARE + ret = self.connecter.na_got_loopback(connection) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: got_id: connection to myself? keep",ret + if ret == False: + self.connecter.external_connection_made -= 1 + return ret + + ip = connection.get_ip(True) + port = connection.get_port(False) + + # NETWORK AWARE + connection.na_set_address_distance() + + if self.config['security'] and self.banned.has_key(ip): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: got_id: security ban on IP" + return False + for v in self.connections.values(): + if connection is not v: + # NETWORK AWARE + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: got_id: new internal conn from peer? ids",connection.id,v.id + if connection.id == v.id: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: got_id: new internal conn from peer? addrs",v.na_want_internal_conn_from,ip + if v.na_want_internal_conn_from == ip: + # We were expecting a connection from this peer that shares + # a NAT with us via the internal network. This is it. + self.connecter.na_got_internal_connection(v,connection) + return True + elif v.create_time < connection.create_time: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: got_id: create time bad?!" + return False + # don't allow multiple connections from the same ip if security is set. + if self.config['security'] and ip != 'unknown' and ip == v.get_ip(True) and port == v.get_port(False): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: got_id: closing duplicate connection" + v.close() + return True + + def external_connection_made(self, connection): + """ Remotely initiated connection """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: external_conn_made",connection.get_ip() + if self.paused or len(self.connections) >= self.max_connections: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: external_conn_made: paused or too many" + connection.close() + return False + con = Connection(self, connection, None) + self.connections[connection] = con + connection.set_handler(con) + return True + + def externally_handshaked_connection_made(self, connection, options, msg_remainder): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: external_handshaked_conn_made",connection.get_ip() + # 2fastbt_ + if self.paused or len(self.connections) >= self.max_connections: + connection.close() + return False + + con = Connection(self, connection, None, True) + con.set_options(options) + # before: connection.handler = Encoder + # Don't forget to count the external conns! 
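+        # "Counting" means registering the wrapper in self.connections, so
+        # this connection is seen by the max_connections checks, the
+        # keepalive loop and close_all(); set_handler() then routes further
+        # socket events to the new Connection object instead of the Encoder.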
+ self.connections[connection] = con + connection.set_handler(con) + # after: connection.handler = Encrypter.Connecter + + if msg_remainder: + con.data_came_in(con, msg_remainder) + return True + + def close_all(self): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: closing all connections" + copy = self.connections.values()[:] + for c in copy: + c.close(closeall=True) + self.connections = {} + + def ban(self, ip): + self.banned[ip] = 1 + + def pause(self, flag): + self.paused = flag + +# 2fastbt_ + def set_coordinator_ip(self,ip): + self.coordinator_ip = ip +# _2fastbt + + def set_rerequester(self,rerequest): + self.rerequest = rerequest + + def admin_close(self,conn): + del self.connections[conn] + now = int(time()) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: admin_close: now-tt is",now-self.trackertime + if len(self.connections) == 0 and (now-self.trackertime) < 20: + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: admin_close: Recontacting tracker, last request got just dead peers: TEMP DISABLED, ARNO WORKING ON IT" + ###self.rerequest.encoder_wants_new_peers() + pass diff --git a/tribler-mod/Tribler/Core/BitTornado/BT1/Encrypter.py.bak b/tribler-mod/Tribler/Core/BitTornado/BT1/Encrypter.py.bak new file mode 100644 index 0000000..f6f883c --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/BT1/Encrypter.py.bak @@ -0,0 +1,684 @@ +# Written by Bram Cohen and Pawel Garbacki +# see LICENSE.txt for license information + +from cStringIO import StringIO +from binascii import b2a_hex +from socket import error as socketerror +from urllib import quote +from struct import unpack +from time import time +from sets import Set + +# 2fastbt_ +from traceback import print_exc +import sys +from Tribler.Core.BitTornado.BT1.MessageID import protocol_name,option_pattern +from Tribler.Core.BitTornado.BT1.convert import toint +# _2fastbt + +try: + True +except: + True = 1 + False = 0 + +DEBUG = False + +if sys.platform == 'win32': + # Arno: On windows XP SP2 there is a limit on "the number of concurrent, + # incomplete outbound TCP connection attempts. When the limit is reached, + # subsequent connection attempts are put in a queue and resolved at a fixed + # rate so that there are only a limited number of connections in the + # incomplete state. During normal operation, when programs are connecting + # to available hosts at valid IP addresses, no limit is imposed on the + # number of connections in the incomplete state. When the number of + # incomplete connections exceeds the limit, for example, as a result of + # programs connecting to IP addresses that are not valid, connection-rate + # limitations are invoked, and this event is logged." + # Source: http://go.microsoft.com/fwlink/events.asp and fill in + # Product: "Windos Operating System" + # Event: 4226 + # Which directs to: + # http://www.microsoft.com/technet/support/ee/transform.aspx?ProdName=Windows%20Operating%20System&ProdVer=5.2&EvtID=4226&EvtSrc=Tcpip&LCID=1033 + # + # The ABC/BitTornado people felt the need to therefore impose a rate limit + # themselves. Normally, I would be against this, because the kernel usually + # does a better job at this than some app programmers. But here it makes + # somewhat sense because it appears that when the Win32 "connection-rate + # limitations" are triggered, this causes socket timeout + # errors. 
For ABC/BitTornado this should not be a big problem, as none of + # the TCP connections it initiates are really vital that they proceed + # quickly. + # + # For Tribler, we have one very important TCP connection at the moment, + # that is when the VideoPlayer/VLC tries to connect to our HTTP-based + # VideoServer on 127.0.0.1 to play the video. We have actually seen these + # connections timeout when we set MAX_INCOMPLETE to > 10. + # + # So we keep this app-level rate limit mechanism FOR NOW and add a security + # margin. To support our SwarmPlayer that wants quick startup of many + # connections we decrease the autoclosing timeout, such that bad conns + # get removed from this rate-limit admin faster. + # + # Windows die die die. + # + MAX_INCOMPLETE = 8 # safety margin. Even 9 gives video socket timeout +else: + MAX_INCOMPLETE = 32 + +AUTOCLOSE_TIMEOUT = 15 # secs. Setting this to e.g. 7 causes Video HTTP timeouts + +def make_readable(s): + if not s: + return '' + if quote(s).find('%') >= 0: + return b2a_hex(s).upper() + return '"'+s+'"' + +def show(s): + return b2a_hex(s) + +class IncompleteCounter: + def __init__(self): + self.c = 0 + def increment(self): + self.c += 1 + def decrement(self): + #print_stack() + self.c -= 1 + def toomany(self): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","IncompleteCounter: c",self.c + return self.c >= MAX_INCOMPLETE + +# Arno: This is a global counter!!!! +incompletecounter = IncompleteCounter() + + +# header, reserved, download id, my id, [length, message] + +class Connection: +# 2fastbt_ + def __init__(self, Encoder, connection, id, ext_handshake = False, + locally_initiated = None, dns = None, coord_con = False): +# _2fastbt + self.Encoder = Encoder + self.connection = connection # SocketHandler.SingleSocket + self.connecter = Encoder.connecter + self.id = id + self.readable_id = make_readable(id) + self.coord_con = coord_con + if locally_initiated is not None: + self.locally_initiated = locally_initiated + elif coord_con: + self.locally_initiated = True + else: + self.locally_initiated = (id != None) +# _2fastbt + self.complete = False + self.keepalive = lambda: None + self.closed = False + self.buffer = StringIO() +# overlay + self.dns = dns + self.support_extend_messages = False + self.connecter_conn = None +# _overlay + self.support_merklehash= False + self.na_want_internal_conn_from = None + self.na_address_distance = None + + if self.locally_initiated: + incompletecounter.increment() +# 2fastbt_ + self.create_time = time() +# _2fastbt + if self.locally_initiated or ext_handshake: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Encoder.Connection: writing protname + options + infohash" + self.connection.write(chr(len(protocol_name)) + protocol_name + + option_pattern + self.Encoder.download_id) + if ext_handshake: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Encoder.Connection: writing my peer-ID" + self.connection.write(self.Encoder.my_id) + self.next_len, self.next_func = 20, self.read_peer_id + else: + self.next_len, self.next_func = 1, self.read_header_len + self.Encoder.raw_server.add_task(self._auto_close, AUTOCLOSE_TIMEOUT) + + def get_ip(self, real=False): + return self.connection.get_ip(real) + + def get_port(self, real=False): + return self.connection.get_port(real) + + def get_myip(self, real=False): + return self.connection.get_myip(real) + + def get_myport(self, real=False): + return self.connection.get_myport(real) + + def 
get_id(self): + return self.id + + def get_readable_id(self): + return self.readable_id + + def is_locally_initiated(self): + return self.locally_initiated + + def is_flushed(self): + return self.connection.is_flushed() + + def supports_merklehash(self): + return self.support_merklehash + + def supports_extend_messages(self): + return self.support_extend_messages + + def set_options(self, s): +# overlay_ + r = unpack("B", s[5]) + if r[0] & 0x10: # left + 43 bit + self.support_extend_messages = True + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: Peer supports EXTEND" + if r[0] & 0x20: # left + 42 bit + self.support_merklehash= True + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: Peer supports Merkle hashes" +# _overlay + + def read_header_len(self, s): + if ord(s) != len(protocol_name): + return None + return len(protocol_name), self.read_header + + def read_header(self, s): + if s != protocol_name: + return None + return 8, self.read_reserved + + def read_reserved(self, s): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: Reserved bits:", show(s) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: Reserved bits=", show(option_pattern) + self.set_options(s) + return 20, self.read_download_id + + def read_download_id(self, s): + if s != self.Encoder.download_id: + return None + if not self.locally_initiated: + self.Encoder.connecter.external_connection_made += 1 + self.connection.write(chr(len(protocol_name)) + protocol_name + + option_pattern + self.Encoder.download_id + self.Encoder.my_id) + return 20, self.read_peer_id + + def read_peer_id(self, s): +# 2fastbt_ + """ In the scenario of locally initiating: + - I may or may not (normally not) get the remote peerid from a tracker before connecting. + - If I've gotten the remote peerid, set it as self.id, otherwise set self.id as 0. + - I send handshake message without my peerid. + - After I received peer's handshake message, if self.id isn't 0 (i.e., I had the remote peerid), + check the remote peerid, otherwise set self.id as the remote id. If the check is failed, drop the connection. + - Then I send self.Encoder.my_id to the remote peer. + - The remote peer will record self.Encoder.id as my peerid. + - Anyway, self.id should be the same with the remote id if handshake is ok. + + Note self.Encoder.id is a unique id to each swarm I have. + Normally self.id isn't equal to self.Encoder.my_id. + + In the scenario of remotely initiating: + - I don't have remote id + - I received the handshake message to join a swarm. + - Before I read the remote id, I send my handshake with self.Encoder.my_id, my unique id of the swarm. 
+ - I read the remote id and set it as my.id + + before read_peer_id(), self.id = 0 if locally init without remote id + self.id = remote id if locally init with remote id + self.id = None if remotely init + after read_peer_id(), self.id = remote id if locally init + self.id = remote id if remotely init + """ +# _2fastbt + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Encoder.Connection: read_peer_id" + + if not self.id: # remote init or local init without remote peer's id or remote init + self.id = s + self.readable_id = make_readable(s) + else: # locat init with remote id + if s != self.id: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Encoder.Connection: read_peer_id: s != self.id, returning None" + return None + self.complete = self.Encoder.got_id(self) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Encoder.Connection: read_peer_id: complete is",self.complete + + + if not self.complete: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Encoder.Connection: read_peer_id: self not complete!!!, returning None" + return None + if self.locally_initiated: + self.connection.write(self.Encoder.my_id) + incompletecounter.decrement() + # Arno: open new conn from queue if at limit. Faster than RawServer task + self.Encoder._start_connection_from_queue(sched=False) + + c = self.Encoder.connecter.connection_made(self) + self.keepalive = c.send_keepalive + return 4, self.read_len + + def read_len(self, s): + l = toint(s) + if l > self.Encoder.max_len: + return None + return l, self.read_message + + def read_message(self, s): + if s != '': + self.connecter.got_message(self, s) + #else: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: got keepalive from",s.getpeername() + return 4, self.read_len + + def read_dead(self, s): + return None + + def _auto_close(self): + if not self.complete and not self.is_coordinator_con(): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: autoclosing ",self.get_myip(),self.get_myport(),"to",self.get_ip(),self.get_port() + self.close() + + def close(self,closeall=False): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: closing connection",self.get_ip() + #print_stack() + if not self.closed: + self.connection.close() + self.sever(closeall=closeall) + + + def sever(self,closeall=False): + self.closed = True + if self.Encoder.connections.has_key(self.connection): + self.Encoder.admin_close(self.connection) + + if self.complete: + self.connecter.connection_lost(self) + elif self.locally_initiated: + incompletecounter.decrement() + # Arno: open new conn from queue if at limit. 
Faster than RawServer task + if not closeall: + self.Encoder._start_connection_from_queue(sched=False) + + def send_message_raw(self, message): + if not self.closed: + self.connection.write(message) # SingleSocket + + def data_came_in(self, connection, s): + self.Encoder.measurefunc(len(s)) + while 1: + if self.closed: + return + i = self.next_len - self.buffer.tell() + if i > len(s): + self.buffer.write(s) + return + self.buffer.write(s[:i]) + s = s[i:] + m = self.buffer.getvalue() + self.buffer.reset() + self.buffer.truncate() + try: + x = self.next_func(m) + except: + print_exc() + self.next_len, self.next_func = 1, self.read_dead + raise + if x is None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: function failed",self.next_func + self.close() + return + self.next_len, self.next_func = x + + def connection_flushed(self, connection): + if self.complete: + self.connecter.connection_flushed(self) + + def connection_lost(self, connection): + if self.Encoder.connections.has_key(connection): + self.sever() +# 2fastbt_ + def is_coordinator_con(self): + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: is_coordinator_con: coordinator is ",self.Encoder.coordinator_ip + if self.coord_con: + return True + elif self.get_ip() == self.Encoder.coordinator_ip and self.get_ip() != '127.0.0.1': # Arno: for testing + return True + else: + return False + + def is_helper_con(self): + coordinator = self.connecter.coordinator + if coordinator is None: + return False + return coordinator.is_helper_ip(self.get_ip()) +# _2fastbt + + # NETWORK AWARE + def na_set_address_distance(self): + """ Calc address distance. Currently simple: if same /24 then 0 + else 1. TODO: IPv6 + """ + hisip = self.get_ip(real=True) + myip = self.get_myip(real=True) + + a = hisip.split(".") + b = myip.split(".") + if a[0] == b[0] and a[1] == b[1] and a[2] == b[2]: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder.connection: na: Found peer on local LAN",self.get_ip() + self.na_address_distance = 0 + else: + self.na_address_distance = 1 + + def na_get_address_distance(self): + return self.na_address_distance + + + + + +class Encoder: + def __init__(self, connecter, raw_server, my_id, max_len, + schedulefunc, keepalive_delay, download_id, + measurefunc, config): + self.raw_server = raw_server + self.connecter = connecter + self.my_id = my_id + self.max_len = max_len + self.schedulefunc = schedulefunc + self.keepalive_delay = keepalive_delay + self.download_id = download_id + self.measurefunc = measurefunc + self.config = config + self.connections = {} + self.banned = {} + self.to_connect = Set() + self.trackertime = 0 + self.paused = False + if self.config['max_connections'] == 0: + self.max_connections = 2 ** 30 + else: + self.max_connections = self.config['max_connections'] + """ + In r529 there was a problem when a single Windows client + would connect to our text-based seeder (i.e. btlaunchmany) + with no other clients present. Apparently both the seeder + and client would connect to eachother simultaneously, but + not end up with a good connection, halting the client. + + Arno, 2006-03-10: Reappears in ~r890, fixed in r892. It + appears to be a problem of writing to a nonblocking socket + before it signalled it is ready for writing, although the + evidence is inconclusive. + + Arno: 2006-12-15: Reappears in r2319. There is some weird + socket problem here. Using Python 2.4.4 doesn't solve it. 
+ The problem I see here is that as soon as we register + at the tracker, the single seeder tries to connect to + us. He succeeds, but after a short while the connection + appears to be closed by him. We then wind up with no + connection at all and have to wait until we recontact + the tracker. + + My workaround is to refuse these initial connections from + the seeder and wait until I've started connecting to peers + based on the info I got from the tracker before accepting + remote connections. + + Arno: 2007-02-16: I think I finally found it. The Tribler + tracker (BitTornado/BT1/track.py) will do a NAT check + (BitTornado/BT1/NATCheck) by default, which consists of + initiating a connection and then closing it after a good + BT handshake was received. + + The solution now is to make sure we check IP and port to + identify existing connections. I already added that 2006-12-15, + so I just removed the restriction on initial connections, + which are superfluous. + """ + self.rerequest = None +# 2fastbt_ + self.toofast_banned = {} + self.coordinator_ip = None +# _2fastbt + schedulefunc(self.send_keepalives, keepalive_delay) + + + def send_keepalives(self): + self.schedulefunc(self.send_keepalives, self.keepalive_delay) + if self.paused: + return + for c in self.connections.values(): + c.keepalive() + + def start_connections(self, dnsidlist): + """ Arno: dnsidlist is a list of tuples (dns,id) where dns is a (ip,port) tuple + and id is apparently always 0. It must be unequal to None at least, + because Encrypter.Connection used the id to see if a connection is + locally initiated?! """ + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: adding",len(dnsidlist),"peers to queue, current len",len(self.to_connect) + if not self.to_connect: + self.raw_server.add_task(self._start_connection_from_queue) + self.to_connect.update(dnsidlist) + # make sure addrs from various sources, like tracker, ut_pex and DHT are mixed + # TODO: or not? For Tribler Supported we may want the tracker to + # be more authoritative, such that official seeders found fast. Nah. + + #random.shuffle(self.to_connect) + #Jelle: Since objects are already placed in the Set in pseudo random order, they don't have to + # be shuffled (and a Set cannot be shuffled). 
+ + self.trackertime = int(time()) + + def _start_connection_from_queue(self,sched=True): + try: + if not self.to_connect: + return + + if self.connecter.external_connection_made: + max_initiate = self.config['max_initiate'] + else: + max_initiate = int(self.config['max_initiate']*1.5) + cons = len(self.connections) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: conns",cons,"max conns",self.max_connections,"max init",max_initiate + + if cons >= self.max_connections or cons >= max_initiate: + delay = 60.0 + elif self.paused or incompletecounter.toomany(): + delay = 1.0 + else: + delay = 0.0 + dns, id = self.to_connect.pop() + self.start_connection(dns, id) + if self.to_connect and sched: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: start_from_queue delay",delay + self.raw_server.add_task(self._start_connection_from_queue, delay) + except: + print_exc() + raise + + def start_connection(self, dns, id, coord_con = False, forcenew = False): + """ Locally initiated connection """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: start_connection:",dns + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: start_connection: qlen",len(self.to_connect),"nconns",len(self.connections),"maxi",self.config['max_initiate'],"maxc",self.config['max_connections'] + + if ( self.paused + or len(self.connections) >= self.max_connections + or id == self.my_id + or self.banned.has_key(dns[0]) ) and not forcenew: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: start_connection: we're paused or too busy" + return True + for v in self.connections.values(): # avoid duplicated connection from a single ip + if v is None: + continue + if id and v.id == id and not forcenew: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: start_connection: already connected to peer",`id` + return True + ip = v.get_ip(True) + port = v.get_port(False) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: start_connection: candidate",ip,port,"want",dns[0],dns[1] + + if self.config['security'] and ip != 'unknown' and ip == dns[0] and port == dns[1] and not forcenew: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: start_connection: using existing",ip,"want port",dns[1],"existing port",port,"id",`id` + return True + try: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: start_connection: Setting up new to peer", dns,"id",`id` + c = self.raw_server.start_connection(dns) + con = Connection(self, c, id, dns = dns, coord_con = coord_con) + self.connections[c] = con + c.set_handler(con) + except socketerror: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Encoder.connection failed" + return False + return True + + def _start_connection(self, dns, id): + def foo(self=self, dns=dns, id=id): + self.start_connection(dns, id) + + self.schedulefunc(foo, 0) + + def got_id(self, connection): + """ check if the connection can be accepted """ + + if connection.id == self.my_id: + # NETWORK AWARE + ret = self.connecter.na_got_loopback(connection) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: got_id: connection to myself? 
keep",ret + if ret == False: + self.connecter.external_connection_made -= 1 + return ret + + ip = connection.get_ip(True) + port = connection.get_port(False) + + # NETWORK AWARE + connection.na_set_address_distance() + + if self.config['security'] and self.banned.has_key(ip): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: got_id: security ban on IP" + return False + for v in self.connections.values(): + if connection is not v: + # NETWORK AWARE + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: got_id: new internal conn from peer? ids",connection.id,v.id + if connection.id == v.id: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: got_id: new internal conn from peer? addrs",v.na_want_internal_conn_from,ip + if v.na_want_internal_conn_from == ip: + # We were expecting a connection from this peer that shares + # a NAT with us via the internal network. This is it. + self.connecter.na_got_internal_connection(v,connection) + return True + elif v.create_time < connection.create_time: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: got_id: create time bad?!" + return False + # don't allow multiple connections from the same ip if security is set. + if self.config['security'] and ip != 'unknown' and ip == v.get_ip(True) and port == v.get_port(False): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: got_id: closing duplicate connection" + v.close() + return True + + def external_connection_made(self, connection): + """ Remotely initiated connection """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: external_conn_made",connection.get_ip() + if self.paused or len(self.connections) >= self.max_connections: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: external_conn_made: paused or too many" + connection.close() + return False + con = Connection(self, connection, None) + self.connections[connection] = con + connection.set_handler(con) + return True + + def externally_handshaked_connection_made(self, connection, options, msg_remainder): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: external_handshaked_conn_made",connection.get_ip() + # 2fastbt_ + if self.paused or len(self.connections) >= self.max_connections: + connection.close() + return False + + con = Connection(self, connection, None, True) + con.set_options(options) + # before: connection.handler = Encoder + # Don't forget to count the external conns! 
+ self.connections[connection] = con + connection.set_handler(con) + # after: connection.handler = Encrypter.Connecter + + if msg_remainder: + con.data_came_in(con, msg_remainder) + return True + + def close_all(self): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: closing all connections" + copy = self.connections.values()[:] + for c in copy: + c.close(closeall=True) + self.connections = {} + + def ban(self, ip): + self.banned[ip] = 1 + + def pause(self, flag): + self.paused = flag + +# 2fastbt_ + def set_coordinator_ip(self,ip): + self.coordinator_ip = ip +# _2fastbt + + def set_rerequester(self,rerequest): + self.rerequest = rerequest + + def admin_close(self,conn): + del self.connections[conn] + now = int(time()) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: admin_close: now-tt is",now-self.trackertime + if len(self.connections) == 0 and (now-self.trackertime) < 20: + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","encoder: admin_close: Recontacting tracker, last request got just dead peers: TEMP DISABLED, ARNO WORKING ON IT" + ###self.rerequest.encoder_wants_new_peers() + pass diff --git a/tribler-mod/Tribler/Core/BitTornado/BT1/FileSelector.py b/tribler-mod/Tribler/Core/BitTornado/BT1/FileSelector.py new file mode 100644 index 0000000..de3b589 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/BT1/FileSelector.py @@ -0,0 +1,244 @@ +from time import localtime, strftime +# Written by John Hoffman +# see LICENSE.txt for license information + +from random import shuffle +try: + True +except: + True = 1 + False = 0 + + +class FileSelector: + def __init__(self, files, piece_length, bufferdir, + storage, storagewrapper, sched, failfunc): + self.files = files + + # JD: Store piece length + self.piece_length = piece_length + + self.storage = storage + self.storagewrapper = storagewrapper + self.sched = sched + self.failfunc = failfunc + self.downloader = None + self.picker = None + + storage.set_bufferdir(bufferdir) + + self.numfiles = len(files) + self.priority = [1] * self.numfiles + self.new_priority = None + self.new_partials = None + self.filepieces = [] + total = 0L + for file, length in files: + if not length: + self.filepieces.append(()) + else: + pieces = range( int(total/piece_length), + int((total+length-1)/piece_length)+1 ) + self.filepieces.append(tuple(pieces)) + total += length + self.numpieces = int((total+piece_length-1)/piece_length) + self.piece_priority = [1] * self.numpieces + + + + def init_priority(self, new_priority): + try: + assert len(new_priority) == self.numfiles + for v in new_priority: + assert type(v) in (type(0), type(0L)) + assert v >= -1 + assert v <= 2 + except: +# print_exc() + return False + try: + for f in xrange(self.numfiles): + if new_priority[f] < 0: + self.storage.disable_file(f) + self.new_priority = new_priority + except (IOError, OSError), e: + self.failfunc("can't open partial file for " + + self.files[f][0] + ': ' + str(e)) + return False + return True + + ''' + d['priority'] = [file #1 priority [,file #2 priority...] ] + a list of download priorities for each file. + Priority may be -1, 0, 1, 2. -1 = download disabled, + 0 = highest, 1 = normal, 2 = lowest. + Also see Storage.pickle and StorageWrapper.pickle for additional keys. 
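+
+    Illustrative example (not part of the original code): resuming a
+    three-file torrent where file 1 is normal priority, file 2 is disabled
+    and file 3 is highest priority would pass roughly
+
+        d = {'priority': [1, -1, 0]}    # plus the Storage/StorageWrapper keys
+
+    to unpickle() below. init_priority() rejects a malformed list (wrong
+    length, or values outside -1..2), in which case unpickle() returns
+    without restoring anything.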
+ ''' + def unpickle(self, d): + if d.has_key('priority'): + if not self.init_priority(d['priority']): + return + pieces = self.storage.unpickle(d) + if not pieces: # don't bother, nothing restoreable + return + new_piece_priority = self._get_piece_priority_list(self.new_priority) + self.storagewrapper.reblock([i == -1 for i in new_piece_priority]) + self.new_partials = self.storagewrapper.unpickle(d, pieces) + + + def tie_in(self, picker, cancelfunc, requestmorefunc): + self.picker = picker + self.cancelfunc = cancelfunc + self.requestmorefunc = requestmorefunc + + if self.new_priority: + self.priority = self.new_priority + self.new_priority = None + self.new_piece_priority = self._set_piece_priority(self.priority) + + if self.new_partials: + shuffle(self.new_partials) + for p in self.new_partials: + self.picker.requested(p) + self.new_partials = None + + + def _set_files_disabled(self, old_priority, new_priority): + old_disabled = [p == -1 for p in old_priority] + new_disabled = [p == -1 for p in new_priority] + data_to_update = [] + for f in xrange(self.numfiles): + if new_disabled[f] != old_disabled[f]: + data_to_update.extend(self.storage.get_piece_update_list(f)) + buffer = [] + for piece, start, length in data_to_update: + if self.storagewrapper.has_data(piece): + data = self.storagewrapper.read_raw(piece, start, length) + if data is None: + return False + buffer.append((piece, start, data)) + + files_updated = False + try: + for f in xrange(self.numfiles): + if new_disabled[f] and not old_disabled[f]: + self.storage.disable_file(f) + files_updated = True + if old_disabled[f] and not new_disabled[f]: + self.storage.enable_file(f) + files_updated = True + except (IOError, OSError), e: + if new_disabled[f]: + msg = "can't open partial file for " + else: + msg = 'unable to open ' + self.failfunc(msg + self.files[f][0] + ': ' + str(e)) + return False + if files_updated: + self.storage.reset_file_status() + + changed_pieces = {} + for piece, start, data in buffer: + if not self.storagewrapper.write_raw(piece, start, data): + return False + data.release() + changed_pieces[piece] = 1 + if not self.storagewrapper.doublecheck_data(changed_pieces): + return False + + return True + + + def _get_piece_priority_list(self, file_priority_list): + l = [-1] * self.numpieces + for f in xrange(self.numfiles): + if file_priority_list[f] == -1: + continue + for i in self.filepieces[f]: + if l[i] == -1: + l[i] = file_priority_list[f] + continue + l[i] = min(l[i], file_priority_list[f]) + return l + + + def _set_piece_priority(self, new_priority): + new_piece_priority = self._get_piece_priority_list(new_priority) + pieces = range(self.numpieces) + shuffle(pieces) + new_blocked = [] + new_unblocked = [] + for piece in pieces: + self.picker.set_priority(piece, new_piece_priority[piece]) + o = self.piece_priority[piece] == -1 + n = new_piece_priority[piece] == -1 + if n and not o: + new_blocked.append(piece) + if o and not n: + new_unblocked.append(piece) + if new_blocked: + self.cancelfunc(new_blocked) + self.storagewrapper.reblock([i == -1 for i in new_piece_priority]) + if new_unblocked: + self.requestmorefunc(new_unblocked) + + return new_piece_priority + + + def set_priorities_now(self, new_priority = None): + if not new_priority: + new_priority = self.new_priority + self.new_priority = None # potential race condition + if not new_priority: + return + old_priority = self.priority + self.priority = new_priority + if not self._set_files_disabled(old_priority, new_priority): + return + self.piece_priority 
= self._set_piece_priority(new_priority) + + def set_priorities(self, new_priority): + self.new_priority = new_priority + def s(self=self): + self.set_priorities_now() + self.sched(s) + + def set_priority(self, f, p): + new_priority = self.get_priorities() + new_priority[f] = p + self.set_priorities(new_priority) + + def get_priorities(self): + priority = self.new_priority + if not priority: + priority = self.priority # potential race condition + return [i for i in priority] + + def __setitem__(self, index, val): + self.set_priority(index, val) + + def __getitem__(self, index): + try: + return self.new_priority[index] + except: + return self.priority[index] + + + def finish(self): + pass +# for f in xrange(self.numfiles): +# if self.priority[f] == -1: +# self.storage.delete_file(f) + + def pickle(self): + d = {'priority': self.priority} + try: + s = self.storage.pickle() + sw = self.storagewrapper.pickle() + for k in s.keys(): + d[k] = s[k] + for k in sw.keys(): + d[k] = sw[k] + except (IOError, OSError): + pass + return d diff --git a/tribler-mod/Tribler/Core/BitTornado/BT1/FileSelector.py.bak b/tribler-mod/Tribler/Core/BitTornado/BT1/FileSelector.py.bak new file mode 100644 index 0000000..e9bbfa0 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/BT1/FileSelector.py.bak @@ -0,0 +1,243 @@ +# Written by John Hoffman +# see LICENSE.txt for license information + +from random import shuffle +try: + True +except: + True = 1 + False = 0 + + +class FileSelector: + def __init__(self, files, piece_length, bufferdir, + storage, storagewrapper, sched, failfunc): + self.files = files + + # JD: Store piece length + self.piece_length = piece_length + + self.storage = storage + self.storagewrapper = storagewrapper + self.sched = sched + self.failfunc = failfunc + self.downloader = None + self.picker = None + + storage.set_bufferdir(bufferdir) + + self.numfiles = len(files) + self.priority = [1] * self.numfiles + self.new_priority = None + self.new_partials = None + self.filepieces = [] + total = 0L + for file, length in files: + if not length: + self.filepieces.append(()) + else: + pieces = range( int(total/piece_length), + int((total+length-1)/piece_length)+1 ) + self.filepieces.append(tuple(pieces)) + total += length + self.numpieces = int((total+piece_length-1)/piece_length) + self.piece_priority = [1] * self.numpieces + + + + def init_priority(self, new_priority): + try: + assert len(new_priority) == self.numfiles + for v in new_priority: + assert type(v) in (type(0), type(0L)) + assert v >= -1 + assert v <= 2 + except: +# print_exc() + return False + try: + for f in xrange(self.numfiles): + if new_priority[f] < 0: + self.storage.disable_file(f) + self.new_priority = new_priority + except (IOError, OSError), e: + self.failfunc("can't open partial file for " + + self.files[f][0] + ': ' + str(e)) + return False + return True + + ''' + d['priority'] = [file #1 priority [,file #2 priority...] ] + a list of download priorities for each file. + Priority may be -1, 0, 1, 2. -1 = download disabled, + 0 = highest, 1 = normal, 2 = lowest. + Also see Storage.pickle and StorageWrapper.pickle for additional keys. 
+ ''' + def unpickle(self, d): + if d.has_key('priority'): + if not self.init_priority(d['priority']): + return + pieces = self.storage.unpickle(d) + if not pieces: # don't bother, nothing restoreable + return + new_piece_priority = self._get_piece_priority_list(self.new_priority) + self.storagewrapper.reblock([i == -1 for i in new_piece_priority]) + self.new_partials = self.storagewrapper.unpickle(d, pieces) + + + def tie_in(self, picker, cancelfunc, requestmorefunc): + self.picker = picker + self.cancelfunc = cancelfunc + self.requestmorefunc = requestmorefunc + + if self.new_priority: + self.priority = self.new_priority + self.new_priority = None + self.new_piece_priority = self._set_piece_priority(self.priority) + + if self.new_partials: + shuffle(self.new_partials) + for p in self.new_partials: + self.picker.requested(p) + self.new_partials = None + + + def _set_files_disabled(self, old_priority, new_priority): + old_disabled = [p == -1 for p in old_priority] + new_disabled = [p == -1 for p in new_priority] + data_to_update = [] + for f in xrange(self.numfiles): + if new_disabled[f] != old_disabled[f]: + data_to_update.extend(self.storage.get_piece_update_list(f)) + buffer = [] + for piece, start, length in data_to_update: + if self.storagewrapper.has_data(piece): + data = self.storagewrapper.read_raw(piece, start, length) + if data is None: + return False + buffer.append((piece, start, data)) + + files_updated = False + try: + for f in xrange(self.numfiles): + if new_disabled[f] and not old_disabled[f]: + self.storage.disable_file(f) + files_updated = True + if old_disabled[f] and not new_disabled[f]: + self.storage.enable_file(f) + files_updated = True + except (IOError, OSError), e: + if new_disabled[f]: + msg = "can't open partial file for " + else: + msg = 'unable to open ' + self.failfunc(msg + self.files[f][0] + ': ' + str(e)) + return False + if files_updated: + self.storage.reset_file_status() + + changed_pieces = {} + for piece, start, data in buffer: + if not self.storagewrapper.write_raw(piece, start, data): + return False + data.release() + changed_pieces[piece] = 1 + if not self.storagewrapper.doublecheck_data(changed_pieces): + return False + + return True + + + def _get_piece_priority_list(self, file_priority_list): + l = [-1] * self.numpieces + for f in xrange(self.numfiles): + if file_priority_list[f] == -1: + continue + for i in self.filepieces[f]: + if l[i] == -1: + l[i] = file_priority_list[f] + continue + l[i] = min(l[i], file_priority_list[f]) + return l + + + def _set_piece_priority(self, new_priority): + new_piece_priority = self._get_piece_priority_list(new_priority) + pieces = range(self.numpieces) + shuffle(pieces) + new_blocked = [] + new_unblocked = [] + for piece in pieces: + self.picker.set_priority(piece, new_piece_priority[piece]) + o = self.piece_priority[piece] == -1 + n = new_piece_priority[piece] == -1 + if n and not o: + new_blocked.append(piece) + if o and not n: + new_unblocked.append(piece) + if new_blocked: + self.cancelfunc(new_blocked) + self.storagewrapper.reblock([i == -1 for i in new_piece_priority]) + if new_unblocked: + self.requestmorefunc(new_unblocked) + + return new_piece_priority + + + def set_priorities_now(self, new_priority = None): + if not new_priority: + new_priority = self.new_priority + self.new_priority = None # potential race condition + if not new_priority: + return + old_priority = self.priority + self.priority = new_priority + if not self._set_files_disabled(old_priority, new_priority): + return + self.piece_priority 
= self._set_piece_priority(new_priority) + + def set_priorities(self, new_priority): + self.new_priority = new_priority + def s(self=self): + self.set_priorities_now() + self.sched(s) + + def set_priority(self, f, p): + new_priority = self.get_priorities() + new_priority[f] = p + self.set_priorities(new_priority) + + def get_priorities(self): + priority = self.new_priority + if not priority: + priority = self.priority # potential race condition + return [i for i in priority] + + def __setitem__(self, index, val): + self.set_priority(index, val) + + def __getitem__(self, index): + try: + return self.new_priority[index] + except: + return self.priority[index] + + + def finish(self): + pass +# for f in xrange(self.numfiles): +# if self.priority[f] == -1: +# self.storage.delete_file(f) + + def pickle(self): + d = {'priority': self.priority} + try: + s = self.storage.pickle() + sw = self.storagewrapper.pickle() + for k in s.keys(): + d[k] = s[k] + for k in sw.keys(): + d[k] = sw[k] + except (IOError, OSError): + pass + return d diff --git a/tribler-mod/Tribler/Core/BitTornado/BT1/Filter.py b/tribler-mod/Tribler/Core/BitTornado/BT1/Filter.py new file mode 100644 index 0000000..acd0d08 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/BT1/Filter.py @@ -0,0 +1,16 @@ +from time import localtime, strftime +# Written by Bram Cohen +# see LICENSE.txt for license information + +class Filter: + def __init__(self, callback): + self.callback = callback + + def check(self, ip, paramslist, headers): + + def params(key, default = None, l = paramslist): + if l.has_key(key): + return l[key][0] + return default + + return None diff --git a/tribler-mod/Tribler/Core/BitTornado/BT1/Filter.py.bak b/tribler-mod/Tribler/Core/BitTornado/BT1/Filter.py.bak new file mode 100644 index 0000000..a564efb --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/BT1/Filter.py.bak @@ -0,0 +1,15 @@ +# Written by Bram Cohen +# see LICENSE.txt for license information + +class Filter: + def __init__(self, callback): + self.callback = callback + + def check(self, ip, paramslist, headers): + + def params(key, default = None, l = paramslist): + if l.has_key(key): + return l[key][0] + return default + + return None diff --git a/tribler-mod/Tribler/Core/BitTornado/BT1/HTTPDownloader.py b/tribler-mod/Tribler/Core/BitTornado/BT1/HTTPDownloader.py new file mode 100644 index 0000000..ff3eb14 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/BT1/HTTPDownloader.py @@ -0,0 +1,291 @@ +from time import localtime, strftime +# Written by John Hoffman +# see LICENSE.txt for license information + +from Tribler.Core.BitTornado.CurrentRateMeasure import Measure +from random import randint +from urlparse import urlparse +from httplib import HTTPConnection +from urllib import quote +from threading import Thread +from Tribler.Core.BitTornado.__init__ import product_name,version_short +# 2fastbt_ +from Tribler.Core.CoopDownload.Helper import SingleDownloadHelperInterface +# _2fastbt + +try: + True +except: + True = 1 + False = 0 + +# 2fastbt_ +DEBUG = False +# _2fastbt + +EXPIRE_TIME = 60 * 60 + +VERSION = product_name+'/'+version_short + +class haveComplete: + def complete(self): + return True + def __getitem__(self, x): + return True +haveall = haveComplete() + +# 2fastbt_ +class SingleDownload(SingleDownloadHelperInterface): +# _2fastbt + def __init__(self, downloader, url): +# 2fastbt_ + SingleDownloadHelperInterface.__init__(self) +# _2fastbt + self.downloader = downloader + self.baseurl = url + try: + (scheme, self.netloc, path, pars, 
query, fragment) = urlparse(url) + except: + self.downloader.errorfunc('cannot parse http seed address: '+url) + return + if scheme != 'http': + self.downloader.errorfunc('http seed url not http: '+url) + return + try: + self.connection = HTTPConnection(self.netloc) + except: + self.downloader.errorfunc('cannot connect to http seed: '+url) + return + self.seedurl = path + if pars: + self.seedurl += ';'+pars + self.seedurl += '?' + if query: + self.seedurl += query+'&' + self.seedurl += 'info_hash='+quote(self.downloader.infohash) + + self.measure = Measure(downloader.max_rate_period) + self.index = None + self.url = '' + self.requests = [] + self.request_size = 0 + self.endflag = False + self.error = None + self.retry_period = 30 + self._retry_period = None + self.errorcount = 0 + self.goodseed = False + self.active = False + self.cancelled = False + self.resched(randint(2, 10)) + + def resched(self, len = None): + if len is None: + len = self.retry_period + if self.errorcount > 3: + len = len * (self.errorcount - 2) + self.downloader.rawserver.add_task(self.download, len) + + def _want(self, index): + if self.endflag: + return self.downloader.storage.do_I_have_requests(index) + else: + return self.downloader.storage.is_unstarted(index) + + def download(self): +# 2fastbt_ + if DEBUG: + print "http-sdownload: download()" + if self.is_frozen_by_helper(): + if DEBUG: + print "http-sdownload: blocked, rescheduling" + self.resched(1) + return +# _2fastbt + self.cancelled = False + if self.downloader.picker.am_I_complete(): + self.downloader.downloads.remove(self) + return + self.index = self.downloader.picker.next(haveall, self._want, self) +# 2fastbt_ + if self.index is None and self.frozen_by_helper: + self.resched(0.01) + return +# _2fastbt + if ( self.index is None and not self.endflag + and not self.downloader.peerdownloader.has_downloaders() ): + self.endflag = True + self.index = self.downloader.picker.next(haveall, self._want, self) + if self.index is None: + self.endflag = True + self.resched() + else: + self.url = ( self.seedurl+'&piece='+str(self.index) ) + self._get_requests() + if self.request_size < self.downloader.storage._piecelen(self.index): + self.url += '&ranges='+self._request_ranges() + rq = Thread(target = self._request) + rq.setName( "HTTPDownloader"+rq.getName() ) + rq.setDaemon(True) + rq.start() + self.active = True + + def _request(self): + import encodings.ascii + import encodings.punycode + import encodings.idna + + self.error = None + self.received_data = None + try: + self.connection.request('GET', self.url, None, + {'User-Agent': VERSION}) + r = self.connection.getresponse() + self.connection_status = r.status + self.received_data = r.read() + except Exception, e: + self.error = 'error accessing http seed: '+str(e) + try: + self.connection.close() + except: + pass + try: + self.connection = HTTPConnection(self.netloc) + except: + self.connection = None # will cause an exception and retry next cycle + self.downloader.rawserver.add_task(self.request_finished) + + def request_finished(self): + self.active = False + if self.error is not None: + if self.goodseed: + self.downloader.errorfunc(self.error) + self.errorcount += 1 + if self.received_data: + self.errorcount = 0 + if not self._got_data(): + self.received_data = None + if not self.received_data: + self._release_requests() + self.downloader.peerdownloader.piece_flunked(self.index) + if self._retry_period: + self.resched(self._retry_period) + self._retry_period = None + return + self.resched() + + def 
_got_data(self): + if self.connection_status == 503: # seed is busy + try: + self.retry_period = max(int(self.received_data), 5) + except: + pass + return False + if self.connection_status != 200: + self.errorcount += 1 + return False + self._retry_period = 1 + if len(self.received_data) != self.request_size: + if self.goodseed: + self.downloader.errorfunc('corrupt data from http seed - redownloading') + return False + self.measure.update_rate(len(self.received_data)) + self.downloader.measurefunc(len(self.received_data)) + if self.cancelled: + return False + if not self._fulfill_requests(): + return False + if not self.goodseed: + self.goodseed = True + self.downloader.seedsfound += 1 + if self.downloader.storage.do_I_have(self.index): + self.downloader.picker.complete(self.index) + self.downloader.peerdownloader.check_complete(self.index) + self.downloader.gotpiecefunc(self.index) + return True + + def _get_requests(self): + self.requests = [] + self.request_size = 0L + while self.downloader.storage.do_I_have_requests(self.index): + r = self.downloader.storage.new_request(self.index) + self.requests.append(r) + self.request_size += r[1] + self.requests.sort() + + def _fulfill_requests(self): + start = 0L + success = True + while self.requests: + begin, length = self.requests.pop(0) +# 2fastbt_ + if not self.downloader.storage.piece_came_in(self.index, begin, [], + self.received_data[start:start+length], length): +# _2fastbt + success = False + break + start += length + return success + + def _release_requests(self): + for begin, length in self.requests: + self.downloader.storage.request_lost(self.index, begin, length) + self.requests = [] + + def _request_ranges(self): + s = '' + begin, length = self.requests[0] + for begin1, length1 in self.requests[1:]: + if begin + length == begin1: + length += length1 + continue + else: + if s: + s += ',' + s += str(begin)+'-'+str(begin+length-1) + begin, length = begin1, length1 + if s: + s += ',' + s += str(begin)+'-'+str(begin+length-1) + return s + +# 2fastbt_ + def helper_forces_unchoke(self): + pass + + def helper_set_freezing(self,val): + self.frozen_by_helper = val +# _2fastbt + + + +class HTTPDownloader: + def __init__(self, storage, picker, rawserver, + finflag, errorfunc, peerdownloader, + max_rate_period, infohash, measurefunc, gotpiecefunc): + self.storage = storage + self.picker = picker + self.rawserver = rawserver + self.finflag = finflag + self.errorfunc = errorfunc + self.peerdownloader = peerdownloader + self.infohash = infohash + self.max_rate_period = max_rate_period + self.gotpiecefunc = gotpiecefunc + self.measurefunc = measurefunc + self.downloads = [] + self.seedsfound = 0 + + def make_download(self, url): + self.downloads.append(SingleDownload(self, url)) + return self.downloads[-1] + + def get_downloads(self): + if self.finflag.isSet(): + return [] + return self.downloads + + def cancel_piece_download(self, pieces): + for d in self.downloads: + if d.active and d.index in pieces: + d.cancelled = True diff --git a/tribler-mod/Tribler/Core/BitTornado/BT1/HTTPDownloader.py.bak b/tribler-mod/Tribler/Core/BitTornado/BT1/HTTPDownloader.py.bak new file mode 100644 index 0000000..9cb57d9 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/BT1/HTTPDownloader.py.bak @@ -0,0 +1,290 @@ +# Written by John Hoffman +# see LICENSE.txt for license information + +from Tribler.Core.BitTornado.CurrentRateMeasure import Measure +from random import randint +from urlparse import urlparse +from httplib import HTTPConnection +from urllib import 
quote +from threading import Thread +from Tribler.Core.BitTornado.__init__ import product_name,version_short +# 2fastbt_ +from Tribler.Core.CoopDownload.Helper import SingleDownloadHelperInterface +# _2fastbt + +try: + True +except: + True = 1 + False = 0 + +# 2fastbt_ +DEBUG = False +# _2fastbt + +EXPIRE_TIME = 60 * 60 + +VERSION = product_name+'/'+version_short + +class haveComplete: + def complete(self): + return True + def __getitem__(self, x): + return True +haveall = haveComplete() + +# 2fastbt_ +class SingleDownload(SingleDownloadHelperInterface): +# _2fastbt + def __init__(self, downloader, url): +# 2fastbt_ + SingleDownloadHelperInterface.__init__(self) +# _2fastbt + self.downloader = downloader + self.baseurl = url + try: + (scheme, self.netloc, path, pars, query, fragment) = urlparse(url) + except: + self.downloader.errorfunc('cannot parse http seed address: '+url) + return + if scheme != 'http': + self.downloader.errorfunc('http seed url not http: '+url) + return + try: + self.connection = HTTPConnection(self.netloc) + except: + self.downloader.errorfunc('cannot connect to http seed: '+url) + return + self.seedurl = path + if pars: + self.seedurl += ';'+pars + self.seedurl += '?' + if query: + self.seedurl += query+'&' + self.seedurl += 'info_hash='+quote(self.downloader.infohash) + + self.measure = Measure(downloader.max_rate_period) + self.index = None + self.url = '' + self.requests = [] + self.request_size = 0 + self.endflag = False + self.error = None + self.retry_period = 30 + self._retry_period = None + self.errorcount = 0 + self.goodseed = False + self.active = False + self.cancelled = False + self.resched(randint(2, 10)) + + def resched(self, len = None): + if len is None: + len = self.retry_period + if self.errorcount > 3: + len = len * (self.errorcount - 2) + self.downloader.rawserver.add_task(self.download, len) + + def _want(self, index): + if self.endflag: + return self.downloader.storage.do_I_have_requests(index) + else: + return self.downloader.storage.is_unstarted(index) + + def download(self): +# 2fastbt_ + if DEBUG: + print "http-sdownload: download()" + if self.is_frozen_by_helper(): + if DEBUG: + print "http-sdownload: blocked, rescheduling" + self.resched(1) + return +# _2fastbt + self.cancelled = False + if self.downloader.picker.am_I_complete(): + self.downloader.downloads.remove(self) + return + self.index = self.downloader.picker.next(haveall, self._want, self) +# 2fastbt_ + if self.index is None and self.frozen_by_helper: + self.resched(0.01) + return +# _2fastbt + if ( self.index is None and not self.endflag + and not self.downloader.peerdownloader.has_downloaders() ): + self.endflag = True + self.index = self.downloader.picker.next(haveall, self._want, self) + if self.index is None: + self.endflag = True + self.resched() + else: + self.url = ( self.seedurl+'&piece='+str(self.index) ) + self._get_requests() + if self.request_size < self.downloader.storage._piecelen(self.index): + self.url += '&ranges='+self._request_ranges() + rq = Thread(target = self._request) + rq.setName( "HTTPDownloader"+rq.getName() ) + rq.setDaemon(True) + rq.start() + self.active = True + + def _request(self): + import encodings.ascii + import encodings.punycode + import encodings.idna + + self.error = None + self.received_data = None + try: + self.connection.request('GET', self.url, None, + {'User-Agent': VERSION}) + r = self.connection.getresponse() + self.connection_status = r.status + self.received_data = r.read() + except Exception, e: + self.error = 'error accessing 
http seed: '+str(e) + try: + self.connection.close() + except: + pass + try: + self.connection = HTTPConnection(self.netloc) + except: + self.connection = None # will cause an exception and retry next cycle + self.downloader.rawserver.add_task(self.request_finished) + + def request_finished(self): + self.active = False + if self.error is not None: + if self.goodseed: + self.downloader.errorfunc(self.error) + self.errorcount += 1 + if self.received_data: + self.errorcount = 0 + if not self._got_data(): + self.received_data = None + if not self.received_data: + self._release_requests() + self.downloader.peerdownloader.piece_flunked(self.index) + if self._retry_period: + self.resched(self._retry_period) + self._retry_period = None + return + self.resched() + + def _got_data(self): + if self.connection_status == 503: # seed is busy + try: + self.retry_period = max(int(self.received_data), 5) + except: + pass + return False + if self.connection_status != 200: + self.errorcount += 1 + return False + self._retry_period = 1 + if len(self.received_data) != self.request_size: + if self.goodseed: + self.downloader.errorfunc('corrupt data from http seed - redownloading') + return False + self.measure.update_rate(len(self.received_data)) + self.downloader.measurefunc(len(self.received_data)) + if self.cancelled: + return False + if not self._fulfill_requests(): + return False + if not self.goodseed: + self.goodseed = True + self.downloader.seedsfound += 1 + if self.downloader.storage.do_I_have(self.index): + self.downloader.picker.complete(self.index) + self.downloader.peerdownloader.check_complete(self.index) + self.downloader.gotpiecefunc(self.index) + return True + + def _get_requests(self): + self.requests = [] + self.request_size = 0L + while self.downloader.storage.do_I_have_requests(self.index): + r = self.downloader.storage.new_request(self.index) + self.requests.append(r) + self.request_size += r[1] + self.requests.sort() + + def _fulfill_requests(self): + start = 0L + success = True + while self.requests: + begin, length = self.requests.pop(0) +# 2fastbt_ + if not self.downloader.storage.piece_came_in(self.index, begin, [], + self.received_data[start:start+length], length): +# _2fastbt + success = False + break + start += length + return success + + def _release_requests(self): + for begin, length in self.requests: + self.downloader.storage.request_lost(self.index, begin, length) + self.requests = [] + + def _request_ranges(self): + s = '' + begin, length = self.requests[0] + for begin1, length1 in self.requests[1:]: + if begin + length == begin1: + length += length1 + continue + else: + if s: + s += ',' + s += str(begin)+'-'+str(begin+length-1) + begin, length = begin1, length1 + if s: + s += ',' + s += str(begin)+'-'+str(begin+length-1) + return s + +# 2fastbt_ + def helper_forces_unchoke(self): + pass + + def helper_set_freezing(self,val): + self.frozen_by_helper = val +# _2fastbt + + + +class HTTPDownloader: + def __init__(self, storage, picker, rawserver, + finflag, errorfunc, peerdownloader, + max_rate_period, infohash, measurefunc, gotpiecefunc): + self.storage = storage + self.picker = picker + self.rawserver = rawserver + self.finflag = finflag + self.errorfunc = errorfunc + self.peerdownloader = peerdownloader + self.infohash = infohash + self.max_rate_period = max_rate_period + self.gotpiecefunc = gotpiecefunc + self.measurefunc = measurefunc + self.downloads = [] + self.seedsfound = 0 + + def make_download(self, url): + self.downloads.append(SingleDownload(self, url)) + return 
self.downloads[-1] + + def get_downloads(self): + if self.finflag.isSet(): + return [] + return self.downloads + + def cancel_piece_download(self, pieces): + for d in self.downloads: + if d.active and d.index in pieces: + d.cancelled = True diff --git a/tribler-mod/Tribler/Core/BitTornado/BT1/MessageID.py b/tribler-mod/Tribler/Core/BitTornado/BT1/MessageID.py new file mode 100644 index 0000000..2ce8aab --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/BT1/MessageID.py @@ -0,0 +1,236 @@ +from time import localtime, strftime +# Written by Jie Yang, Arno Bakker +# see LICENSE.txt for license information +# +# All message IDs in BitTorrent Protocol and our extensions +# +# Arno: please don't define stuff until the spec is ready +# + +protocol_name = 'BitTorrent protocol' +# Enable Tribler extensions: +# Left-most bit = Azureus Enhanced Messaging Protocol (AEMP) +# Left+42 bit = Tribler Simple Merkle Hashes extension +# Left+43 bit = Tribler Overlay swarm extension +# AND uTorrent extended protocol, conflicting. See EXTEND message +# Right-most bit = BitTorrent DHT extension +#option_pattern = chr(0)*8 +option_pattern = '\x00\x00\x00\x00\x00\x30\x00\x00' +disabled_overlay_option_pattern = '\x00\x00\x00\x00\x00\x20\x00\x00' + + +CHOKE = chr(0) +UNCHOKE = chr(1) +INTERESTED = chr(2) +NOT_INTERESTED = chr(3) + +# index +HAVE = chr(4) +# index, bitfield +BITFIELD = chr(5) +# index, begin, length +REQUEST = chr(6) +# index, begin, piece +PIECE = chr(7) +# index, begin, piece +CANCEL = chr(8) +# 2-byte port +PORT = chr(9) + +# uTorrent and Bram's BitTorrent now support an extended protocol +EXTEND = chr(20) + + +## IDs 255 and 254 are reserved. Tribler extensions number downwards + +## PermID /Overlay Swarm Extension +# ctxt +CHALLENGE = chr(253) +# rdata1 +RESPONSE1 = chr(252) +# rdata2 +RESPONSE2 = chr(251) + +PermIDMessages = [CHALLENGE, RESPONSE1, RESPONSE2] + +## Merkle Hash Extension +# Merkle: PIECE message with hashes +HASHPIECE = chr(250) + +## Buddycast Extension +""" +{'preferences':[[infohash]], + #'permid': my permid, # not used since 3.3.2 + 'connectable': self connectability, # used since version > 3.5 + 'name': my name, + 'ip':current ip, + 'port':current listening port, + 'taste_buddies':[{'preferences':[[infohash]], + 'permid':Permanent ID, + 'ip':the last known IP, + 'port':the last known listen port, + 'age':the age of this preference list in integer seconds + }], + 'random_peers':[{'permid':Permanent ID, + 'ip':the last known IP, + 'port':the last known listen port, + 'age':the age of this preference list in integer seconds + }] + 'npeers': Number of peers known to peer + 'nfiles': Number of files known to peer + 'ndls': Number of downloads by peer +} +""" +# payload is beencoded dict +BUDDYCAST = chr(249) +# empty payload +KEEP_ALIVE = chr(240) +# Bartercast, payload is bencoded dict +BARTERCAST = chr(236) + +VOTECAST = chr(226) +MODERATIONCAST_HAVE = chr(227) +MODERATIONCAST_REQUEST = chr(228) +MODERATIONCAST_REPLY = chr(229) + +#BuddyCastMessages = [BARTERCAST, BUDDYCAST, KEEP_ALIVE] +BuddyCastMessages = [MODERATIONCAST_HAVE, MODERATIONCAST_REQUEST, MODERATIONCAST_REPLY, VOTECAST, BARTERCAST, BUDDYCAST, KEEP_ALIVE] + +# bencoded torrent_hash (Arno,2007-08-14: shouldn't be bencoded, but is) +GET_METADATA = chr(248) +# {'torrent_hash', 'metadata', ... 
} +METADATA = chr(247) + +MetadataMessages = [GET_METADATA, METADATA] + +# 2fastbt_ +## Cooperative Download Extension +# torrent_hash +DOWNLOAD_HELP = chr(246) +# torrent_hash +STOP_DOWNLOAD_HELP = chr(245) + +# For connectability test +DIALBACK_REQUEST = chr(244) +DIALBACK_REPLY = chr(243) + +DialbackMessages = [DIALBACK_REQUEST,DIALBACK_REPLY] + +# torrent_hash + 1-byte all_or_nothing + bencode([piece num,...]) +RESERVE_PIECES = chr(242) +# torrent_hash + bencode([piece num,...]) +PIECES_RESERVED = chr(241) + +HelpCoordinatorMessages = [DOWNLOAD_HELP,STOP_DOWNLOAD_HELP,PIECES_RESERVED] +HelpHelperMessages = [RESERVE_PIECES] +# _2fastbt + +# Note: SecureOverlay's KEEP_ALIVE is 240 +## Social-Network feature +SOCIAL_OVERLAP = chr(239) + +SocialNetworkMessages = [SOCIAL_OVERLAP] + +# Remote query extension +QUERY = chr(238) +QUERY_REPLY = chr(237) + +RemoteQueryMessages = [QUERY,QUERY_REPLY] + +# g2g info (uplink statistics, etc) +G2G_PIECE_XFER = chr(235) + +VoDMessages = [G2G_PIECE_XFER] + +# Friendship messages +FRIENDSHIP = chr(234) + +FriendshipMessages = [FRIENDSHIP] + +####### FREE ID = 233 + +# Generic Crawler messages +CRAWLER_REQUEST = chr(232) +CRAWLER_REPLY = chr(231) + +CrawlerMessages = [CRAWLER_REQUEST, CRAWLER_REPLY] + +# All overlay-swarm messages +OverlaySwarmMessages = PermIDMessages + BuddyCastMessages + MetadataMessages + HelpCoordinatorMessages + HelpHelperMessages + SocialNetworkMessages + RemoteQueryMessages + CrawlerMessages + +# Crawler sub-messages +CRAWLER_DATABASE_QUERY = chr(1) +CRAWLER_SEEDINGSTATS_QUERY = chr(2) +CRAWLER_NATCHECK = chr(3) +CRAWLER_FRIENDSHIP_STATS = chr(4) +CRAWLER_NATTRAVERSAL = chr(5) +CRAWLER_VIDEOPLAYBACK_INFO_QUERY = chr(6) +CRAWLER_VIDEOPLAYBACK_EVENT_QUERY = chr(7) + +message_map = { + CHOKE:"CHOKE", + UNCHOKE:"UNCHOKE", + INTERESTED:"INTEREST", + NOT_INTERESTED:"NOT_INTEREST", + HAVE:"HAVE", + BITFIELD:"BITFIELD", + REQUEST:"REQUEST", + CANCEL:"CANCEL", + PIECE:"PIECE", + PORT:"PORT", + EXTEND:"EXTEND", + + CHALLENGE:"CHALLENGE", + RESPONSE1:"RESPONSE1", + RESPONSE2:"RESPONSE2", + HASHPIECE:"HASHPIECE", + BUDDYCAST:"BUDDYCAST", + GET_METADATA:"GET_METADATA", + METADATA:"METADATA", + DOWNLOAD_HELP:"DOWNLOAD_HELP", + STOP_DOWNLOAD_HELP:"STOP_DOWNLOAD_HELP", + PIECES_RESERVED:"PIECES_RESERVED", + RESERVE_PIECES:"RESERVE_PIECES", + DIALBACK_REQUEST:"DIALBACK_REQUEST", + DIALBACK_REPLY:"DIALBACK_REPLY", + KEEP_ALIVE:"KEEP_ALIVE", + SOCIAL_OVERLAP:"SOCIAL_OVERLAP", + QUERY:"QUERY", + QUERY_REPLY:"QUERY_REPLY", + MODERATIONCAST_HAVE:"MODERATIONCAST_HAVE", + MODERATIONCAST_REQUEST:"MODERATIONCAST_REQUEST", + MODERATIONCAST_REPLY:"MODERATIONCAST_REPLY", + VOTECAST:"VOTECAST", + BARTERCAST:"BARTERCAST", + G2G_PIECE_XFER: "G2G_PIECE_XFER", + FRIENDSHIP:"FRIENDSHIP", + + CRAWLER_REQUEST:"CRAWLER_REQUEST", + CRAWLER_REQUEST+CRAWLER_DATABASE_QUERY:"CRAWLER_DATABASE_QUERY_REQUEST", + CRAWLER_REQUEST+CRAWLER_SEEDINGSTATS_QUERY:"CRAWLER_SEEDINGSTATS_QUERY_REQUEST", + CRAWLER_REQUEST+CRAWLER_NATCHECK:"CRAWLER_NATCHECK_QUERY_REQUEST", + CRAWLER_REQUEST+CRAWLER_NATTRAVERSAL:"CRAWLER_NATTRAVERSAL_QUERY_REQUEST", + CRAWLER_REQUEST+CRAWLER_FRIENDSHIP_STATS:"CRAWLER_FRIENDSHIP_STATS_REQUEST", + CRAWLER_REQUEST+CRAWLER_VIDEOPLAYBACK_INFO_QUERY:"CRAWLER_VIDEOPLAYBACK_INFO_QUERY_REQUEST", + CRAWLER_REQUEST+CRAWLER_VIDEOPLAYBACK_EVENT_QUERY:"CRAWLER_VIDEOPLAYBACK_EVENT_QUERY_REQUEST", + + CRAWLER_REPLY:"CRAWLER_REPLY", + CRAWLER_REPLY+CRAWLER_DATABASE_QUERY:"CRAWLER_DATABASE_QUERY_REPLY", + 
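+    # Note (illustrative): crawler entries are keyed on the two-byte string
+    # formed by concatenating the message ID with the sub-message ID, so
+    # getMessageName(CRAWLER_REQUEST+CRAWLER_DATABASE_QUERY) below resolves
+    # to "CRAWLER_DATABASE_QUERY_REQUEST". Also note that the
+    # CRAWLER_REPLY+CRAWLER_FRIENDSHIP_STATS key appears twice further down,
+    # so the later "CRAWLER_FRIENDSHIP_STATS_REPLY" value is the one kept.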
CRAWLER_REPLY+CRAWLER_SEEDINGSTATS_QUERY:"CRAWLER_SEEDINGSTATS_QUERY_REPLY", + CRAWLER_REPLY+CRAWLER_NATCHECK:"CRAWLER_NATCHECK_QUERY_REPLY", + CRAWLER_REPLY+CRAWLER_NATTRAVERSAL:"CRAWLER_NATTRAVERSAL_QUERY_REPLY", + CRAWLER_REPLY+CRAWLER_FRIENDSHIP_STATS:"CRAWLER_FRIENDSHIP_STATS", + CRAWLER_REPLY+CRAWLER_FRIENDSHIP_STATS:"CRAWLER_FRIENDSHIP_STATS_REPLY", + CRAWLER_REPLY+CRAWLER_VIDEOPLAYBACK_INFO_QUERY:"CRAWLER_VIDEOPLAYBACK_INFO_QUERY_REPLY", + CRAWLER_REPLY+CRAWLER_VIDEOPLAYBACK_EVENT_QUERY:"CRAWLER_VIDEOPLAYBACK_EVENT_QUERY_REPLY" +} + +def getMessageName(s): + """ + Return the message name for message id s. This may be either a one + or a two byte sting + """ + if s in message_map: + return message_map[s] + else: + return "Unknown_MessageID_" + "_".join([str(ord(c)) for c in s]) diff --git a/tribler-mod/Tribler/Core/BitTornado/BT1/MessageID.py.bak b/tribler-mod/Tribler/Core/BitTornado/BT1/MessageID.py.bak new file mode 100644 index 0000000..cd542ea --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/BT1/MessageID.py.bak @@ -0,0 +1,235 @@ +# Written by Jie Yang, Arno Bakker +# see LICENSE.txt for license information +# +# All message IDs in BitTorrent Protocol and our extensions +# +# Arno: please don't define stuff until the spec is ready +# + +protocol_name = 'BitTorrent protocol' +# Enable Tribler extensions: +# Left-most bit = Azureus Enhanced Messaging Protocol (AEMP) +# Left+42 bit = Tribler Simple Merkle Hashes extension +# Left+43 bit = Tribler Overlay swarm extension +# AND uTorrent extended protocol, conflicting. See EXTEND message +# Right-most bit = BitTorrent DHT extension +#option_pattern = chr(0)*8 +option_pattern = '\x00\x00\x00\x00\x00\x30\x00\x00' +disabled_overlay_option_pattern = '\x00\x00\x00\x00\x00\x20\x00\x00' + + +CHOKE = chr(0) +UNCHOKE = chr(1) +INTERESTED = chr(2) +NOT_INTERESTED = chr(3) + +# index +HAVE = chr(4) +# index, bitfield +BITFIELD = chr(5) +# index, begin, length +REQUEST = chr(6) +# index, begin, piece +PIECE = chr(7) +# index, begin, piece +CANCEL = chr(8) +# 2-byte port +PORT = chr(9) + +# uTorrent and Bram's BitTorrent now support an extended protocol +EXTEND = chr(20) + + +## IDs 255 and 254 are reserved. 
Tribler extensions number downwards + +## PermID /Overlay Swarm Extension +# ctxt +CHALLENGE = chr(253) +# rdata1 +RESPONSE1 = chr(252) +# rdata2 +RESPONSE2 = chr(251) + +PermIDMessages = [CHALLENGE, RESPONSE1, RESPONSE2] + +## Merkle Hash Extension +# Merkle: PIECE message with hashes +HASHPIECE = chr(250) + +## Buddycast Extension +""" +{'preferences':[[infohash]], + #'permid': my permid, # not used since 3.3.2 + 'connectable': self connectability, # used since version > 3.5 + 'name': my name, + 'ip':current ip, + 'port':current listening port, + 'taste_buddies':[{'preferences':[[infohash]], + 'permid':Permanent ID, + 'ip':the last known IP, + 'port':the last known listen port, + 'age':the age of this preference list in integer seconds + }], + 'random_peers':[{'permid':Permanent ID, + 'ip':the last known IP, + 'port':the last known listen port, + 'age':the age of this preference list in integer seconds + }] + 'npeers': Number of peers known to peer + 'nfiles': Number of files known to peer + 'ndls': Number of downloads by peer +} +""" +# payload is beencoded dict +BUDDYCAST = chr(249) +# empty payload +KEEP_ALIVE = chr(240) +# Bartercast, payload is bencoded dict +BARTERCAST = chr(236) + +VOTECAST = chr(226) +MODERATIONCAST_HAVE = chr(227) +MODERATIONCAST_REQUEST = chr(228) +MODERATIONCAST_REPLY = chr(229) + +#BuddyCastMessages = [BARTERCAST, BUDDYCAST, KEEP_ALIVE] +BuddyCastMessages = [MODERATIONCAST_HAVE, MODERATIONCAST_REQUEST, MODERATIONCAST_REPLY, VOTECAST, BARTERCAST, BUDDYCAST, KEEP_ALIVE] + +# bencoded torrent_hash (Arno,2007-08-14: shouldn't be bencoded, but is) +GET_METADATA = chr(248) +# {'torrent_hash', 'metadata', ... } +METADATA = chr(247) + +MetadataMessages = [GET_METADATA, METADATA] + +# 2fastbt_ +## Cooperative Download Extension +# torrent_hash +DOWNLOAD_HELP = chr(246) +# torrent_hash +STOP_DOWNLOAD_HELP = chr(245) + +# For connectability test +DIALBACK_REQUEST = chr(244) +DIALBACK_REPLY = chr(243) + +DialbackMessages = [DIALBACK_REQUEST,DIALBACK_REPLY] + +# torrent_hash + 1-byte all_or_nothing + bencode([piece num,...]) +RESERVE_PIECES = chr(242) +# torrent_hash + bencode([piece num,...]) +PIECES_RESERVED = chr(241) + +HelpCoordinatorMessages = [DOWNLOAD_HELP,STOP_DOWNLOAD_HELP,PIECES_RESERVED] +HelpHelperMessages = [RESERVE_PIECES] +# _2fastbt + +# Note: SecureOverlay's KEEP_ALIVE is 240 +## Social-Network feature +SOCIAL_OVERLAP = chr(239) + +SocialNetworkMessages = [SOCIAL_OVERLAP] + +# Remote query extension +QUERY = chr(238) +QUERY_REPLY = chr(237) + +RemoteQueryMessages = [QUERY,QUERY_REPLY] + +# g2g info (uplink statistics, etc) +G2G_PIECE_XFER = chr(235) + +VoDMessages = [G2G_PIECE_XFER] + +# Friendship messages +FRIENDSHIP = chr(234) + +FriendshipMessages = [FRIENDSHIP] + +####### FREE ID = 233 + +# Generic Crawler messages +CRAWLER_REQUEST = chr(232) +CRAWLER_REPLY = chr(231) + +CrawlerMessages = [CRAWLER_REQUEST, CRAWLER_REPLY] + +# All overlay-swarm messages +OverlaySwarmMessages = PermIDMessages + BuddyCastMessages + MetadataMessages + HelpCoordinatorMessages + HelpHelperMessages + SocialNetworkMessages + RemoteQueryMessages + CrawlerMessages + +# Crawler sub-messages +CRAWLER_DATABASE_QUERY = chr(1) +CRAWLER_SEEDINGSTATS_QUERY = chr(2) +CRAWLER_NATCHECK = chr(3) +CRAWLER_FRIENDSHIP_STATS = chr(4) +CRAWLER_NATTRAVERSAL = chr(5) +CRAWLER_VIDEOPLAYBACK_INFO_QUERY = chr(6) +CRAWLER_VIDEOPLAYBACK_EVENT_QUERY = chr(7) + +message_map = { + CHOKE:"CHOKE", + UNCHOKE:"UNCHOKE", + INTERESTED:"INTEREST", + NOT_INTERESTED:"NOT_INTEREST", + HAVE:"HAVE", + 
BITFIELD:"BITFIELD", + REQUEST:"REQUEST", + CANCEL:"CANCEL", + PIECE:"PIECE", + PORT:"PORT", + EXTEND:"EXTEND", + + CHALLENGE:"CHALLENGE", + RESPONSE1:"RESPONSE1", + RESPONSE2:"RESPONSE2", + HASHPIECE:"HASHPIECE", + BUDDYCAST:"BUDDYCAST", + GET_METADATA:"GET_METADATA", + METADATA:"METADATA", + DOWNLOAD_HELP:"DOWNLOAD_HELP", + STOP_DOWNLOAD_HELP:"STOP_DOWNLOAD_HELP", + PIECES_RESERVED:"PIECES_RESERVED", + RESERVE_PIECES:"RESERVE_PIECES", + DIALBACK_REQUEST:"DIALBACK_REQUEST", + DIALBACK_REPLY:"DIALBACK_REPLY", + KEEP_ALIVE:"KEEP_ALIVE", + SOCIAL_OVERLAP:"SOCIAL_OVERLAP", + QUERY:"QUERY", + QUERY_REPLY:"QUERY_REPLY", + MODERATIONCAST_HAVE:"MODERATIONCAST_HAVE", + MODERATIONCAST_REQUEST:"MODERATIONCAST_REQUEST", + MODERATIONCAST_REPLY:"MODERATIONCAST_REPLY", + VOTECAST:"VOTECAST", + BARTERCAST:"BARTERCAST", + G2G_PIECE_XFER: "G2G_PIECE_XFER", + FRIENDSHIP:"FRIENDSHIP", + + CRAWLER_REQUEST:"CRAWLER_REQUEST", + CRAWLER_REQUEST+CRAWLER_DATABASE_QUERY:"CRAWLER_DATABASE_QUERY_REQUEST", + CRAWLER_REQUEST+CRAWLER_SEEDINGSTATS_QUERY:"CRAWLER_SEEDINGSTATS_QUERY_REQUEST", + CRAWLER_REQUEST+CRAWLER_NATCHECK:"CRAWLER_NATCHECK_QUERY_REQUEST", + CRAWLER_REQUEST+CRAWLER_NATTRAVERSAL:"CRAWLER_NATTRAVERSAL_QUERY_REQUEST", + CRAWLER_REQUEST+CRAWLER_FRIENDSHIP_STATS:"CRAWLER_FRIENDSHIP_STATS_REQUEST", + CRAWLER_REQUEST+CRAWLER_VIDEOPLAYBACK_INFO_QUERY:"CRAWLER_VIDEOPLAYBACK_INFO_QUERY_REQUEST", + CRAWLER_REQUEST+CRAWLER_VIDEOPLAYBACK_EVENT_QUERY:"CRAWLER_VIDEOPLAYBACK_EVENT_QUERY_REQUEST", + + CRAWLER_REPLY:"CRAWLER_REPLY", + CRAWLER_REPLY+CRAWLER_DATABASE_QUERY:"CRAWLER_DATABASE_QUERY_REPLY", + CRAWLER_REPLY+CRAWLER_SEEDINGSTATS_QUERY:"CRAWLER_SEEDINGSTATS_QUERY_REPLY", + CRAWLER_REPLY+CRAWLER_NATCHECK:"CRAWLER_NATCHECK_QUERY_REPLY", + CRAWLER_REPLY+CRAWLER_NATTRAVERSAL:"CRAWLER_NATTRAVERSAL_QUERY_REPLY", + CRAWLER_REPLY+CRAWLER_FRIENDSHIP_STATS:"CRAWLER_FRIENDSHIP_STATS", + CRAWLER_REPLY+CRAWLER_FRIENDSHIP_STATS:"CRAWLER_FRIENDSHIP_STATS_REPLY", + CRAWLER_REPLY+CRAWLER_VIDEOPLAYBACK_INFO_QUERY:"CRAWLER_VIDEOPLAYBACK_INFO_QUERY_REPLY", + CRAWLER_REPLY+CRAWLER_VIDEOPLAYBACK_EVENT_QUERY:"CRAWLER_VIDEOPLAYBACK_EVENT_QUERY_REPLY" +} + +def getMessageName(s): + """ + Return the message name for message id s. 
This may be either a one + or a two byte sting + """ + if s in message_map: + return message_map[s] + else: + return "Unknown_MessageID_" + "_".join([str(ord(c)) for c in s]) diff --git a/tribler-mod/Tribler/Core/BitTornado/BT1/NatCheck.py b/tribler-mod/Tribler/Core/BitTornado/BT1/NatCheck.py new file mode 100644 index 0000000..4ebe0a0 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/BT1/NatCheck.py @@ -0,0 +1,95 @@ +from time import localtime, strftime +# Written by Bram Cohen +# see LICENSE.txt for license information + +from cStringIO import StringIO +from socket import error as socketerror +try: + True +except: + True = 1 + False = 0 + +protocol_name = 'BitTorrent protocol' + +# header, reserved, download id, my id, [length, message] + +class NatCheck: + def __init__(self, resultfunc, downloadid, peerid, ip, port, rawserver): + self.resultfunc = resultfunc + self.downloadid = downloadid + self.peerid = peerid + self.ip = ip + self.port = port + self.closed = False + self.buffer = StringIO() + self.next_len = 1 + self.next_func = self.read_header_len + try: + self.connection = rawserver.start_connection((ip, port), self) + self.connection.write(chr(len(protocol_name)) + protocol_name + + (chr(0) * 8) + downloadid) + except socketerror: + self.answer(False) + except IOError: + self.answer(False) + + def answer(self, result): + self.closed = True + try: + self.connection.close() + except AttributeError: + pass + self.resultfunc(result, self.downloadid, self.peerid, self.ip, self.port) + + def read_header_len(self, s): + if ord(s) != len(protocol_name): + return None + return len(protocol_name), self.read_header + + def read_header(self, s): + if s != protocol_name: + return None + return 8, self.read_reserved + + def read_reserved(self, s): + return 20, self.read_download_id + + def read_download_id(self, s): + if s != self.downloadid: + return None + return 20, self.read_peer_id + + def read_peer_id(self, s): + if s != self.peerid: + return None + self.answer(True) + return None + + def data_came_in(self, connection, s): + while 1: + if self.closed: + return + i = self.next_len - self.buffer.tell() + if i > len(s): + self.buffer.write(s) + return + self.buffer.write(s[:i]) + s = s[i:] + m = self.buffer.getvalue() + self.buffer.reset() + self.buffer.truncate() + x = self.next_func(m) + if x is None: + if not self.closed: + self.answer(False) + return + self.next_len, self.next_func = x + + def connection_lost(self, connection): + if not self.closed: + self.closed = True + self.resultfunc(False, self.downloadid, self.peerid, self.ip, self.port) + + def connection_flushed(self, connection): + pass diff --git a/tribler-mod/Tribler/Core/BitTornado/BT1/NatCheck.py.bak b/tribler-mod/Tribler/Core/BitTornado/BT1/NatCheck.py.bak new file mode 100644 index 0000000..a44379a --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/BT1/NatCheck.py.bak @@ -0,0 +1,94 @@ +# Written by Bram Cohen +# see LICENSE.txt for license information + +from cStringIO import StringIO +from socket import error as socketerror +try: + True +except: + True = 1 + False = 0 + +protocol_name = 'BitTorrent protocol' + +# header, reserved, download id, my id, [length, message] + +class NatCheck: + def __init__(self, resultfunc, downloadid, peerid, ip, port, rawserver): + self.resultfunc = resultfunc + self.downloadid = downloadid + self.peerid = peerid + self.ip = ip + self.port = port + self.closed = False + self.buffer = StringIO() + self.next_len = 1 + self.next_func = self.read_header_len + try: + self.connection = 
rawserver.start_connection((ip, port), self) + self.connection.write(chr(len(protocol_name)) + protocol_name + + (chr(0) * 8) + downloadid) + except socketerror: + self.answer(False) + except IOError: + self.answer(False) + + def answer(self, result): + self.closed = True + try: + self.connection.close() + except AttributeError: + pass + self.resultfunc(result, self.downloadid, self.peerid, self.ip, self.port) + + def read_header_len(self, s): + if ord(s) != len(protocol_name): + return None + return len(protocol_name), self.read_header + + def read_header(self, s): + if s != protocol_name: + return None + return 8, self.read_reserved + + def read_reserved(self, s): + return 20, self.read_download_id + + def read_download_id(self, s): + if s != self.downloadid: + return None + return 20, self.read_peer_id + + def read_peer_id(self, s): + if s != self.peerid: + return None + self.answer(True) + return None + + def data_came_in(self, connection, s): + while 1: + if self.closed: + return + i = self.next_len - self.buffer.tell() + if i > len(s): + self.buffer.write(s) + return + self.buffer.write(s[:i]) + s = s[i:] + m = self.buffer.getvalue() + self.buffer.reset() + self.buffer.truncate() + x = self.next_func(m) + if x is None: + if not self.closed: + self.answer(False) + return + self.next_len, self.next_func = x + + def connection_lost(self, connection): + if not self.closed: + self.closed = True + self.resultfunc(False, self.downloadid, self.peerid, self.ip, self.port) + + def connection_flushed(self, connection): + pass diff --git a/tribler-mod/Tribler/Core/BitTornado/BT1/PiecePicker.py b/tribler-mod/Tribler/Core/BitTornado/BT1/PiecePicker.py new file mode 100644 index 0000000..77a7675 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/BT1/PiecePicker.py @@ -0,0 +1,576 @@ +from time import localtime, strftime +# Written by Bram Cohen and Pawel Garbacki +# see LICENSE.txt for license information + +from random import randrange, shuffle +from Tribler.Core.BitTornado.clock import clock +# 2fastbt_ +from traceback import extract_tb,print_stack +import sys +# _2fastbt + +try: + True +except: + True = 1 + False = 0 + +DEBUG = False + +""" + rarest_first_cutoff = number of downloaded pieces at which to switch from random to rarest first. + rarest_first_priority_cutoff = number of peers which need to have a piece before other partials + take priority over rarest first. +""" + +class PiecePicker: +# 2fastbt_ + def __init__(self, numpieces, + rarest_first_cutoff = 1, rarest_first_priority_cutoff = 3, + priority_step = 20, helper = None, rate_predictor = None): +# _2fastbt + # If we have less than the cutoff pieces, choose pieces at random. Otherwise, + # go for rarest first. + self.rarest_first_cutoff = rarest_first_cutoff + + self.priority_step = priority_step + + # cutoff = number of non-seeds which need to have a piece before other + # partials take priority over rarest first. In effect, equal to: + # rarest_first_priority_cutoff + priority_step - #seeds + # before a seed is discovered, it is equal to (as set here): + # rarest_first_priority_cutoff + # + # This cutoff is used as an interest level (see below). When in random piece + # mode, asking for really rare pieces is disfavoured. + self.rarest_first_priority_cutoff = rarest_first_priority_cutoff + priority_step + self.cutoff = rarest_first_priority_cutoff + + # total number of pieces + self.numpieces = numpieces + + # pieces we have started to download (in transit) + self.started = [] + + # !!! 
the following statistics involve peers, and exclude seeds !!! + + # total number of pieces owned by peers + self.totalcount = 0 + + # how many pees have a certain piece + self.numhaves = [0] * numpieces + + # priority of each peace; -1 to avoid downloading it + self.priority = [1] * numpieces + + self.removed_partials = {} + + # self.crosscount[x] = the number of pieces owned by x peers + # (inverse of self.numhaves) + self.crosscount = [numpieces] + + # self.crosscount2[x] = the number of pieces owned by x peers and me + # (inverse of self.numhaves[x]+self.has[x]) + self.crosscount2 = [numpieces] + + # whether we have a certain piece + self.has = [0] * numpieces + + # number of (complete) pieces we got + self.numgot = 0 + + # whether we're done downloading + self.done = False + + # peer information + self.peer_connections = {} + + # seeding information + self.seed_connections = {} + self.seed_time = None + self.superseed = False + self.seeds_connected = 0 + +# 2fastbt_ + self.helper = helper + self.rate_predictor = rate_predictor + self.videostatus = None +# _2fastbt + self._init_interests() + + def _init_interests(self): + """ + Interests are sets of pieces ordered by priority (0 = high). The + priority to the outside world is coarse-grained and is fine-tuned + by the number of peers owning a piece. + + The interest level of a piece is self.level_in_interests[piece], + which is equal to: + + self.priority[piece] * self.priority_step + self.numhaves[piece]. + + Every level is a subset of peers. The placement in the subset + with self.pos_interests[piece, so + + piece == self.interests + [self.level_in_interests[piece]] + [self.pos_in_interests[piece]] + + holds. Pieces within the same subset are kept shuffled. + """ + + self.interests = [[] for x in xrange(self.priority_step)] + self.level_in_interests = [self.priority_step] * self.numpieces + interests = range(self.numpieces) + shuffle(interests) + self.pos_in_interests = [0] * self.numpieces + for i in xrange(self.numpieces): + self.pos_in_interests[interests[i]] = i + self.interests.append(interests) + + def got_piece(self, piece, begin, length): + """ + Used by the streaming piece picker for additional information. + """ + pass + + def check_outstanding_requests(self, downloads): + """ + Used by the streaming piece picker to cancel slow requests. + """ + pass + + def got_have(self, piece, connection = None): + """ A peer reports to have the given piece. """ + + self.totalcount+=1 + numint = self.numhaves[piece] + self.numhaves[piece] += 1 + self.crosscount[numint] -= 1 + if numint+1==len(self.crosscount): + self.crosscount.append(0) + self.crosscount[numint+1] += 1 + if not self.done: + numintplus = numint+self.has[piece] + self.crosscount2[numintplus] -= 1 + if numintplus+1 == len(self.crosscount2): + self.crosscount2.append(0) + self.crosscount2[numintplus+1] += 1 + numint = self.level_in_interests[piece] + self.level_in_interests[piece] += 1 + if self.superseed: + self.seed_got_haves[piece] += 1 + numint = self.level_in_interests[piece] + self.level_in_interests[piece] += 1 + elif self.has[piece] or self.priority[piece] == -1: + return + if numint == len(self.interests) - 1: + self.interests.append([]) + self._shift_over(piece, self.interests[numint], self.interests[numint + 1]) + + def lost_have(self, piece): + """ We lost a peer owning the given piece. 
""" + + self.totalcount-=1 + numint = self.numhaves[piece] + self.numhaves[piece] -= 1 + self.crosscount[numint] -= 1 + self.crosscount[numint-1] += 1 + if not self.done: + numintplus = numint+self.has[piece] + self.crosscount2[numintplus] -= 1 + self.crosscount2[numintplus-1] += 1 + numint = self.level_in_interests[piece] + self.level_in_interests[piece] -= 1 + if self.superseed: + numint = self.level_in_interests[piece] + self.level_in_interests[piece] -= 1 + elif self.has[piece] or self.priority[piece] == -1: + return + self._shift_over(piece, self.interests[numint], self.interests[numint - 1]) + + + # Arno: LIVEWRAP + def is_valid_piece(self, piece): + return True + + def get_valid_range_iterator(self): + return xrange(0,len(self.has)) + + def invalidate_piece(self,piece): + """ A piece ceases to exist at the neighbours. Required for LIVEWRAP. """ + + if self.has[piece]: + self.has[piece] = 0 + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PiecePicker: Clearing piece",piece + self.numgot -= 1 + + # undo self._remove_from_interests(piece); ripped from set_priority + + # reinsert into interests + p = self.priority[piece] + level = self.numhaves[piece] + (self.priority_step * p) + self.level_in_interests[piece] = level + while len(self.interests) < level+1: + self.interests.append([]) + + # insert at a random spot in the list at the current level + l2 = self.interests[level] + parray = self.pos_in_interests + newp = randrange(len(l2)+1) + if newp == len(l2): + parray[piece] = len(l2) + l2.append(piece) + else: + old = l2[newp] + parray[old] = len(l2) + l2.append(old) + l2[newp] = piece + parray[piece] = newp + + # modelled after lost_have + + #assert not self.done + #assert not self.seeds_connected + + numint = self.numhaves[piece] + if numint == 0: + return + + # set numhaves to 0 + self.totalcount -= numint + self.numhaves[piece] = 0 + self.crosscount[numint] -= 1 + self.crosscount[0] += 1 + numintplus = numint+0 + self.crosscount2[numintplus] -= 1 + self.crosscount2[0] += 1 + numint = self.level_in_interests[piece] + self.level_in_interests[piece] = 0 + self._shift_over(piece, self.interests[numint], self.interests[0]) + + def set_downloader(self,dl): + self.downloader = dl + + def _shift_over(self, piece, l1, l2): + """ Moves 'piece' from interests list l1 to l2. """ + + assert self.superseed or (not self.has[piece] and self.priority[piece] >= 0) + parray = self.pos_in_interests + + # remove piece from l1 + p = parray[piece] + assert l1[p] == piece + q = l1[-1] + l1[p] = q + parray[q] = p + del l1[-1] + + # add piece to a random place in l2 + newp = randrange(len(l2)+1) + if newp == len(l2): + parray[piece] = len(l2) + l2.append(piece) + else: + old = l2[newp] + parray[old] = len(l2) + l2.append(old) + l2[newp] = piece + parray[piece] = newp + + def got_seed(self): + self.seeds_connected += 1 + self.cutoff = max(self.rarest_first_priority_cutoff-self.seeds_connected, 0) + + def became_seed(self): + """ A peer just became a seed. """ + + self.got_seed() + self.totalcount -= self.numpieces + self.numhaves = [i-1 for i in self.numhaves] + if self.superseed or not self.done: + self.level_in_interests = [i-1 for i in self.level_in_interests] + del self.interests[0] + del self.crosscount[0] + if not self.done: + del self.crosscount2[0] + + def lost_seed(self): + self.seeds_connected -= 1 + self.cutoff = max(self.rarest_first_priority_cutoff-self.seeds_connected, 0) + + # boudewijn: for VOD we need additional information. 
added BEGIN + # and LENGTH parameter + def requested(self, piece, begin=None, length=None): + """ Given piece has been requested or a partial of it is on disk. """ + if piece not in self.started: + self.started.append(piece) + + def _remove_from_interests(self, piece, keep_partial = False): + l = self.interests[self.level_in_interests[piece]] + p = self.pos_in_interests[piece] + assert l[p] == piece + q = l[-1] + l[p] = q + self.pos_in_interests[q] = p + del l[-1] + try: + self.started.remove(piece) + if keep_partial: + self.removed_partials[piece] = 1 + except ValueError: + pass + + def complete(self, piece): + """ Succesfully received the given piece. """ + assert not self.has[piece] + self.has[piece] = 1 + self.numgot += 1 + if self.numgot == self.numpieces: + self.done = True + self.crosscount2 = self.crosscount + else: + numhaves = self.numhaves[piece] + self.crosscount2[numhaves] -= 1 + if numhaves+1 == len(self.crosscount2): + self.crosscount2.append(0) + self.crosscount2[numhaves+1] += 1 + self._remove_from_interests(piece) + +# 2fastbt_ + def _next(self, haves, wantfunc, complete_first, helper_con, willrequest=True, connection=None): +# _2fastbt + """ Determine which piece to download next from a peer. + + haves: set of pieces owned by that peer + wantfunc: custom piece filter + complete_first: whether to complete partial pieces first + helper_con: + + """ + + # First few (rarest_first_cutoff) pieces are selected at random + # and completed. Subsequent pieces are downloaded rarest-first. + + # cutoff = True: random mode + # False: rarest-first mode + cutoff = self.numgot < self.rarest_first_cutoff + + # whether to complete existing partials first -- do so before the + # cutoff, or if forced by complete_first, but not for seeds. + complete_first = (complete_first or cutoff) and not haves.complete() + + # most interesting piece + best = None + + # interest level of best piece + bestnum = 2 ** 30 + + # select piece we started to download with best interest index. + for i in self.started: +# 2fastbt_ + if haves[i] and wantfunc(i) and (self.helper is None or helper_con or not self.helper.is_ignored(i)): +# _2fastbt + if self.level_in_interests[i] < bestnum: + best = i + bestnum = self.level_in_interests[i] + + if best is not None: + # found a piece -- return it if we are completing partials first + # or if there is a cutoff + if complete_first or (cutoff and len(self.interests) > self.cutoff): + return best + + if haves.complete(): + # peer has all pieces - look for any more interesting piece + r = [ (0, min(bestnum, len(self.interests))) ] + elif cutoff and len(self.interests) > self.cutoff: + # no best piece - start looking for low-priority pieces first + r = [ (self.cutoff, min(bestnum, len(self.interests))), + (0, self.cutoff) ] + else: + # look for the most interesting piece + r = [ (0, min(bestnum, len(self.interests))) ] + + # select first acceptable piece, best interest index first. 
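+        # (editor's note: illustrative comment, not part of the original patch)
+        # The "interest index" referred to above is self.level_in_interests[piece],
+        # i.e. priority[piece] * priority_step + numhaves[piece]. With the
+        # defaults used in __init__ (priority_step = 20, priority 1 for every
+        # piece), a hypothetical piece announced by one peer sits in bucket
+        # self.interests[21], while a piece announced by three peers sits in
+        # self.interests[23]:
+        #     level = 1 * 20 + 1   # -> 21, rarer, scanned earlier
+        #     level = 1 * 20 + 3   # -> 23, more common, scanned later
+        # Each (lo, hi) pair in r below is a half-open range of those bucket
+        # indices; xrange(lo, hi) walks it in ascending order, so within the
+        # same priority band rarer pieces are offered first.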
+ for lo, hi in r: + for i in xrange(lo, hi): + for j in self.interests[i]: +# 2fastbt_ + if haves[j] and wantfunc(j) and (self.helper is None or helper_con or not self.helper.is_ignored(j)): +# _2fastbt + return j + + if best is not None: + return best + return None + +# 2fastbt_ + def next(self, haves, wantfunc, sdownload, complete_first = False, helper_con = False, slowpieces= [], willrequest = True, connection = None): +# try: + while True: + piece = self._next(haves, wantfunc, complete_first, helper_con, willrequest = willrequest, connection = connection) + if piece is None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PiecePicker: next: _next returned no pieces!", + break + if self.helper is None or helper_con: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PiecePicker: next: helper None or helper conn, returning",piece + return piece + + if self.helper.reserve_piece(piece,sdownload): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PiecePicker: next: helper: reserve SHOULD DL PIECE",piece + return piece + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PiecePicker: next: helper.reserve_piece failed" + return None + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PiecePicker: next:helper: NONE SHOULD DL PIECE",piece + return piece + # Arno, 2008-05-20: 2fast code: if we got capacity to DL something, + # ask coordinator what new pieces to dl for it. + if self.rate_predictor and self.rate_predictor.has_capacity(): + return self._next(haves, wantfunc, complete_first, True, willrequest = willrequest, connection = connection) + else: + return None + +# except: +# if DEBUG: +# print_exc() + + def set_rate_predictor(self, rate_predictor): + self.rate_predictor = rate_predictor +# _2fastbt + + def am_I_complete(self): + return self.done + + def bump(self, piece): + """ Piece was received but contained bad data? """ + + l = self.interests[self.level_in_interests[piece]] + pos = self.pos_in_interests[piece] + del l[pos] + l.append(piece) + for i in range(pos, len(l)): + self.pos_in_interests[l[i]] = i + try: + self.started.remove(piece) + except: + pass + + def set_priority(self, piece, p): + """ Define the priority with which a piece needs to be downloaded. + A priority of -1 means 'do not download'. 
""" + + if self.superseed: + return False # don't muck with this if you're a superseed + oldp = self.priority[piece] + if oldp == p: + return False + self.priority[piece] = p + if p == -1: + # when setting priority -1, + # make sure to cancel any downloads for this piece + if not self.has[piece]: + self._remove_from_interests(piece, True) + return True + if oldp == -1: + level = self.numhaves[piece] + (self.priority_step * p) + self.level_in_interests[piece] = level + if self.has[piece]: + return True + while len(self.interests) < level+1: + self.interests.append([]) + l2 = self.interests[level] + parray = self.pos_in_interests + newp = randrange(len(l2)+1) + if newp == len(l2): + parray[piece] = len(l2) + l2.append(piece) + else: + old = l2[newp] + parray[old] = len(l2) + l2.append(old) + l2[newp] = piece + parray[piece] = newp + if self.removed_partials.has_key(piece): + del self.removed_partials[piece] + self.started.append(piece) + # now go to downloader and try requesting more + return True + numint = self.level_in_interests[piece] + newint = numint + ((p - oldp) * self.priority_step) + self.level_in_interests[piece] = newint + if self.has[piece]: + return False + while len(self.interests) < newint+1: + self.interests.append([]) + self._shift_over(piece, self.interests[numint], self.interests[newint]) + return False + + def is_blocked(self, piece): + return self.priority[piece] < 0 + + + def set_superseed(self): + assert self.done + self.superseed = True + self.seed_got_haves = [0] * self.numpieces + self._init_interests() # assume everyone is disconnected + + def next_have(self, connection, looser_upload): + if self.seed_time is None: + self.seed_time = clock() + return None + if clock() < self.seed_time+10: # wait 10 seconds after seeing the first peers + return None # to give time to grab have lists + if not connection.upload.super_seeding: + return None + if connection in self.seed_connections: + if looser_upload: + num = 1 # send a new have even if it hasn't spread that piece elsewhere + else: + num = 2 + if self.seed_got_haves[self.seed_connections[connection]] < num: + return None + if not connection.upload.was_ever_interested: # it never downloaded it? 
+ connection.upload.skipped_count += 1 + if connection.upload.skipped_count >= 3: # probably another stealthed seed + return -1 # signal to close it + for tier in self.interests: + for piece in tier: + if not connection.download.have[piece]: + seedint = self.level_in_interests[piece] + self.level_in_interests[piece] += 1 # tweak it up one, so you don't duplicate effort + if seedint == len(self.interests) - 1: + self.interests.append([]) + self._shift_over(piece, + self.interests[seedint], self.interests[seedint + 1]) + self.seed_got_haves[piece] = 0 # reset this + self.seed_connections[connection] = piece + connection.upload.seed_have_list.append(piece) + return piece + return -1 # something screwy; terminate connection + + def got_peer(self, connection): + self.peer_connections[connection] = { "connection": connection } + + def lost_peer(self, connection): + if connection.download.have.complete(): + self.lost_seed() + else: + has = connection.download.have + for i in xrange(0, self.numpieces): + if has[i]: + self.lost_have(i) + + if connection in self.seed_connections: + del self.seed_connections[connection] + del self.peer_connections[connection] + + diff --git a/tribler-mod/Tribler/Core/BitTornado/BT1/PiecePicker.py.bak b/tribler-mod/Tribler/Core/BitTornado/BT1/PiecePicker.py.bak new file mode 100644 index 0000000..49f52eb --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/BT1/PiecePicker.py.bak @@ -0,0 +1,575 @@ +# Written by Bram Cohen and Pawel Garbacki +# see LICENSE.txt for license information + +from random import randrange, shuffle +from Tribler.Core.BitTornado.clock import clock +# 2fastbt_ +from traceback import extract_tb,print_stack +import sys +# _2fastbt + +try: + True +except: + True = 1 + False = 0 + +DEBUG = False + +""" + rarest_first_cutoff = number of downloaded pieces at which to switch from random to rarest first. + rarest_first_priority_cutoff = number of peers which need to have a piece before other partials + take priority over rarest first. +""" + +class PiecePicker: +# 2fastbt_ + def __init__(self, numpieces, + rarest_first_cutoff = 1, rarest_first_priority_cutoff = 3, + priority_step = 20, helper = None, rate_predictor = None): +# _2fastbt + # If we have less than the cutoff pieces, choose pieces at random. Otherwise, + # go for rarest first. + self.rarest_first_cutoff = rarest_first_cutoff + + self.priority_step = priority_step + + # cutoff = number of non-seeds which need to have a piece before other + # partials take priority over rarest first. In effect, equal to: + # rarest_first_priority_cutoff + priority_step - #seeds + # before a seed is discovered, it is equal to (as set here): + # rarest_first_priority_cutoff + # + # This cutoff is used as an interest level (see below). When in random piece + # mode, asking for really rare pieces is disfavoured. + self.rarest_first_priority_cutoff = rarest_first_priority_cutoff + priority_step + self.cutoff = rarest_first_priority_cutoff + + # total number of pieces + self.numpieces = numpieces + + # pieces we have started to download (in transit) + self.started = [] + + # !!! the following statistics involve peers, and exclude seeds !!! 
+ + # total number of pieces owned by peers + self.totalcount = 0 + + # how many pees have a certain piece + self.numhaves = [0] * numpieces + + # priority of each peace; -1 to avoid downloading it + self.priority = [1] * numpieces + + self.removed_partials = {} + + # self.crosscount[x] = the number of pieces owned by x peers + # (inverse of self.numhaves) + self.crosscount = [numpieces] + + # self.crosscount2[x] = the number of pieces owned by x peers and me + # (inverse of self.numhaves[x]+self.has[x]) + self.crosscount2 = [numpieces] + + # whether we have a certain piece + self.has = [0] * numpieces + + # number of (complete) pieces we got + self.numgot = 0 + + # whether we're done downloading + self.done = False + + # peer information + self.peer_connections = {} + + # seeding information + self.seed_connections = {} + self.seed_time = None + self.superseed = False + self.seeds_connected = 0 + +# 2fastbt_ + self.helper = helper + self.rate_predictor = rate_predictor + self.videostatus = None +# _2fastbt + self._init_interests() + + def _init_interests(self): + """ + Interests are sets of pieces ordered by priority (0 = high). The + priority to the outside world is coarse-grained and is fine-tuned + by the number of peers owning a piece. + + The interest level of a piece is self.level_in_interests[piece], + which is equal to: + + self.priority[piece] * self.priority_step + self.numhaves[piece]. + + Every level is a subset of peers. The placement in the subset + with self.pos_interests[piece, so + + piece == self.interests + [self.level_in_interests[piece]] + [self.pos_in_interests[piece]] + + holds. Pieces within the same subset are kept shuffled. + """ + + self.interests = [[] for x in xrange(self.priority_step)] + self.level_in_interests = [self.priority_step] * self.numpieces + interests = range(self.numpieces) + shuffle(interests) + self.pos_in_interests = [0] * self.numpieces + for i in xrange(self.numpieces): + self.pos_in_interests[interests[i]] = i + self.interests.append(interests) + + def got_piece(self, piece, begin, length): + """ + Used by the streaming piece picker for additional information. + """ + pass + + def check_outstanding_requests(self, downloads): + """ + Used by the streaming piece picker to cancel slow requests. + """ + pass + + def got_have(self, piece, connection = None): + """ A peer reports to have the given piece. """ + + self.totalcount+=1 + numint = self.numhaves[piece] + self.numhaves[piece] += 1 + self.crosscount[numint] -= 1 + if numint+1==len(self.crosscount): + self.crosscount.append(0) + self.crosscount[numint+1] += 1 + if not self.done: + numintplus = numint+self.has[piece] + self.crosscount2[numintplus] -= 1 + if numintplus+1 == len(self.crosscount2): + self.crosscount2.append(0) + self.crosscount2[numintplus+1] += 1 + numint = self.level_in_interests[piece] + self.level_in_interests[piece] += 1 + if self.superseed: + self.seed_got_haves[piece] += 1 + numint = self.level_in_interests[piece] + self.level_in_interests[piece] += 1 + elif self.has[piece] or self.priority[piece] == -1: + return + if numint == len(self.interests) - 1: + self.interests.append([]) + self._shift_over(piece, self.interests[numint], self.interests[numint + 1]) + + def lost_have(self, piece): + """ We lost a peer owning the given piece. 
""" + + self.totalcount-=1 + numint = self.numhaves[piece] + self.numhaves[piece] -= 1 + self.crosscount[numint] -= 1 + self.crosscount[numint-1] += 1 + if not self.done: + numintplus = numint+self.has[piece] + self.crosscount2[numintplus] -= 1 + self.crosscount2[numintplus-1] += 1 + numint = self.level_in_interests[piece] + self.level_in_interests[piece] -= 1 + if self.superseed: + numint = self.level_in_interests[piece] + self.level_in_interests[piece] -= 1 + elif self.has[piece] or self.priority[piece] == -1: + return + self._shift_over(piece, self.interests[numint], self.interests[numint - 1]) + + + # Arno: LIVEWRAP + def is_valid_piece(self, piece): + return True + + def get_valid_range_iterator(self): + return xrange(0,len(self.has)) + + def invalidate_piece(self,piece): + """ A piece ceases to exist at the neighbours. Required for LIVEWRAP. """ + + if self.has[piece]: + self.has[piece] = 0 + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PiecePicker: Clearing piece",piece + self.numgot -= 1 + + # undo self._remove_from_interests(piece); ripped from set_priority + + # reinsert into interests + p = self.priority[piece] + level = self.numhaves[piece] + (self.priority_step * p) + self.level_in_interests[piece] = level + while len(self.interests) < level+1: + self.interests.append([]) + + # insert at a random spot in the list at the current level + l2 = self.interests[level] + parray = self.pos_in_interests + newp = randrange(len(l2)+1) + if newp == len(l2): + parray[piece] = len(l2) + l2.append(piece) + else: + old = l2[newp] + parray[old] = len(l2) + l2.append(old) + l2[newp] = piece + parray[piece] = newp + + # modelled after lost_have + + #assert not self.done + #assert not self.seeds_connected + + numint = self.numhaves[piece] + if numint == 0: + return + + # set numhaves to 0 + self.totalcount -= numint + self.numhaves[piece] = 0 + self.crosscount[numint] -= 1 + self.crosscount[0] += 1 + numintplus = numint+0 + self.crosscount2[numintplus] -= 1 + self.crosscount2[0] += 1 + numint = self.level_in_interests[piece] + self.level_in_interests[piece] = 0 + self._shift_over(piece, self.interests[numint], self.interests[0]) + + def set_downloader(self,dl): + self.downloader = dl + + def _shift_over(self, piece, l1, l2): + """ Moves 'piece' from interests list l1 to l2. """ + + assert self.superseed or (not self.has[piece] and self.priority[piece] >= 0) + parray = self.pos_in_interests + + # remove piece from l1 + p = parray[piece] + assert l1[p] == piece + q = l1[-1] + l1[p] = q + parray[q] = p + del l1[-1] + + # add piece to a random place in l2 + newp = randrange(len(l2)+1) + if newp == len(l2): + parray[piece] = len(l2) + l2.append(piece) + else: + old = l2[newp] + parray[old] = len(l2) + l2.append(old) + l2[newp] = piece + parray[piece] = newp + + def got_seed(self): + self.seeds_connected += 1 + self.cutoff = max(self.rarest_first_priority_cutoff-self.seeds_connected, 0) + + def became_seed(self): + """ A peer just became a seed. """ + + self.got_seed() + self.totalcount -= self.numpieces + self.numhaves = [i-1 for i in self.numhaves] + if self.superseed or not self.done: + self.level_in_interests = [i-1 for i in self.level_in_interests] + del self.interests[0] + del self.crosscount[0] + if not self.done: + del self.crosscount2[0] + + def lost_seed(self): + self.seeds_connected -= 1 + self.cutoff = max(self.rarest_first_priority_cutoff-self.seeds_connected, 0) + + # boudewijn: for VOD we need additional information. 
added BEGIN + # and LENGTH parameter + def requested(self, piece, begin=None, length=None): + """ Given piece has been requested or a partial of it is on disk. """ + if piece not in self.started: + self.started.append(piece) + + def _remove_from_interests(self, piece, keep_partial = False): + l = self.interests[self.level_in_interests[piece]] + p = self.pos_in_interests[piece] + assert l[p] == piece + q = l[-1] + l[p] = q + self.pos_in_interests[q] = p + del l[-1] + try: + self.started.remove(piece) + if keep_partial: + self.removed_partials[piece] = 1 + except ValueError: + pass + + def complete(self, piece): + """ Succesfully received the given piece. """ + assert not self.has[piece] + self.has[piece] = 1 + self.numgot += 1 + if self.numgot == self.numpieces: + self.done = True + self.crosscount2 = self.crosscount + else: + numhaves = self.numhaves[piece] + self.crosscount2[numhaves] -= 1 + if numhaves+1 == len(self.crosscount2): + self.crosscount2.append(0) + self.crosscount2[numhaves+1] += 1 + self._remove_from_interests(piece) + +# 2fastbt_ + def _next(self, haves, wantfunc, complete_first, helper_con, willrequest=True, connection=None): +# _2fastbt + """ Determine which piece to download next from a peer. + + haves: set of pieces owned by that peer + wantfunc: custom piece filter + complete_first: whether to complete partial pieces first + helper_con: + + """ + + # First few (rarest_first_cutoff) pieces are selected at random + # and completed. Subsequent pieces are downloaded rarest-first. + + # cutoff = True: random mode + # False: rarest-first mode + cutoff = self.numgot < self.rarest_first_cutoff + + # whether to complete existing partials first -- do so before the + # cutoff, or if forced by complete_first, but not for seeds. + complete_first = (complete_first or cutoff) and not haves.complete() + + # most interesting piece + best = None + + # interest level of best piece + bestnum = 2 ** 30 + + # select piece we started to download with best interest index. + for i in self.started: +# 2fastbt_ + if haves[i] and wantfunc(i) and (self.helper is None or helper_con or not self.helper.is_ignored(i)): +# _2fastbt + if self.level_in_interests[i] < bestnum: + best = i + bestnum = self.level_in_interests[i] + + if best is not None: + # found a piece -- return it if we are completing partials first + # or if there is a cutoff + if complete_first or (cutoff and len(self.interests) > self.cutoff): + return best + + if haves.complete(): + # peer has all pieces - look for any more interesting piece + r = [ (0, min(bestnum, len(self.interests))) ] + elif cutoff and len(self.interests) > self.cutoff: + # no best piece - start looking for low-priority pieces first + r = [ (self.cutoff, min(bestnum, len(self.interests))), + (0, self.cutoff) ] + else: + # look for the most interesting piece + r = [ (0, min(bestnum, len(self.interests))) ] + + # select first acceptable piece, best interest index first. 
+ for lo, hi in r: + for i in xrange(lo, hi): + for j in self.interests[i]: +# 2fastbt_ + if haves[j] and wantfunc(j) and (self.helper is None or helper_con or not self.helper.is_ignored(j)): +# _2fastbt + return j + + if best is not None: + return best + return None + +# 2fastbt_ + def next(self, haves, wantfunc, sdownload, complete_first = False, helper_con = False, slowpieces= [], willrequest = True, connection = None): +# try: + while True: + piece = self._next(haves, wantfunc, complete_first, helper_con, willrequest = willrequest, connection = connection) + if piece is None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PiecePicker: next: _next returned no pieces!", + break + if self.helper is None or helper_con: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PiecePicker: next: helper None or helper conn, returning",piece + return piece + + if self.helper.reserve_piece(piece,sdownload): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PiecePicker: next: helper: reserve SHOULD DL PIECE",piece + return piece + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PiecePicker: next: helper.reserve_piece failed" + return None + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PiecePicker: next:helper: NONE SHOULD DL PIECE",piece + return piece + # Arno, 2008-05-20: 2fast code: if we got capacity to DL something, + # ask coordinator what new pieces to dl for it. + if self.rate_predictor and self.rate_predictor.has_capacity(): + return self._next(haves, wantfunc, complete_first, True, willrequest = willrequest, connection = connection) + else: + return None + +# except: +# if DEBUG: +# print_exc() + + def set_rate_predictor(self, rate_predictor): + self.rate_predictor = rate_predictor +# _2fastbt + + def am_I_complete(self): + return self.done + + def bump(self, piece): + """ Piece was received but contained bad data? """ + + l = self.interests[self.level_in_interests[piece]] + pos = self.pos_in_interests[piece] + del l[pos] + l.append(piece) + for i in range(pos, len(l)): + self.pos_in_interests[l[i]] = i + try: + self.started.remove(piece) + except: + pass + + def set_priority(self, piece, p): + """ Define the priority with which a piece needs to be downloaded. + A priority of -1 means 'do not download'. 
""" + + if self.superseed: + return False # don't muck with this if you're a superseed + oldp = self.priority[piece] + if oldp == p: + return False + self.priority[piece] = p + if p == -1: + # when setting priority -1, + # make sure to cancel any downloads for this piece + if not self.has[piece]: + self._remove_from_interests(piece, True) + return True + if oldp == -1: + level = self.numhaves[piece] + (self.priority_step * p) + self.level_in_interests[piece] = level + if self.has[piece]: + return True + while len(self.interests) < level+1: + self.interests.append([]) + l2 = self.interests[level] + parray = self.pos_in_interests + newp = randrange(len(l2)+1) + if newp == len(l2): + parray[piece] = len(l2) + l2.append(piece) + else: + old = l2[newp] + parray[old] = len(l2) + l2.append(old) + l2[newp] = piece + parray[piece] = newp + if self.removed_partials.has_key(piece): + del self.removed_partials[piece] + self.started.append(piece) + # now go to downloader and try requesting more + return True + numint = self.level_in_interests[piece] + newint = numint + ((p - oldp) * self.priority_step) + self.level_in_interests[piece] = newint + if self.has[piece]: + return False + while len(self.interests) < newint+1: + self.interests.append([]) + self._shift_over(piece, self.interests[numint], self.interests[newint]) + return False + + def is_blocked(self, piece): + return self.priority[piece] < 0 + + + def set_superseed(self): + assert self.done + self.superseed = True + self.seed_got_haves = [0] * self.numpieces + self._init_interests() # assume everyone is disconnected + + def next_have(self, connection, looser_upload): + if self.seed_time is None: + self.seed_time = clock() + return None + if clock() < self.seed_time+10: # wait 10 seconds after seeing the first peers + return None # to give time to grab have lists + if not connection.upload.super_seeding: + return None + if connection in self.seed_connections: + if looser_upload: + num = 1 # send a new have even if it hasn't spread that piece elsewhere + else: + num = 2 + if self.seed_got_haves[self.seed_connections[connection]] < num: + return None + if not connection.upload.was_ever_interested: # it never downloaded it? 
+ connection.upload.skipped_count += 1 + if connection.upload.skipped_count >= 3: # probably another stealthed seed + return -1 # signal to close it + for tier in self.interests: + for piece in tier: + if not connection.download.have[piece]: + seedint = self.level_in_interests[piece] + self.level_in_interests[piece] += 1 # tweak it up one, so you don't duplicate effort + if seedint == len(self.interests) - 1: + self.interests.append([]) + self._shift_over(piece, + self.interests[seedint], self.interests[seedint + 1]) + self.seed_got_haves[piece] = 0 # reset this + self.seed_connections[connection] = piece + connection.upload.seed_have_list.append(piece) + return piece + return -1 # something screwy; terminate connection + + def got_peer(self, connection): + self.peer_connections[connection] = { "connection": connection } + + def lost_peer(self, connection): + if connection.download.have.complete(): + self.lost_seed() + else: + has = connection.download.have + for i in xrange(0, self.numpieces): + if has[i]: + self.lost_have(i) + + if connection in self.seed_connections: + del self.seed_connections[connection] + del self.peer_connections[connection] + + diff --git a/tribler-mod/Tribler/Core/BitTornado/BT1/Rerequester.py b/tribler-mod/Tribler/Core/BitTornado/BT1/Rerequester.py new file mode 100644 index 0000000..678e413 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/BT1/Rerequester.py @@ -0,0 +1,538 @@ +from time import localtime, strftime +# Written by Bram Cohen +# modified for multitracker operation by John Hoffman +# modified for mainline DHT support by Fabian van der Werf +# see LICENSE.txt for license information + +import sys +from Tribler.Core.BitTornado.zurllib import urlopen +from urllib import quote +from btformats import check_peers +from Tribler.Core.BitTornado.bencode import bdecode +from threading import Thread, Lock, currentThread +from cStringIO import StringIO +from traceback import print_exc,print_stack +from socket import error, gethostbyname, inet_aton, inet_ntoa +from random import shuffle +from sha import sha +from time import time +from struct import pack, unpack +import binascii + +from Tribler.Core.NATFirewall.DialbackMsgHandler import DialbackMsgHandler +import Tribler.Core.DecentralizedTracking.mainlineDHT as mainlineDHT + +try: + from os import getpid +except ImportError: + def getpid(): + return 1 + +try: + True +except: + True = 1 + False = 0 + +DEBUG = False +DEBUG_DHT = False + +mapbase64 = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz.-' +keys = {} +basekeydata = str(getpid()) + repr(time()) + 'tracker' + +def add_key(tracker): + key = '' + for i in sha(basekeydata+tracker).digest()[-6:]: + key += mapbase64[ord(i) & 0x3F] + keys[tracker] = key + +def get_key(tracker): + try: + return "&key="+keys[tracker] + except: + add_key(tracker) + return "&key="+keys[tracker] + +class fakeflag: + def __init__(self, state=False): + self.state = state + def wait(self): + pass + def isSet(self): + return self.state + +class Rerequester: + def __init__(self, trackerlist, interval, sched, howmany, minpeers, + connect, externalsched, amount_left, up, down, + port, ip, myid, infohash, timeout, errorfunc, excfunc, + maxpeers, doneflag, upratefunc, downratefunc, + unpauseflag = fakeflag(True), config=None): + + self.excfunc = excfunc + newtrackerlist = [] + for tier in trackerlist: + if len(tier) > 1: + shuffle(tier) + newtrackerlist += [tier] + self.trackerlist = newtrackerlist + self.lastsuccessful = '' + self.rejectedmessage = 'rejected by tracker - 
' + self.port = port + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Rerequest tracker: infohash is",`infohash`,"port is",self.port,"myid",`myid`,"quoted id",quote(myid) + + self.url = ('?info_hash=%s&peer_id=%s&port=%s' % + (quote(infohash), quote(myid), str(port))) + self.ip = ip + self.interval = interval + self.last = None + self.trackerid = None + self.announce_interval = 1 * 60 + self.sched = sched + self.howmany = howmany + self.minpeers = minpeers + self.connect = connect + self.externalsched = externalsched + self.amount_left = amount_left + self.up = up + self.down = down + self.timeout = timeout + self.errorfunc = errorfunc + self.maxpeers = maxpeers + self.doneflag = doneflag + self.upratefunc = upratefunc + self.downratefunc = downratefunc + self.unpauseflag = unpauseflag + self.last_failed = True + self.never_succeeded = True + self.errorcodes = {} + self.lock = SuccessLock() + self.special = None + self.stopped = False + self.schedid = 'arno481' + self.infohash = infohash + self.dht = mainlineDHT.dht + self.config = config + + + def start(self): + + self.sched(self.c, self.interval/2) + self.d(0) + + def c(self): + if self.stopped: + return + if not self.unpauseflag.isSet() and self.howmany() < self.minpeers: + self.announce(3, self._c) + else: + self._c() + + def _c(self): + self.sched(self.c, self.interval) + + def d(self, event = 3): + if self.stopped: + return + if not self.unpauseflag.isSet(): + self._d() + return + self.announce(event, self._d) + + def _d(self): + if self.never_succeeded: + self.sched(self.d, 60) # retry in 60 seconds + else: + self.sched(self.d, self.announce_interval) + + def encoder_wants_new_peers(self): + """ The list of peers we gave to the encoder via self.connect() + did not give any live connections, reconnect to get some more. + Officially we should cancel the outstanding + self.sched(self.d,self.announce_interval) + """ + self.d(0) + + def announce(self, event = 3, callback = lambda: None, specialurl = None): + + if specialurl is not None: + s = self.url+'&uploaded=0&downloaded=0&left=1' # don't add to statistics + if self.howmany() >= self.maxpeers: + s += '&numwant=0' + else: + s += '&no_peer_id=1&compact=1' + self.last_failed = True # force true, so will display an error + self.special = specialurl + self.rerequest(s, callback) + return + + else: + s = ('%s&uploaded=%s&downloaded=%s&left=%s' % + (self.url, str(self.up()), str(self.down()), + str(self.amount_left()))) + if self.last is not None: + s += '&last=' + quote(str(self.last)) + if self.trackerid is not None: + s += '&trackerid=' + quote(str(self.trackerid)) + if self.howmany() >= self.maxpeers: + s += '&numwant=0' + else: + s += '&no_peer_id=1&compact=1' + if event != 3: + s += '&event=' + ['started', 'completed', 'stopped'][event] + if event == 2: + self.stopped = True + self.rerequest(s, callback) + + + def snoop(self, peers, callback = lambda: None): # tracker call support + self.rerequest(self.url + +'&event=stopped&port=0&uploaded=0&downloaded=0&left=1&tracker=1&numwant=' + +str(peers), callback) + + + def rerequest(self, s, callback): + if not self.lock.isfinished(): # still waiting for prior cycle to complete?? + def retry(self = self, s = s, callback = callback): + self.rerequest(s, callback) + self.sched(retry, 5) # retry in 5 seconds + return + self.lock.reset() + rq = Thread(target = self._rerequest, args = [s, callback]) + rq.setName( "TrackerRerequestA"+rq.getName() ) + # Arno: make this a daemon thread so the client closes sooner. 
+ rq.setDaemon(True) + rq.start() + + def _rerequest(self, s, callback): + try: + def fail(self = self, callback = callback): + self._fail(callback) + if self.ip: + try: + s += '&ip=' + gethostbyname(self.ip) + except: + self.errorcodes['troublecode'] = 'unable to resolve: '+self.ip + self.externalsched(fail) + self.errorcodes = {} + if self.special is None: + + #Do dht request + if self.dht: + self._dht_rerequest() + elif DEBUG_DHT: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Rerequester: No DHT support loaded" + + for t in range(len(self.trackerlist)): + for tr in range(len(self.trackerlist[t])): + tracker = self.trackerlist[t][tr] + # Arno: no udp support yet + if tracker.startswith( 'udp:' ): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Rerequester: Ignoring tracker",tracker + continue + #elif DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Rerequester: Trying tracker",tracker + if self.rerequest_single(tracker, s, callback): + if not self.last_failed and tr != 0: + del self.trackerlist[t][tr] + self.trackerlist[t] = [tracker] + self.trackerlist[t] + return + else: + tracker = self.special + self.special = None + if self.rerequest_single(tracker, s, callback): + return + # no success from any tracker + self.externalsched(fail) + except: + self.exception(callback) + + + def _fail(self, callback): + if ( (self.upratefunc() < 100 and self.downratefunc() < 100) + or not self.amount_left() ): + for f in ['rejected', 'bad_data', 'troublecode']: + if self.errorcodes.has_key(f): + r = self.errorcodes[f] + break + else: + r = 'Problem connecting to tracker - unspecified error:'+`self.errorcodes` + self.errorfunc(r) + + self.last_failed = True + self.lock.give_up() + self.externalsched(callback) + + + def rerequest_single(self, t, s, callback): + l = self.lock.set() + rq = Thread(target = self._rerequest_single, args = [t, s+get_key(t), l, callback]) + rq.setName( "TrackerRerequestB"+rq.getName() ) + # Arno: make this a daemon thread so the client closes sooner. + rq.setDaemon(True) + rq.start() + self.lock.wait() + if self.lock.success: + self.lastsuccessful = t + self.last_failed = False + self.never_succeeded = False + return True + if not self.last_failed and self.lastsuccessful == t: + # if the last tracker hit was successful, and you've just tried the tracker + # you'd contacted before, don't go any further, just fail silently. 
+ self.last_failed = True + self.externalsched(callback) + self.lock.give_up() + return True + return False # returns true if it wants rerequest() to exit + + + def _rerequest_single(self, t, s, l, callback): + try: + closer = [None] + def timedout(self = self, l = l, closer = closer): + if self.lock.trip(l): + self.errorcodes['troublecode'] = 'Problem connecting to tracker - timeout exceeded' + self.lock.unwait(l) + try: + closer[0]() + except: + pass + + self.externalsched(timedout, self.timeout) + + err = None + try: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Rerequest tracker:" + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",t+s + h = urlopen(t+s) + closer[0] = h.close + data = h.read() + except (IOError, error), e: + err = 'Problem connecting to tracker - ' + str(e) + if DEBUG: + print_exc() + except: + err = 'Problem connecting to tracker' + if DEBUG: + print_exc() + + + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rerequest: Got data",data + + try: + h.close() + except: + pass + if err: + if self.lock.trip(l): + self.errorcodes['troublecode'] = err + self.lock.unwait(l) + return + + if not data: + if self.lock.trip(l): + self.errorcodes['troublecode'] = 'no data from tracker' + self.lock.unwait(l) + return + + try: + r = bdecode(data, sloppy=1) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Rerequester: Tracker returns:", r + check_peers(r) + except ValueError, e: + if DEBUG: + print_exc() + if self.lock.trip(l): + self.errorcodes['bad_data'] = 'bad data from tracker - ' + str(e) + self.lock.unwait(l) + return + + if r.has_key('failure reason'): + if self.lock.trip(l): + self.errorcodes['rejected'] = self.rejectedmessage + r['failure reason'] + self.lock.unwait(l) + return + + if self.lock.trip(l, True): # success! 
+ self.lock.unwait(l) + else: + callback = lambda: None # attempt timed out, don't do a callback + + # even if the attempt timed out, go ahead and process data + def add(self = self, r = r, callback = callback): + self.postrequest(r, callback) + self.externalsched(add) + except: + self.exception(callback) + + def _dht_rerequest(self): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Rerequester: _dht_rerequest",`self.infohash` + if 'dialback' in self.config and self.config['dialback']: + if DialbackMsgHandler.getInstance().isConnectable(): + self.dht.getPeersAndAnnounce(self.infohash, self.port, self._dht_got_peers) + else: + self.dht.getPeers(self.infohash, self._dht_got_peers) + + def _dht_got_peers(self, peers): + if DEBUG_DHT: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Rerequester: DHT: Received",len(peers),"peers",currentThread().getName() + p = [] + for peer in peers: + try: + ip, port = unpack("!4sH", peer) + if DEBUG_DHT: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Rerequester: DHT: Got",inet_ntoa(ip), int(port) + p.append({'ip': inet_ntoa(ip), + 'port':port}) + except: + pass + + if p: + r = {'peers':p} + def add(self = self, r = r): + self.postrequest(r, lambda : None) + self.externalsched(add) + + def postrequest(self, r, callback): + try: + if r.has_key('warning message'): + self.errorfunc('warning from tracker - ' + r['warning message']) + self.announce_interval = r.get('interval', self.announce_interval) + self.interval = r.get('min interval', self.interval) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Rerequester: announce min is",self.announce_interval,self.interval + + self.trackerid = r.get('tracker id', self.trackerid) + self.last = r.get('last', self.last) + # ps = len(r['peers']) + self.howmany() + peers = [] + p = r.get('peers') + if p is not None: + if type(p) == type(''): + for x in xrange(0, len(p), 6): + ip = '.'.join([str(ord(i)) for i in p[x:x+4]]) + port = (ord(p[x+4]) << 8) | ord(p[x+5]) + peers.append(((ip, port), 0)) # Arno: note: not just (ip,port)!!! + else: + for x in p: + peers.append(((x['ip'].strip(), x['port']), x.get('peer id', 0))) + else: + # IPv6 Tracker Extension, http://www.bittorrent.org/beps/bep_0007.html + p = r.get('peers6') + if type(p) == type(''): + for x in xrange(0, len(p), 18): + #ip = '.'.join([str(ord(i)) for i in p[x:x+16]]) + hexip = binascii.b2a_hex(p[x:x+16]) + ip = '' + for i in xrange(0,len(hexip),4): + ip += hexip[i:i+4] + if i+4 != len(hexip): + ip += ':' + port = (ord(p[x+16]) << 8) | ord(p[x+17]) + peers.append(((ip, port), 0)) # Arno: note: not just (ip,port)!!! + else: + for x in p: + peers.append(((x['ip'].strip(), x['port']), x.get('peer id', 0))) + + + # Arno, 2009-04-06: Need more effort to support IPv6, e.g. + # see SocketHandler.SingleSocket.get_ip(). The getsockname() + # + getpeername() calls should be make to accept IPv6 returns. + # + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Rerequester: Got IPv6 peer addresses, not yet supported, ignoring." 
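+                # (editor's note: illustrative comment, not part of the original patch)
+                # The string-typed 'peers' branch above decodes the compact tracker
+                # response: each 6-byte entry is a 4-byte IPv4 address followed by a
+                # 2-byte big-endian port. For a hypothetical entry:
+                #     entry = '\xc0\xa8\x00\x01\x1a\xe1'
+                #     ip    = '.'.join([str(ord(c)) for c in entry[:4]])   # '192.168.0.1'
+                #     port  = (ord(entry[4]) << 8) | ord(entry[5])         # 6881
+                #     peers.append(((ip, port), 0))   # ((ip, port), peer_id) shape, as the
+                #                                     # "Arno: note" comments point out
+                # The IPv6 'peers6' addresses parsed in this branch, by contrast, are
+                # discarded by the reset immediately below until IPv6 is supported.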
+ peers = [] + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Rerequester: postrequest: Got peers",peers + ps = len(peers) + self.howmany() + if ps < self.maxpeers: + if self.doneflag.isSet(): + if r.get('num peers', 1000) - r.get('done peers', 0) > ps * 1.2: + self.last = None + else: + if r.get('num peers', 1000) > ps * 1.2: + self.last = None + + + if peers: + shuffle(peers) + self.connect(peers) # Encoder.start_connections(peers) + callback() + except: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Rerequester: Error in postrequest" + import traceback + traceback.print_exc() + + def exception(self, callback): + data = StringIO() + print_exc(file = data) + def r(s = data.getvalue(), callback = callback): + if self.excfunc: + self.excfunc(s) + else: + print s + callback() + self.externalsched(r) + + +class SuccessLock: + def __init__(self): + self.lock = Lock() + self.pause = Lock() + self.code = 0L + self.success = False + self.finished = True + + def reset(self): + self.success = False + self.finished = False + + def set(self): + self.lock.acquire() + if not self.pause.locked(): + self.pause.acquire() + self.first = True + self.code += 1L + self.lock.release() + return self.code + + def trip(self, code, s = False): + self.lock.acquire() + try: + if code == self.code and not self.finished: + r = self.first + self.first = False + if s: + self.finished = True + self.success = True + return r + finally: + self.lock.release() + + def give_up(self): + self.lock.acquire() + self.success = False + self.finished = True + self.lock.release() + + def wait(self): + self.pause.acquire() + + def unwait(self, code): + if code == self.code and self.pause.locked(): + self.pause.release() + + def isfinished(self): + self.lock.acquire() + x = self.finished + self.lock.release() + return x diff --git a/tribler-mod/Tribler/Core/BitTornado/BT1/Rerequester.py.bak b/tribler-mod/Tribler/Core/BitTornado/BT1/Rerequester.py.bak new file mode 100644 index 0000000..ca4d00c --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/BT1/Rerequester.py.bak @@ -0,0 +1,537 @@ +# Written by Bram Cohen +# modified for multitracker operation by John Hoffman +# modified for mainline DHT support by Fabian van der Werf +# see LICENSE.txt for license information + +import sys +from Tribler.Core.BitTornado.zurllib import urlopen +from urllib import quote +from btformats import check_peers +from Tribler.Core.BitTornado.bencode import bdecode +from threading import Thread, Lock, currentThread +from cStringIO import StringIO +from traceback import print_exc,print_stack +from socket import error, gethostbyname, inet_aton, inet_ntoa +from random import shuffle +from sha import sha +from time import time +from struct import pack, unpack +import binascii + +from Tribler.Core.NATFirewall.DialbackMsgHandler import DialbackMsgHandler +import Tribler.Core.DecentralizedTracking.mainlineDHT as mainlineDHT + +try: + from os import getpid +except ImportError: + def getpid(): + return 1 + +try: + True +except: + True = 1 + False = 0 + +DEBUG = False +DEBUG_DHT = False + +mapbase64 = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz.-' +keys = {} +basekeydata = str(getpid()) + repr(time()) + 'tracker' + +def add_key(tracker): + key = '' + for i in sha(basekeydata+tracker).digest()[-6:]: + key += mapbase64[ord(i) & 0x3F] + keys[tracker] = key + +def get_key(tracker): + try: + return "&key="+keys[tracker] + except: + add_key(tracker) + return "&key="+keys[tracker] + +class 
fakeflag: + def __init__(self, state=False): + self.state = state + def wait(self): + pass + def isSet(self): + return self.state + +class Rerequester: + def __init__(self, trackerlist, interval, sched, howmany, minpeers, + connect, externalsched, amount_left, up, down, + port, ip, myid, infohash, timeout, errorfunc, excfunc, + maxpeers, doneflag, upratefunc, downratefunc, + unpauseflag = fakeflag(True), config=None): + + self.excfunc = excfunc + newtrackerlist = [] + for tier in trackerlist: + if len(tier) > 1: + shuffle(tier) + newtrackerlist += [tier] + self.trackerlist = newtrackerlist + self.lastsuccessful = '' + self.rejectedmessage = 'rejected by tracker - ' + self.port = port + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Rerequest tracker: infohash is",`infohash`,"port is",self.port,"myid",`myid`,"quoted id",quote(myid) + + self.url = ('?info_hash=%s&peer_id=%s&port=%s' % + (quote(infohash), quote(myid), str(port))) + self.ip = ip + self.interval = interval + self.last = None + self.trackerid = None + self.announce_interval = 1 * 60 + self.sched = sched + self.howmany = howmany + self.minpeers = minpeers + self.connect = connect + self.externalsched = externalsched + self.amount_left = amount_left + self.up = up + self.down = down + self.timeout = timeout + self.errorfunc = errorfunc + self.maxpeers = maxpeers + self.doneflag = doneflag + self.upratefunc = upratefunc + self.downratefunc = downratefunc + self.unpauseflag = unpauseflag + self.last_failed = True + self.never_succeeded = True + self.errorcodes = {} + self.lock = SuccessLock() + self.special = None + self.stopped = False + self.schedid = 'arno481' + self.infohash = infohash + self.dht = mainlineDHT.dht + self.config = config + + + def start(self): + + self.sched(self.c, self.interval/2) + self.d(0) + + def c(self): + if self.stopped: + return + if not self.unpauseflag.isSet() and self.howmany() < self.minpeers: + self.announce(3, self._c) + else: + self._c() + + def _c(self): + self.sched(self.c, self.interval) + + def d(self, event = 3): + if self.stopped: + return + if not self.unpauseflag.isSet(): + self._d() + return + self.announce(event, self._d) + + def _d(self): + if self.never_succeeded: + self.sched(self.d, 60) # retry in 60 seconds + else: + self.sched(self.d, self.announce_interval) + + def encoder_wants_new_peers(self): + """ The list of peers we gave to the encoder via self.connect() + did not give any live connections, reconnect to get some more. 
+ Officially we should cancel the outstanding + self.sched(self.d,self.announce_interval) + """ + self.d(0) + + def announce(self, event = 3, callback = lambda: None, specialurl = None): + + if specialurl is not None: + s = self.url+'&uploaded=0&downloaded=0&left=1' # don't add to statistics + if self.howmany() >= self.maxpeers: + s += '&numwant=0' + else: + s += '&no_peer_id=1&compact=1' + self.last_failed = True # force true, so will display an error + self.special = specialurl + self.rerequest(s, callback) + return + + else: + s = ('%s&uploaded=%s&downloaded=%s&left=%s' % + (self.url, str(self.up()), str(self.down()), + str(self.amount_left()))) + if self.last is not None: + s += '&last=' + quote(str(self.last)) + if self.trackerid is not None: + s += '&trackerid=' + quote(str(self.trackerid)) + if self.howmany() >= self.maxpeers: + s += '&numwant=0' + else: + s += '&no_peer_id=1&compact=1' + if event != 3: + s += '&event=' + ['started', 'completed', 'stopped'][event] + if event == 2: + self.stopped = True + self.rerequest(s, callback) + + + def snoop(self, peers, callback = lambda: None): # tracker call support + self.rerequest(self.url + +'&event=stopped&port=0&uploaded=0&downloaded=0&left=1&tracker=1&numwant=' + +str(peers), callback) + + + def rerequest(self, s, callback): + if not self.lock.isfinished(): # still waiting for prior cycle to complete?? + def retry(self = self, s = s, callback = callback): + self.rerequest(s, callback) + self.sched(retry, 5) # retry in 5 seconds + return + self.lock.reset() + rq = Thread(target = self._rerequest, args = [s, callback]) + rq.setName( "TrackerRerequestA"+rq.getName() ) + # Arno: make this a daemon thread so the client closes sooner. + rq.setDaemon(True) + rq.start() + + def _rerequest(self, s, callback): + try: + def fail(self = self, callback = callback): + self._fail(callback) + if self.ip: + try: + s += '&ip=' + gethostbyname(self.ip) + except: + self.errorcodes['troublecode'] = 'unable to resolve: '+self.ip + self.externalsched(fail) + self.errorcodes = {} + if self.special is None: + + #Do dht request + if self.dht: + self._dht_rerequest() + elif DEBUG_DHT: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Rerequester: No DHT support loaded" + + for t in range(len(self.trackerlist)): + for tr in range(len(self.trackerlist[t])): + tracker = self.trackerlist[t][tr] + # Arno: no udp support yet + if tracker.startswith( 'udp:' ): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Rerequester: Ignoring tracker",tracker + continue + #elif DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Rerequester: Trying tracker",tracker + if self.rerequest_single(tracker, s, callback): + if not self.last_failed and tr != 0: + del self.trackerlist[t][tr] + self.trackerlist[t] = [tracker] + self.trackerlist[t] + return + else: + tracker = self.special + self.special = None + if self.rerequest_single(tracker, s, callback): + return + # no success from any tracker + self.externalsched(fail) + except: + self.exception(callback) + + + def _fail(self, callback): + if ( (self.upratefunc() < 100 and self.downratefunc() < 100) + or not self.amount_left() ): + for f in ['rejected', 'bad_data', 'troublecode']: + if self.errorcodes.has_key(f): + r = self.errorcodes[f] + break + else: + r = 'Problem connecting to tracker - unspecified error:'+`self.errorcodes` + self.errorfunc(r) + + self.last_failed = True + self.lock.give_up() + self.externalsched(callback) + + + def 
rerequest_single(self, t, s, callback): + l = self.lock.set() + rq = Thread(target = self._rerequest_single, args = [t, s+get_key(t), l, callback]) + rq.setName( "TrackerRerequestB"+rq.getName() ) + # Arno: make this a daemon thread so the client closes sooner. + rq.setDaemon(True) + rq.start() + self.lock.wait() + if self.lock.success: + self.lastsuccessful = t + self.last_failed = False + self.never_succeeded = False + return True + if not self.last_failed and self.lastsuccessful == t: + # if the last tracker hit was successful, and you've just tried the tracker + # you'd contacted before, don't go any further, just fail silently. + self.last_failed = True + self.externalsched(callback) + self.lock.give_up() + return True + return False # returns true if it wants rerequest() to exit + + + def _rerequest_single(self, t, s, l, callback): + try: + closer = [None] + def timedout(self = self, l = l, closer = closer): + if self.lock.trip(l): + self.errorcodes['troublecode'] = 'Problem connecting to tracker - timeout exceeded' + self.lock.unwait(l) + try: + closer[0]() + except: + pass + + self.externalsched(timedout, self.timeout) + + err = None + try: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Rerequest tracker:" + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",t+s + h = urlopen(t+s) + closer[0] = h.close + data = h.read() + except (IOError, error), e: + err = 'Problem connecting to tracker - ' + str(e) + if DEBUG: + print_exc() + except: + err = 'Problem connecting to tracker' + if DEBUG: + print_exc() + + + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rerequest: Got data",data + + try: + h.close() + except: + pass + if err: + if self.lock.trip(l): + self.errorcodes['troublecode'] = err + self.lock.unwait(l) + return + + if not data: + if self.lock.trip(l): + self.errorcodes['troublecode'] = 'no data from tracker' + self.lock.unwait(l) + return + + try: + r = bdecode(data, sloppy=1) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Rerequester: Tracker returns:", r + check_peers(r) + except ValueError, e: + if DEBUG: + print_exc() + if self.lock.trip(l): + self.errorcodes['bad_data'] = 'bad data from tracker - ' + str(e) + self.lock.unwait(l) + return + + if r.has_key('failure reason'): + if self.lock.trip(l): + self.errorcodes['rejected'] = self.rejectedmessage + r['failure reason'] + self.lock.unwait(l) + return + + if self.lock.trip(l, True): # success! 
+ self.lock.unwait(l) + else: + callback = lambda: None # attempt timed out, don't do a callback + + # even if the attempt timed out, go ahead and process data + def add(self = self, r = r, callback = callback): + self.postrequest(r, callback) + self.externalsched(add) + except: + self.exception(callback) + + def _dht_rerequest(self): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Rerequester: _dht_rerequest",`self.infohash` + if 'dialback' in self.config and self.config['dialback']: + if DialbackMsgHandler.getInstance().isConnectable(): + self.dht.getPeersAndAnnounce(self.infohash, self.port, self._dht_got_peers) + else: + self.dht.getPeers(self.infohash, self._dht_got_peers) + + def _dht_got_peers(self, peers): + if DEBUG_DHT: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Rerequester: DHT: Received",len(peers),"peers",currentThread().getName() + p = [] + for peer in peers: + try: + ip, port = unpack("!4sH", peer) + if DEBUG_DHT: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Rerequester: DHT: Got",inet_ntoa(ip), int(port) + p.append({'ip': inet_ntoa(ip), + 'port':port}) + except: + pass + + if p: + r = {'peers':p} + def add(self = self, r = r): + self.postrequest(r, lambda : None) + self.externalsched(add) + + def postrequest(self, r, callback): + try: + if r.has_key('warning message'): + self.errorfunc('warning from tracker - ' + r['warning message']) + self.announce_interval = r.get('interval', self.announce_interval) + self.interval = r.get('min interval', self.interval) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Rerequester: announce min is",self.announce_interval,self.interval + + self.trackerid = r.get('tracker id', self.trackerid) + self.last = r.get('last', self.last) + # ps = len(r['peers']) + self.howmany() + peers = [] + p = r.get('peers') + if p is not None: + if type(p) == type(''): + for x in xrange(0, len(p), 6): + ip = '.'.join([str(ord(i)) for i in p[x:x+4]]) + port = (ord(p[x+4]) << 8) | ord(p[x+5]) + peers.append(((ip, port), 0)) # Arno: note: not just (ip,port)!!! + else: + for x in p: + peers.append(((x['ip'].strip(), x['port']), x.get('peer id', 0))) + else: + # IPv6 Tracker Extension, http://www.bittorrent.org/beps/bep_0007.html + p = r.get('peers6') + if type(p) == type(''): + for x in xrange(0, len(p), 18): + #ip = '.'.join([str(ord(i)) for i in p[x:x+16]]) + hexip = binascii.b2a_hex(p[x:x+16]) + ip = '' + for i in xrange(0,len(hexip),4): + ip += hexip[i:i+4] + if i+4 != len(hexip): + ip += ':' + port = (ord(p[x+16]) << 8) | ord(p[x+17]) + peers.append(((ip, port), 0)) # Arno: note: not just (ip,port)!!! + else: + for x in p: + peers.append(((x['ip'].strip(), x['port']), x.get('peer id', 0))) + + + # Arno, 2009-04-06: Need more effort to support IPv6, e.g. + # see SocketHandler.SingleSocket.get_ip(). The getsockname() + # + getpeername() calls should be make to accept IPv6 returns. + # + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Rerequester: Got IPv6 peer addresses, not yet supported, ignoring." 
+ peers = [] + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Rerequester: postrequest: Got peers",peers + ps = len(peers) + self.howmany() + if ps < self.maxpeers: + if self.doneflag.isSet(): + if r.get('num peers', 1000) - r.get('done peers', 0) > ps * 1.2: + self.last = None + else: + if r.get('num peers', 1000) > ps * 1.2: + self.last = None + + + if peers: + shuffle(peers) + self.connect(peers) # Encoder.start_connections(peers) + callback() + except: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Rerequester: Error in postrequest" + import traceback + traceback.print_exc() + + def exception(self, callback): + data = StringIO() + print_exc(file = data) + def r(s = data.getvalue(), callback = callback): + if self.excfunc: + self.excfunc(s) + else: + print s + callback() + self.externalsched(r) + + +class SuccessLock: + def __init__(self): + self.lock = Lock() + self.pause = Lock() + self.code = 0L + self.success = False + self.finished = True + + def reset(self): + self.success = False + self.finished = False + + def set(self): + self.lock.acquire() + if not self.pause.locked(): + self.pause.acquire() + self.first = True + self.code += 1L + self.lock.release() + return self.code + + def trip(self, code, s = False): + self.lock.acquire() + try: + if code == self.code and not self.finished: + r = self.first + self.first = False + if s: + self.finished = True + self.success = True + return r + finally: + self.lock.release() + + def give_up(self): + self.lock.acquire() + self.success = False + self.finished = True + self.lock.release() + + def wait(self): + self.pause.acquire() + + def unwait(self, code): + if code == self.code and self.pause.locked(): + self.pause.release() + + def isfinished(self): + self.lock.acquire() + x = self.finished + self.lock.release() + return x diff --git a/tribler-mod/Tribler/Core/BitTornado/BT1/Statistics.py b/tribler-mod/Tribler/Core/BitTornado/BT1/Statistics.py new file mode 100644 index 0000000..6be7ed8 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/BT1/Statistics.py @@ -0,0 +1,180 @@ +from time import localtime, strftime +# Written by Edward Keyes +# see LICENSE.txt for license information + +from threading import Event +try: + True +except: + True = 1 + False = 0 + +class Statistics_Response: + pass # empty class + + +class Statistics: + def __init__(self, upmeasure, downmeasure, connecter, httpdl, + ratelimiter, rerequest_lastfailed, fdatflag): + self.upmeasure = upmeasure + self.downmeasure = downmeasure + self.connecter = connecter + self.httpdl = httpdl + self.ratelimiter = ratelimiter + self.downloader = connecter.downloader + self.picker = connecter.downloader.picker + self.storage = connecter.downloader.storage + self.torrentmeasure = connecter.downloader.totalmeasure + self.rerequest_lastfailed = rerequest_lastfailed + self.fdatflag = fdatflag + self.fdatactive = False + self.piecescomplete = None + self.placesopen = None + self.storage_totalpieces = len(self.storage.hashes) + + + def set_dirstats(self, files, piece_length): + self.piecescomplete = 0 + self.placesopen = 0 + self.filelistupdated = Event() + self.filelistupdated.set() + frange = xrange(len(files)) + self.filepieces = [[] for x in frange] + self.filepieces2 = [[] for x in frange] + self.fileamtdone = [0.0 for x in frange] + self.filecomplete = [False for x in frange] + self.fileinplace = [False for x in frange] + start = 0L + for i in frange: + l = files[i][1] + if l == 0: + self.fileamtdone[i] = 1.0 + 
self.filecomplete[i] = True + self.fileinplace[i] = True + else: + fp = self.filepieces[i] + fp2 = self.filepieces2[i] + for piece in range(int(start/piece_length), + int((start+l-1)/piece_length)+1): + fp.append(piece) + fp2.append(piece) + start += l + + + def update(self): + s = Statistics_Response() + s.upTotal = self.upmeasure.get_total() + s.downTotal = self.downmeasure.get_total() + s.last_failed = self.rerequest_lastfailed() + s.external_connection_made = self.connecter.external_connection_made + if s.downTotal > 0: + s.shareRating = float(s.upTotal)/s.downTotal + elif s.upTotal == 0: + s.shareRating = 0.0 + else: + s.shareRating = -1.0 + s.torrentRate = self.torrentmeasure.get_rate() + s.torrentTotal = self.torrentmeasure.get_total() + s.numSeeds = self.picker.seeds_connected + s.numOldSeeds = self.downloader.num_disconnected_seeds() + s.numPeers = len(self.downloader.downloads)-s.numSeeds + s.numCopies = 0.0 + for i in self.picker.crosscount: + if i==0: + s.numCopies+=1 + else: + s.numCopies+=1-float(i)/self.picker.numpieces + break + if self.picker.done: + s.numCopies2 = s.numCopies + 1 + else: + s.numCopies2 = 0.0 + for i in self.picker.crosscount2: + if i==0: + s.numCopies2+=1 + else: + s.numCopies2+=1-float(i)/self.picker.numpieces + break + s.discarded = self.downloader.discarded + s.numSeeds += self.httpdl.seedsfound + s.numOldSeeds += self.httpdl.seedsfound + if s.numPeers == 0 or self.picker.numpieces == 0: + s.percentDone = 0.0 + else: + s.percentDone = 100.0*(float(self.picker.totalcount)/self.picker.numpieces)/s.numPeers + + s.backgroundallocating = self.storage.bgalloc_active + s.storage_totalpieces = len(self.storage.hashes) + s.storage_active = len(self.storage.stat_active) + s.storage_new = len(self.storage.stat_new) + s.storage_dirty = len(self.storage.dirty) + numdownloaded = self.storage.stat_numdownloaded + s.storage_justdownloaded = numdownloaded + s.storage_numcomplete = self.storage.stat_numfound + numdownloaded + s.storage_numflunked = self.storage.stat_numflunked + s.storage_isendgame = self.downloader.endgamemode + + s.peers_kicked = self.downloader.kicked.items() + s.peers_banned = self.downloader.banned.items() + + try: + s.upRate = int(self.ratelimiter.upload_rate/1000) + assert s.upRate < 5000 + except: + s.upRate = 0 + s.upSlots = self.ratelimiter.slots + + s.have = self.storage.get_have_copy() + + if self.piecescomplete is None: # not a multi-file torrent + return s + + if self.fdatflag.isSet(): + if not self.fdatactive: + self.fdatactive = True + else: + self.fdatactive = False + + if self.piecescomplete != self.picker.numgot: + for i in xrange(len(self.filecomplete)): + if self.filecomplete[i]: + continue + oldlist = self.filepieces[i] + newlist = [ piece + for piece in oldlist + if not self.storage.have[piece] ] + if len(newlist) != len(oldlist): + self.filepieces[i] = newlist + self.fileamtdone[i] = ( + (len(self.filepieces2[i])-len(newlist)) + /float(len(self.filepieces2[i])) ) + if not newlist: + self.filecomplete[i] = True + self.filelistupdated.set() + + self.piecescomplete = self.picker.numgot + + if ( self.filelistupdated.isSet() + or self.placesopen != len(self.storage.places) ): + for i in xrange(len(self.filecomplete)): + if not self.filecomplete[i] or self.fileinplace[i]: + continue + while self.filepieces2[i]: + piece = self.filepieces2[i][-1] + if self.storage.places[piece] != piece: + break + del self.filepieces2[i][-1] + if not self.filepieces2[i]: + self.fileinplace[i] = True + self.storage.set_file_readonly(i) + 
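set_dirstats() and update() above track per-file progress by mapping each file's byte range onto the piece indices it overlaps and then counting how many of those pieces are already stored. A simplified standalone sketch of that mapping (Python 3; the real code also skips zero-length files and tracks in-place status):

    def pieces_for_file(start, length, piece_length):
        # Piece indices overlapping the byte range [start, start + length).
        if length == 0:
            return []
        return list(range(start // piece_length, (start + length - 1) // piece_length + 1))

    def file_progress(file_pieces, have):
        # Fraction of a file's pieces already stored (have: piece index -> bool).
        if not file_pieces:
            return 1.0
        done = sum(1 for p in file_pieces if have[p])
        return done / len(file_pieces)

    piece_length = 4
    files = [("a", 6), ("b", 10)]            # (name, byte length), laid out back to back
    start, spans = 0, {}
    for name, length in files:
        spans[name] = pieces_for_file(start, length, piece_length)
        start += length
    assert spans == {"a": [0, 1], "b": [1, 2, 3]}   # piece 1 is shared by both files
    have = {0: True, 1: True, 2: False, 3: False}
    assert file_progress(spans["a"], have) == 1.0
    assert file_progress(spans["b"], have) == 1 / 3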
self.filelistupdated.set() + + self.placesopen = len(self.storage.places) + + s.fileamtdone = self.fileamtdone + s.filecomplete = self.filecomplete + s.fileinplace = self.fileinplace + s.filelistupdated = self.filelistupdated + + return s + diff --git a/tribler-mod/Tribler/Core/BitTornado/BT1/Statistics.py.bak b/tribler-mod/Tribler/Core/BitTornado/BT1/Statistics.py.bak new file mode 100644 index 0000000..17ad53c --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/BT1/Statistics.py.bak @@ -0,0 +1,179 @@ +# Written by Edward Keyes +# see LICENSE.txt for license information + +from threading import Event +try: + True +except: + True = 1 + False = 0 + +class Statistics_Response: + pass # empty class + + +class Statistics: + def __init__(self, upmeasure, downmeasure, connecter, httpdl, + ratelimiter, rerequest_lastfailed, fdatflag): + self.upmeasure = upmeasure + self.downmeasure = downmeasure + self.connecter = connecter + self.httpdl = httpdl + self.ratelimiter = ratelimiter + self.downloader = connecter.downloader + self.picker = connecter.downloader.picker + self.storage = connecter.downloader.storage + self.torrentmeasure = connecter.downloader.totalmeasure + self.rerequest_lastfailed = rerequest_lastfailed + self.fdatflag = fdatflag + self.fdatactive = False + self.piecescomplete = None + self.placesopen = None + self.storage_totalpieces = len(self.storage.hashes) + + + def set_dirstats(self, files, piece_length): + self.piecescomplete = 0 + self.placesopen = 0 + self.filelistupdated = Event() + self.filelistupdated.set() + frange = xrange(len(files)) + self.filepieces = [[] for x in frange] + self.filepieces2 = [[] for x in frange] + self.fileamtdone = [0.0 for x in frange] + self.filecomplete = [False for x in frange] + self.fileinplace = [False for x in frange] + start = 0L + for i in frange: + l = files[i][1] + if l == 0: + self.fileamtdone[i] = 1.0 + self.filecomplete[i] = True + self.fileinplace[i] = True + else: + fp = self.filepieces[i] + fp2 = self.filepieces2[i] + for piece in range(int(start/piece_length), + int((start+l-1)/piece_length)+1): + fp.append(piece) + fp2.append(piece) + start += l + + + def update(self): + s = Statistics_Response() + s.upTotal = self.upmeasure.get_total() + s.downTotal = self.downmeasure.get_total() + s.last_failed = self.rerequest_lastfailed() + s.external_connection_made = self.connecter.external_connection_made + if s.downTotal > 0: + s.shareRating = float(s.upTotal)/s.downTotal + elif s.upTotal == 0: + s.shareRating = 0.0 + else: + s.shareRating = -1.0 + s.torrentRate = self.torrentmeasure.get_rate() + s.torrentTotal = self.torrentmeasure.get_total() + s.numSeeds = self.picker.seeds_connected + s.numOldSeeds = self.downloader.num_disconnected_seeds() + s.numPeers = len(self.downloader.downloads)-s.numSeeds + s.numCopies = 0.0 + for i in self.picker.crosscount: + if i==0: + s.numCopies+=1 + else: + s.numCopies+=1-float(i)/self.picker.numpieces + break + if self.picker.done: + s.numCopies2 = s.numCopies + 1 + else: + s.numCopies2 = 0.0 + for i in self.picker.crosscount2: + if i==0: + s.numCopies2+=1 + else: + s.numCopies2+=1-float(i)/self.picker.numpieces + break + s.discarded = self.downloader.discarded + s.numSeeds += self.httpdl.seedsfound + s.numOldSeeds += self.httpdl.seedsfound + if s.numPeers == 0 or self.picker.numpieces == 0: + s.percentDone = 0.0 + else: + s.percentDone = 100.0*(float(self.picker.totalcount)/self.picker.numpieces)/s.numPeers + + s.backgroundallocating = self.storage.bgalloc_active + s.storage_totalpieces = 
len(self.storage.hashes) + s.storage_active = len(self.storage.stat_active) + s.storage_new = len(self.storage.stat_new) + s.storage_dirty = len(self.storage.dirty) + numdownloaded = self.storage.stat_numdownloaded + s.storage_justdownloaded = numdownloaded + s.storage_numcomplete = self.storage.stat_numfound + numdownloaded + s.storage_numflunked = self.storage.stat_numflunked + s.storage_isendgame = self.downloader.endgamemode + + s.peers_kicked = self.downloader.kicked.items() + s.peers_banned = self.downloader.banned.items() + + try: + s.upRate = int(self.ratelimiter.upload_rate/1000) + assert s.upRate < 5000 + except: + s.upRate = 0 + s.upSlots = self.ratelimiter.slots + + s.have = self.storage.get_have_copy() + + if self.piecescomplete is None: # not a multi-file torrent + return s + + if self.fdatflag.isSet(): + if not self.fdatactive: + self.fdatactive = True + else: + self.fdatactive = False + + if self.piecescomplete != self.picker.numgot: + for i in xrange(len(self.filecomplete)): + if self.filecomplete[i]: + continue + oldlist = self.filepieces[i] + newlist = [ piece + for piece in oldlist + if not self.storage.have[piece] ] + if len(newlist) != len(oldlist): + self.filepieces[i] = newlist + self.fileamtdone[i] = ( + (len(self.filepieces2[i])-len(newlist)) + /float(len(self.filepieces2[i])) ) + if not newlist: + self.filecomplete[i] = True + self.filelistupdated.set() + + self.piecescomplete = self.picker.numgot + + if ( self.filelistupdated.isSet() + or self.placesopen != len(self.storage.places) ): + for i in xrange(len(self.filecomplete)): + if not self.filecomplete[i] or self.fileinplace[i]: + continue + while self.filepieces2[i]: + piece = self.filepieces2[i][-1] + if self.storage.places[piece] != piece: + break + del self.filepieces2[i][-1] + if not self.filepieces2[i]: + self.fileinplace[i] = True + self.storage.set_file_readonly(i) + self.filelistupdated.set() + + self.placesopen = len(self.storage.places) + + s.fileamtdone = self.fileamtdone + s.filecomplete = self.filecomplete + s.fileinplace = self.fileinplace + s.filelistupdated = self.filelistupdated + + return s + diff --git a/tribler-mod/Tribler/Core/BitTornado/BT1/Storage.py b/tribler-mod/Tribler/Core/BitTornado/BT1/Storage.py new file mode 100644 index 0000000..aad2039 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/BT1/Storage.py @@ -0,0 +1,595 @@ +from time import localtime, strftime +# Written by Bram Cohen +# see LICENSE.txt for license information + +from Tribler.Core.BitTornado.piecebuffer import BufferPool +from threading import Lock +from time import strftime, localtime +import os +from os.path import exists, getsize, getmtime as getmtime_, basename +from traceback import print_exc +try: + from os import fsync +except ImportError: + fsync = lambda x: None +from bisect import bisect +import sys + +try: + True +except: + True = 1 + False = 0 + +DEBUG = False + +#MAXREADSIZE = 32768 +MAXREADSIZE = 2 ** 16 # Arno: speed opt +MAXLOCKSIZE = 1000000000L +MAXLOCKRANGE = 3999999999L # only lock first 4 gig of file + +_pool = BufferPool() +PieceBuffer = _pool.new + +def getmtime(path): + # On some OS's, getmtime returns a float + return int(getmtime_(path)) + +def dummy_status(fractionDone = None, activity = None): + pass + +class Storage: + def __init__(self, files, piece_length, doneflag, config, + disabled_files = None): + # can raise IOError and ValueError + self.files = files + self.piece_length = piece_length + self.doneflag = doneflag + self.disabled = [False] * len(files) + self.file_ranges = [] + 
self.disabled_ranges = [] + self.working_ranges = [] + numfiles = 0 + total = 0L + so_far = 0L + self.handles = {} + self.whandles = {} + self.tops = {} + self.sizes = {} + self.mtimes = {} + if config.get('lock_files', True): + self.lock_file, self.unlock_file = self._lock_file, self._unlock_file + else: + self.lock_file, self.unlock_file = lambda x1, x2: None, lambda x1, x2: None + self.lock_while_reading = config.get('lock_while_reading', False) + self.lock = Lock() + + if not disabled_files: + disabled_files = [False] * len(files) + + for i in xrange(len(files)): + file, length = files[i] + if doneflag.isSet(): # bail out if doneflag is set + return + self.disabled_ranges.append(None) + if length == 0: + self.file_ranges.append(None) + self.working_ranges.append([]) + else: + range = (total, total + length, 0, file) + self.file_ranges.append(range) + self.working_ranges.append([range]) + numfiles += 1 + total += length + if disabled_files[i]: + l = 0 + else: + if exists(file): + l = getsize(file) + if l > length: + h = open(file, 'rb+') + h.truncate(length) + h.flush() + h.close() + l = length + else: + l = 0 + h = open(file, 'wb+') + h.flush() + h.close() + self.mtimes[file] = getmtime(file) + self.tops[file] = l + self.sizes[file] = length + so_far += l + + self.total_length = total + self._reset_ranges() + + self.max_files_open = config['max_files_open'] + if self.max_files_open > 0 and numfiles > self.max_files_open: + self.handlebuffer = [] + else: + self.handlebuffer = None + + + if os.name == 'nt': + def _lock_file(self, name, f): + import msvcrt + for p in range(0, min(self.sizes[name], MAXLOCKRANGE), MAXLOCKSIZE): + f.seek(p) + msvcrt.locking(f.fileno(), msvcrt.LK_LOCK, + min(MAXLOCKSIZE, self.sizes[name]-p)) + + def _unlock_file(self, name, f): + import msvcrt + for p in range(0, min(self.sizes[name], MAXLOCKRANGE), MAXLOCKSIZE): + f.seek(p) + msvcrt.locking(f.fileno(), msvcrt.LK_UNLCK, + min(MAXLOCKSIZE, self.sizes[name]-p)) + + elif os.name == 'posix': + def _lock_file(self, name, f): + import fcntl + fcntl.flock(f.fileno(), fcntl.LOCK_EX) + + def _unlock_file(self, name, f): + import fcntl + fcntl.flock(f.fileno(), fcntl.LOCK_UN) + + else: + def _lock_file(self, name, f): + pass + def _unlock_file(self, name, f): + pass + + + def was_preallocated(self, pos, length): + for file, begin, end in self._intervals(pos, length): + if self.tops.get(file, 0) < end: + return False + return True + + + def _sync(self, file): + self._close(file) + if self.handlebuffer: + self.handlebuffer.remove(file) + + def sync(self): + # may raise IOError or OSError + for file in self.whandles.keys(): + self._sync(file) + + + def set_readonly(self, f=None): + if f is None: + self.sync() + return + file = self.files[f][0] + if self.whandles.has_key(file): + self._sync(file) + + + def get_total_length(self): + return self.total_length + + + def _open(self, file, mode): + if self.mtimes.has_key(file): + try: + if self.handlebuffer is not None: + assert getsize(file) == self.tops[file] + newmtime = getmtime(file) + oldmtime = self.mtimes[file] + assert newmtime <= oldmtime+1 + assert newmtime >= oldmtime-1 + except: + if DEBUG: + print( file+' modified: ' + +strftime('(%x %X)', localtime(self.mtimes[file])) + +strftime(' != (%x %X) ?', localtime(getmtime(file))) ) + raise IOError('modified during download') + try: + return open(file, mode) + except: + if DEBUG: + print_exc() + raise + + + def _close(self, file): + f = self.handles[file] + del self.handles[file] + if self.whandles.has_key(file): + del 
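The Storage class above picks a locking strategy per platform: byte-range locks through msvcrt.locking() in large chunks on Windows, a whole-file fcntl.flock() on POSIX, and a no-op elsewhere. A condensed standalone sketch of that split (Python 3; the chunk size and the cap at the first ~4 GB of the file are simplified here):

    import os

    if os.name == "nt":
        import msvcrt

        def lock_file(f, size, chunk=1_000_000_000):
            # Lock the file in fixed-size chunks starting at offset 0.
            for pos in range(0, size, chunk):
                f.seek(pos)
                msvcrt.locking(f.fileno(), msvcrt.LK_LOCK, min(chunk, size - pos))

        def unlock_file(f, size, chunk=1_000_000_000):
            for pos in range(0, size, chunk):
                f.seek(pos)
                msvcrt.locking(f.fileno(), msvcrt.LK_UNLCK, min(chunk, size - pos))

    elif os.name == "posix":
        import fcntl

        def lock_file(f, size, chunk=None):
            fcntl.flock(f.fileno(), fcntl.LOCK_EX)   # exclusive advisory lock on the whole file

        def unlock_file(f, size, chunk=None):
            fcntl.flock(f.fileno(), fcntl.LOCK_UN)

    else:
        def lock_file(f, size, chunk=None):
            pass

        def unlock_file(f, size, chunk=None):
            pass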
self.whandles[file] + f.flush() + self.unlock_file(file, f) + f.close() + self.tops[file] = getsize(file) + self.mtimes[file] = getmtime(file) + else: + if self.lock_while_reading: + self.unlock_file(file, f) + f.close() + + + def _close_file(self, file): + if not self.handles.has_key(file): + return + self._close(file) + if self.handlebuffer: + self.handlebuffer.remove(file) + + + def _get_file_handle(self, file, for_write): + if self.handles.has_key(file): + if for_write and not self.whandles.has_key(file): + self._close(file) + try: + f = self._open(file, 'rb+') + self.handles[file] = f + self.whandles[file] = 1 + self.lock_file(file, f) + except (IOError, OSError), e: + if DEBUG: + print_exc() + raise IOError('unable to reopen '+file+': '+str(e)) + + if self.handlebuffer: + if self.handlebuffer[-1] != file: + self.handlebuffer.remove(file) + self.handlebuffer.append(file) + elif self.handlebuffer is not None: + self.handlebuffer.append(file) + else: + try: + if for_write: + f = self._open(file, 'rb+') + self.handles[file] = f + self.whandles[file] = 1 + self.lock_file(file, f) + else: + f = self._open(file, 'rb') + self.handles[file] = f + if self.lock_while_reading: + self.lock_file(file, f) + except (IOError, OSError), e: + if DEBUG: + print_exc() + raise IOError('unable to open '+file+': '+str(e)) + + if self.handlebuffer is not None: + self.handlebuffer.append(file) + if len(self.handlebuffer) > self.max_files_open: + self._close(self.handlebuffer.pop(0)) + + return self.handles[file] + + + def _reset_ranges(self): + self.ranges = [] + for l in self.working_ranges: + self.ranges.extend(l) + self.begins = [i[0] for i in self.ranges] + + def _intervals(self, pos, amount): + r = [] + stop = pos + amount + p = bisect(self.begins, pos) - 1 + while p < len(self.ranges): + begin, end, offset, file = self.ranges[p] + if begin >= stop: + break + r.append(( file, + offset + max(pos, begin) - begin, + offset + min(end, stop) - begin )) + p += 1 + return r + + + def read(self, pos, amount, flush_first = False): + r = PieceBuffer() + for file, pos, end in self._intervals(pos, amount): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'reading '+file+' from '+str(pos)+' to '+str(end)+' amount '+str(amount) + try: + self.lock.acquire() + h = self._get_file_handle(file, False) + if flush_first and self.whandles.has_key(file): + h.flush() + fsync(h) + h.seek(pos) + while pos < end: + length = min(end-pos, MAXREADSIZE) + data = h.read(length) + if len(data) != length: + raise IOError('error reading data from '+ file) + r.append(data) + pos += length + self.lock.release() + except: + self.lock.release() + raise IOError('error reading data from '+ file) + return r + + def write(self, pos, s): + # might raise an IOError + total = 0 + for file, begin, end in self._intervals(pos, len(s)): + if DEBUG: + print 'writing '+file+' from '+str(pos)+' to '+str(end) + self.lock.acquire() + h = self._get_file_handle(file, True) + h.seek(begin) + h.write(s[total: total + end - begin]) + self.lock.release() + total += end - begin + + def top_off(self): + for begin, end, offset, file in self.ranges: + l = offset + end - begin + if l > self.tops.get(file, 0): + self.lock.acquire() + h = self._get_file_handle(file, True) + h.seek(l-1) + h.write(chr(0xFF)) + self.lock.release() + + def flush(self): + # may raise IOError or OSError + for file in self.whandles.keys(): + self.lock.acquire() + self.handles[file].flush() + self.lock.release() + + def close(self): + for file, f in 
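_intervals() above is the heart of the multi-file layout: the torrent's files are concatenated into one byte space, and a (pos, amount) request is mapped to per-file slices with a bisect over the sorted range starts. A standalone sketch with made-up file sizes (Python 3; the per-range offset field is omitted because each file here is a single range starting at zero):

    from bisect import bisect

    def build_ranges(files):
        # files: list of (name, length) -> [(begin, end, name), ...] in global offsets.
        ranges, total = [], 0
        for name, length in files:
            if length:
                ranges.append((total, total + length, name))
                total += length
        return ranges

    def intervals(ranges, pos, amount):
        begins = [r[0] for r in ranges]
        stop, out = pos + amount, []
        i = max(bisect(begins, pos) - 1, 0)
        while i < len(ranges):
            begin, end, name = ranges[i]
            if begin >= stop:
                break
            out.append((name, max(pos, begin) - begin, min(end, stop) - begin))
            i += 1
        return out

    ranges = build_ranges([("a.bin", 10), ("b.bin", 5), ("c.bin", 20)])
    # Reading 8 bytes starting at global offset 7 touches the tail of a.bin,
    # all of b.bin, and nothing of c.bin.
    assert intervals(ranges, 7, 8) == [("a.bin", 7, 10), ("b.bin", 0, 5)]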
self.handles.items(): + try: + self.unlock_file(file, f) + except: + pass + try: + f.close() + except: + pass + self.handles = {} + self.whandles = {} + self.handlebuffer = None + + + def _get_disabled_ranges(self, f): + if not self.file_ranges[f]: + return ((), (), ()) + r = self.disabled_ranges[f] + if r: + return r + start, end, offset, file = self.file_ranges[f] + if DEBUG: + print 'calculating disabled range for '+self.files[f][0] + print 'bytes: '+str(start)+'-'+str(end) + print 'file spans pieces '+str(int(start/self.piece_length))+'-'+str(int((end-1)/self.piece_length)+1) + pieces = range(int(start/self.piece_length), + int((end-1)/self.piece_length)+1) + offset = 0 + disabled_files = [] + if len(pieces) == 1: + if ( start % self.piece_length == 0 + and end % self.piece_length == 0 ): # happens to be a single, + # perfect piece + working_range = [(start, end, offset, file)] + update_pieces = [] + else: + midfile = os.path.join(self.bufferdir, str(f)) + working_range = [(start, end, 0, midfile)] + disabled_files.append((midfile, start, end)) + length = end - start + self.sizes[midfile] = length + piece = pieces[0] + update_pieces = [(piece, start-(piece*self.piece_length), length)] + else: + update_pieces = [] + if start % self.piece_length != 0: # doesn't begin on an even piece boundary + end_b = pieces[1]*self.piece_length + startfile = os.path.join(self.bufferdir, str(f)+'b') + working_range_b = [ ( start, end_b, 0, startfile ) ] + disabled_files.append((startfile, start, end_b)) + length = end_b - start + self.sizes[startfile] = length + offset = length + piece = pieces.pop(0) + update_pieces.append((piece, start-(piece*self.piece_length), length)) + else: + working_range_b = [] + if f != len(self.files)-1 and end % self.piece_length != 0: + # doesn't end on an even piece boundary + start_e = pieces[-1] * self.piece_length + endfile = os.path.join(self.bufferdir, str(f)+'e') + working_range_e = [ ( start_e, end, 0, endfile ) ] + disabled_files.append((endfile, start_e, end)) + length = end - start_e + self.sizes[endfile] = length + piece = pieces.pop(-1) + update_pieces.append((piece, 0, length)) + else: + working_range_e = [] + if pieces: + working_range_m = [ ( pieces[0]*self.piece_length, + (pieces[-1]+1)*self.piece_length, + offset, file ) ] + else: + working_range_m = [] + working_range = working_range_b + working_range_m + working_range_e + + if DEBUG: + print str(working_range) + print str(update_pieces) + r = (tuple(working_range), tuple(update_pieces), tuple(disabled_files)) + self.disabled_ranges[f] = r + return r + + + def set_bufferdir(self, dir): + self.bufferdir = dir + + def enable_file(self, f): + if not self.disabled[f]: + return + self.disabled[f] = False + r = self.file_ranges[f] + if not r: + return + file = r[3] + if not exists(file): + h = open(file, 'wb+') + h.flush() + h.close() + if not self.tops.has_key(file): + self.tops[file] = getsize(file) + if not self.mtimes.has_key(file): + self.mtimes[file] = getmtime(file) + self.working_ranges[f] = [r] + + def disable_file(self, f): + if self.disabled[f]: + return + self.disabled[f] = True + r = self._get_disabled_ranges(f) + if not r: + return + for file, begin, end in r[2]: + if not os.path.isdir(self.bufferdir): + os.makedirs(self.bufferdir) + if not exists(file): + h = open(file, 'wb+') + h.flush() + h.close() + if not self.tops.has_key(file): + self.tops[file] = getsize(file) + if not self.mtimes.has_key(file): + self.mtimes[file] = getmtime(file) + self.working_ranges[f] = r[0] + + reset_file_status = 
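disable_file()/_get_disabled_ranges() above still have to keep the partial pieces a disabled file shares with its neighbours, so the unaligned head of the file is redirected to a "<index>b" buffer file and the unaligned tail to "<index>e", while the piece-aligned middle is simply skipped. A sketch of that boundary bookkeeping for the multi-piece case (Python 3; the single-piece case, which the patch handles with one unsuffixed buffer file, is omitted):

    def disabled_file_plan(index, start, end, piece_length, is_last_file):
        plan = []
        head = -start % piece_length                   # bytes up to the next piece boundary
        if head and head < end - start:
            plan.append(("%db" % index, start, start + head))
        tail = end % piece_length
        if tail and not is_last_file and end - tail > start + head:
            plan.append(("%de" % index, end - tail, end))
        return plan

    # File 3 covers global bytes [5, 23) with a piece length of 8:
    # bytes 5-7 share piece 0 with the previous file, bytes 16-22 share piece 2
    # with the next file, and piece 1 (bytes 8-15) needs no buffer at all.
    assert disabled_file_plan(3, 5, 23, 8, is_last_file=False) == [
        ("3b", 5, 8),
        ("3e", 16, 23),
    ]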
_reset_ranges + + + def get_piece_update_list(self, f): + return self._get_disabled_ranges(f)[1] + + + def delete_file(self, f): + try: + os.remove(self.files[f][0]) + except: + pass + + + ''' + Pickled data format: + + d['files'] = [ file #, size, mtime {, file #, size, mtime...} ] + file # in torrent, and the size and last modification + time for those files. Missing files are either empty + or disabled. + d['partial files'] = [ name, size, mtime... ] + Names, sizes and last modification times of files containing + partial piece data. Filenames go by the following convention: + {file #, 0-based}{nothing, "b" or "e"} + eg: "0e" "3" "4b" "4e" + Where "b" specifies the partial data for the first piece in + the file, "e" the last piece, and no letter signifying that + the file is disabled but is smaller than one piece, and that + all the data is cached inside so adjacent files may be + verified. + ''' + def pickle(self): + files = [] + pfiles = [] + for i in xrange(len(self.files)): + if not self.files[i][1]: # length == 0 + continue + if self.disabled[i]: + for file, start, end in self._get_disabled_ranges(i)[2]: + pfiles.extend([basename(file), getsize(file), getmtime(file)]) + continue + file = self.files[i][0] + files.extend([i, getsize(file), getmtime(file)]) + return {'files': files, 'partial files': pfiles} + + + def unpickle(self, data): + # assume all previously-disabled files have already been disabled + try: + files = {} + pfiles = {} + l = data['files'] + assert len(l) % 3 == 0 + l = [l[x:x+3] for x in xrange(0, len(l), 3)] + for f, size, mtime in l: + files[f] = (size, mtime) + l = data.get('partial files', []) + assert len(l) % 3 == 0 + l = [l[x:x+3] for x in xrange(0, len(l), 3)] + for file, size, mtime in l: + pfiles[file] = (size, mtime) + + valid_pieces = {} + for i in xrange(len(self.files)): + if self.disabled[i]: + continue + r = self.file_ranges[i] + if not r: + continue + start, end, offset, file = r + if DEBUG: + print 'adding '+file + for p in xrange( int(start/self.piece_length), + int((end-1)/self.piece_length)+1 ): + valid_pieces[p] = 1 + + if DEBUG: + print valid_pieces.keys() + + def test(old, size, mtime): + oldsize, oldmtime = old + if size != oldsize: + return False + if mtime > oldmtime+1: + return False + if mtime < oldmtime-1: + return False + return True + + for i in xrange(len(self.files)): + if self.disabled[i]: + for file, start, end in self._get_disabled_ranges(i)[2]: + f1 = basename(file) + if ( not pfiles.has_key(f1) + or not test(pfiles[f1],getsize(file),getmtime(file)) ): + if DEBUG: + print 'removing '+file + for p in xrange( int(start/self.piece_length), + int((end-1)/self.piece_length)+1 ): + if valid_pieces.has_key(p): + del valid_pieces[p] + continue + file, size = self.files[i] + if not size: + continue + if ( not files.has_key(i) + or not test(files[i], getsize(file), getmtime(file)) ): + start, end, offset, file = self.file_ranges[i] + if DEBUG: + print 'removing '+file + for p in xrange( int(start/self.piece_length), + int((end-1)/self.piece_length)+1 ): + if valid_pieces.has_key(p): + del valid_pieces[p] + except: + if DEBUG: + print_exc() + return [] + + if DEBUG: + print valid_pieces.keys() + return valid_pieces.keys() + diff --git a/tribler-mod/Tribler/Core/BitTornado/BT1/Storage.py.bak b/tribler-mod/Tribler/Core/BitTornado/BT1/Storage.py.bak new file mode 100644 index 0000000..438ab37 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/BT1/Storage.py.bak @@ -0,0 +1,594 @@ +# Written by Bram Cohen +# see LICENSE.txt for license 
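pickle()/unpickle() above store resume data as flat [identifier, size, mtime, ...] triples and only trust a file on restart if its size matches exactly and its mtime is within one second of the recorded value, which absorbs coarse filesystem timestamps. A small sketch of that check (Python 3; sizes and mtimes are hard-coded instead of coming from os.path):

    def triples(flat):
        # [a, b, c, a, b, c, ...] -> [(a, b, c), ...]; length must be a multiple of 3.
        assert len(flat) % 3 == 0
        return [tuple(flat[i:i + 3]) for i in range(0, len(flat), 3)]

    def still_valid(recorded_size, recorded_mtime, current_size, current_mtime):
        return (current_size == recorded_size
                and abs(current_mtime - recorded_mtime) <= 1)

    resume = {"files": [0, 4096, 1259340000, 2, 1024, 1259340100]}
    for file_id, size, mtime in triples(resume["files"]):
        # In the patch these would come from getsize()/getmtime(); hard-coded here
        # to keep the example self-contained.
        current_size, current_mtime = size, mtime + 1
        assert still_valid(size, mtime, current_size, current_mtime)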
information + +from Tribler.Core.BitTornado.piecebuffer import BufferPool +from threading import Lock +from time import strftime, localtime +import os +from os.path import exists, getsize, getmtime as getmtime_, basename +from traceback import print_exc +try: + from os import fsync +except ImportError: + fsync = lambda x: None +from bisect import bisect +import sys + +try: + True +except: + True = 1 + False = 0 + +DEBUG = False + +#MAXREADSIZE = 32768 +MAXREADSIZE = 2 ** 16 # Arno: speed opt +MAXLOCKSIZE = 1000000000L +MAXLOCKRANGE = 3999999999L # only lock first 4 gig of file + +_pool = BufferPool() +PieceBuffer = _pool.new + +def getmtime(path): + # On some OS's, getmtime returns a float + return int(getmtime_(path)) + +def dummy_status(fractionDone = None, activity = None): + pass + +class Storage: + def __init__(self, files, piece_length, doneflag, config, + disabled_files = None): + # can raise IOError and ValueError + self.files = files + self.piece_length = piece_length + self.doneflag = doneflag + self.disabled = [False] * len(files) + self.file_ranges = [] + self.disabled_ranges = [] + self.working_ranges = [] + numfiles = 0 + total = 0L + so_far = 0L + self.handles = {} + self.whandles = {} + self.tops = {} + self.sizes = {} + self.mtimes = {} + if config.get('lock_files', True): + self.lock_file, self.unlock_file = self._lock_file, self._unlock_file + else: + self.lock_file, self.unlock_file = lambda x1, x2: None, lambda x1, x2: None + self.lock_while_reading = config.get('lock_while_reading', False) + self.lock = Lock() + + if not disabled_files: + disabled_files = [False] * len(files) + + for i in xrange(len(files)): + file, length = files[i] + if doneflag.isSet(): # bail out if doneflag is set + return + self.disabled_ranges.append(None) + if length == 0: + self.file_ranges.append(None) + self.working_ranges.append([]) + else: + range = (total, total + length, 0, file) + self.file_ranges.append(range) + self.working_ranges.append([range]) + numfiles += 1 + total += length + if disabled_files[i]: + l = 0 + else: + if exists(file): + l = getsize(file) + if l > length: + h = open(file, 'rb+') + h.truncate(length) + h.flush() + h.close() + l = length + else: + l = 0 + h = open(file, 'wb+') + h.flush() + h.close() + self.mtimes[file] = getmtime(file) + self.tops[file] = l + self.sizes[file] = length + so_far += l + + self.total_length = total + self._reset_ranges() + + self.max_files_open = config['max_files_open'] + if self.max_files_open > 0 and numfiles > self.max_files_open: + self.handlebuffer = [] + else: + self.handlebuffer = None + + + if os.name == 'nt': + def _lock_file(self, name, f): + import msvcrt + for p in range(0, min(self.sizes[name], MAXLOCKRANGE), MAXLOCKSIZE): + f.seek(p) + msvcrt.locking(f.fileno(), msvcrt.LK_LOCK, + min(MAXLOCKSIZE, self.sizes[name]-p)) + + def _unlock_file(self, name, f): + import msvcrt + for p in range(0, min(self.sizes[name], MAXLOCKRANGE), MAXLOCKSIZE): + f.seek(p) + msvcrt.locking(f.fileno(), msvcrt.LK_UNLCK, + min(MAXLOCKSIZE, self.sizes[name]-p)) + + elif os.name == 'posix': + def _lock_file(self, name, f): + import fcntl + fcntl.flock(f.fileno(), fcntl.LOCK_EX) + + def _unlock_file(self, name, f): + import fcntl + fcntl.flock(f.fileno(), fcntl.LOCK_UN) + + else: + def _lock_file(self, name, f): + pass + def _unlock_file(self, name, f): + pass + + + def was_preallocated(self, pos, length): + for file, begin, end in self._intervals(pos, length): + if self.tops.get(file, 0) < end: + return False + return True + + + def _sync(self, 
file): + self._close(file) + if self.handlebuffer: + self.handlebuffer.remove(file) + + def sync(self): + # may raise IOError or OSError + for file in self.whandles.keys(): + self._sync(file) + + + def set_readonly(self, f=None): + if f is None: + self.sync() + return + file = self.files[f][0] + if self.whandles.has_key(file): + self._sync(file) + + + def get_total_length(self): + return self.total_length + + + def _open(self, file, mode): + if self.mtimes.has_key(file): + try: + if self.handlebuffer is not None: + assert getsize(file) == self.tops[file] + newmtime = getmtime(file) + oldmtime = self.mtimes[file] + assert newmtime <= oldmtime+1 + assert newmtime >= oldmtime-1 + except: + if DEBUG: + print( file+' modified: ' + +strftime('(%x %X)', localtime(self.mtimes[file])) + +strftime(' != (%x %X) ?', localtime(getmtime(file))) ) + raise IOError('modified during download') + try: + return open(file, mode) + except: + if DEBUG: + print_exc() + raise + + + def _close(self, file): + f = self.handles[file] + del self.handles[file] + if self.whandles.has_key(file): + del self.whandles[file] + f.flush() + self.unlock_file(file, f) + f.close() + self.tops[file] = getsize(file) + self.mtimes[file] = getmtime(file) + else: + if self.lock_while_reading: + self.unlock_file(file, f) + f.close() + + + def _close_file(self, file): + if not self.handles.has_key(file): + return + self._close(file) + if self.handlebuffer: + self.handlebuffer.remove(file) + + + def _get_file_handle(self, file, for_write): + if self.handles.has_key(file): + if for_write and not self.whandles.has_key(file): + self._close(file) + try: + f = self._open(file, 'rb+') + self.handles[file] = f + self.whandles[file] = 1 + self.lock_file(file, f) + except (IOError, OSError), e: + if DEBUG: + print_exc() + raise IOError('unable to reopen '+file+': '+str(e)) + + if self.handlebuffer: + if self.handlebuffer[-1] != file: + self.handlebuffer.remove(file) + self.handlebuffer.append(file) + elif self.handlebuffer is not None: + self.handlebuffer.append(file) + else: + try: + if for_write: + f = self._open(file, 'rb+') + self.handles[file] = f + self.whandles[file] = 1 + self.lock_file(file, f) + else: + f = self._open(file, 'rb') + self.handles[file] = f + if self.lock_while_reading: + self.lock_file(file, f) + except (IOError, OSError), e: + if DEBUG: + print_exc() + raise IOError('unable to open '+file+': '+str(e)) + + if self.handlebuffer is not None: + self.handlebuffer.append(file) + if len(self.handlebuffer) > self.max_files_open: + self._close(self.handlebuffer.pop(0)) + + return self.handles[file] + + + def _reset_ranges(self): + self.ranges = [] + for l in self.working_ranges: + self.ranges.extend(l) + self.begins = [i[0] for i in self.ranges] + + def _intervals(self, pos, amount): + r = [] + stop = pos + amount + p = bisect(self.begins, pos) - 1 + while p < len(self.ranges): + begin, end, offset, file = self.ranges[p] + if begin >= stop: + break + r.append(( file, + offset + max(pos, begin) - begin, + offset + min(end, stop) - begin )) + p += 1 + return r + + + def read(self, pos, amount, flush_first = False): + r = PieceBuffer() + for file, pos, end in self._intervals(pos, amount): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'reading '+file+' from '+str(pos)+' to '+str(end)+' amount '+str(amount) + try: + self.lock.acquire() + h = self._get_file_handle(file, False) + if flush_first and self.whandles.has_key(file): + h.flush() + fsync(h) + h.seek(pos) + while pos < end: + length = 
min(end-pos, MAXREADSIZE) + data = h.read(length) + if len(data) != length: + raise IOError('error reading data from '+ file) + r.append(data) + pos += length + self.lock.release() + except: + self.lock.release() + raise IOError('error reading data from '+ file) + return r + + def write(self, pos, s): + # might raise an IOError + total = 0 + for file, begin, end in self._intervals(pos, len(s)): + if DEBUG: + print 'writing '+file+' from '+str(pos)+' to '+str(end) + self.lock.acquire() + h = self._get_file_handle(file, True) + h.seek(begin) + h.write(s[total: total + end - begin]) + self.lock.release() + total += end - begin + + def top_off(self): + for begin, end, offset, file in self.ranges: + l = offset + end - begin + if l > self.tops.get(file, 0): + self.lock.acquire() + h = self._get_file_handle(file, True) + h.seek(l-1) + h.write(chr(0xFF)) + self.lock.release() + + def flush(self): + # may raise IOError or OSError + for file in self.whandles.keys(): + self.lock.acquire() + self.handles[file].flush() + self.lock.release() + + def close(self): + for file, f in self.handles.items(): + try: + self.unlock_file(file, f) + except: + pass + try: + f.close() + except: + pass + self.handles = {} + self.whandles = {} + self.handlebuffer = None + + + def _get_disabled_ranges(self, f): + if not self.file_ranges[f]: + return ((), (), ()) + r = self.disabled_ranges[f] + if r: + return r + start, end, offset, file = self.file_ranges[f] + if DEBUG: + print 'calculating disabled range for '+self.files[f][0] + print 'bytes: '+str(start)+'-'+str(end) + print 'file spans pieces '+str(int(start/self.piece_length))+'-'+str(int((end-1)/self.piece_length)+1) + pieces = range(int(start/self.piece_length), + int((end-1)/self.piece_length)+1) + offset = 0 + disabled_files = [] + if len(pieces) == 1: + if ( start % self.piece_length == 0 + and end % self.piece_length == 0 ): # happens to be a single, + # perfect piece + working_range = [(start, end, offset, file)] + update_pieces = [] + else: + midfile = os.path.join(self.bufferdir, str(f)) + working_range = [(start, end, 0, midfile)] + disabled_files.append((midfile, start, end)) + length = end - start + self.sizes[midfile] = length + piece = pieces[0] + update_pieces = [(piece, start-(piece*self.piece_length), length)] + else: + update_pieces = [] + if start % self.piece_length != 0: # doesn't begin on an even piece boundary + end_b = pieces[1]*self.piece_length + startfile = os.path.join(self.bufferdir, str(f)+'b') + working_range_b = [ ( start, end_b, 0, startfile ) ] + disabled_files.append((startfile, start, end_b)) + length = end_b - start + self.sizes[startfile] = length + offset = length + piece = pieces.pop(0) + update_pieces.append((piece, start-(piece*self.piece_length), length)) + else: + working_range_b = [] + if f != len(self.files)-1 and end % self.piece_length != 0: + # doesn't end on an even piece boundary + start_e = pieces[-1] * self.piece_length + endfile = os.path.join(self.bufferdir, str(f)+'e') + working_range_e = [ ( start_e, end, 0, endfile ) ] + disabled_files.append((endfile, start_e, end)) + length = end - start_e + self.sizes[endfile] = length + piece = pieces.pop(-1) + update_pieces.append((piece, 0, length)) + else: + working_range_e = [] + if pieces: + working_range_m = [ ( pieces[0]*self.piece_length, + (pieces[-1]+1)*self.piece_length, + offset, file ) ] + else: + working_range_m = [] + working_range = working_range_b + working_range_m + working_range_e + + if DEBUG: + print str(working_range) + print str(update_pieces) + r = 
(tuple(working_range), tuple(update_pieces), tuple(disabled_files)) + self.disabled_ranges[f] = r + return r + + + def set_bufferdir(self, dir): + self.bufferdir = dir + + def enable_file(self, f): + if not self.disabled[f]: + return + self.disabled[f] = False + r = self.file_ranges[f] + if not r: + return + file = r[3] + if not exists(file): + h = open(file, 'wb+') + h.flush() + h.close() + if not self.tops.has_key(file): + self.tops[file] = getsize(file) + if not self.mtimes.has_key(file): + self.mtimes[file] = getmtime(file) + self.working_ranges[f] = [r] + + def disable_file(self, f): + if self.disabled[f]: + return + self.disabled[f] = True + r = self._get_disabled_ranges(f) + if not r: + return + for file, begin, end in r[2]: + if not os.path.isdir(self.bufferdir): + os.makedirs(self.bufferdir) + if not exists(file): + h = open(file, 'wb+') + h.flush() + h.close() + if not self.tops.has_key(file): + self.tops[file] = getsize(file) + if not self.mtimes.has_key(file): + self.mtimes[file] = getmtime(file) + self.working_ranges[f] = r[0] + + reset_file_status = _reset_ranges + + + def get_piece_update_list(self, f): + return self._get_disabled_ranges(f)[1] + + + def delete_file(self, f): + try: + os.remove(self.files[f][0]) + except: + pass + + + ''' + Pickled data format: + + d['files'] = [ file #, size, mtime {, file #, size, mtime...} ] + file # in torrent, and the size and last modification + time for those files. Missing files are either empty + or disabled. + d['partial files'] = [ name, size, mtime... ] + Names, sizes and last modification times of files containing + partial piece data. Filenames go by the following convention: + {file #, 0-based}{nothing, "b" or "e"} + eg: "0e" "3" "4b" "4e" + Where "b" specifies the partial data for the first piece in + the file, "e" the last piece, and no letter signifying that + the file is disabled but is smaller than one piece, and that + all the data is cached inside so adjacent files may be + verified. 
+ ''' + def pickle(self): + files = [] + pfiles = [] + for i in xrange(len(self.files)): + if not self.files[i][1]: # length == 0 + continue + if self.disabled[i]: + for file, start, end in self._get_disabled_ranges(i)[2]: + pfiles.extend([basename(file), getsize(file), getmtime(file)]) + continue + file = self.files[i][0] + files.extend([i, getsize(file), getmtime(file)]) + return {'files': files, 'partial files': pfiles} + + + def unpickle(self, data): + # assume all previously-disabled files have already been disabled + try: + files = {} + pfiles = {} + l = data['files'] + assert len(l) % 3 == 0 + l = [l[x:x+3] for x in xrange(0, len(l), 3)] + for f, size, mtime in l: + files[f] = (size, mtime) + l = data.get('partial files', []) + assert len(l) % 3 == 0 + l = [l[x:x+3] for x in xrange(0, len(l), 3)] + for file, size, mtime in l: + pfiles[file] = (size, mtime) + + valid_pieces = {} + for i in xrange(len(self.files)): + if self.disabled[i]: + continue + r = self.file_ranges[i] + if not r: + continue + start, end, offset, file = r + if DEBUG: + print 'adding '+file + for p in xrange( int(start/self.piece_length), + int((end-1)/self.piece_length)+1 ): + valid_pieces[p] = 1 + + if DEBUG: + print valid_pieces.keys() + + def test(old, size, mtime): + oldsize, oldmtime = old + if size != oldsize: + return False + if mtime > oldmtime+1: + return False + if mtime < oldmtime-1: + return False + return True + + for i in xrange(len(self.files)): + if self.disabled[i]: + for file, start, end in self._get_disabled_ranges(i)[2]: + f1 = basename(file) + if ( not pfiles.has_key(f1) + or not test(pfiles[f1],getsize(file),getmtime(file)) ): + if DEBUG: + print 'removing '+file + for p in xrange( int(start/self.piece_length), + int((end-1)/self.piece_length)+1 ): + if valid_pieces.has_key(p): + del valid_pieces[p] + continue + file, size = self.files[i] + if not size: + continue + if ( not files.has_key(i) + or not test(files[i], getsize(file), getmtime(file)) ): + start, end, offset, file = self.file_ranges[i] + if DEBUG: + print 'removing '+file + for p in xrange( int(start/self.piece_length), + int((end-1)/self.piece_length)+1 ): + if valid_pieces.has_key(p): + del valid_pieces[p] + except: + if DEBUG: + print_exc() + return [] + + if DEBUG: + print valid_pieces.keys() + return valid_pieces.keys() + diff --git a/tribler-mod/Tribler/Core/BitTornado/BT1/StorageWrapper.py b/tribler-mod/Tribler/Core/BitTornado/BT1/StorageWrapper.py new file mode 100644 index 0000000..885dd59 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/BT1/StorageWrapper.py @@ -0,0 +1,1269 @@ +from time import localtime, strftime +# Written by Bram Cohen +# see LICENSE.txt for license information + +from Tribler.Core.BitTornado.bitfield import Bitfield +from sha import sha +from Tribler.Core.BitTornado.clock import clock +from traceback import print_exc +from random import randrange +from copy import deepcopy +import pickle +import traceback, sys + +from Tribler.Core.Merkle.merkle import MerkleTree + +try: + True +except: + True = 1 + False = 0 +from bisect import insort + +DEBUG = False + +STATS_INTERVAL = 0.2 +RARE_RAWSERVER_TASKID = -481 # This must be a rawserver task ID that is never valid. 
+ + +def dummy_status(fractionDone = None, activity = None): + pass + +class Olist: + def __init__(self, l = []): + self.d = {} + for i in l: + self.d[i] = 1 + def __len__(self): + return len(self.d) + def includes(self, i): + return self.d.has_key(i) + def add(self, i): + self.d[i] = 1 + def extend(self, l): + for i in l: + self.d[i] = 1 + def pop(self, n=0): + # assert self.d + k = self.d.keys() + if n == 0: + i = min(k) + elif n == -1: + i = max(k) + else: + k.sort() + i = k[n] + del self.d[i] + return i + def remove(self, i): + if self.d.has_key(i): + del self.d[i] + +class fakeflag: + def __init__(self, state=False): + self.state = state + def wait(self): + pass + def isSet(self): + return self.state + + +class StorageWrapper: + def __init__(self, videoinfo, storage, request_size, hashes, + piece_size, root_hash, finished, failed, + statusfunc = dummy_status, flag = fakeflag(), check_hashes = True, + data_flunked = lambda x: None, + piece_from_live_source_func = lambda i,d: None, + backfunc = None, + config = {}, unpauseflag = fakeflag(True)): + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "StorageWrapper: __init__: wrapped around", storage.files + self.videoinfo = videoinfo + self.storage = storage + self.request_size = long(request_size) + self.hashes = hashes + self.piece_size = long(piece_size) + self.piece_length = long(piece_size) + self.finished = finished + self.report_failure = failed + self.statusfunc = statusfunc + self.flag = flag + self.check_hashes = check_hashes + self.data_flunked = data_flunked + self.piece_from_live_source_func = piece_from_live_source_func + self.backfunc = backfunc + self.config = config + self.unpauseflag = unpauseflag + + self.live_streaming = self.videoinfo['live'] + + self.alloc_type = config.get('alloc_type', 'normal') + self.double_check = config.get('double_check', 0) + self.triple_check = config.get('triple_check', 0) + if self.triple_check: + self.double_check = True + self.bgalloc_enabled = False + self.bgalloc_active = False + self.total_length = storage.get_total_length() + self.amount_left = self.total_length + if self.total_length <= self.piece_size * (len(hashes) - 1): + raise ValueError, 'bad data in responsefile - total too small' + if self.total_length > self.piece_size * len(hashes): + raise ValueError, 'bad data in responsefile - total too big' + self.numactive = [0] * len(hashes) + self.inactive_requests = [1] * len(hashes) + self.amount_inactive = self.total_length + self.amount_obtained = 0 + self.amount_desired = self.total_length + self.have = Bitfield(len(hashes)) + self.have_cloaked_data = None + self.blocked = [False] * len(hashes) + self.blocked_holes = [] + self.blocked_movein = Olist() + self.blocked_moveout = Olist() + self.waschecked = [False] * len(hashes) + self.places = {} + self.holes = [] + self.stat_active = {} + self.stat_new = {} + self.dirty = {} + self.stat_numflunked = 0 + self.stat_numdownloaded = 0 + self.stat_numfound = 0 + self.download_history = {} + self.failed_pieces = {} + self.out_of_place = 0 + self.write_buf_max = config['write_buffer_size']*1048576L + self.write_buf_size = 0L + self.write_buf = {} # structure: piece: [(start, data), ...] 
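Olist above is a small set keyed on a dict whose pop() selects by rank: pop(0) returns the smallest member, pop(-1) the largest, and any other n the n-th element after sorting; the wrapper uses it to queue piece indices that must be moved into or out of blocked regions. A short usage sketch of the same behaviour (Python 3):

    pending = {i: 1 for i in (7, 3, 9)}      # Olist stores members as dict keys

    def olist_pop(d, n=0):
        keys = sorted(d)
        key = keys[0] if n == 0 else keys[-1] if n == -1 else keys[n]
        del d[key]
        return key

    assert olist_pop(pending) == 3           # pop(0): smallest pending piece first
    assert olist_pop(pending, -1) == 9       # pop(-1): largest
    assert olist_pop(pending) == 7
    assert not pending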
+ self.write_buf_list = [] + # Merkle: + self.merkle_torrent = (root_hash is not None) + self.root_hash = root_hash + self.initial_hashes = deepcopy(self.hashes) + if self.merkle_torrent: + self.hashes_unpickled = False + # Must see if we're initial seeder + self.check_hashes = True + # Fallback for if we're not an initial seeder or don't have a + # Merkle tree on disk. + self.merkletree = MerkleTree(self.piece_size,self.total_length,self.root_hash,None) + else: + # Normal BT + self.hashes_unpickled = True + + self.initialize_tasks = [ + ['checking existing data', 0, self.init_hashcheck, self.hashcheckfunc], + ['moving data', 1, self.init_movedata, self.movedatafunc], + ['allocating disk space', 1, self.init_alloc, self.allocfunc] ] + self.initialize_done = None + + # Arno: move starting of periodic _bgalloc to init_alloc + self.backfunc(self._bgsync, max(self.config['auto_flush']*60, 60)) + + def _bgsync(self): + if self.config['auto_flush']: + self.sync() + self.backfunc(self._bgsync, max(self.config['auto_flush']*60, 60)) + + + def old_style_init(self): + while self.initialize_tasks: + msg, done, init, next = self.initialize_tasks.pop(0) + if init(): + self.statusfunc(activity = msg, fractionDone = done) + t = clock() + STATS_INTERVAL + x = 0 + while x is not None: + if t < clock(): + t = clock() + STATS_INTERVAL + self.statusfunc(fractionDone = x) + self.unpauseflag.wait() + if self.flag.isSet(): + return False + x = next() + + self.statusfunc(fractionDone = 0) + return True + + + def initialize(self, donefunc, statusfunc = None): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","StorageWrapper: initialize: enter, backfunc is",self.backfunc + + self.initialize_done = donefunc + if statusfunc is None: + statusfunc = self.statusfunc + self.initialize_status = statusfunc + self.initialize_next = None + + """ + Arno: 2007-01-02: + This next line used to read: + self.backfunc(self._initialize) + So without the task ID. I've changed this to accomodate the + following situation. In video-on-demand, it may occur that + a torrent is stopped and then immediately after it is + restarted. In particular, we use this when a user selects + a torrent from the mainwin to be played (again). Because the + torrent does not necessarily use a VOD-piecepicker we have + to stop the current DL process and start a new one. + + When stopping and starting a torrent quickly a problem occurs. + When a torrent is stopped, its infohash is registered in kill list + of the (real) RawServer class. The next time the rawserver looks + for tasks to execute it will first check the kill list. If it's not + empty it will remove all tasks that have the given infohash as taskID. + This mechanism ensures that when a torrent is stopped, any outstanding + tasks belonging to the torrent are removed from the rawserver task queue. + + It can occur that we've stopped the torrent and the + infohash is on the kill list, but the queue has not yet been cleared of + old entries because the thread that runs the rawserver did not get to + executing new tasks yet. This causes a problem right here, because + we now want to schedule a new task on behalf of the new download process. + If it is enqueued now, it will be removed the next time the rawserver + checks its task list and because the infohash is on the kill list be + deleted. + + My fix is to schedule this first task of the new torrent under a + different task ID. 
Hence, when the rawserver checks its queue it + will not delete it, thinking it belonged to the old download + process. The really clean solution is to stop using infohash as + taskid, and use a unique ID for a download process. This will + take a bit of work to ensure it works correctly, so in the mean + time we'll use this fix. + """ + self.backfunc(self._initialize, id = RARE_RAWSERVER_TASKID) + + def _initialize(self): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","StorageWrapper: _initialize: enter" + if not self.unpauseflag.isSet(): + self.backfunc(self._initialize, 1) + return + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","StorageWrapper: _initialize: next is",self.initialize_next + + if self.initialize_next: + x = self.initialize_next() + if x is None: + self.initialize_next = None + else: + self.initialize_status(fractionDone = x) + else: + if not self.initialize_tasks: + self.initialize_done(success=True) + self.initialize_done = None + return + msg, done, init, next = self.initialize_tasks.pop(0) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","StorageWrapper: _initialize performing task",msg + if init(): + self.initialize_status(activity = msg, fractionDone = done) + self.initialize_next = next + + self.backfunc(self._initialize) + + def init_hashcheck(self): + if self.flag.isSet(): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","StorageWrapper: init_hashcheck: FLAG IS SET" + return False + self.check_list = [] + if not self.hashes or self.amount_left == 0: + self.check_total = 0 + self.finished() + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","StorageWrapper: init_hashcheck: Download finished" + return False + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","StorageWrapper: init_hashcheck: self.places",`self.places` + + self.check_targets = {} + got = {} + for p, v in self.places.items(): + assert not got.has_key(v) + got[v] = 1 + for i in xrange(len(self.hashes)): + if self.places.has_key(i): # restored from pickled + self.check_targets[self.hashes[i]] = [] + if self.places[i] == i: + continue + else: + assert not got.has_key(i) + self.out_of_place += 1 + if got.has_key(i): + continue + if self._waspre(i) and not self.live_streaming: + if self.blocked[i]: + self.places[i] = i + else: + self.check_list.append(i) + continue + if not self.live_streaming and not self.check_hashes: + self.failed('file supposed to be complete on start-up, but data is missing') + return False + self.holes.append(i) + if self.blocked[i] or self.check_targets.has_key(self.hashes[i]): + self.check_targets[self.hashes[i]] = [] # in case of a hash collision, discard + else: + self.check_targets[self.hashes[i]] = [i] + self.check_total = len(self.check_list) + self.check_numchecked = 0.0 + self.lastlen = self._piecelen(len(self.hashes) - 1) + self.numchecked = 0.0 + if DEBUG: + print "StorageWrapper: init_hashcheck: checking",self.check_list + print "StorageWrapper: init_hashcheck: return self.check_total > 0 is ",(self.check_total > 0) + return self.check_total > 0 + + def _markgot(self, piece, pos): + if DEBUG: + print str(piece)+' at '+str(pos) + self.places[piece] = pos + self.have[piece] = True + len = self._piecelen(piece) + self.amount_obtained += len + self.amount_left -= len + self.amount_inactive -= len + self.inactive_requests[piece] = None + self.waschecked[piece] = self.check_hashes + 
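The long comment above explains why the first task of a freshly restarted download is queued under RARE_RAWSERVER_TASKID: the stopped download's infohash may still be on the rawserver kill list, and anything queued under that infohash would be purged before it ever runs. A toy scheduler illustrating just that interaction (Python 3; this is not the Tribler RawServer API, only the queue and kill-list idea):

    class ToyScheduler:
        def __init__(self):
            self.queue = []          # (task_id, callable)
            self.kill_list = set()

        def add_task(self, func, task_id):
            self.queue.append((task_id, func))

        def kill_tasks(self, task_id):
            self.kill_list.add(task_id)

        def run_pending(self):
            # Purge killed ids first, then run whatever is left.
            self.queue = [(tid, f) for tid, f in self.queue if tid not in self.kill_list]
            self.kill_list.clear()
            ran, self.queue = [f for _, f in self.queue], []
            for f in ran:
                f()
            return len(ran)

    sched = ToyScheduler()
    infohash = b"old-download"
    sched.kill_tasks(infohash)                          # torrent was just stopped
    sched.add_task(lambda: None, infohash)              # restart queued under the same id: purged
    assert sched.run_pending() == 0
    sched.kill_tasks(infohash)
    sched.add_task(lambda: None, task_id="rare-id")     # distinct id: survives the purge
    assert sched.run_pending() == 1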
self.stat_numfound += 1 + + def hashcheckfunc(self): + try: + if self.flag.isSet(): + return None + if not self.check_list: + return None + if self.live_streaming: + return None + + i = self.check_list.pop(0) + if not self.check_hashes: + self._markgot(i, i) + else: + d1 = self.read_raw(i, 0, self.lastlen) + if d1 is None: + return None + sh = sha(d1[:]) + d1.release() + sp = sh.digest() + d2 = self.read_raw(i, self.lastlen, self._piecelen(i)-self.lastlen) + if d2 is None: + return None + sh.update(d2[:]) + d2.release() + s = sh.digest() + + + if DEBUG: + if s != self.hashes[i]: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","StorageWrapper: hashcheckfunc: piece corrupt",i + + # Merkle: If we didn't read the hashes from persistent storage then + # we can't check anything. Exception is the case where we are the + # initial seeder. In that case we first calculate all hashes, + # and then compute the hash tree. If the root hash equals the + # root hash in the .torrent we're a seeder. Otherwise, we are + # client with messed up data and no (local) way of checking it. + # + if not self.hashes_unpickled: + if DEBUG: + print "StorageWrapper: Merkle torrent, saving calculated hash",i + self.initial_hashes[i] = s + self._markgot(i, i) + elif s == self.hashes[i]: + self._markgot(i, i) + elif (self.check_targets.get(s) + and self._piecelen(i) == self._piecelen(self.check_targets[s][-1])): + self._markgot(self.check_targets[s].pop(), i) + self.out_of_place += 1 + elif (not self.have[-1] and sp == self.hashes[-1] + and (i == len(self.hashes) - 1 + or not self._waspre(len(self.hashes) - 1))): + self._markgot(len(self.hashes) - 1, i) + self.out_of_place += 1 + else: + self.places[i] = i + self.numchecked += 1 + if self.amount_left == 0: + if not self.hashes_unpickled: + # Merkle: The moment of truth. Are we an initial seeder? + self.merkletree = MerkleTree(self.piece_size,self.total_length,None,self.initial_hashes) + if self.merkletree.compare_root_hashes(self.root_hash): + if DEBUG: + print "StorageWrapper: Merkle torrent, initial seeder!" + self.hashes = self.initial_hashes + else: + # Bad luck + if DEBUG: + print "StorageWrapper: Merkle torrent, NOT a seeder!" 
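hashcheckfunc() above hashes each full-size slot twice in one pass: sp covers only the first lastlen bytes (the length of the short final piece) and s covers the whole slot, so data belonging to the final piece can be recognised even when it is parked at the front of another piece's slot. A standalone sketch of that prefix-hash trick with tiny made-up pieces (Python 3, hashlib instead of the old sha module):

    from hashlib import sha1

    piece_size, last_len = 8, 5
    piece_hashes = [sha1(b"piece-0!").digest(),         # hash of the full-size piece 0
                    sha1(b"final").digest()]            # hash of the 5-byte final piece

    # The final piece's data was written at the start of slot 0, padded with junk.
    slot0 = b"final" + b"\xff" * (piece_size - last_len)

    prefix_digest = sha1(slot0[:last_len]).digest()     # sp in the patch
    full_digest = sha1(slot0).digest()                  # s in the patch

    assert full_digest != piece_hashes[0]               # slot 0 does not hold piece 0
    assert prefix_digest == piece_hashes[1]             # but it does hold the final piece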
+ self.failed('download corrupted, hash tree does not compute; please delete and restart') + return 1 + self.finished() + return (self.numchecked / self.check_total) + + except Exception, e: + print_exc() + self.failed('download corrupted: '+str(e)+'; please delete and restart') + + + def init_movedata(self): + if self.flag.isSet(): + return False + if self.alloc_type != 'sparse': + return False + self.storage.top_off() # sets file lengths to their final size + self.movelist = [] + if self.out_of_place == 0: + for i in self.holes: + self.places[i] = i + self.holes = [] + return False + self.tomove = float(self.out_of_place) + for i in xrange(len(self.hashes)): + if not self.places.has_key(i): + self.places[i] = i + elif self.places[i] != i: + self.movelist.append(i) + self.holes = [] + return True + + def movedatafunc(self): + if self.flag.isSet(): + return None + if not self.movelist: + return None + i = self.movelist.pop(0) + old = self.read_raw(self.places[i], 0, self._piecelen(i)) + if old is None: + return None + if not self.write_raw(i, 0, old): + return None + if self.double_check and self.have[i]: + if self.triple_check: + old.release() + old = self.read_raw(i, 0, self._piecelen(i), + flush_first = True) + if old is None: + return None + if sha(old[:]).digest() != self.hashes[i]: + self.failed('download corrupted, piece on disk failed triple check; please delete and restart') + return None + old.release() + + self.places[i] = i + self.tomove -= 1 + return (self.tomove / self.out_of_place) + + + def init_alloc(self): + if self.flag.isSet(): + return False + if not self.holes: + return False + self.numholes = float(len(self.holes)) + self.alloc_buf = chr(0xFF) * self.piece_size + ret = False + if self.alloc_type == 'pre-allocate': + self.bgalloc_enabled = True + ret = True + if self.alloc_type == 'background': + self.bgalloc_enabled = True + # Arno: only enable this here, eats CPU otherwise + if self.bgalloc_enabled: + self.backfunc(self._bgalloc, 0.1) + if ret: + return ret + if self.blocked_moveout: + return True + return False + + + def _allocfunc(self): + while self.holes: + n = self.holes.pop(0) + if self.blocked[n]: # assume not self.blocked[index] + if not self.blocked_movein: + self.blocked_holes.append(n) + continue + if not self.places.has_key(n): + b = self.blocked_movein.pop(0) + oldpos = self._move_piece(b, n) + self.places[oldpos] = oldpos + return None + if self.places.has_key(n): + oldpos = self._move_piece(n, n) + self.places[oldpos] = oldpos + return None + return n + return None + + def allocfunc(self): + if self.flag.isSet(): + return None + + if self.blocked_moveout: + self.bgalloc_active = True + n = self._allocfunc() + if n is not None: + if self.blocked_moveout.includes(n): + self.blocked_moveout.remove(n) + b = n + else: + b = self.blocked_moveout.pop(0) + oldpos = self._move_piece(b, n) + self.places[oldpos] = oldpos + return len(self.holes) / self.numholes + + if self.holes and self.bgalloc_enabled: + self.bgalloc_active = True + n = self._allocfunc() + if n is not None: + self.write_raw(n, 0, self.alloc_buf[:self._piecelen(n)]) + self.places[n] = n + return len(self.holes) / self.numholes + + self.bgalloc_active = False + return None + + def bgalloc(self): + if self.bgalloc_enabled: + if not self.holes and not self.blocked_moveout and self.backfunc: + self.backfunc(self.storage.flush) + # force a flush whenever the "finish allocation" button is hit + self.bgalloc_enabled = True + return False + + def _bgalloc(self): + self.allocfunc() + if 
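allocfunc()/_bgalloc() above fill unallocated piece slots ("holes") in the background, one slot per tick, padding each with 0xFF bytes and reporting progress as the fraction of holes still open. An in-memory sketch of that loop (Python 3; a dict stands in for the Storage object):

    def fill_holes_stepwise(slots, holes, piece_size):
        # Fill one hole per step; yield the remaining fraction after each one.
        total = float(len(holes))
        while holes:
            n = holes.pop(0)
            slots[n] = b"\xff" * piece_size          # placeholder data, like alloc_buf
            yield len(holes) / total

    slots = {}
    progress = list(fill_holes_stepwise(slots, [0, 1, 2, 3], piece_size=4))
    assert progress == [0.75, 0.5, 0.25, 0.0]
    assert all(len(v) == 4 for v in slots.values())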
self.config.get('alloc_rate', 0) < 0.1: + self.config['alloc_rate'] = 0.1 + self.backfunc(self._bgalloc, + float(self.piece_size)/(self.config['alloc_rate']*1048576)) + + def _waspre(self, piece): + return self.storage.was_preallocated(piece * self.piece_size, self._piecelen(piece)) + + def _piecelen(self, piece): + if piece < len(self.hashes) - 1: + return self.piece_size + else: + return self.total_length - (piece * self.piece_size) + + def get_amount_left(self): + return self.amount_left + + def do_I_have_anything(self): + return self.amount_left < self.total_length + + def _make_inactive(self, index): + length = self._piecelen(index) + l = [] + x = 0 + while x + self.request_size < length: + l.append((x, self.request_size)) + x += self.request_size + l.append((x, length - x)) + self.inactive_requests[index] = l # Note: letter L not number 1 + + def is_endgame(self): + return not self.amount_inactive + + def reset_endgame(self, requestlist): + for index, begin, length in requestlist: + self.request_lost(index, begin, length) + + def get_have_list(self): + return self.have.tostring() + + def get_have_copy(self): + return self.have.copy() + + def get_have_list_cloaked(self): + if self.have_cloaked_data is None: + newhave = Bitfield(copyfrom = self.have) + unhaves = [] + n = min(randrange(2, 5), len(self.hashes)) # between 2-4 unless torrent is small + while len(unhaves) < n: + unhave = randrange(min(32, len(self.hashes))) # all in first 4 bytes + if not unhave in unhaves: + unhaves.append(unhave) + newhave[unhave] = False + self.have_cloaked_data = (newhave.tostring(), unhaves) + return self.have_cloaked_data + + def do_I_have(self, index): + return self.have[index] + + def do_I_have_requests(self, index): + return not not self.inactive_requests[index] + + def is_unstarted(self, index): + return (not self.have[index] and not self.numactive[index] + and not self.dirty.has_key(index)) + + def get_hash(self, index): + return self.hashes[index] + + def get_stats(self): + return self.amount_obtained, self.amount_desired, self.have + + def new_request(self, index): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","StorageWrapper: new_request",index,"#" + + # returns (begin, length) + if self.inactive_requests[index] == 1: # number 1, not letter L + self._make_inactive(index) + self.numactive[index] += 1 + self.stat_active[index] = 1 + if not self.dirty.has_key(index): + self.stat_new[index] = 1 + rs = self.inactive_requests[index] +# r = min(rs) +# rs.remove(r) + r = rs.pop(0) + self.amount_inactive -= r[1] + return r + + + def request_too_slow(self,index): + """ Arno's addition to get pieces we requested from slow peers to be + back in the PiecePicker's list of candidates """ + if self.amount_inactive == 0: + # all has been requested, endgame about to start, don't mess around + return + + self.inactive_requests[index] = 1 # number 1, not letter L + self.amount_inactive += self._piecelen(index) + + + def write_raw(self, index, begin, data): + try: + self.storage.write(self.piece_size * index + begin, data) + return True + except IOError, e: + traceback.print_exc() + self.failed('IO Error: ' + str(e)) + return False + + + def _write_to_buffer(self, piece, start, data): + if not self.write_buf_max: + return self.write_raw(self.places[piece], start, data) + self.write_buf_size += len(data) + while self.write_buf_size > self.write_buf_max: + old = self.write_buf_list.pop(0) + if not self._flush_buffer(old, True): + return False + if self.write_buf.has_key(piece): + 
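_make_inactive() above splits a piece into fixed-size (offset, length) request blocks plus a final, possibly shorter, remainder; new_request() then hands them out front to back. A standalone sketch of that splitting (Python 3):

    def split_piece(piece_length, request_size):
        blocks, offset = [], 0
        while offset + request_size < piece_length:
            blocks.append((offset, request_size))
            offset += request_size
        blocks.append((offset, piece_length - offset))   # final, possibly short, block
        return blocks

    # A 40 KiB piece with 16 KiB requests: two full blocks and an 8 KiB tail.
    assert split_piece(40 * 1024, 16 * 1024) == [
        (0, 16384),
        (16384, 16384),
        (32768, 8192),
    ]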
self.write_buf_list.remove(piece) + else: + self.write_buf[piece] = [] + self.write_buf_list.append(piece) + self.write_buf[piece].append((start, data)) + return True + + def _flush_buffer(self, piece, popped = False): + if not self.write_buf.has_key(piece): + return True + if not popped: + self.write_buf_list.remove(piece) + l = self.write_buf[piece] + del self.write_buf[piece] + l.sort() + for start, data in l: + self.write_buf_size -= len(data) + if not self.write_raw(self.places[piece], start, data): + return False + return True + + def sync(self): + spots = {} + for p in self.write_buf_list: + spots[self.places[p]] = p + l = spots.keys() + l.sort() + for i in l: + try: + self._flush_buffer(spots[i]) + except: + pass + try: + self.storage.sync() + except IOError, e: + self.failed('IO Error: ' + str(e)) + except OSError, e: + self.failed('OS Error: ' + str(e)) + + + def _move_piece(self, index, newpos): + oldpos = self.places[index] + if DEBUG: + print 'moving '+str(index)+' from '+str(oldpos)+' to '+str(newpos) + assert oldpos != index + assert oldpos != newpos + assert index == newpos or not self.places.has_key(newpos) + old = self.read_raw(oldpos, 0, self._piecelen(index)) + if old is None: + return -1 + if not self.write_raw(newpos, 0, old): + return -1 + self.places[index] = newpos + if self.have[index] and ( + self.triple_check or (self.double_check and index == newpos)): + if self.triple_check: + old.release() + old = self.read_raw(newpos, 0, self._piecelen(index), + flush_first = True) + if old is None: + return -1 + if sha(old[:]).digest() != self.hashes[index]: + self.failed('download corrupted, piece on disk failed triple check; please delete and restart') + return -1 + old.release() + + if self.blocked[index]: + self.blocked_moveout.remove(index) + if self.blocked[newpos]: + self.blocked_movein.remove(index) + else: + self.blocked_movein.add(index) + else: + self.blocked_movein.remove(index) + if self.blocked[newpos]: + self.blocked_moveout.add(index) + else: + self.blocked_moveout.remove(index) + + return oldpos + + def _clear_space(self, index): + h = self.holes.pop(0) + n = h + if self.blocked[n]: # assume not self.blocked[index] + if not self.blocked_movein: + self.blocked_holes.append(n) + return True # repeat + if not self.places.has_key(n): + b = self.blocked_movein.pop(0) + oldpos = self._move_piece(b, n) + if oldpos < 0: + return False + n = oldpos + if self.places.has_key(n): + oldpos = self._move_piece(n, n) + if oldpos < 0: + return False + n = oldpos + if index == n or index in self.holes: + if n == h: + self.write_raw(n, 0, self.alloc_buf[:self._piecelen(n)]) + self.places[index] = n + if self.blocked[n]: + # because n may be a spot cleared 10 lines above, it's possible + # for it to be blocked. While that spot could be left cleared + # and a new spot allocated, this condition might occur several + # times in a row, resulting in a significant amount of disk I/O, + # delaying the operation of the engine. Rather than do this, + # queue the piece to be moved out again, which will be performed + # by the background allocator, with which data movement is + # automatically limited. 
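                # in other words: hand the piece over to the background
                # allocator (allocfunc(), driven by _bgalloc()) via
                # blocked_moveout and return False to end the clear-space
                # loop; piece_came_in() calls _clear_space() in a
                # "while ...: pass" loop, where True means "try the next hole"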
+ self.blocked_moveout.add(index) + return False + for p, v in self.places.items(): + if v == index: + break + else: + self.failed('download corrupted; please delete and restart') + return False + self._move_piece(p, n) + self.places[index] = index + return False + + ## Arno: don't think we need length here, FIXME + def piece_came_in(self, index, begin, hashlist, piece, baddataguard, source = None): + assert not self.have[index] + # Merkle: Check that the hashes are valid using the known root_hash + # If so, put them in the hash tree and the normal list of hashes to + # allow (1) us to send this piece to others using the right hashes + # and (2) us to check the validity of the piece when it has been + # received completely. + # + if self.merkle_torrent and len(hashlist) > 0: + if self.merkletree.check_hashes(hashlist): + self.merkletree.update_hash_admin(hashlist,self.hashes) + # if the check wasn't right, the peer will be discovered as bad later + # TODO: make bad now? + if not self.places.has_key(index): + while self._clear_space(index): + pass + if DEBUG: + print 'new place for '+str(index)+' at '+str(self.places[index]) + if self.flag.isSet(): + return False + + if self.failed_pieces.has_key(index): + old = self.read_raw(self.places[index], begin, len(piece)) + if old is None: + return True + if old[:].tostring() != piece: + try: + self.failed_pieces[index][self.download_history[index][begin]] = 1 + except: + self.failed_pieces[index][None] = 1 + old.release() + self.download_history.setdefault(index, {})[begin] = source + + if not self._write_to_buffer(index, begin, piece): + return True + + self.amount_obtained += len(piece) + self.dirty.setdefault(index, []).append((begin, len(piece))) + self.numactive[index] -= 1 + assert self.numactive[index] >= 0 + if not self.numactive[index]: + del self.stat_active[index] + if self.stat_new.has_key(index): + del self.stat_new[index] + + if self.inactive_requests[index] or self.numactive[index]: + return True + + del self.dirty[index] + if not self._flush_buffer(index): + return True + + length = self._piecelen(index) + # Check hash + data = self.read_raw(self.places[index], 0, length, + flush_first = self.triple_check) + if data is None: + return True + + pieceok = False + if self.live_streaming: + # LIVESOURCEAUTH + if self.piece_from_live_source_func(index,data[:]): + pieceok = True + else: + hash = sha(data[:]).digest() + data.release() + if hash == self.hashes[index]: + pieceok = True + + if not pieceok: + self.amount_obtained -= length + self.data_flunked(length, index) + self.inactive_requests[index] = 1 # number 1, not letter L + self.amount_inactive += length + self.stat_numflunked += 1 + + self.failed_pieces[index] = {} + allsenders = {} + for d in self.download_history[index].values(): + allsenders[d] = 1 + if len(allsenders) == 1: + culprit = allsenders.keys()[0] + if culprit is not None: + culprit.failed(index, bump = True) + del self.failed_pieces[index] # found the culprit already + + if self.live_streaming: + # TODO: figure out how to use the Download.BadDataGuard + # cf. the culprit business above. 
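                    # for live streams the piece was rejected by
                    # piece_from_live_source_func() (the live-source
                    # authenticator) rather than by a hash mismatch, so instead
                    # of banning through the culprit/BadDataGuard bookkeeping
                    # above, the peer is kicked outright by raising below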
+ print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","////////////////////////////////////////////////////////////// kicking peer" + raise ValueError("Arno quick fix: Unauth data unacceptable") + + return False + + self.have[index] = True + self.inactive_requests[index] = None + self.waschecked[index] = True + + self.amount_left -= length + self.stat_numdownloaded += 1 + + for d in self.download_history[index].values(): + if d is not None: + d.good(index) + del self.download_history[index] + if self.failed_pieces.has_key(index): + for d in self.failed_pieces[index].keys(): + if d is not None: + d.failed(index) + del self.failed_pieces[index] + + if self.amount_left == 0: + self.finished() + return True + + + def request_lost(self, index, begin, length): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","StorageWrapper: request_lost",index,"#" + + assert not (begin, length) in self.inactive_requests[index] + insort(self.inactive_requests[index], (begin, length)) + self.amount_inactive += length + self.numactive[index] -= 1 + if not self.numactive[index]: + del self.stat_active[index] + if self.stat_new.has_key(index): + del self.stat_new[index] + + + def get_piece(self, index, begin, length): + # Merkle: Get (sub)piece from disk and its associated hashes + # do_get_piece() returns PieceBuffer + pb = self.do_get_piece(index,begin,length) + if self.merkle_torrent and pb is not None and begin == 0: + hashlist = self.merkletree.get_hashes_for_piece(index) + else: + hashlist = [] + return [pb,hashlist] + + def do_get_piece(self, index, begin, length): + if not self.have[index]: + return None + data = None + if not self.waschecked[index]: + data = self.read_raw(self.places[index], 0, self._piecelen(index)) + if data is None: + return None + if not self.live_streaming and sha(data[:]).digest() != self.hashes[index]: + self.failed('file supposed to be complete on start-up, but piece failed hash check') + return None + self.waschecked[index] = True + if length == -1 and begin == 0: + return data # optimization + if length == -1: + if begin > self._piecelen(index): + return None + length = self._piecelen(index)-begin + if begin == 0: + return self.read_raw(self.places[index], 0, length) + elif begin + length > self._piecelen(index): + return None + if data is not None: + s = data[begin:begin+length] + data.release() + return s + data = self.read_raw(self.places[index], begin, length) + if data is None: + return None + s = data.getarray() + data.release() + return s + + def read_raw(self, piece, begin, length, flush_first = False): + try: + return self.storage.read(self.piece_size * piece + begin, + length, flush_first) + except IOError, e: + self.failed('IO Error: ' + str(e)) + return None + + + def set_file_readonly(self, n): + try: + self.storage.set_readonly(n) + except IOError, e: + self.failed('IO Error: ' + str(e)) + except OSError, e: + self.failed('OS Error: ' + str(e)) + + + def has_data(self, index): + return index not in self.holes and index not in self.blocked_holes + + def doublecheck_data(self, pieces_to_check): + if not self.double_check: + return + sources = [] + for p, v in self.places.items(): + if pieces_to_check.has_key(v): + sources.append(p) + assert len(sources) == len(pieces_to_check) + sources.sort() + for index in sources: + if self.have[index]: + piece = self.read_raw(self.places[index], 0, self._piecelen(index), + flush_first = True) + if piece is None: + return False + if sha(piece[:]).digest() != self.hashes[index]: + 
self.failed('download corrupted, piece on disk failed double check; please delete and restart') + return False + piece.release() + return True + + + def reblock(self, new_blocked): + # assume downloads have already been canceled and chunks made inactive + for i in xrange(len(new_blocked)): + if new_blocked[i] and not self.blocked[i]: + length = self._piecelen(i) + self.amount_desired -= length + if self.have[i]: + self.amount_obtained -= length + continue + if self.inactive_requests[i] == 1: # number 1, not letter L + self.amount_inactive -= length + continue + inactive = 0 + for nb, nl in self.inactive_requests[i]: + inactive += nl + self.amount_inactive -= inactive + self.amount_obtained -= length - inactive + + if self.blocked[i] and not new_blocked[i]: + length = self._piecelen(i) + self.amount_desired += length + if self.have[i]: + self.amount_obtained += length + continue + if self.inactive_requests[i] == 1: + self.amount_inactive += length + continue + inactive = 0 + for nb, nl in self.inactive_requests[i]: + inactive += nl + self.amount_inactive += inactive + self.amount_obtained += length - inactive + + self.blocked = new_blocked + + self.blocked_movein = Olist() + self.blocked_moveout = Olist() + for p, v in self.places.items(): + if p != v: + if self.blocked[p] and not self.blocked[v]: + self.blocked_movein.add(p) + elif self.blocked[v] and not self.blocked[p]: + self.blocked_moveout.add(p) + + self.holes.extend(self.blocked_holes) # reset holes list + self.holes.sort() + self.blocked_holes = [] + + + ''' + Pickled data format: + + d['pieces'] = either a string containing a bitfield of complete pieces, + or the numeric value "1" signifying a seed. If it is + a seed, d['places'] and d['partials'] should be empty + and needn't even exist. d['merkletree'] must exist + if it's a seed and a Merkle torrent. + d['partials'] = [ piece, [ offset, length... ]... ] + a list of partial data that had been previously + downloaded, plus the given offsets. Adjacent partials + are merged so as to save space, and so that if the + request size changes then new requests can be + calculated more efficiently. + d['places'] = [ piece, place, {,piece, place ...} ] + the piece index, and the place it's stored. + If d['pieces'] specifies a complete piece or d['partials'] + specifies a set of partials for a piece which has no + entry in d['places'], it can be assumed that + place[index] = index. A place specified with no + corresponding data in d['pieces'] or d['partials'] + indicates allocated space with no valid data, and is + reserved so it doesn't need to be hash-checked. + d['merkletree'] = pickle.dumps(self.merkletree) + if we're using a Merkle torrent the Merkle tree, otherwise + there is no 'merkletree' in the dictionary. 
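        For illustration, with hypothetical values: a download in which
        piece 7 is complete but stored at place 2, and piece 9 has a single
        16384-byte block downloaded at its start, would pickle to roughly

            { 'pieces': <bitfield string with bit 7 set>,
              'places': [7, 2],
              'partials': [9, [0, 16384]] }

        (with 'merkletree' added for a Merkle torrent), whereas a finished
        download pickles simply to { 'pieces': 1 }.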
+ ''' + def pickle(self): + if self.have.complete(): + if self.merkle_torrent: + return {'pieces': 1, 'merkletree': pickle.dumps(self.merkletree) } + else: + return {'pieces': 1 } + pieces = Bitfield(len(self.hashes)) + places = [] + partials = [] + for p in xrange(len(self.hashes)): + if self.blocked[p] or not self.places.has_key(p): + continue + h = self.have[p] + pieces[p] = h + pp = self.dirty.get(p) + if not h and not pp: # no data + places.extend([self.places[p], self.places[p]]) + elif self.places[p] != p: + places.extend([p, self.places[p]]) + if h or not pp: + continue + pp.sort() + r = [] + while len(pp) > 1: + if pp[0][0]+pp[0][1] == pp[1][0]: + pp[0] = list(pp[0]) + pp[0][1] += pp[1][1] + del pp[1] + else: + r.extend(pp[0]) + del pp[0] + r.extend(pp[0]) + partials.extend([p, r]) + if self.merkle_torrent: + return {'pieces': pieces.tostring(), 'places': places, 'partials': partials, 'merkletree': pickle.dumps(self.merkletree) } + else: + return {'pieces': pieces.tostring(), 'places': places, 'partials': partials } + + + def unpickle(self, data, valid_places): + got = {} + places = {} + dirty = {} + download_history = {} + stat_active = {} + stat_numfound = self.stat_numfound + amount_obtained = self.amount_obtained + amount_inactive = self.amount_inactive + amount_left = self.amount_left + inactive_requests = [x for x in self.inactive_requests] + restored_partials = [] + + try: + if data.has_key('merkletree'): + try: + if DEBUG: + print "StorageWrapper: Unpickling Merkle tree!" + self.merkletree = pickle.loads(data['merkletree']) + self.hashes = self.merkletree.get_piece_hashes() + self.hashes_unpickled = True + except Exception, e: + print "StorageWrapper: Exception while unpickling Merkle tree",str(e) + print_exc() + if data['pieces'] == 1: # a seed + assert not data.get('places', None) + assert not data.get('partials', None) + # Merkle: restore Merkle tree + have = Bitfield(len(self.hashes)) + for i in xrange(len(self.hashes)): + have[i] = True + assert have.complete() + _places = [] + _partials = [] + else: + have = Bitfield(len(self.hashes), data['pieces']) + _places = data['places'] + assert len(_places) % 2 == 0 + _places = [_places[x:x+2] for x in xrange(0, len(_places), 2)] + _partials = data['partials'] + assert len(_partials) % 2 == 0 + _partials = [_partials[x:x+2] for x in xrange(0, len(_partials), 2)] + + for index, place in _places: + if place not in valid_places: + continue + assert not got.has_key(index) + assert not got.has_key(place) + places[index] = place + got[index] = 1 + got[place] = 1 + + for index in xrange(len(self.hashes)): + if DEBUG: + print "StorageWrapper: Unpickle: Checking if we have piece",index + if have[index]: + if not places.has_key(index): + if index not in valid_places: + have[index] = False + continue + assert not got.has_key(index) + places[index] = index + got[index] = 1 + length = self._piecelen(index) + amount_obtained += length + stat_numfound += 1 + amount_inactive -= length + amount_left -= length + inactive_requests[index] = None + + for index, plist in _partials: + assert not dirty.has_key(index) + assert not have[index] + if not places.has_key(index): + if index not in valid_places: + continue + assert not got.has_key(index) + places[index] = index + got[index] = 1 + assert len(plist) % 2 == 0 + plist = [plist[x:x+2] for x in xrange(0, len(plist), 2)] + dirty[index] = plist + stat_active[index] = 1 + download_history[index] = {} + # invert given partials + length = self._piecelen(index) + l = [] + if plist[0][0] > 0: + 
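                        # the first restored block does not start at offset 0,
                        # so the leading gap [0, plist[0][0]) is still missing
                        # and is added to the list of ranges to re-request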
l.append((0, plist[0][0])) + for i in xrange(len(plist)-1): + end = plist[i][0]+plist[i][1] + assert not end > plist[i+1][0] + l.append((end, plist[i+1][0]-end)) + end = plist[-1][0]+plist[-1][1] + assert not end > length + if end < length: + l.append((end, length-end)) + # split them to request_size + ll = [] + amount_obtained += length + amount_inactive -= length + for nb, nl in l: + while nl > 0: + r = min(nl, self.request_size) + ll.append((nb, r)) + amount_inactive += r + amount_obtained -= r + nb += self.request_size + nl -= self.request_size + inactive_requests[index] = ll + restored_partials.append(index) + + assert amount_obtained + amount_inactive == self.amount_desired + except: +# print_exc() + return [] # invalid data, discard everything + + self.have = have + self.places = places + self.dirty = dirty + self.download_history = download_history + self.stat_active = stat_active + self.stat_numfound = stat_numfound + self.amount_obtained = amount_obtained + self.amount_inactive = amount_inactive + self.amount_left = amount_left + self.inactive_requests = inactive_requests + + return restored_partials + + def failed(self,s): + # Arno: report failure of hash check + self.report_failure(s) + if self.initialize_done is not None: + self.initialize_done(success=False) + + def live_invalidate(self,piece): # Arno: LIVEWRAP + # Assumption: not outstanding requests + length = self._piecelen(piece) + oldhave = self.have[piece] + self.have[piece] = False + #self.waschecked[piece] = False + self.inactive_requests[piece] = 1 + if oldhave: + self.amount_left += length + self.amount_obtained -= length diff --git a/tribler-mod/Tribler/Core/BitTornado/BT1/StorageWrapper.py.bak b/tribler-mod/Tribler/Core/BitTornado/BT1/StorageWrapper.py.bak new file mode 100644 index 0000000..074b802 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/BT1/StorageWrapper.py.bak @@ -0,0 +1,1268 @@ +# Written by Bram Cohen +# see LICENSE.txt for license information + +from Tribler.Core.BitTornado.bitfield import Bitfield +from sha import sha +from Tribler.Core.BitTornado.clock import clock +from traceback import print_exc +from random import randrange +from copy import deepcopy +import pickle +import traceback, sys + +from Tribler.Core.Merkle.merkle import MerkleTree + +try: + True +except: + True = 1 + False = 0 +from bisect import insort + +DEBUG = False + +STATS_INTERVAL = 0.2 +RARE_RAWSERVER_TASKID = -481 # This must be a rawserver task ID that is never valid. 
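# The RARE_RAWSERVER_TASKID constant above exists because the real rawserver
# keeps a kill list of task ids (normally a torrent's infohash): when a
# torrent is stopped, every queued task carrying that infohash is discarded,
# so the first task of a quickly restarted torrent must be scheduled under an
# id that can never be an infohash or it would be swept away as well (see the
# comment in initialize() below).  The sketch that follows is only a
# simplified, hypothetical model of that bookkeeping, not the real RawServer
# interface.

class _ToyTaskQueue:
    """Minimal sketch of id-keyed task scheduling with a kill list."""
    def __init__(self):
        self.tasks = []         # list of (taskid, callable) pairs
        self.killed = {}        # task ids whose queued tasks must be dropped

    def add_task(self, func, taskid):
        self.tasks.append((taskid, func))

    def kill_tasks(self, taskid):
        self.killed[taskid] = 1

    def run_once(self):
        pending, self.tasks = self.tasks, []
        for taskid, func in pending:
            if taskid in self.killed:
                continue        # dropped, like a stopped torrent's tasks
            func()

if DEBUG:
    # a task queued under the stopped torrent's infohash is swept away,
    # while one queued under the never-valid id survives the sweep
    _q = _ToyTaskQueue()
    _q.add_task(lambda: None, 'stopped torrent infohash')
    _q.add_task(lambda: None, RARE_RAWSERVER_TASKID)
    _q.kill_tasks('stopped torrent infohash')
    _q.run_once()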
+ + +def dummy_status(fractionDone = None, activity = None): + pass + +class Olist: + def __init__(self, l = []): + self.d = {} + for i in l: + self.d[i] = 1 + def __len__(self): + return len(self.d) + def includes(self, i): + return self.d.has_key(i) + def add(self, i): + self.d[i] = 1 + def extend(self, l): + for i in l: + self.d[i] = 1 + def pop(self, n=0): + # assert self.d + k = self.d.keys() + if n == 0: + i = min(k) + elif n == -1: + i = max(k) + else: + k.sort() + i = k[n] + del self.d[i] + return i + def remove(self, i): + if self.d.has_key(i): + del self.d[i] + +class fakeflag: + def __init__(self, state=False): + self.state = state + def wait(self): + pass + def isSet(self): + return self.state + + +class StorageWrapper: + def __init__(self, videoinfo, storage, request_size, hashes, + piece_size, root_hash, finished, failed, + statusfunc = dummy_status, flag = fakeflag(), check_hashes = True, + data_flunked = lambda x: None, + piece_from_live_source_func = lambda i,d: None, + backfunc = None, + config = {}, unpauseflag = fakeflag(True)): + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "StorageWrapper: __init__: wrapped around", storage.files + self.videoinfo = videoinfo + self.storage = storage + self.request_size = long(request_size) + self.hashes = hashes + self.piece_size = long(piece_size) + self.piece_length = long(piece_size) + self.finished = finished + self.report_failure = failed + self.statusfunc = statusfunc + self.flag = flag + self.check_hashes = check_hashes + self.data_flunked = data_flunked + self.piece_from_live_source_func = piece_from_live_source_func + self.backfunc = backfunc + self.config = config + self.unpauseflag = unpauseflag + + self.live_streaming = self.videoinfo['live'] + + self.alloc_type = config.get('alloc_type', 'normal') + self.double_check = config.get('double_check', 0) + self.triple_check = config.get('triple_check', 0) + if self.triple_check: + self.double_check = True + self.bgalloc_enabled = False + self.bgalloc_active = False + self.total_length = storage.get_total_length() + self.amount_left = self.total_length + if self.total_length <= self.piece_size * (len(hashes) - 1): + raise ValueError, 'bad data in responsefile - total too small' + if self.total_length > self.piece_size * len(hashes): + raise ValueError, 'bad data in responsefile - total too big' + self.numactive = [0] * len(hashes) + self.inactive_requests = [1] * len(hashes) + self.amount_inactive = self.total_length + self.amount_obtained = 0 + self.amount_desired = self.total_length + self.have = Bitfield(len(hashes)) + self.have_cloaked_data = None + self.blocked = [False] * len(hashes) + self.blocked_holes = [] + self.blocked_movein = Olist() + self.blocked_moveout = Olist() + self.waschecked = [False] * len(hashes) + self.places = {} + self.holes = [] + self.stat_active = {} + self.stat_new = {} + self.dirty = {} + self.stat_numflunked = 0 + self.stat_numdownloaded = 0 + self.stat_numfound = 0 + self.download_history = {} + self.failed_pieces = {} + self.out_of_place = 0 + self.write_buf_max = config['write_buffer_size']*1048576L + self.write_buf_size = 0L + self.write_buf = {} # structure: piece: [(start, data), ...] 
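        # write_buf_max above is config['write_buffer_size'] (MiB) converted to
        # bytes; incoming blocks are staged per piece in write_buf and written
        # out by _flush_buffer().  write_buf_list (below) keeps the buffered
        # pieces ordered by most recent write, so once write_buf_size exceeds
        # write_buf_max, _write_to_buffer() flushes the least recently written
        # piece first.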
+ self.write_buf_list = [] + # Merkle: + self.merkle_torrent = (root_hash is not None) + self.root_hash = root_hash + self.initial_hashes = deepcopy(self.hashes) + if self.merkle_torrent: + self.hashes_unpickled = False + # Must see if we're initial seeder + self.check_hashes = True + # Fallback for if we're not an initial seeder or don't have a + # Merkle tree on disk. + self.merkletree = MerkleTree(self.piece_size,self.total_length,self.root_hash,None) + else: + # Normal BT + self.hashes_unpickled = True + + self.initialize_tasks = [ + ['checking existing data', 0, self.init_hashcheck, self.hashcheckfunc], + ['moving data', 1, self.init_movedata, self.movedatafunc], + ['allocating disk space', 1, self.init_alloc, self.allocfunc] ] + self.initialize_done = None + + # Arno: move starting of periodic _bgalloc to init_alloc + self.backfunc(self._bgsync, max(self.config['auto_flush']*60, 60)) + + def _bgsync(self): + if self.config['auto_flush']: + self.sync() + self.backfunc(self._bgsync, max(self.config['auto_flush']*60, 60)) + + + def old_style_init(self): + while self.initialize_tasks: + msg, done, init, next = self.initialize_tasks.pop(0) + if init(): + self.statusfunc(activity = msg, fractionDone = done) + t = clock() + STATS_INTERVAL + x = 0 + while x is not None: + if t < clock(): + t = clock() + STATS_INTERVAL + self.statusfunc(fractionDone = x) + self.unpauseflag.wait() + if self.flag.isSet(): + return False + x = next() + + self.statusfunc(fractionDone = 0) + return True + + + def initialize(self, donefunc, statusfunc = None): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","StorageWrapper: initialize: enter, backfunc is",self.backfunc + + self.initialize_done = donefunc + if statusfunc is None: + statusfunc = self.statusfunc + self.initialize_status = statusfunc + self.initialize_next = None + + """ + Arno: 2007-01-02: + This next line used to read: + self.backfunc(self._initialize) + So without the task ID. I've changed this to accomodate the + following situation. In video-on-demand, it may occur that + a torrent is stopped and then immediately after it is + restarted. In particular, we use this when a user selects + a torrent from the mainwin to be played (again). Because the + torrent does not necessarily use a VOD-piecepicker we have + to stop the current DL process and start a new one. + + When stopping and starting a torrent quickly a problem occurs. + When a torrent is stopped, its infohash is registered in kill list + of the (real) RawServer class. The next time the rawserver looks + for tasks to execute it will first check the kill list. If it's not + empty it will remove all tasks that have the given infohash as taskID. + This mechanism ensures that when a torrent is stopped, any outstanding + tasks belonging to the torrent are removed from the rawserver task queue. + + It can occur that we've stopped the torrent and the + infohash is on the kill list, but the queue has not yet been cleared of + old entries because the thread that runs the rawserver did not get to + executing new tasks yet. This causes a problem right here, because + we now want to schedule a new task on behalf of the new download process. + If it is enqueued now, it will be removed the next time the rawserver + checks its task list and because the infohash is on the kill list be + deleted. + + My fix is to schedule this first task of the new torrent under a + different task ID. 
Hence, when the rawserver checks its queue it + will not delete it, thinking it belonged to the old download + process. The really clean solution is to stop using infohash as + taskid, and use a unique ID for a download process. This will + take a bit of work to ensure it works correctly, so in the mean + time we'll use this fix. + """ + self.backfunc(self._initialize, id = RARE_RAWSERVER_TASKID) + + def _initialize(self): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","StorageWrapper: _initialize: enter" + if not self.unpauseflag.isSet(): + self.backfunc(self._initialize, 1) + return + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","StorageWrapper: _initialize: next is",self.initialize_next + + if self.initialize_next: + x = self.initialize_next() + if x is None: + self.initialize_next = None + else: + self.initialize_status(fractionDone = x) + else: + if not self.initialize_tasks: + self.initialize_done(success=True) + self.initialize_done = None + return + msg, done, init, next = self.initialize_tasks.pop(0) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","StorageWrapper: _initialize performing task",msg + if init(): + self.initialize_status(activity = msg, fractionDone = done) + self.initialize_next = next + + self.backfunc(self._initialize) + + def init_hashcheck(self): + if self.flag.isSet(): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","StorageWrapper: init_hashcheck: FLAG IS SET" + return False + self.check_list = [] + if not self.hashes or self.amount_left == 0: + self.check_total = 0 + self.finished() + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","StorageWrapper: init_hashcheck: Download finished" + return False + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","StorageWrapper: init_hashcheck: self.places",`self.places` + + self.check_targets = {} + got = {} + for p, v in self.places.items(): + assert not got.has_key(v) + got[v] = 1 + for i in xrange(len(self.hashes)): + if self.places.has_key(i): # restored from pickled + self.check_targets[self.hashes[i]] = [] + if self.places[i] == i: + continue + else: + assert not got.has_key(i) + self.out_of_place += 1 + if got.has_key(i): + continue + if self._waspre(i) and not self.live_streaming: + if self.blocked[i]: + self.places[i] = i + else: + self.check_list.append(i) + continue + if not self.live_streaming and not self.check_hashes: + self.failed('file supposed to be complete on start-up, but data is missing') + return False + self.holes.append(i) + if self.blocked[i] or self.check_targets.has_key(self.hashes[i]): + self.check_targets[self.hashes[i]] = [] # in case of a hash collision, discard + else: + self.check_targets[self.hashes[i]] = [i] + self.check_total = len(self.check_list) + self.check_numchecked = 0.0 + self.lastlen = self._piecelen(len(self.hashes) - 1) + self.numchecked = 0.0 + if DEBUG: + print "StorageWrapper: init_hashcheck: checking",self.check_list + print "StorageWrapper: init_hashcheck: return self.check_total > 0 is ",(self.check_total > 0) + return self.check_total > 0 + + def _markgot(self, piece, pos): + if DEBUG: + print str(piece)+' at '+str(pos) + self.places[piece] = pos + self.have[piece] = True + len = self._piecelen(piece) + self.amount_obtained += len + self.amount_left -= len + self.amount_inactive -= len + self.inactive_requests[piece] = None + self.waschecked[piece] = self.check_hashes + 
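        # waschecked is True only if the data at this place was actually
        # hash-checked during startup (self.check_hashes); pieces restored
        # without checking are hash-checked lazily by do_get_piece() before
        # being served (live streams skip that check).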
self.stat_numfound += 1 + + def hashcheckfunc(self): + try: + if self.flag.isSet(): + return None + if not self.check_list: + return None + if self.live_streaming: + return None + + i = self.check_list.pop(0) + if not self.check_hashes: + self._markgot(i, i) + else: + d1 = self.read_raw(i, 0, self.lastlen) + if d1 is None: + return None + sh = sha(d1[:]) + d1.release() + sp = sh.digest() + d2 = self.read_raw(i, self.lastlen, self._piecelen(i)-self.lastlen) + if d2 is None: + return None + sh.update(d2[:]) + d2.release() + s = sh.digest() + + + if DEBUG: + if s != self.hashes[i]: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","StorageWrapper: hashcheckfunc: piece corrupt",i + + # Merkle: If we didn't read the hashes from persistent storage then + # we can't check anything. Exception is the case where we are the + # initial seeder. In that case we first calculate all hashes, + # and then compute the hash tree. If the root hash equals the + # root hash in the .torrent we're a seeder. Otherwise, we are + # client with messed up data and no (local) way of checking it. + # + if not self.hashes_unpickled: + if DEBUG: + print "StorageWrapper: Merkle torrent, saving calculated hash",i + self.initial_hashes[i] = s + self._markgot(i, i) + elif s == self.hashes[i]: + self._markgot(i, i) + elif (self.check_targets.get(s) + and self._piecelen(i) == self._piecelen(self.check_targets[s][-1])): + self._markgot(self.check_targets[s].pop(), i) + self.out_of_place += 1 + elif (not self.have[-1] and sp == self.hashes[-1] + and (i == len(self.hashes) - 1 + or not self._waspre(len(self.hashes) - 1))): + self._markgot(len(self.hashes) - 1, i) + self.out_of_place += 1 + else: + self.places[i] = i + self.numchecked += 1 + if self.amount_left == 0: + if not self.hashes_unpickled: + # Merkle: The moment of truth. Are we an initial seeder? + self.merkletree = MerkleTree(self.piece_size,self.total_length,None,self.initial_hashes) + if self.merkletree.compare_root_hashes(self.root_hash): + if DEBUG: + print "StorageWrapper: Merkle torrent, initial seeder!" + self.hashes = self.initial_hashes + else: + # Bad luck + if DEBUG: + print "StorageWrapper: Merkle torrent, NOT a seeder!" 
+ self.failed('download corrupted, hash tree does not compute; please delete and restart') + return 1 + self.finished() + return (self.numchecked / self.check_total) + + except Exception, e: + print_exc() + self.failed('download corrupted: '+str(e)+'; please delete and restart') + + + def init_movedata(self): + if self.flag.isSet(): + return False + if self.alloc_type != 'sparse': + return False + self.storage.top_off() # sets file lengths to their final size + self.movelist = [] + if self.out_of_place == 0: + for i in self.holes: + self.places[i] = i + self.holes = [] + return False + self.tomove = float(self.out_of_place) + for i in xrange(len(self.hashes)): + if not self.places.has_key(i): + self.places[i] = i + elif self.places[i] != i: + self.movelist.append(i) + self.holes = [] + return True + + def movedatafunc(self): + if self.flag.isSet(): + return None + if not self.movelist: + return None + i = self.movelist.pop(0) + old = self.read_raw(self.places[i], 0, self._piecelen(i)) + if old is None: + return None + if not self.write_raw(i, 0, old): + return None + if self.double_check and self.have[i]: + if self.triple_check: + old.release() + old = self.read_raw(i, 0, self._piecelen(i), + flush_first = True) + if old is None: + return None + if sha(old[:]).digest() != self.hashes[i]: + self.failed('download corrupted, piece on disk failed triple check; please delete and restart') + return None + old.release() + + self.places[i] = i + self.tomove -= 1 + return (self.tomove / self.out_of_place) + + + def init_alloc(self): + if self.flag.isSet(): + return False + if not self.holes: + return False + self.numholes = float(len(self.holes)) + self.alloc_buf = chr(0xFF) * self.piece_size + ret = False + if self.alloc_type == 'pre-allocate': + self.bgalloc_enabled = True + ret = True + if self.alloc_type == 'background': + self.bgalloc_enabled = True + # Arno: only enable this here, eats CPU otherwise + if self.bgalloc_enabled: + self.backfunc(self._bgalloc, 0.1) + if ret: + return ret + if self.blocked_moveout: + return True + return False + + + def _allocfunc(self): + while self.holes: + n = self.holes.pop(0) + if self.blocked[n]: # assume not self.blocked[index] + if not self.blocked_movein: + self.blocked_holes.append(n) + continue + if not self.places.has_key(n): + b = self.blocked_movein.pop(0) + oldpos = self._move_piece(b, n) + self.places[oldpos] = oldpos + return None + if self.places.has_key(n): + oldpos = self._move_piece(n, n) + self.places[oldpos] = oldpos + return None + return n + return None + + def allocfunc(self): + if self.flag.isSet(): + return None + + if self.blocked_moveout: + self.bgalloc_active = True + n = self._allocfunc() + if n is not None: + if self.blocked_moveout.includes(n): + self.blocked_moveout.remove(n) + b = n + else: + b = self.blocked_moveout.pop(0) + oldpos = self._move_piece(b, n) + self.places[oldpos] = oldpos + return len(self.holes) / self.numholes + + if self.holes and self.bgalloc_enabled: + self.bgalloc_active = True + n = self._allocfunc() + if n is not None: + self.write_raw(n, 0, self.alloc_buf[:self._piecelen(n)]) + self.places[n] = n + return len(self.holes) / self.numholes + + self.bgalloc_active = False + return None + + def bgalloc(self): + if self.bgalloc_enabled: + if not self.holes and not self.blocked_moveout and self.backfunc: + self.backfunc(self.storage.flush) + # force a flush whenever the "finish allocation" button is hit + self.bgalloc_enabled = True + return False + + def _bgalloc(self): + self.allocfunc() + if 
self.config.get('alloc_rate', 0) < 0.1: + self.config['alloc_rate'] = 0.1 + self.backfunc(self._bgalloc, + float(self.piece_size)/(self.config['alloc_rate']*1048576)) + + def _waspre(self, piece): + return self.storage.was_preallocated(piece * self.piece_size, self._piecelen(piece)) + + def _piecelen(self, piece): + if piece < len(self.hashes) - 1: + return self.piece_size + else: + return self.total_length - (piece * self.piece_size) + + def get_amount_left(self): + return self.amount_left + + def do_I_have_anything(self): + return self.amount_left < self.total_length + + def _make_inactive(self, index): + length = self._piecelen(index) + l = [] + x = 0 + while x + self.request_size < length: + l.append((x, self.request_size)) + x += self.request_size + l.append((x, length - x)) + self.inactive_requests[index] = l # Note: letter L not number 1 + + def is_endgame(self): + return not self.amount_inactive + + def reset_endgame(self, requestlist): + for index, begin, length in requestlist: + self.request_lost(index, begin, length) + + def get_have_list(self): + return self.have.tostring() + + def get_have_copy(self): + return self.have.copy() + + def get_have_list_cloaked(self): + if self.have_cloaked_data is None: + newhave = Bitfield(copyfrom = self.have) + unhaves = [] + n = min(randrange(2, 5), len(self.hashes)) # between 2-4 unless torrent is small + while len(unhaves) < n: + unhave = randrange(min(32, len(self.hashes))) # all in first 4 bytes + if not unhave in unhaves: + unhaves.append(unhave) + newhave[unhave] = False + self.have_cloaked_data = (newhave.tostring(), unhaves) + return self.have_cloaked_data + + def do_I_have(self, index): + return self.have[index] + + def do_I_have_requests(self, index): + return not not self.inactive_requests[index] + + def is_unstarted(self, index): + return (not self.have[index] and not self.numactive[index] + and not self.dirty.has_key(index)) + + def get_hash(self, index): + return self.hashes[index] + + def get_stats(self): + return self.amount_obtained, self.amount_desired, self.have + + def new_request(self, index): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","StorageWrapper: new_request",index,"#" + + # returns (begin, length) + if self.inactive_requests[index] == 1: # number 1, not letter L + self._make_inactive(index) + self.numactive[index] += 1 + self.stat_active[index] = 1 + if not self.dirty.has_key(index): + self.stat_new[index] = 1 + rs = self.inactive_requests[index] +# r = min(rs) +# rs.remove(r) + r = rs.pop(0) + self.amount_inactive -= r[1] + return r + + + def request_too_slow(self,index): + """ Arno's addition to get pieces we requested from slow peers to be + back in the PiecePicker's list of candidates """ + if self.amount_inactive == 0: + # all has been requested, endgame about to start, don't mess around + return + + self.inactive_requests[index] = 1 # number 1, not letter L + self.amount_inactive += self._piecelen(index) + + + def write_raw(self, index, begin, data): + try: + self.storage.write(self.piece_size * index + begin, data) + return True + except IOError, e: + traceback.print_exc() + self.failed('IO Error: ' + str(e)) + return False + + + def _write_to_buffer(self, piece, start, data): + if not self.write_buf_max: + return self.write_raw(self.places[piece], start, data) + self.write_buf_size += len(data) + while self.write_buf_size > self.write_buf_max: + old = self.write_buf_list.pop(0) + if not self._flush_buffer(old, True): + return False + if self.write_buf.has_key(piece): + 
self.write_buf_list.remove(piece) + else: + self.write_buf[piece] = [] + self.write_buf_list.append(piece) + self.write_buf[piece].append((start, data)) + return True + + def _flush_buffer(self, piece, popped = False): + if not self.write_buf.has_key(piece): + return True + if not popped: + self.write_buf_list.remove(piece) + l = self.write_buf[piece] + del self.write_buf[piece] + l.sort() + for start, data in l: + self.write_buf_size -= len(data) + if not self.write_raw(self.places[piece], start, data): + return False + return True + + def sync(self): + spots = {} + for p in self.write_buf_list: + spots[self.places[p]] = p + l = spots.keys() + l.sort() + for i in l: + try: + self._flush_buffer(spots[i]) + except: + pass + try: + self.storage.sync() + except IOError, e: + self.failed('IO Error: ' + str(e)) + except OSError, e: + self.failed('OS Error: ' + str(e)) + + + def _move_piece(self, index, newpos): + oldpos = self.places[index] + if DEBUG: + print 'moving '+str(index)+' from '+str(oldpos)+' to '+str(newpos) + assert oldpos != index + assert oldpos != newpos + assert index == newpos or not self.places.has_key(newpos) + old = self.read_raw(oldpos, 0, self._piecelen(index)) + if old is None: + return -1 + if not self.write_raw(newpos, 0, old): + return -1 + self.places[index] = newpos + if self.have[index] and ( + self.triple_check or (self.double_check and index == newpos)): + if self.triple_check: + old.release() + old = self.read_raw(newpos, 0, self._piecelen(index), + flush_first = True) + if old is None: + return -1 + if sha(old[:]).digest() != self.hashes[index]: + self.failed('download corrupted, piece on disk failed triple check; please delete and restart') + return -1 + old.release() + + if self.blocked[index]: + self.blocked_moveout.remove(index) + if self.blocked[newpos]: + self.blocked_movein.remove(index) + else: + self.blocked_movein.add(index) + else: + self.blocked_movein.remove(index) + if self.blocked[newpos]: + self.blocked_moveout.add(index) + else: + self.blocked_moveout.remove(index) + + return oldpos + + def _clear_space(self, index): + h = self.holes.pop(0) + n = h + if self.blocked[n]: # assume not self.blocked[index] + if not self.blocked_movein: + self.blocked_holes.append(n) + return True # repeat + if not self.places.has_key(n): + b = self.blocked_movein.pop(0) + oldpos = self._move_piece(b, n) + if oldpos < 0: + return False + n = oldpos + if self.places.has_key(n): + oldpos = self._move_piece(n, n) + if oldpos < 0: + return False + n = oldpos + if index == n or index in self.holes: + if n == h: + self.write_raw(n, 0, self.alloc_buf[:self._piecelen(n)]) + self.places[index] = n + if self.blocked[n]: + # because n may be a spot cleared 10 lines above, it's possible + # for it to be blocked. While that spot could be left cleared + # and a new spot allocated, this condition might occur several + # times in a row, resulting in a significant amount of disk I/O, + # delaying the operation of the engine. Rather than do this, + # queue the piece to be moved out again, which will be performed + # by the background allocator, with which data movement is + # automatically limited. 
+ self.blocked_moveout.add(index) + return False + for p, v in self.places.items(): + if v == index: + break + else: + self.failed('download corrupted; please delete and restart') + return False + self._move_piece(p, n) + self.places[index] = index + return False + + ## Arno: don't think we need length here, FIXME + def piece_came_in(self, index, begin, hashlist, piece, baddataguard, source = None): + assert not self.have[index] + # Merkle: Check that the hashes are valid using the known root_hash + # If so, put them in the hash tree and the normal list of hashes to + # allow (1) us to send this piece to others using the right hashes + # and (2) us to check the validity of the piece when it has been + # received completely. + # + if self.merkle_torrent and len(hashlist) > 0: + if self.merkletree.check_hashes(hashlist): + self.merkletree.update_hash_admin(hashlist,self.hashes) + # if the check wasn't right, the peer will be discovered as bad later + # TODO: make bad now? + if not self.places.has_key(index): + while self._clear_space(index): + pass + if DEBUG: + print 'new place for '+str(index)+' at '+str(self.places[index]) + if self.flag.isSet(): + return False + + if self.failed_pieces.has_key(index): + old = self.read_raw(self.places[index], begin, len(piece)) + if old is None: + return True + if old[:].tostring() != piece: + try: + self.failed_pieces[index][self.download_history[index][begin]] = 1 + except: + self.failed_pieces[index][None] = 1 + old.release() + self.download_history.setdefault(index, {})[begin] = source + + if not self._write_to_buffer(index, begin, piece): + return True + + self.amount_obtained += len(piece) + self.dirty.setdefault(index, []).append((begin, len(piece))) + self.numactive[index] -= 1 + assert self.numactive[index] >= 0 + if not self.numactive[index]: + del self.stat_active[index] + if self.stat_new.has_key(index): + del self.stat_new[index] + + if self.inactive_requests[index] or self.numactive[index]: + return True + + del self.dirty[index] + if not self._flush_buffer(index): + return True + + length = self._piecelen(index) + # Check hash + data = self.read_raw(self.places[index], 0, length, + flush_first = self.triple_check) + if data is None: + return True + + pieceok = False + if self.live_streaming: + # LIVESOURCEAUTH + if self.piece_from_live_source_func(index,data[:]): + pieceok = True + else: + hash = sha(data[:]).digest() + data.release() + if hash == self.hashes[index]: + pieceok = True + + if not pieceok: + self.amount_obtained -= length + self.data_flunked(length, index) + self.inactive_requests[index] = 1 # number 1, not letter L + self.amount_inactive += length + self.stat_numflunked += 1 + + self.failed_pieces[index] = {} + allsenders = {} + for d in self.download_history[index].values(): + allsenders[d] = 1 + if len(allsenders) == 1: + culprit = allsenders.keys()[0] + if culprit is not None: + culprit.failed(index, bump = True) + del self.failed_pieces[index] # found the culprit already + + if self.live_streaming: + # TODO: figure out how to use the Download.BadDataGuard + # cf. the culprit business above. 
+ print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","////////////////////////////////////////////////////////////// kicking peer" + raise ValueError("Arno quick fix: Unauth data unacceptable") + + return False + + self.have[index] = True + self.inactive_requests[index] = None + self.waschecked[index] = True + + self.amount_left -= length + self.stat_numdownloaded += 1 + + for d in self.download_history[index].values(): + if d is not None: + d.good(index) + del self.download_history[index] + if self.failed_pieces.has_key(index): + for d in self.failed_pieces[index].keys(): + if d is not None: + d.failed(index) + del self.failed_pieces[index] + + if self.amount_left == 0: + self.finished() + return True + + + def request_lost(self, index, begin, length): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","StorageWrapper: request_lost",index,"#" + + assert not (begin, length) in self.inactive_requests[index] + insort(self.inactive_requests[index], (begin, length)) + self.amount_inactive += length + self.numactive[index] -= 1 + if not self.numactive[index]: + del self.stat_active[index] + if self.stat_new.has_key(index): + del self.stat_new[index] + + + def get_piece(self, index, begin, length): + # Merkle: Get (sub)piece from disk and its associated hashes + # do_get_piece() returns PieceBuffer + pb = self.do_get_piece(index,begin,length) + if self.merkle_torrent and pb is not None and begin == 0: + hashlist = self.merkletree.get_hashes_for_piece(index) + else: + hashlist = [] + return [pb,hashlist] + + def do_get_piece(self, index, begin, length): + if not self.have[index]: + return None + data = None + if not self.waschecked[index]: + data = self.read_raw(self.places[index], 0, self._piecelen(index)) + if data is None: + return None + if not self.live_streaming and sha(data[:]).digest() != self.hashes[index]: + self.failed('file supposed to be complete on start-up, but piece failed hash check') + return None + self.waschecked[index] = True + if length == -1 and begin == 0: + return data # optimization + if length == -1: + if begin > self._piecelen(index): + return None + length = self._piecelen(index)-begin + if begin == 0: + return self.read_raw(self.places[index], 0, length) + elif begin + length > self._piecelen(index): + return None + if data is not None: + s = data[begin:begin+length] + data.release() + return s + data = self.read_raw(self.places[index], begin, length) + if data is None: + return None + s = data.getarray() + data.release() + return s + + def read_raw(self, piece, begin, length, flush_first = False): + try: + return self.storage.read(self.piece_size * piece + begin, + length, flush_first) + except IOError, e: + self.failed('IO Error: ' + str(e)) + return None + + + def set_file_readonly(self, n): + try: + self.storage.set_readonly(n) + except IOError, e: + self.failed('IO Error: ' + str(e)) + except OSError, e: + self.failed('OS Error: ' + str(e)) + + + def has_data(self, index): + return index not in self.holes and index not in self.blocked_holes + + def doublecheck_data(self, pieces_to_check): + if not self.double_check: + return + sources = [] + for p, v in self.places.items(): + if pieces_to_check.has_key(v): + sources.append(p) + assert len(sources) == len(pieces_to_check) + sources.sort() + for index in sources: + if self.have[index]: + piece = self.read_raw(self.places[index], 0, self._piecelen(index), + flush_first = True) + if piece is None: + return False + if sha(piece[:]).digest() != self.hashes[index]: + 
self.failed('download corrupted, piece on disk failed double check; please delete and restart') + return False + piece.release() + return True + + + def reblock(self, new_blocked): + # assume downloads have already been canceled and chunks made inactive + for i in xrange(len(new_blocked)): + if new_blocked[i] and not self.blocked[i]: + length = self._piecelen(i) + self.amount_desired -= length + if self.have[i]: + self.amount_obtained -= length + continue + if self.inactive_requests[i] == 1: # number 1, not letter L + self.amount_inactive -= length + continue + inactive = 0 + for nb, nl in self.inactive_requests[i]: + inactive += nl + self.amount_inactive -= inactive + self.amount_obtained -= length - inactive + + if self.blocked[i] and not new_blocked[i]: + length = self._piecelen(i) + self.amount_desired += length + if self.have[i]: + self.amount_obtained += length + continue + if self.inactive_requests[i] == 1: + self.amount_inactive += length + continue + inactive = 0 + for nb, nl in self.inactive_requests[i]: + inactive += nl + self.amount_inactive += inactive + self.amount_obtained += length - inactive + + self.blocked = new_blocked + + self.blocked_movein = Olist() + self.blocked_moveout = Olist() + for p, v in self.places.items(): + if p != v: + if self.blocked[p] and not self.blocked[v]: + self.blocked_movein.add(p) + elif self.blocked[v] and not self.blocked[p]: + self.blocked_moveout.add(p) + + self.holes.extend(self.blocked_holes) # reset holes list + self.holes.sort() + self.blocked_holes = [] + + + ''' + Pickled data format: + + d['pieces'] = either a string containing a bitfield of complete pieces, + or the numeric value "1" signifying a seed. If it is + a seed, d['places'] and d['partials'] should be empty + and needn't even exist. d['merkletree'] must exist + if it's a seed and a Merkle torrent. + d['partials'] = [ piece, [ offset, length... ]... ] + a list of partial data that had been previously + downloaded, plus the given offsets. Adjacent partials + are merged so as to save space, and so that if the + request size changes then new requests can be + calculated more efficiently. + d['places'] = [ piece, place, {,piece, place ...} ] + the piece index, and the place it's stored. + If d['pieces'] specifies a complete piece or d['partials'] + specifies a set of partials for a piece which has no + entry in d['places'], it can be assumed that + place[index] = index. A place specified with no + corresponding data in d['pieces'] or d['partials'] + indicates allocated space with no valid data, and is + reserved so it doesn't need to be hash-checked. + d['merkletree'] = pickle.dumps(self.merkletree) + if we're using a Merkle torrent the Merkle tree, otherwise + there is no 'merkletree' in the dictionary. 
+ ''' + def pickle(self): + if self.have.complete(): + if self.merkle_torrent: + return {'pieces': 1, 'merkletree': pickle.dumps(self.merkletree) } + else: + return {'pieces': 1 } + pieces = Bitfield(len(self.hashes)) + places = [] + partials = [] + for p in xrange(len(self.hashes)): + if self.blocked[p] or not self.places.has_key(p): + continue + h = self.have[p] + pieces[p] = h + pp = self.dirty.get(p) + if not h and not pp: # no data + places.extend([self.places[p], self.places[p]]) + elif self.places[p] != p: + places.extend([p, self.places[p]]) + if h or not pp: + continue + pp.sort() + r = [] + while len(pp) > 1: + if pp[0][0]+pp[0][1] == pp[1][0]: + pp[0] = list(pp[0]) + pp[0][1] += pp[1][1] + del pp[1] + else: + r.extend(pp[0]) + del pp[0] + r.extend(pp[0]) + partials.extend([p, r]) + if self.merkle_torrent: + return {'pieces': pieces.tostring(), 'places': places, 'partials': partials, 'merkletree': pickle.dumps(self.merkletree) } + else: + return {'pieces': pieces.tostring(), 'places': places, 'partials': partials } + + + def unpickle(self, data, valid_places): + got = {} + places = {} + dirty = {} + download_history = {} + stat_active = {} + stat_numfound = self.stat_numfound + amount_obtained = self.amount_obtained + amount_inactive = self.amount_inactive + amount_left = self.amount_left + inactive_requests = [x for x in self.inactive_requests] + restored_partials = [] + + try: + if data.has_key('merkletree'): + try: + if DEBUG: + print "StorageWrapper: Unpickling Merkle tree!" + self.merkletree = pickle.loads(data['merkletree']) + self.hashes = self.merkletree.get_piece_hashes() + self.hashes_unpickled = True + except Exception, e: + print "StorageWrapper: Exception while unpickling Merkle tree",str(e) + print_exc() + if data['pieces'] == 1: # a seed + assert not data.get('places', None) + assert not data.get('partials', None) + # Merkle: restore Merkle tree + have = Bitfield(len(self.hashes)) + for i in xrange(len(self.hashes)): + have[i] = True + assert have.complete() + _places = [] + _partials = [] + else: + have = Bitfield(len(self.hashes), data['pieces']) + _places = data['places'] + assert len(_places) % 2 == 0 + _places = [_places[x:x+2] for x in xrange(0, len(_places), 2)] + _partials = data['partials'] + assert len(_partials) % 2 == 0 + _partials = [_partials[x:x+2] for x in xrange(0, len(_partials), 2)] + + for index, place in _places: + if place not in valid_places: + continue + assert not got.has_key(index) + assert not got.has_key(place) + places[index] = place + got[index] = 1 + got[place] = 1 + + for index in xrange(len(self.hashes)): + if DEBUG: + print "StorageWrapper: Unpickle: Checking if we have piece",index + if have[index]: + if not places.has_key(index): + if index not in valid_places: + have[index] = False + continue + assert not got.has_key(index) + places[index] = index + got[index] = 1 + length = self._piecelen(index) + amount_obtained += length + stat_numfound += 1 + amount_inactive -= length + amount_left -= length + inactive_requests[index] = None + + for index, plist in _partials: + assert not dirty.has_key(index) + assert not have[index] + if not places.has_key(index): + if index not in valid_places: + continue + assert not got.has_key(index) + places[index] = index + got[index] = 1 + assert len(plist) % 2 == 0 + plist = [plist[x:x+2] for x in xrange(0, len(plist), 2)] + dirty[index] = plist + stat_active[index] = 1 + download_history[index] = {} + # invert given partials + length = self._piecelen(index) + l = [] + if plist[0][0] > 0: + 
l.append((0, plist[0][0])) + for i in xrange(len(plist)-1): + end = plist[i][0]+plist[i][1] + assert not end > plist[i+1][0] + l.append((end, plist[i+1][0]-end)) + end = plist[-1][0]+plist[-1][1] + assert not end > length + if end < length: + l.append((end, length-end)) + # split them to request_size + ll = [] + amount_obtained += length + amount_inactive -= length + for nb, nl in l: + while nl > 0: + r = min(nl, self.request_size) + ll.append((nb, r)) + amount_inactive += r + amount_obtained -= r + nb += self.request_size + nl -= self.request_size + inactive_requests[index] = ll + restored_partials.append(index) + + assert amount_obtained + amount_inactive == self.amount_desired + except: +# print_exc() + return [] # invalid data, discard everything + + self.have = have + self.places = places + self.dirty = dirty + self.download_history = download_history + self.stat_active = stat_active + self.stat_numfound = stat_numfound + self.amount_obtained = amount_obtained + self.amount_inactive = amount_inactive + self.amount_left = amount_left + self.inactive_requests = inactive_requests + + return restored_partials + + def failed(self,s): + # Arno: report failure of hash check + self.report_failure(s) + if self.initialize_done is not None: + self.initialize_done(success=False) + + def live_invalidate(self,piece): # Arno: LIVEWRAP + # Assumption: not outstanding requests + length = self._piecelen(piece) + oldhave = self.have[piece] + self.have[piece] = False + #self.waschecked[piece] = False + self.inactive_requests[piece] = 1 + if oldhave: + self.amount_left += length + self.amount_obtained -= length diff --git a/tribler-mod/Tribler/Core/BitTornado/BT1/T2T.py b/tribler-mod/Tribler/Core/BitTornado/BT1/T2T.py new file mode 100644 index 0000000..851f1e8 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/BT1/T2T.py @@ -0,0 +1,192 @@ +from time import localtime, strftime +# Written by John Hoffman +# see LICENSE.txt for license information + +from Rerequester import Rerequester +from urllib import quote +from threading import Event +from random import randrange +import __init__ +try: + True +except: + True = 1 + False = 0 + +DEBUG = False + + +def excfunc(x): + print x + +class T2TConnection: + def __init__(self, myid, tracker, hash, interval, peers, timeout, + rawserver, disallow, isdisallowed): + self.tracker = tracker + self.interval = interval + self.hash = hash + self.operatinginterval = interval + self.peers = peers + self.rawserver = rawserver + self.disallow = disallow + self.isdisallowed = isdisallowed + self.active = True + self.busy = False + self.errors = 0 + self.rejected = 0 + self.trackererror = False + self.peerlists = [] + + self.rerequester = Rerequester([[tracker]], interval, + rawserver.add_task, lambda: 0, peers, self.addtolist, + rawserver.add_task, lambda: 1, 0, 0, 0, '', + myid, hash, timeout, self.errorfunc, excfunc, peers, Event(), + lambda: 0, lambda: 0) + + if self.isactive(): + rawserver.add_task(self.refresh, randrange(int(self.interval/10), self.interval)) + # stagger announces + + def isactive(self): + if self.isdisallowed(self.tracker): # whoops! 
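            # the tracker was marked disallowed after this connection was
            # created (another torrent on it got a "disallowed" rejection,
            # see errorfunc() and T2TList._disallow()), so this connection
            # shuts itself down as well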
+ self.deactivate() + return self.active + + def deactivate(self): + self.active = False + + def refresh(self): + if not self.isactive(): + return + self.lastsuccessful = True + self.newpeerdata = [] + if DEBUG: + print 'contacting %s for info_hash=%s' % (self.tracker, quote(self.hash)) + self.rerequester.snoop(self.peers, self.callback) + + def callback(self): + self.busy = False + if self.lastsuccessful: + self.errors = 0 + self.rejected = 0 + if self.rerequester.announce_interval > (3*self.interval): + # I think I'm stripping from a regular tracker; boost the number of peers requested + self.peers = int(self.peers * (self.rerequester.announce_interval / self.interval)) + self.operatinginterval = self.rerequester.announce_interval + if DEBUG: + print ("%s with info_hash=%s returned %d peers" % + (self.tracker, quote(self.hash), len(self.newpeerdata))) + self.peerlists.append(self.newpeerdata) + self.peerlists = self.peerlists[-10:] # keep up to the last 10 announces + if self.isactive(): + self.rawserver.add_task(self.refresh, self.operatinginterval) + + def addtolist(self, peers): + for peer in peers: + self.newpeerdata.append((peer[1],peer[0][0],peer[0][1])) + + def errorfunc(self, r): + self.lastsuccessful = False + if DEBUG: + print "%s with info_hash=%s gives error: '%s'" % (self.tracker, quote(self.hash), r) + if r == self.rerequester.rejectedmessage + 'disallowed': # whoops! + if DEBUG: + print ' -- disallowed - deactivating' + self.deactivate() + self.disallow(self.tracker) # signal other torrents on this tracker + return + if r[:8].lower() == 'rejected': # tracker rejected this particular torrent + self.rejected += 1 + if self.rejected == 3: # rejected 3 times + if DEBUG: + print ' -- rejected 3 times - deactivating' + self.deactivate() + return + self.errors += 1 + if self.errors >= 3: # three or more errors in a row + self.operatinginterval += self.interval # lengthen the interval + if DEBUG: + print ' -- lengthening interval to '+str(self.operatinginterval)+' seconds' + + def harvest(self): + x = [] + for list in self.peerlists: + x += list + self.peerlists = [] + return x + + +class T2TList: + def __init__(self, enabled, trackerid, interval, maxpeers, timeout, rawserver): + self.enabled = enabled + self.trackerid = trackerid + self.interval = interval + self.maxpeers = maxpeers + self.timeout = timeout + self.rawserver = rawserver + self.list = {} + self.torrents = {} + self.disallowed = {} + self.oldtorrents = [] + + def parse(self, allowed_list): + if not self.enabled: + return + + # step 1: Create a new list with all tracker/torrent combinations in allowed_dir + newlist = {} + for hash, data in allowed_list.items(): + if data.has_key('announce-list'): + for tier in data['announce-list']: + for tracker in tier: + self.disallowed.setdefault(tracker, False) + newlist.setdefault(tracker, {}) + newlist[tracker][hash] = None # placeholder + + # step 2: Go through and copy old data to the new list. + # if the new list has no place for it, then it's old, so deactivate it + for tracker, hashdata in self.list.items(): + for hash, t2t in hashdata.items(): + if not newlist.has_key(tracker) or not newlist[tracker].has_key(hash): + t2t.deactivate() # this connection is no longer current + self.oldtorrents += [t2t] + # keep it referenced in case a thread comes along and tries to access. 
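
The error handling in T2TConnection above amounts to a small back-off policy: a "disallowed" response deactivates the connection and flags the tracker for every other torrent using it, three "rejected" responses deactivate just this torrent, and three or more consecutive generic errors stretch the operating interval by one base interval each time. A minimal standalone model of that policy (illustrative names, not part of the patch):

    class BackoffPolicy(object):
        # Mirrors T2TConnection.errorfunc: deactivate on 'disallowed' or after
        # three rejections, otherwise lengthen the interval after three errors.
        def __init__(self, interval):
            self.interval = interval
            self.operatinginterval = interval
            self.errors = 0
            self.rejected = 0
            self.active = True

        def on_error(self, reason):
            if reason == 'disallowed':
                self.active = False        # T2TList._disallow() would also be signalled here
                return
            if reason.lower().startswith('rejected'):
                self.rejected += 1
                if self.rejected == 3:
                    self.active = False    # tracker does not carry this torrent
                return
            self.errors += 1
            if self.errors >= 3:
                self.operatinginterval += self.interval   # announce less often

    policy = BackoffPolicy(300)
    for _ in range(3):
        policy.on_error('timeout')
    assert policy.active and policy.operatinginterval == 600
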
+ else: + newlist[tracker][hash] = t2t + if not newlist.has_key(tracker): + self.disallowed[tracker] = False # reset when no torrents on it left + + self.list = newlist + newtorrents = {} + + # step 3: If there are any entries that haven't been initialized yet, do so. + # At the same time, copy all entries onto the by-torrent list. + for tracker, hashdata in newlist.items(): + for hash, t2t in hashdata.items(): + if t2t is None: + hashdata[hash] = T2TConnection(self.trackerid, tracker, hash, + self.interval, self.maxpeers, self.timeout, + self.rawserver, self._disallow, self._isdisallowed) + newtorrents.setdefault(hash,[]) + newtorrents[hash] += [hashdata[hash]] + + self.torrents = newtorrents + + # structures: + # list = {tracker: {hash: T2TConnection, ...}, ...} + # torrents = {hash: [T2TConnection, ...]} + # disallowed = {tracker: flag, ...} + # oldtorrents = [T2TConnection, ...] + + def _disallow(self,tracker): + self.disallowed[tracker] = True + + def _isdisallowed(self,tracker): + return self.disallowed[tracker] + + def harvest(self,hash): + harvest = [] + if self.enabled: + for t2t in self.torrents[hash]: + harvest += t2t.harvest() + return harvest diff --git a/tribler-mod/Tribler/Core/BitTornado/BT1/T2T.py.bak b/tribler-mod/Tribler/Core/BitTornado/BT1/T2T.py.bak new file mode 100644 index 0000000..2b2f857 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/BT1/T2T.py.bak @@ -0,0 +1,191 @@ +# Written by John Hoffman +# see LICENSE.txt for license information + +from Rerequester import Rerequester +from urllib import quote +from threading import Event +from random import randrange +import __init__ +try: + True +except: + True = 1 + False = 0 + +DEBUG = False + + +def excfunc(x): + print x + +class T2TConnection: + def __init__(self, myid, tracker, hash, interval, peers, timeout, + rawserver, disallow, isdisallowed): + self.tracker = tracker + self.interval = interval + self.hash = hash + self.operatinginterval = interval + self.peers = peers + self.rawserver = rawserver + self.disallow = disallow + self.isdisallowed = isdisallowed + self.active = True + self.busy = False + self.errors = 0 + self.rejected = 0 + self.trackererror = False + self.peerlists = [] + + self.rerequester = Rerequester([[tracker]], interval, + rawserver.add_task, lambda: 0, peers, self.addtolist, + rawserver.add_task, lambda: 1, 0, 0, 0, '', + myid, hash, timeout, self.errorfunc, excfunc, peers, Event(), + lambda: 0, lambda: 0) + + if self.isactive(): + rawserver.add_task(self.refresh, randrange(int(self.interval/10), self.interval)) + # stagger announces + + def isactive(self): + if self.isdisallowed(self.tracker): # whoops! 
+ self.deactivate() + return self.active + + def deactivate(self): + self.active = False + + def refresh(self): + if not self.isactive(): + return + self.lastsuccessful = True + self.newpeerdata = [] + if DEBUG: + print 'contacting %s for info_hash=%s' % (self.tracker, quote(self.hash)) + self.rerequester.snoop(self.peers, self.callback) + + def callback(self): + self.busy = False + if self.lastsuccessful: + self.errors = 0 + self.rejected = 0 + if self.rerequester.announce_interval > (3*self.interval): + # I think I'm stripping from a regular tracker; boost the number of peers requested + self.peers = int(self.peers * (self.rerequester.announce_interval / self.interval)) + self.operatinginterval = self.rerequester.announce_interval + if DEBUG: + print ("%s with info_hash=%s returned %d peers" % + (self.tracker, quote(self.hash), len(self.newpeerdata))) + self.peerlists.append(self.newpeerdata) + self.peerlists = self.peerlists[-10:] # keep up to the last 10 announces + if self.isactive(): + self.rawserver.add_task(self.refresh, self.operatinginterval) + + def addtolist(self, peers): + for peer in peers: + self.newpeerdata.append((peer[1],peer[0][0],peer[0][1])) + + def errorfunc(self, r): + self.lastsuccessful = False + if DEBUG: + print "%s with info_hash=%s gives error: '%s'" % (self.tracker, quote(self.hash), r) + if r == self.rerequester.rejectedmessage + 'disallowed': # whoops! + if DEBUG: + print ' -- disallowed - deactivating' + self.deactivate() + self.disallow(self.tracker) # signal other torrents on this tracker + return + if r[:8].lower() == 'rejected': # tracker rejected this particular torrent + self.rejected += 1 + if self.rejected == 3: # rejected 3 times + if DEBUG: + print ' -- rejected 3 times - deactivating' + self.deactivate() + return + self.errors += 1 + if self.errors >= 3: # three or more errors in a row + self.operatinginterval += self.interval # lengthen the interval + if DEBUG: + print ' -- lengthening interval to '+str(self.operatinginterval)+' seconds' + + def harvest(self): + x = [] + for list in self.peerlists: + x += list + self.peerlists = [] + return x + + +class T2TList: + def __init__(self, enabled, trackerid, interval, maxpeers, timeout, rawserver): + self.enabled = enabled + self.trackerid = trackerid + self.interval = interval + self.maxpeers = maxpeers + self.timeout = timeout + self.rawserver = rawserver + self.list = {} + self.torrents = {} + self.disallowed = {} + self.oldtorrents = [] + + def parse(self, allowed_list): + if not self.enabled: + return + + # step 1: Create a new list with all tracker/torrent combinations in allowed_dir + newlist = {} + for hash, data in allowed_list.items(): + if data.has_key('announce-list'): + for tier in data['announce-list']: + for tracker in tier: + self.disallowed.setdefault(tracker, False) + newlist.setdefault(tracker, {}) + newlist[tracker][hash] = None # placeholder + + # step 2: Go through and copy old data to the new list. + # if the new list has no place for it, then it's old, so deactivate it + for tracker, hashdata in self.list.items(): + for hash, t2t in hashdata.items(): + if not newlist.has_key(tracker) or not newlist[tracker].has_key(hash): + t2t.deactivate() # this connection is no longer current + self.oldtorrents += [t2t] + # keep it referenced in case a thread comes along and tries to access. 
+ else: + newlist[tracker][hash] = t2t + if not newlist.has_key(tracker): + self.disallowed[tracker] = False # reset when no torrents on it left + + self.list = newlist + newtorrents = {} + + # step 3: If there are any entries that haven't been initialized yet, do so. + # At the same time, copy all entries onto the by-torrent list. + for tracker, hashdata in newlist.items(): + for hash, t2t in hashdata.items(): + if t2t is None: + hashdata[hash] = T2TConnection(self.trackerid, tracker, hash, + self.interval, self.maxpeers, self.timeout, + self.rawserver, self._disallow, self._isdisallowed) + newtorrents.setdefault(hash,[]) + newtorrents[hash] += [hashdata[hash]] + + self.torrents = newtorrents + + # structures: + # list = {tracker: {hash: T2TConnection, ...}, ...} + # torrents = {hash: [T2TConnection, ...]} + # disallowed = {tracker: flag, ...} + # oldtorrents = [T2TConnection, ...] + + def _disallow(self,tracker): + self.disallowed[tracker] = True + + def _isdisallowed(self,tracker): + return self.disallowed[tracker] + + def harvest(self,hash): + harvest = [] + if self.enabled: + for t2t in self.torrents[hash]: + harvest += t2t.harvest() + return harvest diff --git a/tribler-mod/Tribler/Core/BitTornado/BT1/Uploader.py b/tribler-mod/Tribler/Core/BitTornado/BT1/Uploader.py new file mode 100644 index 0000000..5759cab --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/BT1/Uploader.py @@ -0,0 +1,160 @@ +from time import localtime, strftime +# Written by Bram Cohen +# see LICENSE.txt for license information + +from Tribler.Core.BitTornado.CurrentRateMeasure import Measure + +import sys + +try: + True +except: + True = 1 + False = 0 + +class Upload: + def __init__(self, connection, ratelimiter, totalup, choker, storage, + picker, config): + self.connection = connection + self.ratelimiter = ratelimiter + self.totalup = totalup + self.choker = choker + self.storage = storage + self.picker = picker + self.config = config + self.max_slice_length = config['max_slice_length'] + self.choked = True + self.cleared = True + self.interested = False + self.super_seeding = False + self.buffer = [] + self.measure = Measure(config['max_rate_period'], config['upload_rate_fudge']) + self.was_ever_interested = False + if storage.get_amount_left() == 0: + if choker.super_seed: + self.super_seeding = True # flag, and don't send bitfield + self.seed_have_list = [] # set from piecepicker + self.skipped_count = 0 + else: + if config['breakup_seed_bitfield']: + bitfield, msgs = storage.get_have_list_cloaked() + connection.send_bitfield(bitfield) + for have in msgs: + connection.send_have(have) + else: + connection.send_bitfield(storage.get_have_list()) + else: + if storage.do_I_have_anything(): + connection.send_bitfield(storage.get_have_list()) + + self.piecedl = None + self.piecebuf = None + # Merkle + self.hashlist = [] + + def got_not_interested(self): + if self.interested: + self.interested = False + del self.buffer[:] + self.piecedl = None + if self.piecebuf: + self.piecebuf.release() + self.piecebuf = None + self.choker.not_interested(self.connection) + + def got_interested(self): + if not self.interested: + self.interested = True + self.was_ever_interested = True + self.choker.interested(self.connection) + + def get_upload_chunk(self): + if self.choked or not self.buffer: + return None + index, begin, length = self.buffer.pop(0) + if self.config['buffer_reads']: + if index != self.piecedl: + if self.piecebuf: + self.piecebuf.release() + self.piecedl = index + # Merkle + [ self.piecebuf, self.hashlist ] = 
self.storage.get_piece(index, 0, -1) + try: + piece = self.piecebuf[begin:begin+length] + assert len(piece) == length + except: # fails if storage.get_piece returns None or if out of range + self.connection.close() + return None + if begin == 0: + hashlist = self.hashlist + else: + hashlist = [] + else: + if self.piecebuf: + self.piecebuf.release() + self.piecedl = None + [piece, hashlist] = self.storage.get_piece(index, begin, length) + if piece is None: + self.connection.close() + return None + self.measure.update_rate(len(piece)) + self.totalup.update_rate(len(piece)) + + # BarterCast counter + self.connection.total_uploaded += length + + return (index, begin, hashlist, piece) + + def got_request(self, index, begin, length): + if ((self.super_seeding and not index in self.seed_have_list) + or (not self.connection.connection.is_coordinator_con() and not self.interested) + or length > self.max_slice_length): + self.connection.close() + return + if not self.cleared: + self.buffer.append((index, begin, length)) + if not self.choked and self.connection.next_upload is None: + self.ratelimiter.queue(self.connection) + + def got_cancel(self, index, begin, length): + try: + self.buffer.remove((index, begin, length)) + except ValueError: + pass + + def choke(self): + if not self.choked: + self.choked = True + self.connection.send_choke() + self.piecedl = None + if self.piecebuf: + self.piecebuf.release() + self.piecebuf = None + + def choke_sent(self): + del self.buffer[:] + self.cleared = True + + def unchoke(self): + if self.choked: + self.choked = False + self.cleared = False + self.connection.send_unchoke() + + def disconnected(self): + if self.piecebuf: + self.piecebuf.release() + self.piecebuf = None + + def is_choked(self): + return self.choked + + def is_interested(self): + return self.interested + + def has_queries(self): + return not self.choked and self.buffer + + def get_rate(self): + return self.measure.get_rate() + diff --git a/tribler-mod/Tribler/Core/BitTornado/BT1/Uploader.py.bak b/tribler-mod/Tribler/Core/BitTornado/BT1/Uploader.py.bak new file mode 100644 index 0000000..429b9ee --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/BT1/Uploader.py.bak @@ -0,0 +1,159 @@ +# Written by Bram Cohen +# see LICENSE.txt for license information + +from Tribler.Core.BitTornado.CurrentRateMeasure import Measure + +import sys + +try: + True +except: + True = 1 + False = 0 + +class Upload: + def __init__(self, connection, ratelimiter, totalup, choker, storage, + picker, config): + self.connection = connection + self.ratelimiter = ratelimiter + self.totalup = totalup + self.choker = choker + self.storage = storage + self.picker = picker + self.config = config + self.max_slice_length = config['max_slice_length'] + self.choked = True + self.cleared = True + self.interested = False + self.super_seeding = False + self.buffer = [] + self.measure = Measure(config['max_rate_period'], config['upload_rate_fudge']) + self.was_ever_interested = False + if storage.get_amount_left() == 0: + if choker.super_seed: + self.super_seeding = True # flag, and don't send bitfield + self.seed_have_list = [] # set from piecepicker + self.skipped_count = 0 + else: + if config['breakup_seed_bitfield']: + bitfield, msgs = storage.get_have_list_cloaked() + connection.send_bitfield(bitfield) + for have in msgs: + connection.send_have(have) + else: + connection.send_bitfield(storage.get_have_list()) + else: + if storage.do_I_have_anything(): + connection.send_bitfield(storage.get_have_list()) + + self.piecedl = None + 
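
get_upload_chunk above (identical in Uploader.py and its .bak copy) services requests from a per-piece buffer when 'buffer_reads' is enabled: the whole piece is read once, every request for that piece is answered by slicing the buffer, and for Merkle torrents the uncle-hash list travels only with the chunk that starts at offset 0. A rough sketch of just that slicing rule, with made-up data standing in for the Storage layer:

    def serve_request(piece_data, hashlist, begin, length):
        # Same decision get_upload_chunk makes once the piece is buffered:
        # out-of-range slices are an error, hashes only accompany offset 0.
        chunk = piece_data[begin:begin + length]
        if len(chunk) != length:
            return None                      # caller would close the connection
        if begin == 0:
            return chunk, hashlist
        return chunk, []

    piece = 'x' * 32768                      # stand-in for Storage.get_piece(index, 0, -1)
    hashes = [(0, '\x00' * 20)]              # stand-in for the Merkle uncle hashes
    assert serve_request(piece, hashes, 0, 16384)[1] == hashes
    assert serve_request(piece, hashes, 16384, 16384)[1] == []
    assert serve_request(piece, hashes, 30000, 16384) is None
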
self.piecebuf = None + # Merkle + self.hashlist = [] + + def got_not_interested(self): + if self.interested: + self.interested = False + del self.buffer[:] + self.piecedl = None + if self.piecebuf: + self.piecebuf.release() + self.piecebuf = None + self.choker.not_interested(self.connection) + + def got_interested(self): + if not self.interested: + self.interested = True + self.was_ever_interested = True + self.choker.interested(self.connection) + + def get_upload_chunk(self): + if self.choked or not self.buffer: + return None + index, begin, length = self.buffer.pop(0) + if self.config['buffer_reads']: + if index != self.piecedl: + if self.piecebuf: + self.piecebuf.release() + self.piecedl = index + # Merkle + [ self.piecebuf, self.hashlist ] = self.storage.get_piece(index, 0, -1) + try: + piece = self.piecebuf[begin:begin+length] + assert len(piece) == length + except: # fails if storage.get_piece returns None or if out of range + self.connection.close() + return None + if begin == 0: + hashlist = self.hashlist + else: + hashlist = [] + else: + if self.piecebuf: + self.piecebuf.release() + self.piecedl = None + [piece, hashlist] = self.storage.get_piece(index, begin, length) + if piece is None: + self.connection.close() + return None + self.measure.update_rate(len(piece)) + self.totalup.update_rate(len(piece)) + + # BarterCast counter + self.connection.total_uploaded += length + + return (index, begin, hashlist, piece) + + def got_request(self, index, begin, length): + if ((self.super_seeding and not index in self.seed_have_list) + or (not self.connection.connection.is_coordinator_con() and not self.interested) + or length > self.max_slice_length): + self.connection.close() + return + if not self.cleared: + self.buffer.append((index, begin, length)) + if not self.choked and self.connection.next_upload is None: + self.ratelimiter.queue(self.connection) + + def got_cancel(self, index, begin, length): + try: + self.buffer.remove((index, begin, length)) + except ValueError: + pass + + def choke(self): + if not self.choked: + self.choked = True + self.connection.send_choke() + self.piecedl = None + if self.piecebuf: + self.piecebuf.release() + self.piecebuf = None + + def choke_sent(self): + del self.buffer[:] + self.cleared = True + + def unchoke(self): + if self.choked: + self.choked = False + self.cleared = False + self.connection.send_unchoke() + + def disconnected(self): + if self.piecebuf: + self.piecebuf.release() + self.piecebuf = None + + def is_choked(self): + return self.choked + + def is_interested(self): + return self.interested + + def has_queries(self): + return not self.choked and self.buffer + + def get_rate(self): + return self.measure.get_rate() + diff --git a/tribler-mod/Tribler/Core/BitTornado/BT1/__init__.py b/tribler-mod/Tribler/Core/BitTornado/BT1/__init__.py new file mode 100644 index 0000000..d3c0048 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/BT1/__init__.py @@ -0,0 +1,3 @@ +from time import localtime, strftime +# Written by Bram Cohen +# see LICENSE.txt for license information diff --git a/tribler-mod/Tribler/Core/BitTornado/BT1/__init__.py.bak b/tribler-mod/Tribler/Core/BitTornado/BT1/__init__.py.bak new file mode 100644 index 0000000..1902f5a --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/BT1/__init__.py.bak @@ -0,0 +1,2 @@ +# Written by Bram Cohen +# see LICENSE.txt for license information diff --git a/tribler-mod/Tribler/Core/BitTornado/BT1/btformats.py b/tribler-mod/Tribler/Core/BitTornado/BT1/btformats.py new file mode 100644 index 
0000000..fa6e879 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/BT1/btformats.py @@ -0,0 +1,131 @@ +from time import localtime, strftime +# Written by Bram Cohen +# see LICENSE.txt for license information + +import sys +from types import UnicodeType, StringType, LongType, IntType, ListType, DictType +from re import compile + +#reg = compile(r'^[^/\\.~][^/\\]*$') +#reg = compile(r'^[^/\\]*$') + +ints = (LongType, IntType) + +def check_info(info): + if type(info) != DictType: + raise ValueError, 'bad metainfo - not a dictionary' + + if info.has_key('pieces'): + pieces = info.get('pieces') + if type(pieces) != StringType or len(pieces) % 20 != 0: + raise ValueError, 'bad metainfo - bad pieces key' + elif info.has_key('root hash'): + # Merkle + root_hash = info.get('root hash') + if type(root_hash) != StringType or len(root_hash) != 20: + raise ValueError, 'bad metainfo - bad root hash key' + piecelength = info.get('piece length') + if type(piecelength) not in ints or piecelength <= 0: + raise ValueError, 'bad metainfo - illegal piece length' + name = info.get('name') + if StringType != type(name) != UnicodeType: + raise ValueError, 'bad metainfo - bad name' + #if not reg.match(name): + # raise ValueError, 'name %s disallowed for security reasons' % name + if info.has_key('files') == info.has_key('length'): + raise ValueError, 'single/multiple file mix' + if info.has_key('length'): + length = info.get('length') + if type(length) not in ints or length < 0: + raise ValueError, 'bad metainfo - bad length' + else: + files = info.get('files') + if type(files) != ListType: + raise ValueError + for f in files: + if type(f) != DictType: + raise ValueError, 'bad metainfo - bad file value' + length = f.get('length') + if type(length) not in ints or length < 0: + raise ValueError, 'bad metainfo - bad length' + path = f.get('path') + if type(path) != ListType or path == []: + raise ValueError, 'bad metainfo - bad path' + for p in path: + if StringType != type(p) != UnicodeType: + raise ValueError, 'bad metainfo - bad path dir' + #if not reg.match(p): + # raise ValueError, 'path %s disallowed for security reasons' % p + for i in xrange(len(files)): + for j in xrange(i): + if files[i]['path'] == files[j]['path']: + raise ValueError, 'bad metainfo - duplicate path' + +def check_message(message): + if type(message) != DictType: + raise ValueError + check_info(message.get('info')) + if StringType != type(message.get('announce')) != UnicodeType: + raise ValueError + +def check_peers(message): + if type(message) != DictType: + raise ValueError + if message.has_key('failure reason'): + if type(message['failure reason']) != StringType: + raise ValueError + return + peers = message.get('peers') + if peers is not None: + if type(peers) == ListType: + for p in peers: + if type(p) != DictType: + raise ValueError + if type(p.get('ip')) != StringType: + raise ValueError + port = p.get('port') + if type(port) not in ints or p <= 0: + raise ValueError + if p.has_key('peer id'): + id = p['peer id'] + if type(id) != StringType or len(id) != 20: + raise ValueError + elif type(peers) != StringType or len(peers) % 6 != 0: + raise ValueError + + # IPv6 Tracker extension. 
http://www.bittorrent.org/beps/bep_0007.html + peers6 = message.get('peers6') + if peers6 is not None: + if type(peers6) == ListType: + for p in peers6: + if type(p) != DictType: + raise ValueError + if type(p.get('ip')) != StringType: + raise ValueError + port = p.get('port') + if type(port) not in ints or p <= 0: + raise ValueError + if p.has_key('peer id'): + id = p['peer id'] + if type(id) != StringType or len(id) != 20: + raise ValueError + elif type(peers6) != StringType or len(peers6) % 18 != 0: + raise ValueError + + interval = message.get('interval', 1) + if type(interval) not in ints or interval <= 0: + raise ValueError + minint = message.get('min interval', 1) + if type(minint) not in ints or minint <= 0: + raise ValueError + if type(message.get('tracker id', '')) != StringType: + raise ValueError + npeers = message.get('num peers', 0) + if type(npeers) not in ints or npeers < 0: + raise ValueError + dpeers = message.get('done peers', 0) + if type(dpeers) not in ints or dpeers < 0: + raise ValueError + last = message.get('last', 0) + if type(last) not in ints or last < 0: + raise ValueError diff --git a/tribler-mod/Tribler/Core/BitTornado/BT1/btformats.py.bak b/tribler-mod/Tribler/Core/BitTornado/BT1/btformats.py.bak new file mode 100644 index 0000000..7478791 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/BT1/btformats.py.bak @@ -0,0 +1,130 @@ +# Written by Bram Cohen +# see LICENSE.txt for license information + +import sys +from types import UnicodeType, StringType, LongType, IntType, ListType, DictType +from re import compile + +#reg = compile(r'^[^/\\.~][^/\\]*$') +#reg = compile(r'^[^/\\]*$') + +ints = (LongType, IntType) + +def check_info(info): + if type(info) != DictType: + raise ValueError, 'bad metainfo - not a dictionary' + + if info.has_key('pieces'): + pieces = info.get('pieces') + if type(pieces) != StringType or len(pieces) % 20 != 0: + raise ValueError, 'bad metainfo - bad pieces key' + elif info.has_key('root hash'): + # Merkle + root_hash = info.get('root hash') + if type(root_hash) != StringType or len(root_hash) != 20: + raise ValueError, 'bad metainfo - bad root hash key' + piecelength = info.get('piece length') + if type(piecelength) not in ints or piecelength <= 0: + raise ValueError, 'bad metainfo - illegal piece length' + name = info.get('name') + if StringType != type(name) != UnicodeType: + raise ValueError, 'bad metainfo - bad name' + #if not reg.match(name): + # raise ValueError, 'name %s disallowed for security reasons' % name + if info.has_key('files') == info.has_key('length'): + raise ValueError, 'single/multiple file mix' + if info.has_key('length'): + length = info.get('length') + if type(length) not in ints or length < 0: + raise ValueError, 'bad metainfo - bad length' + else: + files = info.get('files') + if type(files) != ListType: + raise ValueError + for f in files: + if type(f) != DictType: + raise ValueError, 'bad metainfo - bad file value' + length = f.get('length') + if type(length) not in ints or length < 0: + raise ValueError, 'bad metainfo - bad length' + path = f.get('path') + if type(path) != ListType or path == []: + raise ValueError, 'bad metainfo - bad path' + for p in path: + if StringType != type(p) != UnicodeType: + raise ValueError, 'bad metainfo - bad path dir' + #if not reg.match(p): + # raise ValueError, 'path %s disallowed for security reasons' % p + for i in xrange(len(files)): + for j in xrange(i): + if files[i]['path'] == files[j]['path']: + raise ValueError, 'bad metainfo - duplicate path' + +def 
check_message(message): + if type(message) != DictType: + raise ValueError + check_info(message.get('info')) + if StringType != type(message.get('announce')) != UnicodeType: + raise ValueError + +def check_peers(message): + if type(message) != DictType: + raise ValueError + if message.has_key('failure reason'): + if type(message['failure reason']) != StringType: + raise ValueError + return + peers = message.get('peers') + if peers is not None: + if type(peers) == ListType: + for p in peers: + if type(p) != DictType: + raise ValueError + if type(p.get('ip')) != StringType: + raise ValueError + port = p.get('port') + if type(port) not in ints or p <= 0: + raise ValueError + if p.has_key('peer id'): + id = p['peer id'] + if type(id) != StringType or len(id) != 20: + raise ValueError + elif type(peers) != StringType or len(peers) % 6 != 0: + raise ValueError + + # IPv6 Tracker extension. http://www.bittorrent.org/beps/bep_0007.html + peers6 = message.get('peers6') + if peers6 is not None: + if type(peers6) == ListType: + for p in peers6: + if type(p) != DictType: + raise ValueError + if type(p.get('ip')) != StringType: + raise ValueError + port = p.get('port') + if type(port) not in ints or p <= 0: + raise ValueError + if p.has_key('peer id'): + id = p['peer id'] + if type(id) != StringType or len(id) != 20: + raise ValueError + elif type(peers6) != StringType or len(peers6) % 18 != 0: + raise ValueError + + interval = message.get('interval', 1) + if type(interval) not in ints or interval <= 0: + raise ValueError + minint = message.get('min interval', 1) + if type(minint) not in ints or minint <= 0: + raise ValueError + if type(message.get('tracker id', '')) != StringType: + raise ValueError + npeers = message.get('num peers', 0) + if type(npeers) not in ints or npeers < 0: + raise ValueError + dpeers = message.get('done peers', 0) + if type(dpeers) not in ints or dpeers < 0: + raise ValueError + last = message.get('last', 0) + if type(last) not in ints or last < 0: + raise ValueError diff --git a/tribler-mod/Tribler/Core/BitTornado/BT1/convert.py b/tribler-mod/Tribler/Core/BitTornado/BT1/convert.py new file mode 100644 index 0000000..4cfbe41 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/BT1/convert.py @@ -0,0 +1,13 @@ +from time import localtime, strftime +# Written by Bram Cohen and Arno Bakker +# see LICENSE.txt for license information + +from binascii import b2a_hex + +def toint(s): + return long(b2a_hex(s), 16) + +def tobinary(i): + return (chr(i >> 24) + chr((i >> 16) & 0xFF) + + chr((i >> 8) & 0xFF) + chr(i & 0xFF)) + diff --git a/tribler-mod/Tribler/Core/BitTornado/BT1/convert.py.bak b/tribler-mod/Tribler/Core/BitTornado/BT1/convert.py.bak new file mode 100644 index 0000000..0f9a832 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/BT1/convert.py.bak @@ -0,0 +1,12 @@ +# Written by Bram Cohen and Arno Bakker +# see LICENSE.txt for license information + +from binascii import b2a_hex + +def toint(s): + return long(b2a_hex(s), 16) + +def tobinary(i): + return (chr(i >> 24) + chr((i >> 16) & 0xFF) + + chr((i >> 8) & 0xFF) + chr(i & 0xFF)) + diff --git a/tribler-mod/Tribler/Core/BitTornado/BT1/fakeopen.py b/tribler-mod/Tribler/Core/BitTornado/BT1/fakeopen.py new file mode 100644 index 0000000..9a6dabc --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/BT1/fakeopen.py @@ -0,0 +1,88 @@ +from time import localtime, strftime +# Written by Bram Cohen +# see LICENSE.txt for license information + +class FakeHandle: + def __init__(self, name, fakeopen): + self.name = name + 
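
The toint/tobinary pair in convert.py above packs and unpacks the 4-byte big-endian integers used throughout the wire protocol (message length prefixes, piece indices and offsets). Under Python 2, which is what this codebase targets, they are equivalent to struct's '>L' big-endian format for 32-bit values; a quick sanity check (restating the two helpers so the snippet stands alone):

    import struct
    from binascii import b2a_hex

    def toint(s):           # as in convert.py
        return long(b2a_hex(s), 16)

    def tobinary(i):        # as in convert.py
        return (chr(i >> 24) + chr((i >> 16) & 0xFF) +
                chr((i >> 8) & 0xFF) + chr(i & 0xFF))

    assert tobinary(16384) == struct.pack('>L', 16384) == '\x00\x00\x40\x00'
    assert toint('\x00\x00\x40\x00') == 16384
    assert toint(tobinary(4294967295L)) == 4294967295L   # full unsigned 32-bit range
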
self.fakeopen = fakeopen + self.pos = 0 + + def flush(self): + pass + + def close(self): + pass + + def seek(self, pos): + self.pos = pos + + def read(self, amount = None): + old = self.pos + f = self.fakeopen.files[self.name] + if self.pos >= len(f): + return '' + if amount is None: + self.pos = len(f) + return ''.join(f[old:]) + else: + self.pos = min(len(f), old + amount) + return ''.join(f[old:self.pos]) + + def write(self, s): + f = self.fakeopen.files[self.name] + while len(f) < self.pos: + f.append(chr(0)) + self.fakeopen.files[self.name][self.pos : self.pos + len(s)] = list(s) + self.pos += len(s) + +class FakeOpen: + def __init__(self, initial = {}): + self.files = {} + for key, value in initial.items(): + self.files[key] = list(value) + + def open(self, filename, mode): + """currently treats everything as rw - doesn't support append""" + self.files.setdefault(filename, []) + return FakeHandle(filename, self) + + def exists(self, file): + return self.files.has_key(file) + + def getsize(self, file): + return len(self.files[file]) + +def test_normal(): + f = FakeOpen({'f1': 'abcde'}) + assert f.exists('f1') + assert not f.exists('f2') + assert f.getsize('f1') == 5 + h = f.open('f1', 'rw') + assert h.read(3) == 'abc' + assert h.read(1) == 'd' + assert h.read() == 'e' + assert h.read(2) == '' + h.write('fpq') + h.seek(4) + assert h.read(2) == 'ef' + h.write('ghij') + h.seek(0) + assert h.read() == 'abcdefghij' + h.seek(2) + h.write('p') + h.write('q') + assert h.read(1) == 'e' + h.seek(1) + assert h.read(5) == 'bpqef' + + h2 = f.open('f2', 'rw') + assert h2.read() == '' + h2.write('mnop') + h2.seek(1) + assert h2.read() == 'nop' + + assert f.exists('f1') + assert f.exists('f2') + assert f.getsize('f1') == 10 + assert f.getsize('f2') == 4 diff --git a/tribler-mod/Tribler/Core/BitTornado/BT1/fakeopen.py.bak b/tribler-mod/Tribler/Core/BitTornado/BT1/fakeopen.py.bak new file mode 100644 index 0000000..659566a --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/BT1/fakeopen.py.bak @@ -0,0 +1,87 @@ +# Written by Bram Cohen +# see LICENSE.txt for license information + +class FakeHandle: + def __init__(self, name, fakeopen): + self.name = name + self.fakeopen = fakeopen + self.pos = 0 + + def flush(self): + pass + + def close(self): + pass + + def seek(self, pos): + self.pos = pos + + def read(self, amount = None): + old = self.pos + f = self.fakeopen.files[self.name] + if self.pos >= len(f): + return '' + if amount is None: + self.pos = len(f) + return ''.join(f[old:]) + else: + self.pos = min(len(f), old + amount) + return ''.join(f[old:self.pos]) + + def write(self, s): + f = self.fakeopen.files[self.name] + while len(f) < self.pos: + f.append(chr(0)) + self.fakeopen.files[self.name][self.pos : self.pos + len(s)] = list(s) + self.pos += len(s) + +class FakeOpen: + def __init__(self, initial = {}): + self.files = {} + for key, value in initial.items(): + self.files[key] = list(value) + + def open(self, filename, mode): + """currently treats everything as rw - doesn't support append""" + self.files.setdefault(filename, []) + return FakeHandle(filename, self) + + def exists(self, file): + return self.files.has_key(file) + + def getsize(self, file): + return len(self.files[file]) + +def test_normal(): + f = FakeOpen({'f1': 'abcde'}) + assert f.exists('f1') + assert not f.exists('f2') + assert f.getsize('f1') == 5 + h = f.open('f1', 'rw') + assert h.read(3) == 'abc' + assert h.read(1) == 'd' + assert h.read() == 'e' + assert h.read(2) == '' + h.write('fpq') + h.seek(4) + assert h.read(2) == 
'ef' + h.write('ghij') + h.seek(0) + assert h.read() == 'abcdefghij' + h.seek(2) + h.write('p') + h.write('q') + assert h.read(1) == 'e' + h.seek(1) + assert h.read(5) == 'bpqef' + + h2 = f.open('f2', 'rw') + assert h2.read() == '' + h2.write('mnop') + h2.seek(1) + assert h2.read() == 'nop' + + assert f.exists('f1') + assert f.exists('f2') + assert f.getsize('f1') == 10 + assert f.getsize('f2') == 4 diff --git a/tribler-mod/Tribler/Core/BitTornado/BT1/track.py b/tribler-mod/Tribler/Core/BitTornado/BT1/track.py new file mode 100644 index 0000000..0c19d54 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/BT1/track.py @@ -0,0 +1,1030 @@ +from time import localtime, strftime +# Written by Bram Cohen +# see LICENSE.txt for license information + +from Tribler.Core.simpledefs import * +from Tribler.Core.BitTornado.parseargs import parseargs, formatDefinitions +from Tribler.Core.BitTornado.RawServer import RawServer +from Tribler.Core.BitTornado.HTTPHandler import HTTPHandler, months +from Tribler.Core.BitTornado.parsedir import parsedir +from NatCheck import NatCheck +from T2T import T2TList +from Tribler.Core.BitTornado.subnetparse import IP_List, ipv6_to_ipv4, to_ipv4, is_valid_ip, is_ipv4 +from Tribler.Core.BitTornado.iprangeparse import IP_List as IP_Range_List +from Tribler.Core.BitTornado.torrentlistparse import parsetorrentlist +from threading import Event, Thread +from Tribler.Core.BitTornado.bencode import bencode, bdecode, Bencached +from Tribler.Core.BitTornado.zurllib import urlopen +from urllib import quote, unquote +from Filter import Filter +from urlparse import urlparse +from os.path import exists +from cStringIO import StringIO +from traceback import print_exc +from time import time, gmtime, strftime, localtime +from Tribler.Core.BitTornado.clock import clock +from random import shuffle, seed +from types import StringType, IntType, LongType, DictType +from binascii import b2a_hex +import sys, os +import signal +import re +import pickle +from Tribler.Core.BitTornado.__init__ import version_short, createPeerID +from Tribler.Core.simpledefs import TRIBLER_TORRENT_EXT + +try: + True +except: + True = 1 + False = 0 + +DEBUG=False + +from Tribler.Core.defaults import trackerdefaults + +defaults = [] +for k,v in trackerdefaults.iteritems(): + defaults.append((k,v,"See triblerAPI")) + + +def statefiletemplate(x): + if type(x) != DictType: + raise ValueError + for cname, cinfo in x.items(): + if cname == 'peers': + for y in cinfo.values(): # The 'peers' key is a dictionary of SHA hashes (torrent ids) + if type(y) != DictType: # ... for the active torrents, and each is a dictionary + raise ValueError + for id, info in y.items(): # ... of client ids interested in that torrent + if (len(id) != 20): + raise ValueError + if type(info) != DictType: # ... each of which is also a dictionary + raise ValueError # ... which has an IP, a Port, and a Bytes Left count for that client for that torrent + if type(info.get('ip', '')) != StringType: + raise ValueError + port = info.get('port') + if type(port) not in (IntType,LongType) or port < 0: + raise ValueError + left = info.get('left') + if type(left) not in (IntType,LongType) or left < 0: + raise ValueError + elif cname == 'completed': + if (type(cinfo) != DictType): # The 'completed' key is a dictionary of SHA hashes (torrent ids) + raise ValueError # ... for keeping track of the total completions per torrent + for y in cinfo.values(): # ... each torrent has an integer value + if type(y) not in (IntType,LongType): + raise ValueError # ... 
for the number of reported completions for that torrent + elif cname == 'allowed': + if (type(cinfo) != DictType): # a list of info_hashes and included data + raise ValueError + if x.has_key('allowed_dir_files'): + adlist = [z[1] for z in x['allowed_dir_files'].values()] + for y in cinfo.keys(): # and each should have a corresponding key here + if not y in adlist: + raise ValueError + elif cname == 'allowed_dir_files': + if (type(cinfo) != DictType): # a list of files, their attributes and info hashes + raise ValueError + dirkeys = {} + for y in cinfo.values(): # each entry should have a corresponding info_hash + if not y[1]: + continue + if not x['allowed'].has_key(y[1]): + raise ValueError + if dirkeys.has_key(y[1]): # and each should have a unique info_hash + raise ValueError + dirkeys[y[1]] = 1 + + +alas = 'your file may exist elsewhere in the universe\nbut alas, not here\n' + +local_IPs = IP_List() +local_IPs.set_intranet_addresses() + + +def isotime(secs = None): + if secs == None: + secs = time() + return strftime('%Y-%m-%d %H:%M UTC', gmtime(secs)) + +http_via_filter = re.compile(' for ([0-9.]+)\Z') + +def _get_forwarded_ip(headers): + if headers.has_key('http_x_forwarded_for'): + header = headers['http_x_forwarded_for'] + try: + x,y = header.split(',') + except: + return header + if not local_IPs.includes(x): + return x + return y + if headers.has_key('http_client_ip'): + return headers['http_client_ip'] + if headers.has_key('http_via'): + x = http_via_filter.search(headers['http_via']) + try: + return x.group(1) + except: + pass + if headers.has_key('http_from'): + return headers['http_from'] + return None + +def get_forwarded_ip(headers): + x = _get_forwarded_ip(headers) + if not is_valid_ip(x) or local_IPs.includes(x): + return None + return x + +def compact_peer_info(ip, port): + try: + s = ( ''.join([chr(int(i)) for i in ip.split('.')]) + + chr((port & 0xFF00) >> 8) + chr(port & 0xFF) ) + if len(s) != 6: + raise ValueError + except: + s = '' # not a valid IP, must be a domain name + return s + +def compact_ip(ip): + return ''.join([chr(int(i)) for i in ip.split('.')]) + +def decompact_ip(cip): + return '.'.join([str(ord(i)) for i in cip]) + + +class Tracker: + def __init__(self, config, rawserver): + self.config = config + self.response_size = config['tracker_response_size'] + self.dfile = config['tracker_dfile'] + self.natcheck = config['tracker_nat_check'] + favicon = config['tracker_favicon'] + self.parse_dir_interval = config['tracker_parse_dir_interval'] + self.favicon = None + if favicon: + try: + h = open(favicon,'rb') + self.favicon = h.read() + h.close() + except: + print "**warning** specified favicon file -- %s -- does not exist." 
% favicon + self.rawserver = rawserver + self.cached = {} # format: infohash: [[time1, l1, s1], [time2, l2, s2], [time3, l3, s3]] + self.cached_t = {} # format: infohash: [time, cache] + self.times = {} + self.state = {} + self.seedcount = {} + + self.allowed_IPs = None + self.banned_IPs = None + if config['tracker_allowed_ips'] or config['tracker_banned_ips']: + self.allowed_ip_mtime = 0 + self.banned_ip_mtime = 0 + self.read_ip_lists() + + self.only_local_override_ip = config['tracker_only_local_override_ip'] + if self.only_local_override_ip == 2: + self.only_local_override_ip = not config['tracker_nat_check'] + + if exists(self.dfile): + try: + h = open(self.dfile, 'rb') + if self.config['tracker_dfile_format'] == ITRACKDBFORMAT_BENCODE: + ds = h.read() + tempstate = bdecode(ds) + else: + tempstate = pickle.load(h) + h.close() + if not tempstate.has_key('peers'): + tempstate = {'peers': tempstate} + statefiletemplate(tempstate) + self.state = tempstate + except: + print '**warning** statefile '+self.dfile+' corrupt; resetting' + self.downloads = self.state.setdefault('peers', {}) + self.completed = self.state.setdefault('completed', {}) + + self.becache = {} # format: infohash: [[l1, s1], [l2, s2], [l3, s3]] + for infohash, ds in self.downloads.items(): + self.seedcount[infohash] = 0 + for x,y in ds.items(): + ip = y['ip'] + if ( (self.allowed_IPs and not self.allowed_IPs.includes(ip)) + or (self.banned_IPs and self.banned_IPs.includes(ip)) ): + del ds[x] + continue + if not y['left']: + self.seedcount[infohash] += 1 + if y.get('nat',-1): + continue + gip = y.get('given_ip') + if is_valid_ip(gip) and ( + not self.only_local_override_ip or local_IPs.includes(ip) ): + ip = gip + self.natcheckOK(infohash,x,ip,y['port'],y['left']) + + for x in self.downloads.keys(): + self.times[x] = {} + for y in self.downloads[x].keys(): + self.times[x][y] = 0 + + self.trackerid = createPeerID('-T-') + seed(self.trackerid) + + self.reannounce_interval = config['tracker_reannounce_interval'] + self.save_dfile_interval = config['tracker_save_dfile_interval'] + self.show_names = config['tracker_show_names'] + rawserver.add_task(self.save_state, self.save_dfile_interval) + self.prevtime = clock() + self.timeout_downloaders_interval = config['tracker_timeout_downloaders_interval'] + rawserver.add_task(self.expire_downloaders, self.timeout_downloaders_interval) + self.logfile = None + self.log = None + if (config['tracker_logfile']) and (config['tracker_logfile'] != '-'): + try: + self.logfile = config['tracker_logfile'] + self.log = open(self.logfile,'a') + sys.stdout = self.log + print "# Log Started: ", isotime() + except: + print "**warning** could not redirect stdout to log file: ", sys.exc_info()[0] + + if config['tracker_hupmonitor']: + def huphandler(signum, frame, self = self): + try: + self.log.close() + self.log = open(self.logfile,'a') + sys.stdout = self.log + print "# Log reopened: ", isotime() + except: + print "**warning** could not reopen logfile" + + signal.signal(signal.SIGHUP, huphandler) + + self.allow_get = config['tracker_allow_get'] + + self.t2tlist = T2TList(config['tracker_multitracker_enabled'], self.trackerid, + config['tracker_multitracker_reannounce_interval'], + config['tracker_multitracker_maxpeers'], config['tracker_multitracker_http_timeout'], + self.rawserver) + + if config['tracker_allowed_list']: + if config['tracker_allowed_dir']: + print '**warning** allowed_dir and allowed_list options cannot be used together' + print '**warning** disregarding allowed_dir' + 
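
compact_peer_info above builds the 6-byte entries used for compact announce responses: the four raw octets of a dotted-quad IP followed by the port in big-endian byte order, with anything that is not an IPv4 address (e.g. a domain name) collapsing to the empty string. A small round-trip check under Python 2; decompact_peer_info is an illustrative inverse, not something the patch defines:

    def compact_peer_info(ip, port):          # as defined in track.py above
        try:
            s = (''.join([chr(int(i)) for i in ip.split('.')])
                 + chr((port & 0xFF00) >> 8) + chr(port & 0xFF))
            if len(s) != 6:
                raise ValueError
        except:
            s = ''                            # not a valid IP, must be a domain name
        return s

    def decompact_peer_info(s):               # hypothetical inverse, for illustration only
        ip = '.'.join([str(ord(c)) for c in s[:4]])
        port = (ord(s[4]) << 8) | ord(s[5])
        return ip, port

    entry = compact_peer_info('130.161.158.51', 6881)
    assert len(entry) == 6
    assert decompact_peer_info(entry) == ('130.161.158.51', 6881)
    assert compact_peer_info('tracker.example.org', 6881) == ''
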
config['tracker_allowed_dir'] = '' + self.allowed = self.state.setdefault('allowed_list',{}) + self.allowed_list_mtime = 0 + self.parse_allowed() + self.remove_from_state('allowed','allowed_dir_files') + if config['tracker_multitracker_allowed'] == ITRACKMULTI_ALLOW_AUTODETECT: + config['tracker_multitracker_allowed'] = ITRACKMULTI_ALLOW_NONE + config['tracker_allowed_controls'] = 0 + + elif config['tracker_allowed_dir']: + self.allowed = self.state.setdefault('allowed',{}) + self.allowed_dir_files = self.state.setdefault('allowed_dir_files',{}) + self.allowed_dir_blocked = {} + self.parse_allowed() + self.remove_from_state('allowed_list') + + else: + self.allowed = None + self.remove_from_state('allowed','allowed_dir_files', 'allowed_list') + if config['tracker_multitracker_allowed'] == ITRACKMULTI_ALLOW_AUTODETECT: + config['tracker_multitracker_allowed'] = ITRACKMULTI_ALLOW_NONE + config['tracker_allowed_controls'] = 0 + + self.uq_broken = unquote('+') != ' ' + self.keep_dead = config['tracker_keep_dead'] + self.Filter = Filter(rawserver.add_task) + + aggregator = config['tracker_aggregator'] + if aggregator == 0: + self.is_aggregator = False + self.aggregator_key = None + else: + self.is_aggregator = True + if aggregator == 1: + self.aggregator_key = None + else: + self.aggregator_key = aggregator + self.natcheck = False + + send = config['tracker_aggregate_forward'] + if not send: + self.aggregate_forward = None + else: + try: + self.aggregate_forward, self.aggregate_password = send + except: + self.aggregate_forward = send + self.aggregate_password = None + + self.cachetime = 0 + self.cachetimeupdate() + + def cachetimeupdate(self): + self.cachetime += 1 # raw clock, but more efficient for cache + self.rawserver.add_task(self.cachetimeupdate,1) + + def aggregate_senddata(self, query): + url = self.aggregate_forward+'?'+query + if self.aggregate_password is not None: + url += '&password='+self.aggregate_password + rq = Thread(target = self._aggregate_senddata, args = [url]) + rq.setName( "AggregateSendData"+rq.getName() ) + rq.setDaemon(True) + rq.start() + + def _aggregate_senddata(self, url): # just send, don't attempt to error check, + try: # discard any returned data + h = urlopen(url) + h.read() + h.close() + except: + return + + + def get_infopage(self): + try: + if not self.config['tracker_show_infopage']: + return (404, 'Not Found', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, alas) + red = self.config['tracker_infopage_redirect'] + if red: + return (302, 'Found', {'Content-Type': 'text/html', 'Location': red}, + 'Click Here') + + s = StringIO() + s.write('\n' \ + 'Tribler Tracker Statistics\n') + if self.favicon is not None: + s.write('\n') + s.write('\n\n' \ + '

Tribler Tracker Statistics\n')
+            if self.config['tracker_allowed_dir']:
+                if self.show_names:
+                    names = [ (self.allowed[hash]['name'],hash)
+                              for hash in self.allowed.keys() ]
+                else:
+                    names = [ (None,hash)
+                              for hash in self.allowed.keys() ]
+            else:
+                names = [ (None,hash) for hash in self.downloads.keys() ]
+            if not names:
+                s.write('Not tracking any files yet...\n')
+            else:
+                names.sort()
+                tn = 0
+                tc = 0
+                td = 0
+                tt = 0  # Total transferred
+                ts = 0  # Total size
+                nf = 0  # Number of files displayed
+                if self.config['tracker_allowed_dir'] and self.show_names:
+                    s.write('\n' \
+                        'info hash torrent name size complete downloading downloaded transferred\n')
+                else:
+                    s.write('\n' \
+                        'info hash complete downloading downloaded\n')
+                for name,hash in names:
+                    l = self.downloads[hash]
+                    n = self.completed.get(hash, 0)
+                    tn = tn + n
+                    c = self.seedcount[hash]
+                    tc = tc + c
+                    d = len(l) - c
+                    td = td + d
+                    if self.config['tracker_allowed_dir'] and self.show_names:
+                        if self.allowed.has_key(hash):
+                            nf = nf + 1
+                            sz = self.allowed[hash]['length'] # size
+                            ts = ts + sz
+                            szt = sz * n # Transferred for this torrent
+                            tt = tt + szt
+                            if self.allow_get == 1:
+                                linkname = '' + name + ''
+                            else:
+                                linkname = name
+                            s.write('%s%s%s%i%i%i%s\n' \
+                                % (b2a_hex(hash), linkname, size_format(sz), c, d, n, size_format(szt)))
+                    else:
+                        s.write('%s%i%i%i\n' \
+                            % (b2a_hex(hash), c, d, n))
+                ttn = 0
+                for i in self.completed.values():
+                    ttn = ttn + i
+                if self.config['tracker_allowed_dir'] and self.show_names:
+                    s.write('%i files%s%i%i%i/%i%s\n'
+                        % (nf, size_format(ts), tc, td, tn, ttn, size_format(tt)))
+                else:
+                    s.write('%i files%i%i%i/%i\n'
+                        % (nf, tc, td, tn, ttn))
+                s.write('\n' \
+                    '\n' \
+                    '\n'
+                    '%s (%s)
\n' % (version_short, isotime())) + + + s.write('\n' \ + '\n') + return (200, 'OK', {'Content-Type': 'text/html; charset=iso-8859-1'}, s.getvalue()) + except: + print_exc() + return (500, 'Internal Server Error', {'Content-Type': 'text/html; charset=iso-8859-1'}, 'Server Error') + + + def scrapedata(self, hash, return_name = True): + l = self.downloads[hash] + n = self.completed.get(hash, 0) + c = self.seedcount[hash] + d = len(l) - c + f = {'complete': c, 'incomplete': d, 'downloaded': n} + if return_name and self.show_names and self.config['tracker_allowed_dir']: + f['name'] = self.allowed[hash]['name'] + return (f) + + def get_scrape(self, paramslist): + fs = {} + if paramslist.has_key('info_hash'): + if self.config['tracker_scrape_allowed'] not in [ITRACKSCRAPE_ALLOW_SPECIFIC,ITRACKSCRAPE_ALLOW_FULL]: + return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, + bencode({'failure reason': + 'specific scrape function is not available with this tracker.'})) + for hash in paramslist['info_hash']: + if self.allowed is not None: + if self.allowed.has_key(hash): + fs[hash] = self.scrapedata(hash) + else: + if self.downloads.has_key(hash): + fs[hash] = self.scrapedata(hash) + else: + if self.config['tracker_scrape_allowed'] != ITRACKSCRAPE_ALLOW_FULL: + return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, + bencode({'failure reason': + 'full scrape function is not available with this tracker.'})) + if self.allowed is not None: + keys = self.allowed.keys() + else: + keys = self.downloads.keys() + for hash in keys: + fs[hash] = self.scrapedata(hash) + + return (200, 'OK', {'Content-Type': 'text/plain'}, bencode({'files': fs})) + + + def get_file(self, hash): + if not self.allow_get: + return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, + 'get function is not available with this tracker.') + if not self.allowed.has_key(hash): + return (404, 'Not Found', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, alas) + fname = self.allowed[hash]['file'] + fpath = self.allowed[hash]['path'] + return (200, 'OK', {'Content-Type': 'application/x-bittorrent', + 'Content-Disposition': 'attachment; filename=' + fname}, + open(fpath, 'rb').read()) + + + def check_allowed(self, infohash, paramslist): + if ( self.aggregator_key is not None + and not ( paramslist.has_key('password') + and paramslist['password'][0] == self.aggregator_key ) ): + return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, + bencode({'failure reason': + 'Requested download is not authorized for use with this tracker.'})) + + if self.allowed is not None: + if not self.allowed.has_key(infohash): + return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, + bencode({'failure reason': + 'Requested download is not authorized for use with this tracker.'})) + if self.config['tracker_allowed_controls']: + if self.allowed[infohash].has_key('failure reason'): + return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, + bencode({'failure reason': self.allowed[infohash]['failure reason']})) + + if paramslist.has_key('tracker'): + if ( self.config['tracker_multitracker_allowed'] == ITRACKMULTI_ALLOW_NONE or # turned off + paramslist['peer_id'][0] == self.trackerid ): # oops! 
contacted myself + return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, + bencode({'failure reason': 'disallowed'})) + + if ( self.config['tracker_multitracker_allowed'] == ITRACKMULTI_ALLOW_AUTODETECT + and not self.allowed[infohash].has_key('announce-list') ): + return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, + bencode({'failure reason': + 'Requested download is not authorized for multitracker use.'})) + + return None + + + def add_data(self, infohash, event, ip, paramslist): + peers = self.downloads.setdefault(infohash, {}) + ts = self.times.setdefault(infohash, {}) + self.completed.setdefault(infohash, 0) + self.seedcount.setdefault(infohash, 0) + + def params(key, default = None, l = paramslist): + if l.has_key(key): + return l[key][0] + return default + + myid = params('peer_id','') + if len(myid) != 20: + raise ValueError, 'id not of length 20' + if event not in ['started', 'completed', 'stopped', 'snooped', None]: + raise ValueError, 'invalid event' + port = long(params('port','')) + if port < 0 or port > 65535: + raise ValueError, 'invalid port' + left = long(params('left','')) + if left < 0: + raise ValueError, 'invalid amount left' + uploaded = long(params('uploaded','')) + downloaded = long(params('downloaded','')) + + peer = peers.get(myid) + islocal = local_IPs.includes(ip) + mykey = params('key') + if peer: + auth = peer.get('key',-1) == mykey or peer.get('ip') == ip + + gip = params('ip') + if is_valid_ip(gip) and (islocal or not self.only_local_override_ip): + ip1 = gip + else: + ip1 = ip + + if params('numwant') is not None: + rsize = min(int(params('numwant')),self.response_size) + else: + rsize = self.response_size + + if event == 'stopped': + if peer: + if auth: + self.delete_peer(infohash,myid) + + elif not peer: + ts[myid] = clock() + peer = {'ip': ip, 'port': port, 'left': left} + if mykey: + peer['key'] = mykey + if gip: + peer['given ip'] = gip + if port: + if not self.natcheck or islocal: + peer['nat'] = 0 + self.natcheckOK(infohash,myid,ip1,port,left) + else: + NatCheck(self.connectback_result,infohash,myid,ip1,port,self.rawserver) + else: + peer['nat'] = 2**30 + if event == 'completed': + self.completed[infohash] += 1 + if not left: + self.seedcount[infohash] += 1 + + peers[myid] = peer + + else: + if not auth: + return rsize # return w/o changing stats + + ts[myid] = clock() + if not left and peer['left']: + self.completed[infohash] += 1 + self.seedcount[infohash] += 1 + if not peer.get('nat', -1): + for bc in self.becache[infohash]: + bc[1][myid] = bc[0][myid] + del bc[0][myid] + if peer['left']: + peer['left'] = left + + if port: + recheck = False + if ip != peer['ip']: + peer['ip'] = ip + recheck = True + if gip != peer.get('given ip'): + if gip: + peer['given ip'] = gip + elif peer.has_key('given ip'): + del peer['given ip'] + recheck = True + + natted = peer.get('nat', -1) + if recheck: + if natted == 0: + l = self.becache[infohash] + y = not peer['left'] + for x in l: + del x[y][myid] + if not self.natcheck or islocal: + del peer['nat'] # restart NAT testing + if natted and natted < self.natcheck: + recheck = True + + if recheck: + if not self.natcheck or islocal: + peer['nat'] = 0 + self.natcheckOK(infohash,myid,ip1,port,left) + else: + NatCheck(self.connectback_result,infohash,myid,ip1,port,self.rawserver) + + return rsize + + + def peerlist(self, infohash, stopped, tracker, is_seed, return_type, rsize): + data = {} # return data + seeds = self.seedcount[infohash] + data['complete'] = 
seeds + data['incomplete'] = len(self.downloads[infohash]) - seeds + + if ( self.config['tracker_allowed_controls'] + and self.allowed[infohash].has_key('warning message') ): + data['warning message'] = self.allowed[infohash]['warning message'] + + if tracker: + data['interval'] = self.config['tracker_multitracker_reannounce_interval'] + if not rsize: + return data + cache = self.cached_t.setdefault(infohash, None) + if ( not cache or len(cache[1]) < rsize + or cache[0] + self.config['tracker_min_time_between_cache_refreshes'] < clock() ): + bc = self.becache.setdefault(infohash,[[{}, {}], [{}, {}], [{}, {}]]) + cache = [ clock(), bc[0][0].values() + bc[0][1].values() ] + self.cached_t[infohash] = cache + shuffle(cache[1]) + cache = cache[1] + + data['peers'] = cache[-rsize:] + del cache[-rsize:] + return data + + data['interval'] = self.reannounce_interval + if stopped or not rsize: # save some bandwidth + data['peers'] = [] + return data + + bc = self.becache.setdefault(infohash,[[{}, {}], [{}, {}], [{}, {}]]) + len_l = len(bc[0][0]) + len_s = len(bc[0][1]) + if not (len_l+len_s): # caches are empty! + data['peers'] = [] + return data + l_get_size = int(float(rsize)*(len_l)/(len_l+len_s)) + cache = self.cached.setdefault(infohash,[None,None,None])[return_type] + if cache and ( not cache[1] + or (is_seed and len(cache[1]) < rsize) + or len(cache[1]) < l_get_size + or cache[0]+self.config['tracker_min_time_between_cache_refreshes'] < self.cachetime ): + cache = None + if not cache: + peers = self.downloads[infohash] + vv = [[],[],[]] + for key, ip, port in self.t2tlist.harvest(infohash): # empty if disabled + if not peers.has_key(key): + vv[0].append({'ip': ip, 'port': port, 'peer id': key}) + vv[1].append({'ip': ip, 'port': port}) + vv[2].append(compact_peer_info(ip, port)) + cache = [ self.cachetime, + bc[return_type][0].values()+vv[return_type], + bc[return_type][1].values() ] + shuffle(cache[1]) + shuffle(cache[2]) + self.cached[infohash][return_type] = cache + for rr in xrange(len(self.cached[infohash])): + if rr != return_type: + try: + self.cached[infohash][rr][1].extend(vv[rr]) + except: + pass + if len(cache[1]) < l_get_size: + peerdata = cache[1] + if not is_seed: + peerdata.extend(cache[2]) + cache[1] = [] + cache[2] = [] + else: + if not is_seed: + peerdata = cache[2][l_get_size-rsize:] + del cache[2][l_get_size-rsize:] + rsize -= len(peerdata) + else: + peerdata = [] + if rsize: + peerdata.extend(cache[1][-rsize:]) + del cache[1][-rsize:] + if return_type == 2: + peerdata = ''.join(peerdata) + data['peers'] = peerdata + return data + + + def get(self, connection, path, headers): + real_ip = connection.get_ip() + ip = real_ip + if is_ipv4(ip): + ipv4 = True + else: + try: + ip = ipv6_to_ipv4(ip) + ipv4 = True + except ValueError: + ipv4 = False + + if ( (self.allowed_IPs and not self.allowed_IPs.includes(ip)) + or (self.banned_IPs and self.banned_IPs.includes(ip)) ): + return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, + bencode({'failure reason': + 'your IP is not allowed on this tracker'})) + + nip = get_forwarded_ip(headers) + if nip and not self.only_local_override_ip: + ip = nip + try: + ip = to_ipv4(ip) + ipv4 = True + except ValueError: + ipv4 = False + + paramslist = {} + def params(key, default = None, l = paramslist): + if l.has_key(key): + return l[key][0] + return default + + try: + (scheme, netloc, path, pars, query, fragment) = urlparse(path) + if self.uq_broken == 1: + path = path.replace('+',' ') + query = query.replace('+',' ') 
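
The parsing loop that follows builds paramslist as a dict of lists, so a key repeated in the query string (as happens with multiple info_hash values on a scrape) keeps every value, while the local params() helper simply returns the first one. A standalone sketch of the same parsing applied to a made-up announce query (Python 2 urllib, as used in this module):

    from urllib import unquote

    def parse_query(query):
        # Same shape as the loop below: split on '&', split each pair on the
        # first '=', unquote both halves, and accumulate values per key.
        paramslist = {}
        for s in query.split('&'):
            if s:
                i = s.index('=')
                kw = unquote(s[:i])
                paramslist.setdefault(kw, [])
                paramslist[kw] += [unquote(s[i + 1:])]
        return paramslist

    q = 'info_hash=%aa%bb&port=6881&uploaded=0&downloaded=0&left=0&compact=1'
    p = parse_query(q)
    assert p['port'] == ['6881']
    assert p['info_hash'] == ['\xaa\xbb']    # percent-escapes unquote to raw bytes
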
+ path = unquote(path)[1:] + for s in query.split('&'): + if s: + i = s.index('=') + kw = unquote(s[:i]) + paramslist.setdefault(kw, []) + paramslist[kw] += [unquote(s[i+1:])] + + if path == '' or path == 'index.html': + return self.get_infopage() + if (path == 'file'): + return self.get_file(params('info_hash')) + if path == 'favicon.ico' and self.favicon is not None: + return (200, 'OK', {'Content-Type' : 'image/x-icon'}, self.favicon) + + # automated access from here on + + if path == 'scrape': + return self.get_scrape(paramslist) + + if path != 'announce': + return (404, 'Not Found', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, alas) + + # main tracker function + + filtered = self.Filter.check(real_ip, paramslist, headers) + if filtered: + return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, + bencode({'failure reason': filtered})) + + infohash = params('info_hash') + if not infohash: + raise ValueError, 'no info hash' + + notallowed = self.check_allowed(infohash, paramslist) + if notallowed: + return notallowed + + event = params('event') + + rsize = self.add_data(infohash, event, ip, paramslist) + + except ValueError, e: + print_exc() + return (400, 'Bad Request', {'Content-Type': 'text/plain'}, + 'you sent me garbage - ' + str(e)) + + if self.aggregate_forward and not paramslist.has_key('tracker'): + self.aggregate_senddata(query) + + if self.is_aggregator: # don't return peer data here + return (200, 'OK', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, + bencode({'response': 'OK'})) + + if params('compact') and ipv4: + return_type = 2 + elif params('no_peer_id'): + return_type = 1 + else: + return_type = 0 + + data = self.peerlist(infohash, event=='stopped', + params('tracker'), not params('left'), + return_type, rsize) + + if paramslist.has_key('scrape'): + data['scrape'] = self.scrapedata(infohash, False) + + return (200, 'OK', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, bencode(data)) + + + def natcheckOK(self, infohash, peerid, ip, port, not_seed): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tracker: natcheck: Recorded succes" + bc = self.becache.setdefault(infohash,[[{}, {}], [{}, {}], [{}, {}]]) + bc[0][not not_seed][peerid] = Bencached(bencode({'ip': ip, 'port': port, + 'peer id': peerid})) + bc[1][not not_seed][peerid] = Bencached(bencode({'ip': ip, 'port': port})) + bc[2][not not_seed][peerid] = compact_peer_info(ip, port) + + + def natchecklog(self, peerid, ip, port, result): + year, month, day, hour, minute, second, a, b, c = localtime(time()) + print '%s - %s [%02d/%3s/%04d:%02d:%02d:%02d] "!natcheck-%s:%i" %i 0 - -' % ( + ip, quote(peerid), day, months[month], year, hour, minute, second, + ip, port, result) + + def connectback_result(self, result, downloadid, peerid, ip, port): + record = self.downloads.get(downloadid, {}).get(peerid) + if ( record is None + or (record['ip'] != ip and record.get('given ip') != ip) + or record['port'] != port ): + if self.config['tracker_log_nat_checks']: + self.natchecklog(peerid, ip, port, 404) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tracker: natcheck: No record found for tested peer" + return + if self.config['tracker_log_nat_checks']: + if result: + x = 200 + else: + x = 503 + self.natchecklog(peerid, ip, port, x) + if not record.has_key('nat'): + record['nat'] = int(not result) + if result: + self.natcheckOK(downloadid,peerid,ip,port,record['left']) + elif result and record['nat']: + 
record['nat'] = 0 + self.natcheckOK(downloadid,peerid,ip,port,record['left']) + elif not result: + record['nat'] += 1 + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tracker: natcheck: Recorded failed attempt" + + + def remove_from_state(self, *l): + for s in l: + try: + del self.state[s] + except: + pass + + def save_state(self): + self.rawserver.add_task(self.save_state, self.save_dfile_interval) + h = open(self.dfile, 'wb') + if self.config['tracker_dfile_format'] == ITRACKDBFORMAT_BENCODE: + h.write(bencode(self.state)) + else: + pickle.dump(self.state,h,-1) + h.close() + + + def parse_allowed(self,source=None): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tracker: parse_allowed: Source is",source + + if source is None: + self.rawserver.add_task(self.parse_allowed, self.parse_dir_interval) + + if self.config['tracker_allowed_dir']: + r = parsedir( self.config['tracker_allowed_dir'], self.allowed, + self.allowed_dir_files, self.allowed_dir_blocked, + [".torrent",TRIBLER_TORRENT_EXT] ) + ( self.allowed, self.allowed_dir_files, self.allowed_dir_blocked, + added, garbage2 ) = r + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tracker: parse_allowed: Found new",`added` + + self.state['allowed'] = self.allowed + self.state['allowed_dir_files'] = self.allowed_dir_files + + self.t2tlist.parse(self.allowed) + + else: + f = self.config['tracker_allowed_list'] + if self.allowed_list_mtime == os.path.getmtime(f): + return + try: + r = parsetorrentlist(f, self.allowed) + (self.allowed, added, garbage2) = r + self.state['allowed_list'] = self.allowed + except (IOError, OSError): + print '**warning** unable to read allowed torrent list' + return + self.allowed_list_mtime = os.path.getmtime(f) + + for infohash in added.keys(): + self.downloads.setdefault(infohash, {}) + self.completed.setdefault(infohash, 0) + self.seedcount.setdefault(infohash, 0) + + + def read_ip_lists(self): + self.rawserver.add_task(self.read_ip_lists,self.parse_dir_interval) + + f = self.config['tracker_allowed_ips'] + if f and self.allowed_ip_mtime != os.path.getmtime(f): + self.allowed_IPs = IP_List() + try: + self.allowed_IPs.read_fieldlist(f) + self.allowed_ip_mtime = os.path.getmtime(f) + except (IOError, OSError): + print '**warning** unable to read allowed_IP list' + + f = self.config['tracker_banned_ips'] + if f and self.banned_ip_mtime != os.path.getmtime(f): + self.banned_IPs = IP_Range_List() + try: + self.banned_IPs.read_rangelist(f) + self.banned_ip_mtime = os.path.getmtime(f) + except (IOError, OSError): + print '**warning** unable to read banned_IP list' + + + def delete_peer(self, infohash, peerid): + dls = self.downloads[infohash] + peer = dls[peerid] + if not peer['left']: + self.seedcount[infohash] -= 1 + if not peer.get('nat',-1): + l = self.becache[infohash] + y = not peer['left'] + for x in l: + del x[y][peerid] + del self.times[infohash][peerid] + del dls[peerid] + + def expire_downloaders(self): + for x in self.times.keys(): + for myid, t in self.times[x].items(): + if t < self.prevtime: + self.delete_peer(x,myid) + self.prevtime = clock() + if (self.keep_dead != 1): + for key, value in self.downloads.items(): + if len(value) == 0 and ( + self.allowed is None or not self.allowed.has_key(key) ): + del self.times[key] + del self.downloads[key] + del self.seedcount[key] + self.rawserver.add_task(self.expire_downloaders, self.timeout_downloaders_interval) + + +def track(args): + if not args: + print 
formatDefinitions(defaults, 80) + return + try: + config, files = parseargs(args, defaults, 0, 0) + except ValueError, e: + print 'error: ' + str(e) + print 'run with no arguments for parameter explanations' + return + r = RawServer(Event(), config['tracker_timeout_check_interval'], + config['tracker_socket_timeout'], ipv6_enable = config['ipv6_enabled']) + t = Tracker(config, r) + r.bind(config['minport'], config['bind'], + reuse = True, ipv6_socket_style = config['ipv6_binds_v4']) + r.listen_forever(HTTPHandler(t.get, config['min_time_between_log_flushes'])) + t.save_state() + print '# Shutting down: ' + isotime() + +def size_format(s): + if (s < 1024): + r = str(s) + 'B' + elif (s < 1048576): + r = str(int(s/1024)) + 'KiB' + elif (s < 1073741824L): + r = str(int(s/1048576)) + 'MiB' + elif (s < 1099511627776L): + r = str(int((s/1073741824.0)*100.0)/100.0) + 'GiB' + else: + r = str(int((s/1099511627776.0)*100.0)/100.0) + 'TiB' + return(r) + diff --git a/tribler-mod/Tribler/Core/BitTornado/BT1/track.py.bak b/tribler-mod/Tribler/Core/BitTornado/BT1/track.py.bak new file mode 100644 index 0000000..121d140 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/BT1/track.py.bak @@ -0,0 +1,1029 @@ +# Written by Bram Cohen +# see LICENSE.txt for license information + +from Tribler.Core.simpledefs import * +from Tribler.Core.BitTornado.parseargs import parseargs, formatDefinitions +from Tribler.Core.BitTornado.RawServer import RawServer +from Tribler.Core.BitTornado.HTTPHandler import HTTPHandler, months +from Tribler.Core.BitTornado.parsedir import parsedir +from NatCheck import NatCheck +from T2T import T2TList +from Tribler.Core.BitTornado.subnetparse import IP_List, ipv6_to_ipv4, to_ipv4, is_valid_ip, is_ipv4 +from Tribler.Core.BitTornado.iprangeparse import IP_List as IP_Range_List +from Tribler.Core.BitTornado.torrentlistparse import parsetorrentlist +from threading import Event, Thread +from Tribler.Core.BitTornado.bencode import bencode, bdecode, Bencached +from Tribler.Core.BitTornado.zurllib import urlopen +from urllib import quote, unquote +from Filter import Filter +from urlparse import urlparse +from os.path import exists +from cStringIO import StringIO +from traceback import print_exc +from time import time, gmtime, strftime, localtime +from Tribler.Core.BitTornado.clock import clock +from random import shuffle, seed +from types import StringType, IntType, LongType, DictType +from binascii import b2a_hex +import sys, os +import signal +import re +import pickle +from Tribler.Core.BitTornado.__init__ import version_short, createPeerID +from Tribler.Core.simpledefs import TRIBLER_TORRENT_EXT + +try: + True +except: + True = 1 + False = 0 + +DEBUG=False + +from Tribler.Core.defaults import trackerdefaults + +defaults = [] +for k,v in trackerdefaults.iteritems(): + defaults.append((k,v,"See triblerAPI")) + + +def statefiletemplate(x): + if type(x) != DictType: + raise ValueError + for cname, cinfo in x.items(): + if cname == 'peers': + for y in cinfo.values(): # The 'peers' key is a dictionary of SHA hashes (torrent ids) + if type(y) != DictType: # ... for the active torrents, and each is a dictionary + raise ValueError + for id, info in y.items(): # ... of client ids interested in that torrent + if (len(id) != 20): + raise ValueError + if type(info) != DictType: # ... each of which is also a dictionary + raise ValueError # ... 
which has an IP, a Port, and a Bytes Left count for that client for that torrent + if type(info.get('ip', '')) != StringType: + raise ValueError + port = info.get('port') + if type(port) not in (IntType,LongType) or port < 0: + raise ValueError + left = info.get('left') + if type(left) not in (IntType,LongType) or left < 0: + raise ValueError + elif cname == 'completed': + if (type(cinfo) != DictType): # The 'completed' key is a dictionary of SHA hashes (torrent ids) + raise ValueError # ... for keeping track of the total completions per torrent + for y in cinfo.values(): # ... each torrent has an integer value + if type(y) not in (IntType,LongType): + raise ValueError # ... for the number of reported completions for that torrent + elif cname == 'allowed': + if (type(cinfo) != DictType): # a list of info_hashes and included data + raise ValueError + if x.has_key('allowed_dir_files'): + adlist = [z[1] for z in x['allowed_dir_files'].values()] + for y in cinfo.keys(): # and each should have a corresponding key here + if not y in adlist: + raise ValueError + elif cname == 'allowed_dir_files': + if (type(cinfo) != DictType): # a list of files, their attributes and info hashes + raise ValueError + dirkeys = {} + for y in cinfo.values(): # each entry should have a corresponding info_hash + if not y[1]: + continue + if not x['allowed'].has_key(y[1]): + raise ValueError + if dirkeys.has_key(y[1]): # and each should have a unique info_hash + raise ValueError + dirkeys[y[1]] = 1 + + +alas = 'your file may exist elsewhere in the universe\nbut alas, not here\n' + +local_IPs = IP_List() +local_IPs.set_intranet_addresses() + + +def isotime(secs = None): + if secs == None: + secs = time() + return strftime('%Y-%m-%d %H:%M UTC', gmtime(secs)) + +http_via_filter = re.compile(' for ([0-9.]+)\Z') + +def _get_forwarded_ip(headers): + if headers.has_key('http_x_forwarded_for'): + header = headers['http_x_forwarded_for'] + try: + x,y = header.split(',') + except: + return header + if not local_IPs.includes(x): + return x + return y + if headers.has_key('http_client_ip'): + return headers['http_client_ip'] + if headers.has_key('http_via'): + x = http_via_filter.search(headers['http_via']) + try: + return x.group(1) + except: + pass + if headers.has_key('http_from'): + return headers['http_from'] + return None + +def get_forwarded_ip(headers): + x = _get_forwarded_ip(headers) + if not is_valid_ip(x) or local_IPs.includes(x): + return None + return x + +def compact_peer_info(ip, port): + try: + s = ( ''.join([chr(int(i)) for i in ip.split('.')]) + + chr((port & 0xFF00) >> 8) + chr(port & 0xFF) ) + if len(s) != 6: + raise ValueError + except: + s = '' # not a valid IP, must be a domain name + return s + +def compact_ip(ip): + return ''.join([chr(int(i)) for i in ip.split('.')]) + +def decompact_ip(cip): + return '.'.join([str(ord(i)) for i in cip]) + + +class Tracker: + def __init__(self, config, rawserver): + self.config = config + self.response_size = config['tracker_response_size'] + self.dfile = config['tracker_dfile'] + self.natcheck = config['tracker_nat_check'] + favicon = config['tracker_favicon'] + self.parse_dir_interval = config['tracker_parse_dir_interval'] + self.favicon = None + if favicon: + try: + h = open(favicon,'rb') + self.favicon = h.read() + h.close() + except: + print "**warning** specified favicon file -- %s -- does not exist." 
% favicon + self.rawserver = rawserver + self.cached = {} # format: infohash: [[time1, l1, s1], [time2, l2, s2], [time3, l3, s3]] + self.cached_t = {} # format: infohash: [time, cache] + self.times = {} + self.state = {} + self.seedcount = {} + + self.allowed_IPs = None + self.banned_IPs = None + if config['tracker_allowed_ips'] or config['tracker_banned_ips']: + self.allowed_ip_mtime = 0 + self.banned_ip_mtime = 0 + self.read_ip_lists() + + self.only_local_override_ip = config['tracker_only_local_override_ip'] + if self.only_local_override_ip == 2: + self.only_local_override_ip = not config['tracker_nat_check'] + + if exists(self.dfile): + try: + h = open(self.dfile, 'rb') + if self.config['tracker_dfile_format'] == ITRACKDBFORMAT_BENCODE: + ds = h.read() + tempstate = bdecode(ds) + else: + tempstate = pickle.load(h) + h.close() + if not tempstate.has_key('peers'): + tempstate = {'peers': tempstate} + statefiletemplate(tempstate) + self.state = tempstate + except: + print '**warning** statefile '+self.dfile+' corrupt; resetting' + self.downloads = self.state.setdefault('peers', {}) + self.completed = self.state.setdefault('completed', {}) + + self.becache = {} # format: infohash: [[l1, s1], [l2, s2], [l3, s3]] + for infohash, ds in self.downloads.items(): + self.seedcount[infohash] = 0 + for x,y in ds.items(): + ip = y['ip'] + if ( (self.allowed_IPs and not self.allowed_IPs.includes(ip)) + or (self.banned_IPs and self.banned_IPs.includes(ip)) ): + del ds[x] + continue + if not y['left']: + self.seedcount[infohash] += 1 + if y.get('nat',-1): + continue + gip = y.get('given_ip') + if is_valid_ip(gip) and ( + not self.only_local_override_ip or local_IPs.includes(ip) ): + ip = gip + self.natcheckOK(infohash,x,ip,y['port'],y['left']) + + for x in self.downloads.keys(): + self.times[x] = {} + for y in self.downloads[x].keys(): + self.times[x][y] = 0 + + self.trackerid = createPeerID('-T-') + seed(self.trackerid) + + self.reannounce_interval = config['tracker_reannounce_interval'] + self.save_dfile_interval = config['tracker_save_dfile_interval'] + self.show_names = config['tracker_show_names'] + rawserver.add_task(self.save_state, self.save_dfile_interval) + self.prevtime = clock() + self.timeout_downloaders_interval = config['tracker_timeout_downloaders_interval'] + rawserver.add_task(self.expire_downloaders, self.timeout_downloaders_interval) + self.logfile = None + self.log = None + if (config['tracker_logfile']) and (config['tracker_logfile'] != '-'): + try: + self.logfile = config['tracker_logfile'] + self.log = open(self.logfile,'a') + sys.stdout = self.log + print "# Log Started: ", isotime() + except: + print "**warning** could not redirect stdout to log file: ", sys.exc_info()[0] + + if config['tracker_hupmonitor']: + def huphandler(signum, frame, self = self): + try: + self.log.close() + self.log = open(self.logfile,'a') + sys.stdout = self.log + print "# Log reopened: ", isotime() + except: + print "**warning** could not reopen logfile" + + signal.signal(signal.SIGHUP, huphandler) + + self.allow_get = config['tracker_allow_get'] + + self.t2tlist = T2TList(config['tracker_multitracker_enabled'], self.trackerid, + config['tracker_multitracker_reannounce_interval'], + config['tracker_multitracker_maxpeers'], config['tracker_multitracker_http_timeout'], + self.rawserver) + + if config['tracker_allowed_list']: + if config['tracker_allowed_dir']: + print '**warning** allowed_dir and allowed_list options cannot be used together' + print '**warning** disregarding allowed_dir' + 
config['tracker_allowed_dir'] = '' + self.allowed = self.state.setdefault('allowed_list',{}) + self.allowed_list_mtime = 0 + self.parse_allowed() + self.remove_from_state('allowed','allowed_dir_files') + if config['tracker_multitracker_allowed'] == ITRACKMULTI_ALLOW_AUTODETECT: + config['tracker_multitracker_allowed'] = ITRACKMULTI_ALLOW_NONE + config['tracker_allowed_controls'] = 0 + + elif config['tracker_allowed_dir']: + self.allowed = self.state.setdefault('allowed',{}) + self.allowed_dir_files = self.state.setdefault('allowed_dir_files',{}) + self.allowed_dir_blocked = {} + self.parse_allowed() + self.remove_from_state('allowed_list') + + else: + self.allowed = None + self.remove_from_state('allowed','allowed_dir_files', 'allowed_list') + if config['tracker_multitracker_allowed'] == ITRACKMULTI_ALLOW_AUTODETECT: + config['tracker_multitracker_allowed'] = ITRACKMULTI_ALLOW_NONE + config['tracker_allowed_controls'] = 0 + + self.uq_broken = unquote('+') != ' ' + self.keep_dead = config['tracker_keep_dead'] + self.Filter = Filter(rawserver.add_task) + + aggregator = config['tracker_aggregator'] + if aggregator == 0: + self.is_aggregator = False + self.aggregator_key = None + else: + self.is_aggregator = True + if aggregator == 1: + self.aggregator_key = None + else: + self.aggregator_key = aggregator + self.natcheck = False + + send = config['tracker_aggregate_forward'] + if not send: + self.aggregate_forward = None + else: + try: + self.aggregate_forward, self.aggregate_password = send + except: + self.aggregate_forward = send + self.aggregate_password = None + + self.cachetime = 0 + self.cachetimeupdate() + + def cachetimeupdate(self): + self.cachetime += 1 # raw clock, but more efficient for cache + self.rawserver.add_task(self.cachetimeupdate,1) + + def aggregate_senddata(self, query): + url = self.aggregate_forward+'?'+query + if self.aggregate_password is not None: + url += '&password='+self.aggregate_password + rq = Thread(target = self._aggregate_senddata, args = [url]) + rq.setName( "AggregateSendData"+rq.getName() ) + rq.setDaemon(True) + rq.start() + + def _aggregate_senddata(self, url): # just send, don't attempt to error check, + try: # discard any returned data + h = urlopen(url) + h.read() + h.close() + except: + return + + + def get_infopage(self): + try: + if not self.config['tracker_show_infopage']: + return (404, 'Not Found', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, alas) + red = self.config['tracker_infopage_redirect'] + if red: + return (302, 'Found', {'Content-Type': 'text/html', 'Location': red}, + 'Click Here') + + s = StringIO() + s.write('\n' \ + 'Tribler Tracker Statistics\n') + if self.favicon is not None: + s.write('\n') + s.write('\n\n' \ + '

Tribler Tracker Statistics

\n') + if self.config['tracker_allowed_dir']: + if self.show_names: + names = [ (self.allowed[hash]['name'],hash) + for hash in self.allowed.keys() ] + else: + names = [ (None,hash) + for hash in self.allowed.keys() ] + else: + names = [ (None,hash) for hash in self.downloads.keys() ] + if not names: + s.write('

Not tracking any files yet...

\n') + else: + names.sort() + tn = 0 + tc = 0 + td = 0 + tt = 0 # Total transferred + ts = 0 # Total size + nf = 0 # Number of files displayed + if self.config['tracker_allowed_dir'] and self.show_names: + s.write('\n' \ + '\n') + else: + s.write('
<tr><th>info hash</th><th>torrent name</th><th>size</th><th>complete</th><th>downloading</th><th>downloaded</th><th>transferred</th></tr>
\n' \ + '\n') + for name,hash in names: + l = self.downloads[hash] + n = self.completed.get(hash, 0) + tn = tn + n + c = self.seedcount[hash] + tc = tc + c + d = len(l) - c + td = td + d + if self.config['tracker_allowed_dir'] and self.show_names: + if self.allowed.has_key(hash): + nf = nf + 1 + sz = self.allowed[hash]['length'] # size + ts = ts + sz + szt = sz * n # Transferred for this torrent + tt = tt + szt + if self.allow_get == 1: + linkname = '' + name + '' + else: + linkname = name + s.write('\n' \ + % (b2a_hex(hash), linkname, size_format(sz), c, d, n, size_format(szt))) + else: + s.write('\n' \ + % (b2a_hex(hash), c, d, n)) + ttn = 0 + for i in self.completed.values(): + ttn = ttn + i + if self.config['tracker_allowed_dir'] and self.show_names: + s.write('\n' + % (nf, size_format(ts), tc, td, tn, ttn, size_format(tt))) + else: + s.write('\n' + % (nf, tc, td, tn, ttn)) + s.write('
<tr><th>info hash</th><th>complete</th><th>downloading</th><th>downloaded</th></tr>
<tr><td><code>%s</code></td><td>%s</td><td>%s</td><td>%i</td><td>%i</td><td>%i</td><td>%s</td></tr>
<tr><td><code>%s</code></td><td>%i</td><td>%i</td><td>%i</td></tr>
<tr><td>%i files</td><td>%s</td><td>%i</td><td>%i</td><td>%i/%i</td><td>%s</td></tr>
<tr><td>%i files</td><td>%i</td><td>%i</td><td>%i/%i</td></tr>
\n' \ + '\n' \ + '
\n' + '
%s (%s)
\n' % (version_short, isotime())) + + + s.write('\n' \ + '\n') + return (200, 'OK', {'Content-Type': 'text/html; charset=iso-8859-1'}, s.getvalue()) + except: + print_exc() + return (500, 'Internal Server Error', {'Content-Type': 'text/html; charset=iso-8859-1'}, 'Server Error') + + + def scrapedata(self, hash, return_name = True): + l = self.downloads[hash] + n = self.completed.get(hash, 0) + c = self.seedcount[hash] + d = len(l) - c + f = {'complete': c, 'incomplete': d, 'downloaded': n} + if return_name and self.show_names and self.config['tracker_allowed_dir']: + f['name'] = self.allowed[hash]['name'] + return (f) + + def get_scrape(self, paramslist): + fs = {} + if paramslist.has_key('info_hash'): + if self.config['tracker_scrape_allowed'] not in [ITRACKSCRAPE_ALLOW_SPECIFIC,ITRACKSCRAPE_ALLOW_FULL]: + return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, + bencode({'failure reason': + 'specific scrape function is not available with this tracker.'})) + for hash in paramslist['info_hash']: + if self.allowed is not None: + if self.allowed.has_key(hash): + fs[hash] = self.scrapedata(hash) + else: + if self.downloads.has_key(hash): + fs[hash] = self.scrapedata(hash) + else: + if self.config['tracker_scrape_allowed'] != ITRACKSCRAPE_ALLOW_FULL: + return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, + bencode({'failure reason': + 'full scrape function is not available with this tracker.'})) + if self.allowed is not None: + keys = self.allowed.keys() + else: + keys = self.downloads.keys() + for hash in keys: + fs[hash] = self.scrapedata(hash) + + return (200, 'OK', {'Content-Type': 'text/plain'}, bencode({'files': fs})) + + + def get_file(self, hash): + if not self.allow_get: + return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, + 'get function is not available with this tracker.') + if not self.allowed.has_key(hash): + return (404, 'Not Found', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, alas) + fname = self.allowed[hash]['file'] + fpath = self.allowed[hash]['path'] + return (200, 'OK', {'Content-Type': 'application/x-bittorrent', + 'Content-Disposition': 'attachment; filename=' + fname}, + open(fpath, 'rb').read()) + + + def check_allowed(self, infohash, paramslist): + if ( self.aggregator_key is not None + and not ( paramslist.has_key('password') + and paramslist['password'][0] == self.aggregator_key ) ): + return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, + bencode({'failure reason': + 'Requested download is not authorized for use with this tracker.'})) + + if self.allowed is not None: + if not self.allowed.has_key(infohash): + return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, + bencode({'failure reason': + 'Requested download is not authorized for use with this tracker.'})) + if self.config['tracker_allowed_controls']: + if self.allowed[infohash].has_key('failure reason'): + return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, + bencode({'failure reason': self.allowed[infohash]['failure reason']})) + + if paramslist.has_key('tracker'): + if ( self.config['tracker_multitracker_allowed'] == ITRACKMULTI_ALLOW_NONE or # turned off + paramslist['peer_id'][0] == self.trackerid ): # oops! 
contacted myself + return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, + bencode({'failure reason': 'disallowed'})) + + if ( self.config['tracker_multitracker_allowed'] == ITRACKMULTI_ALLOW_AUTODETECT + and not self.allowed[infohash].has_key('announce-list') ): + return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, + bencode({'failure reason': + 'Requested download is not authorized for multitracker use.'})) + + return None + + + def add_data(self, infohash, event, ip, paramslist): + peers = self.downloads.setdefault(infohash, {}) + ts = self.times.setdefault(infohash, {}) + self.completed.setdefault(infohash, 0) + self.seedcount.setdefault(infohash, 0) + + def params(key, default = None, l = paramslist): + if l.has_key(key): + return l[key][0] + return default + + myid = params('peer_id','') + if len(myid) != 20: + raise ValueError, 'id not of length 20' + if event not in ['started', 'completed', 'stopped', 'snooped', None]: + raise ValueError, 'invalid event' + port = long(params('port','')) + if port < 0 or port > 65535: + raise ValueError, 'invalid port' + left = long(params('left','')) + if left < 0: + raise ValueError, 'invalid amount left' + uploaded = long(params('uploaded','')) + downloaded = long(params('downloaded','')) + + peer = peers.get(myid) + islocal = local_IPs.includes(ip) + mykey = params('key') + if peer: + auth = peer.get('key',-1) == mykey or peer.get('ip') == ip + + gip = params('ip') + if is_valid_ip(gip) and (islocal or not self.only_local_override_ip): + ip1 = gip + else: + ip1 = ip + + if params('numwant') is not None: + rsize = min(int(params('numwant')),self.response_size) + else: + rsize = self.response_size + + if event == 'stopped': + if peer: + if auth: + self.delete_peer(infohash,myid) + + elif not peer: + ts[myid] = clock() + peer = {'ip': ip, 'port': port, 'left': left} + if mykey: + peer['key'] = mykey + if gip: + peer['given ip'] = gip + if port: + if not self.natcheck or islocal: + peer['nat'] = 0 + self.natcheckOK(infohash,myid,ip1,port,left) + else: + NatCheck(self.connectback_result,infohash,myid,ip1,port,self.rawserver) + else: + peer['nat'] = 2**30 + if event == 'completed': + self.completed[infohash] += 1 + if not left: + self.seedcount[infohash] += 1 + + peers[myid] = peer + + else: + if not auth: + return rsize # return w/o changing stats + + ts[myid] = clock() + if not left and peer['left']: + self.completed[infohash] += 1 + self.seedcount[infohash] += 1 + if not peer.get('nat', -1): + for bc in self.becache[infohash]: + bc[1][myid] = bc[0][myid] + del bc[0][myid] + if peer['left']: + peer['left'] = left + + if port: + recheck = False + if ip != peer['ip']: + peer['ip'] = ip + recheck = True + if gip != peer.get('given ip'): + if gip: + peer['given ip'] = gip + elif peer.has_key('given ip'): + del peer['given ip'] + recheck = True + + natted = peer.get('nat', -1) + if recheck: + if natted == 0: + l = self.becache[infohash] + y = not peer['left'] + for x in l: + del x[y][myid] + if not self.natcheck or islocal: + del peer['nat'] # restart NAT testing + if natted and natted < self.natcheck: + recheck = True + + if recheck: + if not self.natcheck or islocal: + peer['nat'] = 0 + self.natcheckOK(infohash,myid,ip1,port,left) + else: + NatCheck(self.connectback_result,infohash,myid,ip1,port,self.rawserver) + + return rsize + + + def peerlist(self, infohash, stopped, tracker, is_seed, return_type, rsize): + data = {} # return data + seeds = self.seedcount[infohash] + data['complete'] = 
seeds + data['incomplete'] = len(self.downloads[infohash]) - seeds + + if ( self.config['tracker_allowed_controls'] + and self.allowed[infohash].has_key('warning message') ): + data['warning message'] = self.allowed[infohash]['warning message'] + + if tracker: + data['interval'] = self.config['tracker_multitracker_reannounce_interval'] + if not rsize: + return data + cache = self.cached_t.setdefault(infohash, None) + if ( not cache or len(cache[1]) < rsize + or cache[0] + self.config['tracker_min_time_between_cache_refreshes'] < clock() ): + bc = self.becache.setdefault(infohash,[[{}, {}], [{}, {}], [{}, {}]]) + cache = [ clock(), bc[0][0].values() + bc[0][1].values() ] + self.cached_t[infohash] = cache + shuffle(cache[1]) + cache = cache[1] + + data['peers'] = cache[-rsize:] + del cache[-rsize:] + return data + + data['interval'] = self.reannounce_interval + if stopped or not rsize: # save some bandwidth + data['peers'] = [] + return data + + bc = self.becache.setdefault(infohash,[[{}, {}], [{}, {}], [{}, {}]]) + len_l = len(bc[0][0]) + len_s = len(bc[0][1]) + if not (len_l+len_s): # caches are empty! + data['peers'] = [] + return data + l_get_size = int(float(rsize)*(len_l)/(len_l+len_s)) + cache = self.cached.setdefault(infohash,[None,None,None])[return_type] + if cache and ( not cache[1] + or (is_seed and len(cache[1]) < rsize) + or len(cache[1]) < l_get_size + or cache[0]+self.config['tracker_min_time_between_cache_refreshes'] < self.cachetime ): + cache = None + if not cache: + peers = self.downloads[infohash] + vv = [[],[],[]] + for key, ip, port in self.t2tlist.harvest(infohash): # empty if disabled + if not peers.has_key(key): + vv[0].append({'ip': ip, 'port': port, 'peer id': key}) + vv[1].append({'ip': ip, 'port': port}) + vv[2].append(compact_peer_info(ip, port)) + cache = [ self.cachetime, + bc[return_type][0].values()+vv[return_type], + bc[return_type][1].values() ] + shuffle(cache[1]) + shuffle(cache[2]) + self.cached[infohash][return_type] = cache + for rr in xrange(len(self.cached[infohash])): + if rr != return_type: + try: + self.cached[infohash][rr][1].extend(vv[rr]) + except: + pass + if len(cache[1]) < l_get_size: + peerdata = cache[1] + if not is_seed: + peerdata.extend(cache[2]) + cache[1] = [] + cache[2] = [] + else: + if not is_seed: + peerdata = cache[2][l_get_size-rsize:] + del cache[2][l_get_size-rsize:] + rsize -= len(peerdata) + else: + peerdata = [] + if rsize: + peerdata.extend(cache[1][-rsize:]) + del cache[1][-rsize:] + if return_type == 2: + peerdata = ''.join(peerdata) + data['peers'] = peerdata + return data + + + def get(self, connection, path, headers): + real_ip = connection.get_ip() + ip = real_ip + if is_ipv4(ip): + ipv4 = True + else: + try: + ip = ipv6_to_ipv4(ip) + ipv4 = True + except ValueError: + ipv4 = False + + if ( (self.allowed_IPs and not self.allowed_IPs.includes(ip)) + or (self.banned_IPs and self.banned_IPs.includes(ip)) ): + return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, + bencode({'failure reason': + 'your IP is not allowed on this tracker'})) + + nip = get_forwarded_ip(headers) + if nip and not self.only_local_override_ip: + ip = nip + try: + ip = to_ipv4(ip) + ipv4 = True + except ValueError: + ipv4 = False + + paramslist = {} + def params(key, default = None, l = paramslist): + if l.has_key(key): + return l[key][0] + return default + + try: + (scheme, netloc, path, pars, query, fragment) = urlparse(path) + if self.uq_broken == 1: + path = path.replace('+',' ') + query = query.replace('+',' ') 
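# --- illustrative sketch, not part of the patch ---
# Shape of the per-torrent caches used by peerlist()/natcheckOK() above:
# self.becache[infohash] holds three parallel encodings of the swarm --
# [0] bencoded dicts including 'peer id', [1] bencoded dicts without it
# (the no_peer_id form), [2] compact 6-byte strings -- and each encoding is
# split into a {leechers} and a {seeds} dict keyed by peer id.
# The peer id and address below are made-up example values.
example_becache_entry = [
    [{'EXAMPLEPEERID4567890': Bencached(bencode({'ip': '10.0.0.1', 'port': 6881,
                                                 'peer id': 'EXAMPLEPEERID4567890'}))}, {}],
    [{'EXAMPLEPEERID4567890': Bencached(bencode({'ip': '10.0.0.1', 'port': 6881}))}, {}],
    [{'EXAMPLEPEERID4567890': compact_peer_info('10.0.0.1', 6881)}, {}],
]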
+ path = unquote(path)[1:] + for s in query.split('&'): + if s: + i = s.index('=') + kw = unquote(s[:i]) + paramslist.setdefault(kw, []) + paramslist[kw] += [unquote(s[i+1:])] + + if path == '' or path == 'index.html': + return self.get_infopage() + if (path == 'file'): + return self.get_file(params('info_hash')) + if path == 'favicon.ico' and self.favicon is not None: + return (200, 'OK', {'Content-Type' : 'image/x-icon'}, self.favicon) + + # automated access from here on + + if path == 'scrape': + return self.get_scrape(paramslist) + + if path != 'announce': + return (404, 'Not Found', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, alas) + + # main tracker function + + filtered = self.Filter.check(real_ip, paramslist, headers) + if filtered: + return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, + bencode({'failure reason': filtered})) + + infohash = params('info_hash') + if not infohash: + raise ValueError, 'no info hash' + + notallowed = self.check_allowed(infohash, paramslist) + if notallowed: + return notallowed + + event = params('event') + + rsize = self.add_data(infohash, event, ip, paramslist) + + except ValueError, e: + print_exc() + return (400, 'Bad Request', {'Content-Type': 'text/plain'}, + 'you sent me garbage - ' + str(e)) + + if self.aggregate_forward and not paramslist.has_key('tracker'): + self.aggregate_senddata(query) + + if self.is_aggregator: # don't return peer data here + return (200, 'OK', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, + bencode({'response': 'OK'})) + + if params('compact') and ipv4: + return_type = 2 + elif params('no_peer_id'): + return_type = 1 + else: + return_type = 0 + + data = self.peerlist(infohash, event=='stopped', + params('tracker'), not params('left'), + return_type, rsize) + + if paramslist.has_key('scrape'): + data['scrape'] = self.scrapedata(infohash, False) + + return (200, 'OK', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, bencode(data)) + + + def natcheckOK(self, infohash, peerid, ip, port, not_seed): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tracker: natcheck: Recorded succes" + bc = self.becache.setdefault(infohash,[[{}, {}], [{}, {}], [{}, {}]]) + bc[0][not not_seed][peerid] = Bencached(bencode({'ip': ip, 'port': port, + 'peer id': peerid})) + bc[1][not not_seed][peerid] = Bencached(bencode({'ip': ip, 'port': port})) + bc[2][not not_seed][peerid] = compact_peer_info(ip, port) + + + def natchecklog(self, peerid, ip, port, result): + year, month, day, hour, minute, second, a, b, c = localtime(time()) + print '%s - %s [%02d/%3s/%04d:%02d:%02d:%02d] "!natcheck-%s:%i" %i 0 - -' % ( + ip, quote(peerid), day, months[month], year, hour, minute, second, + ip, port, result) + + def connectback_result(self, result, downloadid, peerid, ip, port): + record = self.downloads.get(downloadid, {}).get(peerid) + if ( record is None + or (record['ip'] != ip and record.get('given ip') != ip) + or record['port'] != port ): + if self.config['tracker_log_nat_checks']: + self.natchecklog(peerid, ip, port, 404) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tracker: natcheck: No record found for tested peer" + return + if self.config['tracker_log_nat_checks']: + if result: + x = 200 + else: + x = 503 + self.natchecklog(peerid, ip, port, x) + if not record.has_key('nat'): + record['nat'] = int(not result) + if result: + self.natcheckOK(downloadid,peerid,ip,port,record['left']) + elif result and record['nat']: + 
record['nat'] = 0 + self.natcheckOK(downloadid,peerid,ip,port,record['left']) + elif not result: + record['nat'] += 1 + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tracker: natcheck: Recorded failed attempt" + + + def remove_from_state(self, *l): + for s in l: + try: + del self.state[s] + except: + pass + + def save_state(self): + self.rawserver.add_task(self.save_state, self.save_dfile_interval) + h = open(self.dfile, 'wb') + if self.config['tracker_dfile_format'] == ITRACKDBFORMAT_BENCODE: + h.write(bencode(self.state)) + else: + pickle.dump(self.state,h,-1) + h.close() + + + def parse_allowed(self,source=None): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tracker: parse_allowed: Source is",source + + if source is None: + self.rawserver.add_task(self.parse_allowed, self.parse_dir_interval) + + if self.config['tracker_allowed_dir']: + r = parsedir( self.config['tracker_allowed_dir'], self.allowed, + self.allowed_dir_files, self.allowed_dir_blocked, + [".torrent",TRIBLER_TORRENT_EXT] ) + ( self.allowed, self.allowed_dir_files, self.allowed_dir_blocked, + added, garbage2 ) = r + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tracker: parse_allowed: Found new",`added` + + self.state['allowed'] = self.allowed + self.state['allowed_dir_files'] = self.allowed_dir_files + + self.t2tlist.parse(self.allowed) + + else: + f = self.config['tracker_allowed_list'] + if self.allowed_list_mtime == os.path.getmtime(f): + return + try: + r = parsetorrentlist(f, self.allowed) + (self.allowed, added, garbage2) = r + self.state['allowed_list'] = self.allowed + except (IOError, OSError): + print '**warning** unable to read allowed torrent list' + return + self.allowed_list_mtime = os.path.getmtime(f) + + for infohash in added.keys(): + self.downloads.setdefault(infohash, {}) + self.completed.setdefault(infohash, 0) + self.seedcount.setdefault(infohash, 0) + + + def read_ip_lists(self): + self.rawserver.add_task(self.read_ip_lists,self.parse_dir_interval) + + f = self.config['tracker_allowed_ips'] + if f and self.allowed_ip_mtime != os.path.getmtime(f): + self.allowed_IPs = IP_List() + try: + self.allowed_IPs.read_fieldlist(f) + self.allowed_ip_mtime = os.path.getmtime(f) + except (IOError, OSError): + print '**warning** unable to read allowed_IP list' + + f = self.config['tracker_banned_ips'] + if f and self.banned_ip_mtime != os.path.getmtime(f): + self.banned_IPs = IP_Range_List() + try: + self.banned_IPs.read_rangelist(f) + self.banned_ip_mtime = os.path.getmtime(f) + except (IOError, OSError): + print '**warning** unable to read banned_IP list' + + + def delete_peer(self, infohash, peerid): + dls = self.downloads[infohash] + peer = dls[peerid] + if not peer['left']: + self.seedcount[infohash] -= 1 + if not peer.get('nat',-1): + l = self.becache[infohash] + y = not peer['left'] + for x in l: + del x[y][peerid] + del self.times[infohash][peerid] + del dls[peerid] + + def expire_downloaders(self): + for x in self.times.keys(): + for myid, t in self.times[x].items(): + if t < self.prevtime: + self.delete_peer(x,myid) + self.prevtime = clock() + if (self.keep_dead != 1): + for key, value in self.downloads.items(): + if len(value) == 0 and ( + self.allowed is None or not self.allowed.has_key(key) ): + del self.times[key] + del self.downloads[key] + del self.seedcount[key] + self.rawserver.add_task(self.expire_downloaders, self.timeout_downloaders_interval) + + +def track(args): + if not args: + print 
formatDefinitions(defaults, 80) + return + try: + config, files = parseargs(args, defaults, 0, 0) + except ValueError, e: + print 'error: ' + str(e) + print 'run with no arguments for parameter explanations' + return + r = RawServer(Event(), config['tracker_timeout_check_interval'], + config['tracker_socket_timeout'], ipv6_enable = config['ipv6_enabled']) + t = Tracker(config, r) + r.bind(config['minport'], config['bind'], + reuse = True, ipv6_socket_style = config['ipv6_binds_v4']) + r.listen_forever(HTTPHandler(t.get, config['min_time_between_log_flushes'])) + t.save_state() + print '# Shutting down: ' + isotime() + +def size_format(s): + if (s < 1024): + r = str(s) + 'B' + elif (s < 1048576): + r = str(int(s/1024)) + 'KiB' + elif (s < 1073741824L): + r = str(int(s/1048576)) + 'MiB' + elif (s < 1099511627776L): + r = str(int((s/1073741824.0)*100.0)/100.0) + 'GiB' + else: + r = str(int((s/1099511627776.0)*100.0)/100.0) + 'TiB' + return(r) + diff --git a/tribler-mod/Tribler/Core/BitTornado/CurrentRateMeasure.py b/tribler-mod/Tribler/Core/BitTornado/CurrentRateMeasure.py new file mode 100644 index 0000000..c697941 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/CurrentRateMeasure.py @@ -0,0 +1,39 @@ +from time import localtime, strftime +# Written by Bram Cohen +# see LICENSE.txt for license information + +from clock import clock + +class Measure: + def __init__(self, max_rate_period, fudge = 1): + self.max_rate_period = max_rate_period + self.ratesince = clock() - fudge + self.last = self.ratesince + self.rate = 0.0 + self.total = 0L + + def update_rate(self, amount): + self.total += amount + t = clock() + self.rate = (self.rate * (self.last - self.ratesince) + + amount) / (t - self.ratesince + 0.0001) + self.last = t + if self.ratesince < t - self.max_rate_period: + self.ratesince = t - self.max_rate_period + + def get_rate(self): + self.update_rate(0) + #print 'Rate: %f (%d bytes)' % (self.rate, self.total) + return self.rate + + def get_rate_noupdate(self): + return self.rate + + def time_until_rate(self, newrate): + if self.rate <= newrate: + return 0 + t = clock() - self.ratesince + return ((self.rate * t) / newrate) - t + + def get_total(self): + return self.total \ No newline at end of file diff --git a/tribler-mod/Tribler/Core/BitTornado/CurrentRateMeasure.py.bak b/tribler-mod/Tribler/Core/BitTornado/CurrentRateMeasure.py.bak new file mode 100644 index 0000000..a4c2969 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/CurrentRateMeasure.py.bak @@ -0,0 +1,38 @@ +# Written by Bram Cohen +# see LICENSE.txt for license information + +from clock import clock + +class Measure: + def __init__(self, max_rate_period, fudge = 1): + self.max_rate_period = max_rate_period + self.ratesince = clock() - fudge + self.last = self.ratesince + self.rate = 0.0 + self.total = 0L + + def update_rate(self, amount): + self.total += amount + t = clock() + self.rate = (self.rate * (self.last - self.ratesince) + + amount) / (t - self.ratesince + 0.0001) + self.last = t + if self.ratesince < t - self.max_rate_period: + self.ratesince = t - self.max_rate_period + + def get_rate(self): + self.update_rate(0) + #print 'Rate: %f (%d bytes)' % (self.rate, self.total) + return self.rate + + def get_rate_noupdate(self): + return self.rate + + def time_until_rate(self, newrate): + if self.rate <= newrate: + return 0 + t = clock() - self.ratesince + return ((self.rate * t) / newrate) - t + + def get_total(self): + return self.total \ No newline at end of file diff --git 
a/tribler-mod/Tribler/Core/BitTornado/HTTPHandler.py b/tribler-mod/Tribler/Core/BitTornado/HTTPHandler.py new file mode 100644 index 0000000..f7da91f --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/HTTPHandler.py @@ -0,0 +1,195 @@ +from time import localtime, strftime +# Written by Bram Cohen +# see LICENSE.txt for license information + +from cStringIO import StringIO +import sys +import time +from clock import clock +from gzip import GzipFile +try: + True +except: + True = 1 + False = 0 + +DEBUG = False + +weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] + +months = [None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', + 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] + +class HTTPConnection: + def __init__(self, handler, connection): + self.handler = handler + self.connection = connection + self.buf = '' + self.closed = False + self.done = False + self.donereading = False + self.next_func = self.read_type + + def get_ip(self): + return self.connection.get_ip() + + def data_came_in(self, data): + if self.donereading or self.next_func is None: + return True + self.buf += data + while 1: + try: + i = self.buf.index('\n') + except ValueError: + return True + val = self.buf[:i] + self.buf = self.buf[i+1:] + self.next_func = self.next_func(val) + if self.donereading: + return True + if self.next_func is None or self.closed: + return False + + def read_type(self, data): + self.header = data.strip() + words = data.split() + if len(words) == 3: + self.command, self.path, garbage = words + self.pre1 = False + elif len(words) == 2: + self.command, self.path = words + self.pre1 = True + if self.command != 'GET': + return None + else: + return None + if self.command not in ('HEAD', 'GET'): + return None + self.headers = {} + return self.read_header + + def read_header(self, data): + data = data.strip() + if data == '': + self.donereading = True + if self.headers.get('accept-encoding', '').find('gzip') > -1: + self.encoding = 'gzip' + else: + self.encoding = 'identity' + r = self.handler.getfunc(self, self.path, self.headers) + if r is not None: + self.answer(r) + return None + try: + i = data.index(':') + except ValueError: + return None + self.headers[data[:i].strip().lower()] = data[i+1:].strip() + if DEBUG: + print data[:i].strip() + ": " + data[i+1:].strip() + return self.read_header + + def answer(self, (responsecode, responsestring, headers, data)): + if self.closed: + return + if self.encoding == 'gzip': + compressed = StringIO() + gz = GzipFile(fileobj = compressed, mode = 'wb', compresslevel = 9) + gz.write(data) + gz.close() + cdata = compressed.getvalue() + if len(cdata) >= len(data): + self.encoding = 'identity' + else: + if DEBUG: + print "Compressed: %i Uncompressed: %i\n" % (len(cdata), len(data)) + data = cdata + headers['Content-Encoding'] = 'gzip' + + # i'm abusing the identd field here, but this should be ok + if self.encoding == 'identity': + ident = '-' + else: + ident = self.encoding + self.handler.log( self.connection.get_ip(), ident, '-', + self.header, responsecode, len(data), + self.headers.get('referer','-'), + self.headers.get('user-agent','-') ) + self.done = True + r = StringIO() + r.write('HTTP/1.0 ' + str(responsecode) + ' ' + + responsestring + '\r\n') + if not self.pre1: + headers['Content-Length'] = len(data) + for key, value in headers.items(): + r.write(key + ': ' + str(value) + '\r\n') + r.write('\r\n') + if self.command != 'HEAD': + r.write(data) + self.connection.write(r.getvalue()) + if self.connection.is_flushed(): + self.connection.shutdown(1) + 
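# --- illustrative sketch, not part of the patch ---
# The gzip decision HTTPConnection.answer() makes above: compress the response
# body in memory and keep the result only when it is actually smaller,
# otherwise fall back to the identity encoding. maybe_gzip is a hypothetical
# helper written for illustration.
from cStringIO import StringIO
from gzip import GzipFile

def maybe_gzip(data):
    buf = StringIO()
    gz = GzipFile(fileobj=buf, mode='wb', compresslevel=9)
    gz.write(data)
    gz.close()
    cdata = buf.getvalue()
    if len(cdata) >= len(data):
        return 'identity', data          # compression did not pay off
    return 'gzip', cdata                 # caller also sets Content-Encoding: gzip

# e.g. encoding, body = maybe_gzip('x' * 4096)   # -> ('gzip', <much shorter string>)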
+class HTTPHandler: + def __init__(self, getfunc, minflush): + self.connections = {} + self.getfunc = getfunc + self.minflush = minflush + self.lastflush = clock() + + def external_connection_made(self, connection): + self.connections[connection] = HTTPConnection(self, connection) + + def connection_flushed(self, connection): + if self.connections[connection].done: + connection.shutdown(1) + + def connection_lost(self, connection): + ec = self.connections[connection] + ec.closed = True + del ec.connection + del ec.next_func + del self.connections[connection] + + def data_came_in(self, connection, data): + c = self.connections[connection] + if not c.data_came_in(data) and not c.closed: + c.connection.shutdown(1) + + def log(self, ip, ident, username, header, + responsecode, length, referrer, useragent): + year, month, day, hour, minute, second, a, b, c = time.localtime(time.time()) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'HTTPHandler: %s %s %s [%02d/%3s/%04d:%02d:%02d:%02d] "%s" %i %i "%s" "%s"' % ( + ip, ident, username, day, months[month], year, hour, + minute, second, header, responsecode, length, referrer, useragent) + t = clock() + if t - self.lastflush > self.minflush: + self.lastflush = t + sys.stdout.flush() + + +class DummyHTTPHandler: + def __init__(self): + pass + + def external_connection_made(self, connection): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","DummyHTTPHandler: ext_conn_made" + reply = 'HTTP/1.1 404 Not Found\r\nContent-Type: text/plain\r\n\r\nTribler Internal Tracker not activated.\r\n' + connection.write(reply) + connection.close() + + def connection_flushed(self, connection): + pass + + def connection_lost(self, connection): + pass + + def data_came_in(self, connection, data): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","DummyHTTPHandler: data_came_in",len(data) + pass + + def log(self, ip, ident, username, header, + responsecode, length, referrer, useragent): + year, month, day, hour, minute, second, a, b, c = time.localtime(time.time()) + pass diff --git a/tribler-mod/Tribler/Core/BitTornado/HTTPHandler.py.bak b/tribler-mod/Tribler/Core/BitTornado/HTTPHandler.py.bak new file mode 100644 index 0000000..08106be --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/HTTPHandler.py.bak @@ -0,0 +1,194 @@ +# Written by Bram Cohen +# see LICENSE.txt for license information + +from cStringIO import StringIO +import sys +import time +from clock import clock +from gzip import GzipFile +try: + True +except: + True = 1 + False = 0 + +DEBUG = False + +weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] + +months = [None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', + 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] + +class HTTPConnection: + def __init__(self, handler, connection): + self.handler = handler + self.connection = connection + self.buf = '' + self.closed = False + self.done = False + self.donereading = False + self.next_func = self.read_type + + def get_ip(self): + return self.connection.get_ip() + + def data_came_in(self, data): + if self.donereading or self.next_func is None: + return True + self.buf += data + while 1: + try: + i = self.buf.index('\n') + except ValueError: + return True + val = self.buf[:i] + self.buf = self.buf[i+1:] + self.next_func = self.next_func(val) + if self.donereading: + return True + if self.next_func is None or self.closed: + return False + + def read_type(self, data): + self.header = data.strip() + words = data.split() + if len(words) == 3: 
+ self.command, self.path, garbage = words + self.pre1 = False + elif len(words) == 2: + self.command, self.path = words + self.pre1 = True + if self.command != 'GET': + return None + else: + return None + if self.command not in ('HEAD', 'GET'): + return None + self.headers = {} + return self.read_header + + def read_header(self, data): + data = data.strip() + if data == '': + self.donereading = True + if self.headers.get('accept-encoding', '').find('gzip') > -1: + self.encoding = 'gzip' + else: + self.encoding = 'identity' + r = self.handler.getfunc(self, self.path, self.headers) + if r is not None: + self.answer(r) + return None + try: + i = data.index(':') + except ValueError: + return None + self.headers[data[:i].strip().lower()] = data[i+1:].strip() + if DEBUG: + print data[:i].strip() + ": " + data[i+1:].strip() + return self.read_header + + def answer(self, (responsecode, responsestring, headers, data)): + if self.closed: + return + if self.encoding == 'gzip': + compressed = StringIO() + gz = GzipFile(fileobj = compressed, mode = 'wb', compresslevel = 9) + gz.write(data) + gz.close() + cdata = compressed.getvalue() + if len(cdata) >= len(data): + self.encoding = 'identity' + else: + if DEBUG: + print "Compressed: %i Uncompressed: %i\n" % (len(cdata), len(data)) + data = cdata + headers['Content-Encoding'] = 'gzip' + + # i'm abusing the identd field here, but this should be ok + if self.encoding == 'identity': + ident = '-' + else: + ident = self.encoding + self.handler.log( self.connection.get_ip(), ident, '-', + self.header, responsecode, len(data), + self.headers.get('referer','-'), + self.headers.get('user-agent','-') ) + self.done = True + r = StringIO() + r.write('HTTP/1.0 ' + str(responsecode) + ' ' + + responsestring + '\r\n') + if not self.pre1: + headers['Content-Length'] = len(data) + for key, value in headers.items(): + r.write(key + ': ' + str(value) + '\r\n') + r.write('\r\n') + if self.command != 'HEAD': + r.write(data) + self.connection.write(r.getvalue()) + if self.connection.is_flushed(): + self.connection.shutdown(1) + +class HTTPHandler: + def __init__(self, getfunc, minflush): + self.connections = {} + self.getfunc = getfunc + self.minflush = minflush + self.lastflush = clock() + + def external_connection_made(self, connection): + self.connections[connection] = HTTPConnection(self, connection) + + def connection_flushed(self, connection): + if self.connections[connection].done: + connection.shutdown(1) + + def connection_lost(self, connection): + ec = self.connections[connection] + ec.closed = True + del ec.connection + del ec.next_func + del self.connections[connection] + + def data_came_in(self, connection, data): + c = self.connections[connection] + if not c.data_came_in(data) and not c.closed: + c.connection.shutdown(1) + + def log(self, ip, ident, username, header, + responsecode, length, referrer, useragent): + year, month, day, hour, minute, second, a, b, c = time.localtime(time.time()) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'HTTPHandler: %s %s %s [%02d/%3s/%04d:%02d:%02d:%02d] "%s" %i %i "%s" "%s"' % ( + ip, ident, username, day, months[month], year, hour, + minute, second, header, responsecode, length, referrer, useragent) + t = clock() + if t - self.lastflush > self.minflush: + self.lastflush = t + sys.stdout.flush() + + +class DummyHTTPHandler: + def __init__(self): + pass + + def external_connection_made(self, connection): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","DummyHTTPHandler: 
ext_conn_made" + reply = 'HTTP/1.1 404 Not Found\r\nContent-Type: text/plain\r\n\r\nTribler Internal Tracker not activated.\r\n' + connection.write(reply) + connection.close() + + def connection_flushed(self, connection): + pass + + def connection_lost(self, connection): + pass + + def data_came_in(self, connection, data): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","DummyHTTPHandler: data_came_in",len(data) + pass + + def log(self, ip, ident, username, header, + responsecode, length, referrer, useragent): + year, month, day, hour, minute, second, a, b, c = time.localtime(time.time()) + pass diff --git a/tribler-mod/Tribler/Core/BitTornado/PSYCO.py b/tribler-mod/Tribler/Core/BitTornado/PSYCO.py new file mode 100644 index 0000000..69b23a4 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/PSYCO.py @@ -0,0 +1,9 @@ +from time import localtime, strftime +# Written by BitTornado authors +# see LICENSE.txt for license information + +# edit this file to enable/disable Psyco +# psyco = 1 -- enabled +# psyco = 0 -- disabled + +psyco = 0 diff --git a/tribler-mod/Tribler/Core/BitTornado/PSYCO.py.bak b/tribler-mod/Tribler/Core/BitTornado/PSYCO.py.bak new file mode 100644 index 0000000..58fd571 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/PSYCO.py.bak @@ -0,0 +1,8 @@ +# Written by BitTornado authors +# see LICENSE.txt for license information + +# edit this file to enable/disable Psyco +# psyco = 1 -- enabled +# psyco = 0 -- disabled + +psyco = 0 diff --git a/tribler-mod/Tribler/Core/BitTornado/RateLimiter.py b/tribler-mod/Tribler/Core/BitTornado/RateLimiter.py new file mode 100644 index 0000000..cb52e43 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/RateLimiter.py @@ -0,0 +1,169 @@ +from time import localtime, strftime +# Written by Bram Cohen and Pawel Garbacki +# see LICENSE.txt for license information + +from clock import clock +from CurrentRateMeasure import Measure +from math import sqrt +import sys + +try: + True +except: + True = 1 + False = 0 +try: + sum([1]) +except: + sum = lambda a: reduce(lambda x, y: x+y, a, 0) + +DEBUG = False + +MAX_RATE_PERIOD = 20.0 +MAX_RATE = 10e10 +PING_BOUNDARY = 1.2 +PING_SAMPLES = 7 +PING_DISCARDS = 1 +PING_THRESHHOLD = 5 +PING_DELAY = 5 # cycles 'til first upward adjustment +PING_DELAY_NEXT = 2 # 'til next +ADJUST_UP = 1.05 +ADJUST_DOWN = 0.95 +UP_DELAY_FIRST = 5 +UP_DELAY_NEXT = 2 +SLOTS_STARTING = 6 +SLOTS_FACTOR = 1.66/1000 + +class RateLimiter: + def __init__(self, sched, unitsize, slotsfunc = lambda x: None): + self.sched = sched + self.last = None + self.unitsize = unitsize + self.slotsfunc = slotsfunc + self.measure = Measure(MAX_RATE_PERIOD) + self.autoadjust = False + self.upload_rate = MAX_RATE * 1000 + self.slots = SLOTS_STARTING # garbage if not automatic + + def set_upload_rate(self, rate): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "RateLimiter: set_upload_rate", rate + # rate = -1 # test automatic + if rate < 0: + if self.autoadjust: + return + self.autoadjust = True + self.autoadjustup = 0 + self.pings = [] + rate = MAX_RATE + self.slots = SLOTS_STARTING + self.slotsfunc(self.slots) + else: + self.autoadjust = False + if not rate: + rate = MAX_RATE + self.upload_rate = rate * 1000 + self.lasttime = clock() + self.bytes_sent = 0 + + def queue(self, conn): + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "RateLimiter: queue", conn + assert conn.next_upload is None + if self.last is None: + self.last = conn + conn.next_upload = 
conn + self.try_send(True) + else: + conn.next_upload = self.last.next_upload + self.last.next_upload = conn +# 2fastbt_ + if not conn.connection.is_coordinator_con(): + self.last = conn +# _2fastbt + + def try_send(self, check_time = False): + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "RateLimiter: try_send" + t = clock() + self.bytes_sent -= (t - self.lasttime) * self.upload_rate + #print 'try_send: bytes_sent: %s' % self.bytes_sent + self.lasttime = t + if check_time: + self.bytes_sent = max(self.bytes_sent, 0) + cur = self.last.next_upload + while self.bytes_sent <= 0: + bytes = cur.send_partial(self.unitsize) + self.bytes_sent += bytes + self.measure.update_rate(bytes) + if bytes == 0 or cur.backlogged(): + if self.last is cur: + self.last = None + cur.next_upload = None + break + else: + self.last.next_upload = cur.next_upload + cur.next_upload = None + cur = self.last.next_upload + else: +# 2fastbt_ + if not cur.connection.is_coordinator_con() or not cur.upload.buffer: +# _2fastbt + self.last = cur + cur = cur.next_upload +# 2fastbt_ + else: + pass +# _2fastbt + else: + self.sched(self.try_send, self.bytes_sent / self.upload_rate) + + def adjust_sent(self, bytes): + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "RateLimiter: adjust_sent", bytes + self.bytes_sent = min(self.bytes_sent+bytes, self.upload_rate*3) + self.measure.update_rate(bytes) + + + def ping(self, delay): + ##raise Exception('Is this called?') + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", delay + if not self.autoadjust: + return + self.pings.append(delay > PING_BOUNDARY) + if len(self.pings) < PING_SAMPLES+PING_DISCARDS: + return + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'RateLimiter: cycle' + pings = sum(self.pings[PING_DISCARDS:]) + del self.pings[:] + if pings >= PING_THRESHHOLD: # assume flooded + if self.upload_rate == MAX_RATE: + self.upload_rate = self.measure.get_rate()*ADJUST_DOWN + else: + self.upload_rate = min(self.upload_rate, + self.measure.get_rate()*1.1) + self.upload_rate = max(int(self.upload_rate*ADJUST_DOWN), 2) + self.slots = int(sqrt(self.upload_rate*SLOTS_FACTOR)) + self.slotsfunc(self.slots) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'RateLimiter: adjust down to '+str(self.upload_rate) + self.lasttime = clock() + self.bytes_sent = 0 + self.autoadjustup = UP_DELAY_FIRST + else: # not flooded + if self.upload_rate == MAX_RATE: + return + self.autoadjustup -= 1 + if self.autoadjustup: + return + self.upload_rate = int(self.upload_rate*ADJUST_UP) + self.slots = int(sqrt(self.upload_rate*SLOTS_FACTOR)) + self.slotsfunc(self.slots) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'RateLimiter: adjust up to '+str(self.upload_rate) + self.lasttime = clock() + self.bytes_sent = 0 + self.autoadjustup = UP_DELAY_NEXT + + + + diff --git a/tribler-mod/Tribler/Core/BitTornado/RateLimiter.py.bak b/tribler-mod/Tribler/Core/BitTornado/RateLimiter.py.bak new file mode 100644 index 0000000..c1423f4 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/RateLimiter.py.bak @@ -0,0 +1,168 @@ +# Written by Bram Cohen and Pawel Garbacki +# see LICENSE.txt for license information + +from clock import clock +from CurrentRateMeasure import Measure +from math import sqrt +import sys + +try: + True +except: + True = 1 + False = 0 +try: + sum([1]) +except: + sum = lambda a: reduce(lambda x, y: x+y, a, 0) + 
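The queue/try_send pair above keeps a circular list of connections and paces uploads with a byte-debt counter: bytes_sent is credited by elapsed_time * upload_rate, data is pushed out while the debt is at or below zero, and otherwise a retry is scheduled bytes_sent / upload_rate seconds later. The following is a minimal standalone sketch of that pacing idea only; PacingSketch and send_partial are illustrative names, not part of the patched code.

# Minimal sketch of the byte-debt pacing behind RateLimiter.try_send.
# Not the BitTornado implementation; PacingSketch/send_partial are made-up names.
import time

class PacingSketch(object):
    def __init__(self, rate_bytes_per_s, unitsize=1460):
        self.rate = float(rate_bytes_per_s)
        self.unitsize = unitsize
        self.debt = 0.0              # plays the role of self.bytes_sent
        self.lasttime = time.time()

    def try_send(self, send_partial):
        """send_partial(n) sends up to n bytes and returns the amount sent."""
        now = time.time()
        self.debt -= (now - self.lasttime) * self.rate   # credit for elapsed time
        self.lasttime = now
        while self.debt <= 0:
            sent = send_partial(self.unitsize)
            if sent == 0:            # peer backlogged or nothing left to send
                break
            self.debt += sent
        # caller reschedules the next attempt this many seconds from now
        return max(self.debt / self.rate, 0.0)

A caller would invoke try_send whenever data is queued and hand the returned delay to its scheduler, as the patched code does with self.sched(self.try_send, self.bytes_sent / self.upload_rate).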
+DEBUG = False + +MAX_RATE_PERIOD = 20.0 +MAX_RATE = 10e10 +PING_BOUNDARY = 1.2 +PING_SAMPLES = 7 +PING_DISCARDS = 1 +PING_THRESHHOLD = 5 +PING_DELAY = 5 # cycles 'til first upward adjustment +PING_DELAY_NEXT = 2 # 'til next +ADJUST_UP = 1.05 +ADJUST_DOWN = 0.95 +UP_DELAY_FIRST = 5 +UP_DELAY_NEXT = 2 +SLOTS_STARTING = 6 +SLOTS_FACTOR = 1.66/1000 + +class RateLimiter: + def __init__(self, sched, unitsize, slotsfunc = lambda x: None): + self.sched = sched + self.last = None + self.unitsize = unitsize + self.slotsfunc = slotsfunc + self.measure = Measure(MAX_RATE_PERIOD) + self.autoadjust = False + self.upload_rate = MAX_RATE * 1000 + self.slots = SLOTS_STARTING # garbage if not automatic + + def set_upload_rate(self, rate): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "RateLimiter: set_upload_rate", rate + # rate = -1 # test automatic + if rate < 0: + if self.autoadjust: + return + self.autoadjust = True + self.autoadjustup = 0 + self.pings = [] + rate = MAX_RATE + self.slots = SLOTS_STARTING + self.slotsfunc(self.slots) + else: + self.autoadjust = False + if not rate: + rate = MAX_RATE + self.upload_rate = rate * 1000 + self.lasttime = clock() + self.bytes_sent = 0 + + def queue(self, conn): + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "RateLimiter: queue", conn + assert conn.next_upload is None + if self.last is None: + self.last = conn + conn.next_upload = conn + self.try_send(True) + else: + conn.next_upload = self.last.next_upload + self.last.next_upload = conn +# 2fastbt_ + if not conn.connection.is_coordinator_con(): + self.last = conn +# _2fastbt + + def try_send(self, check_time = False): + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "RateLimiter: try_send" + t = clock() + self.bytes_sent -= (t - self.lasttime) * self.upload_rate + #print 'try_send: bytes_sent: %s' % self.bytes_sent + self.lasttime = t + if check_time: + self.bytes_sent = max(self.bytes_sent, 0) + cur = self.last.next_upload + while self.bytes_sent <= 0: + bytes = cur.send_partial(self.unitsize) + self.bytes_sent += bytes + self.measure.update_rate(bytes) + if bytes == 0 or cur.backlogged(): + if self.last is cur: + self.last = None + cur.next_upload = None + break + else: + self.last.next_upload = cur.next_upload + cur.next_upload = None + cur = self.last.next_upload + else: +# 2fastbt_ + if not cur.connection.is_coordinator_con() or not cur.upload.buffer: +# _2fastbt + self.last = cur + cur = cur.next_upload +# 2fastbt_ + else: + pass +# _2fastbt + else: + self.sched(self.try_send, self.bytes_sent / self.upload_rate) + + def adjust_sent(self, bytes): + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "RateLimiter: adjust_sent", bytes + self.bytes_sent = min(self.bytes_sent+bytes, self.upload_rate*3) + self.measure.update_rate(bytes) + + + def ping(self, delay): + ##raise Exception('Is this called?') + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", delay + if not self.autoadjust: + return + self.pings.append(delay > PING_BOUNDARY) + if len(self.pings) < PING_SAMPLES+PING_DISCARDS: + return + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'RateLimiter: cycle' + pings = sum(self.pings[PING_DISCARDS:]) + del self.pings[:] + if pings >= PING_THRESHHOLD: # assume flooded + if self.upload_rate == MAX_RATE: + self.upload_rate = self.measure.get_rate()*ADJUST_DOWN + else: + self.upload_rate = min(self.upload_rate, + 
self.measure.get_rate()*1.1) + self.upload_rate = max(int(self.upload_rate*ADJUST_DOWN), 2) + self.slots = int(sqrt(self.upload_rate*SLOTS_FACTOR)) + self.slotsfunc(self.slots) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'RateLimiter: adjust down to '+str(self.upload_rate) + self.lasttime = clock() + self.bytes_sent = 0 + self.autoadjustup = UP_DELAY_FIRST + else: # not flooded + if self.upload_rate == MAX_RATE: + return + self.autoadjustup -= 1 + if self.autoadjustup: + return + self.upload_rate = int(self.upload_rate*ADJUST_UP) + self.slots = int(sqrt(self.upload_rate*SLOTS_FACTOR)) + self.slotsfunc(self.slots) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'RateLimiter: adjust up to '+str(self.upload_rate) + self.lasttime = clock() + self.bytes_sent = 0 + self.autoadjustup = UP_DELAY_NEXT + + + + diff --git a/tribler-mod/Tribler/Core/BitTornado/RateMeasure.py b/tribler-mod/Tribler/Core/BitTornado/RateMeasure.py new file mode 100644 index 0000000..1340cbb --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/RateMeasure.py @@ -0,0 +1,71 @@ +from time import localtime, strftime +# Written by Bram Cohen +# see LICENSE.txt for license information + +from clock import clock +try: + True +except: + True = 1 + False = 0 + +FACTOR = 0.999 + +class RateMeasure: + def __init__(self): + self.last = None + self.time = 1.0 + self.got = 0.0 + self.remaining = None + self.broke = False + self.got_anything = False + self.last_checked = None + self.rate = 0 + + def data_came_in(self, amount): + if not self.got_anything: + self.got_anything = True + self.last = clock() + return + self.update(amount) + + def data_rejected(self, amount): + pass + + def get_time_left(self, left): + t = clock() + if not self.got_anything: + return None + if t - self.last > 15: + self.update(0) + try: + remaining = left/self.rate + delta = max(remaining/20, 2) + if self.remaining is None: + self.remaining = remaining + elif abs(self.remaining-remaining) > delta: + self.remaining = remaining + else: + self.remaining -= t - self.last_checked + except ZeroDivisionError: + self.remaining = None + if self.remaining is not None and self.remaining < 0.1: + self.remaining = 0.1 + self.last_checked = t + return self.remaining + + def update(self, amount): + t = clock() + t1 = int(t) + l1 = int(self.last) + for i in xrange(l1, t1): + self.time *= FACTOR + self.got *= FACTOR + self.got += amount + if t - self.last < 20: + self.time += t - self.last + self.last = t + try: + self.rate = self.got / self.time + except ZeroDivisionError: + pass diff --git a/tribler-mod/Tribler/Core/BitTornado/RateMeasure.py.bak b/tribler-mod/Tribler/Core/BitTornado/RateMeasure.py.bak new file mode 100644 index 0000000..1c42a2f --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/RateMeasure.py.bak @@ -0,0 +1,70 @@ +# Written by Bram Cohen +# see LICENSE.txt for license information + +from clock import clock +try: + True +except: + True = 1 + False = 0 + +FACTOR = 0.999 + +class RateMeasure: + def __init__(self): + self.last = None + self.time = 1.0 + self.got = 0.0 + self.remaining = None + self.broke = False + self.got_anything = False + self.last_checked = None + self.rate = 0 + + def data_came_in(self, amount): + if not self.got_anything: + self.got_anything = True + self.last = clock() + return + self.update(amount) + + def data_rejected(self, amount): + pass + + def get_time_left(self, left): + t = clock() + if not self.got_anything: + return None + if t - self.last > 15: + 
self.update(0) + try: + remaining = left/self.rate + delta = max(remaining/20, 2) + if self.remaining is None: + self.remaining = remaining + elif abs(self.remaining-remaining) > delta: + self.remaining = remaining + else: + self.remaining -= t - self.last_checked + except ZeroDivisionError: + self.remaining = None + if self.remaining is not None and self.remaining < 0.1: + self.remaining = 0.1 + self.last_checked = t + return self.remaining + + def update(self, amount): + t = clock() + t1 = int(t) + l1 = int(self.last) + for i in xrange(l1, t1): + self.time *= FACTOR + self.got *= FACTOR + self.got += amount + if t - self.last < 20: + self.time += t - self.last + self.last = t + try: + self.rate = self.got / self.time + except ZeroDivisionError: + pass diff --git a/tribler-mod/Tribler/Core/BitTornado/RawServer.py b/tribler-mod/Tribler/Core/BitTornado/RawServer.py new file mode 100644 index 0000000..a6180c6 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/RawServer.py @@ -0,0 +1,263 @@ +from time import localtime, strftime +# Written by Bram Cohen and Pawel Garbacki +# see LICENSE.txt for license information + +from bisect import insort +from SocketHandler import SocketHandler +import socket +from cStringIO import StringIO +from traceback import print_exc +from select import error +from threading import Event, RLock +from clock import clock +import sys +import time + +try: + True +except: + True = 1 + False = 0 + +DEBUG = False + +def autodetect_ipv6(): + try: + assert sys.version_info >= (2, 3) + assert socket.has_ipv6 + socket.socket(socket.AF_INET6, socket.SOCK_STREAM) + except: + return 0 + return 1 + +def autodetect_socket_style(): + if sys.platform.find('linux') < 0: + return 1 + else: + try: + f = open('/proc/sys/net/ipv6/bindv6only', 'r') + dual_socket_style = int(f.read()) + f.close() + return int(not dual_socket_style) + except: + return 0 + + +READSIZE = 100000 + +class RawServer: + def __init__(self, doneflag, timeout_check_interval, timeout, noisy = True, + ipv6_enable = True, failfunc = lambda x: None, errorfunc = None, + sockethandler = None, excflag = Event()): + self.timeout_check_interval = timeout_check_interval + self.timeout = timeout + self.servers = {} + self.single_sockets = {} + self.dead_from_write = [] + self.doneflag = doneflag + self.noisy = noisy + self.failfunc = failfunc + self.errorfunc = errorfunc + self.exccount = 0 + self.funcs = [] + self.externally_added = [] + self.finished = Event() + self.tasks_to_kill = [] + self.excflag = excflag + self.lock = RLock() + + if sockethandler is None: + sockethandler = SocketHandler(timeout, ipv6_enable, READSIZE) + self.sockethandler = sockethandler + self.add_task(self.scan_for_timeouts, timeout_check_interval) + + def get_exception_flag(self): + return self.excflag + + def _add_task(self, func, delay, id = None): + if delay < 0: + delay = 0 + insort(self.funcs, (clock() + delay, func, id)) + + def add_task(self, func, delay = 0, id = None): + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rawserver: add_task(",func,delay,")" + if delay < 0: + delay = 0 + self.lock.acquire() + self.externally_added.append((func, delay, id)) + self.lock.release() + + def scan_for_timeouts(self): + self.add_task(self.scan_for_timeouts, self.timeout_check_interval) + self.sockethandler.scan_for_timeouts() + + def bind(self, port, bind = '', reuse = False, + ipv6_socket_style = 1): + self.sockethandler.bind(port, bind, reuse, ipv6_socket_style) + + def find_and_bind(self, first_try, minport, 
maxport, bind = '', reuse = False, + ipv6_socket_style = 1, randomizer = False): +# 2fastbt_ + result = self.sockethandler.find_and_bind(first_try, minport, maxport, bind, reuse, + ipv6_socket_style, randomizer) +# _2fastbt + return result + + def start_connection_raw(self, dns, socktype, handler = None): + return self.sockethandler.start_connection_raw(dns, socktype, handler) + + def start_connection(self, dns, handler = None, randomize = False): + return self.sockethandler.start_connection(dns, handler, randomize) + + def get_stats(self): + return self.sockethandler.get_stats() + + def pop_external(self): + self.lock.acquire() + while self.externally_added: + (a, b, c) = self.externally_added.pop(0) + self._add_task(a, b, c) + self.lock.release() + + def listen_forever(self, handler): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rawserver: listen forever()" + # handler=btlanuchmany: MultiHandler, btdownloadheadless: Encoder + self.sockethandler.set_handler(handler) + try: + while not self.doneflag.isSet(): + try: + self.pop_external() + self._kill_tasks() + if self.funcs: + period = self.funcs[0][0] + 0.001 - clock() + else: + period = 2 ** 30 + if period < 0: + period = 0 + + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rawserver: do_poll",period + + events = self.sockethandler.do_poll(period) + if self.doneflag.isSet(): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rawserver: stopping because done flag set" + return + while self.funcs and self.funcs[0][0] <= clock(): + garbage1, func, id = self.funcs.pop(0) + if id in self.tasks_to_kill: + pass + try: +# print func.func_name + if DEBUG: + if func.func_name != "_bgalloc": + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","RawServer:f",func.func_name + #st = time.time() + func() + #et = time.time() + #diff = et - st + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",func,"took %.5f" % (diff) + + except (SystemError, MemoryError), e: + self.failfunc(e) + return + except KeyboardInterrupt,e: +# self.exception(e) + return + except error: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rawserver: func: ERROR exception" + print_exc() + pass + except Exception,e: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rawserver: func: any exception" + print_exc() + if self.noisy: + self.exception(e) + self.sockethandler.close_dead() + self.sockethandler.handle_events(events) + if self.doneflag.isSet(): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rawserver: stopping because done flag set2" + return + self.sockethandler.close_dead() + except (SystemError, MemoryError), e: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rawserver: SYS/MEM exception",e + self.failfunc(e) + return + except error: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rawserver: ERROR exception" + print_exc() + if self.doneflag.isSet(): + return + except KeyboardInterrupt,e: + self.failfunc(e) + return + except Exception,e: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rawserver: other exception" + print_exc() + self.exception(e) + ## Arno: Don't stop till we drop + ##if self.exccount > 10: + ## print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rawserver: stopping because exccount > 10" + ## return + finally: 
+# self.sockethandler.shutdown() + self.finished.set() + + def is_finished(self): + return self.finished.isSet() + + def wait_until_finished(self): + self.finished.wait() + + def _kill_tasks(self): + if self.tasks_to_kill: + new_funcs = [] + for (t, func, id) in self.funcs: + if id not in self.tasks_to_kill: + new_funcs.append((t, func, id)) + self.funcs = new_funcs + self.tasks_to_kill = [] + + def kill_tasks(self, id): + self.tasks_to_kill.append(id) + + def exception(self,e,kbint=False): + if not kbint: + self.excflag.set() + self.exccount += 1 + if self.errorfunc is None: + print_exc() + else: + if not kbint: # don't report here if it's a keyboard interrupt + self.errorfunc(e) + + def shutdown(self): + self.sockethandler.shutdown() + + + # + # Interface for Khashmir + # + def create_udpsocket(self,port,host): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rawudp: create_udp_socket",host,port + return self.sockethandler.create_udpsocket(port,host) + + def start_listening_udp(self,serversocket,handler): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rawudp: start_listen:",serversocket,handler + self.sockethandler.start_listening_udp(serversocket,handler) + + def stop_listening_udp(self,serversocket): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rawudp: stop_listen:",serversocket + self.sockethandler.stop_listening_udp(serversocket) + \ No newline at end of file diff --git a/tribler-mod/Tribler/Core/BitTornado/RawServer.py.bak b/tribler-mod/Tribler/Core/BitTornado/RawServer.py.bak new file mode 100644 index 0000000..70480c2 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/RawServer.py.bak @@ -0,0 +1,262 @@ +# Written by Bram Cohen and Pawel Garbacki +# see LICENSE.txt for license information + +from bisect import insort +from SocketHandler import SocketHandler +import socket +from cStringIO import StringIO +from traceback import print_exc +from select import error +from threading import Event, RLock +from clock import clock +import sys +import time + +try: + True +except: + True = 1 + False = 0 + +DEBUG = False + +def autodetect_ipv6(): + try: + assert sys.version_info >= (2, 3) + assert socket.has_ipv6 + socket.socket(socket.AF_INET6, socket.SOCK_STREAM) + except: + return 0 + return 1 + +def autodetect_socket_style(): + if sys.platform.find('linux') < 0: + return 1 + else: + try: + f = open('/proc/sys/net/ipv6/bindv6only', 'r') + dual_socket_style = int(f.read()) + f.close() + return int(not dual_socket_style) + except: + return 0 + + +READSIZE = 100000 + +class RawServer: + def __init__(self, doneflag, timeout_check_interval, timeout, noisy = True, + ipv6_enable = True, failfunc = lambda x: None, errorfunc = None, + sockethandler = None, excflag = Event()): + self.timeout_check_interval = timeout_check_interval + self.timeout = timeout + self.servers = {} + self.single_sockets = {} + self.dead_from_write = [] + self.doneflag = doneflag + self.noisy = noisy + self.failfunc = failfunc + self.errorfunc = errorfunc + self.exccount = 0 + self.funcs = [] + self.externally_added = [] + self.finished = Event() + self.tasks_to_kill = [] + self.excflag = excflag + self.lock = RLock() + + if sockethandler is None: + sockethandler = SocketHandler(timeout, ipv6_enable, READSIZE) + self.sockethandler = sockethandler + self.add_task(self.scan_for_timeouts, timeout_check_interval) + + def get_exception_flag(self): + return self.excflag + + def _add_task(self, func, delay, id = None): + if 
delay < 0: + delay = 0 + insort(self.funcs, (clock() + delay, func, id)) + + def add_task(self, func, delay = 0, id = None): + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rawserver: add_task(",func,delay,")" + if delay < 0: + delay = 0 + self.lock.acquire() + self.externally_added.append((func, delay, id)) + self.lock.release() + + def scan_for_timeouts(self): + self.add_task(self.scan_for_timeouts, self.timeout_check_interval) + self.sockethandler.scan_for_timeouts() + + def bind(self, port, bind = '', reuse = False, + ipv6_socket_style = 1): + self.sockethandler.bind(port, bind, reuse, ipv6_socket_style) + + def find_and_bind(self, first_try, minport, maxport, bind = '', reuse = False, + ipv6_socket_style = 1, randomizer = False): +# 2fastbt_ + result = self.sockethandler.find_and_bind(first_try, minport, maxport, bind, reuse, + ipv6_socket_style, randomizer) +# _2fastbt + return result + + def start_connection_raw(self, dns, socktype, handler = None): + return self.sockethandler.start_connection_raw(dns, socktype, handler) + + def start_connection(self, dns, handler = None, randomize = False): + return self.sockethandler.start_connection(dns, handler, randomize) + + def get_stats(self): + return self.sockethandler.get_stats() + + def pop_external(self): + self.lock.acquire() + while self.externally_added: + (a, b, c) = self.externally_added.pop(0) + self._add_task(a, b, c) + self.lock.release() + + def listen_forever(self, handler): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rawserver: listen forever()" + # handler=btlanuchmany: MultiHandler, btdownloadheadless: Encoder + self.sockethandler.set_handler(handler) + try: + while not self.doneflag.isSet(): + try: + self.pop_external() + self._kill_tasks() + if self.funcs: + period = self.funcs[0][0] + 0.001 - clock() + else: + period = 2 ** 30 + if period < 0: + period = 0 + + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rawserver: do_poll",period + + events = self.sockethandler.do_poll(period) + if self.doneflag.isSet(): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rawserver: stopping because done flag set" + return + while self.funcs and self.funcs[0][0] <= clock(): + garbage1, func, id = self.funcs.pop(0) + if id in self.tasks_to_kill: + pass + try: +# print func.func_name + if DEBUG: + if func.func_name != "_bgalloc": + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","RawServer:f",func.func_name + #st = time.time() + func() + #et = time.time() + #diff = et - st + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",func,"took %.5f" % (diff) + + except (SystemError, MemoryError), e: + self.failfunc(e) + return + except KeyboardInterrupt,e: +# self.exception(e) + return + except error: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rawserver: func: ERROR exception" + print_exc() + pass + except Exception,e: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rawserver: func: any exception" + print_exc() + if self.noisy: + self.exception(e) + self.sockethandler.close_dead() + self.sockethandler.handle_events(events) + if self.doneflag.isSet(): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rawserver: stopping because done flag set2" + return + self.sockethandler.close_dead() + except (SystemError, MemoryError), e: + if DEBUG: + print >>sys.stderr, 
strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rawserver: SYS/MEM exception",e + self.failfunc(e) + return + except error: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rawserver: ERROR exception" + print_exc() + if self.doneflag.isSet(): + return + except KeyboardInterrupt,e: + self.failfunc(e) + return + except Exception,e: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rawserver: other exception" + print_exc() + self.exception(e) + ## Arno: Don't stop till we drop + ##if self.exccount > 10: + ## print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rawserver: stopping because exccount > 10" + ## return + finally: +# self.sockethandler.shutdown() + self.finished.set() + + def is_finished(self): + return self.finished.isSet() + + def wait_until_finished(self): + self.finished.wait() + + def _kill_tasks(self): + if self.tasks_to_kill: + new_funcs = [] + for (t, func, id) in self.funcs: + if id not in self.tasks_to_kill: + new_funcs.append((t, func, id)) + self.funcs = new_funcs + self.tasks_to_kill = [] + + def kill_tasks(self, id): + self.tasks_to_kill.append(id) + + def exception(self,e,kbint=False): + if not kbint: + self.excflag.set() + self.exccount += 1 + if self.errorfunc is None: + print_exc() + else: + if not kbint: # don't report here if it's a keyboard interrupt + self.errorfunc(e) + + def shutdown(self): + self.sockethandler.shutdown() + + + # + # Interface for Khashmir + # + def create_udpsocket(self,port,host): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rawudp: create_udp_socket",host,port + return self.sockethandler.create_udpsocket(port,host) + + def start_listening_udp(self,serversocket,handler): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rawudp: start_listen:",serversocket,handler + self.sockethandler.start_listening_udp(serversocket,handler) + + def stop_listening_udp(self,serversocket): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rawudp: stop_listen:",serversocket + self.sockethandler.stop_listening_udp(serversocket) + \ No newline at end of file diff --git a/tribler-mod/Tribler/Core/BitTornado/ServerPortHandler.py b/tribler-mod/Tribler/Core/BitTornado/ServerPortHandler.py new file mode 100644 index 0000000..36e2a21 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/ServerPortHandler.py @@ -0,0 +1,239 @@ +from time import localtime, strftime +# Written by John Hoffman +# see LICENSE.txt for license information + +import sys +from cStringIO import StringIO +from binascii import b2a_hex +#from RawServer import RawServer +try: + True +except: + True = 1 + False = 0 + +# 2fastbt_ +from BT1.Encrypter import protocol_name +# _2fastbt + +def toint(s): + return long(b2a_hex(s), 16) + +default_task_id = [] + +DEBUG = False + +def show(s): + for i in xrange(len(s)): + print ord(s[i]), + print + +class SingleRawServer: + def __init__(self, info_hash, multihandler, doneflag, protocol): + self.info_hash = info_hash + self.doneflag = doneflag + self.protocol = protocol + self.multihandler = multihandler + self.rawserver = multihandler.rawserver + self.finished = False + self.running = False + self.handler = None + self.taskqueue = [] + + def shutdown(self): + if not self.finished: + self.multihandler.shutdown_torrent(self.info_hash) + + def _shutdown(self): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SingleRawServer: _shutdown" + if not 
self.finished: + self.finished = True + self.running = False + self.rawserver.kill_tasks(self.info_hash) + if self.handler: + self.handler.close_all() + + def _external_connection_made(self, c, options, msg_remainder): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SingleRawServer: _external_conn_made, running?",self.running + if self.running: + c.set_handler(self.handler) + self.handler.externally_handshaked_connection_made( + c, options, msg_remainder) + + ### RawServer functions ### + + def add_task(self, func, delay=0, id = default_task_id): + if id is default_task_id: + id = self.info_hash + if not self.finished: + self.rawserver.add_task(func, delay, id) + +# def bind(self, port, bind = '', reuse = False): +# pass # not handled here + + def start_connection(self, dns, handler = None): + if not handler: + handler = self.handler + c = self.rawserver.start_connection(dns, handler) + return c + +# def listen_forever(self, handler): +# pass # don't call with this + + def start_listening(self, handler): + self.handler = handler # Encoder + self.running = True + return self.shutdown # obviously, doesn't listen forever + + def is_finished(self): + return self.finished + + def get_exception_flag(self): + return self.rawserver.get_exception_flag() + +class NewSocketHandler: # hand a new socket off where it belongs + def __init__(self, multihandler, connection): # connection: SingleSocket + self.multihandler = multihandler + self.connection = connection + connection.set_handler(self) + self.closed = False + self.buffer = StringIO() + self.complete = False + self.next_len, self.next_func = 1, self.read_header_len + self.multihandler.rawserver.add_task(self._auto_close, 15) + + def _auto_close(self): + if not self.complete: + self.close() + + def close(self): + if not self.closed: + self.connection.close() + self.closed = True + +# header format: +# connection.write(chr(len(protocol_name)) + protocol_name + +# (chr(0) * 8) + self.encrypter.download_id + self.encrypter.my_id) + + # copied from Encrypter and modified + + def read_header_len(self, s): + if s == 'G': + self.protocol = 'HTTP' + self.firstbyte = s + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","NewSocketHandler: Got HTTP connection" + return True + else: + l = ord(s) + return l, self.read_header + + def read_header(self, s): + self.protocol = s + return 8, self.read_reserved + + def read_reserved(self, s): + self.options = s + return 20, self.read_download_id + + def read_download_id(self, s): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","NewSocketHandler: Swarm id is",`s`,self.connection.socket.getpeername() + if self.multihandler.singlerawservers.has_key(s): + if self.multihandler.singlerawservers[s].protocol == self.protocol: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","NewSocketHandler: Found rawserver for swarm id" + return True + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","NewSocketHandler: No rawserver found for swarm id",`s` + return None + + def read_dead(self, s): + return None + + def data_came_in(self, garbage, s): +# if DEBUG: +# print "NewSocketHandler data came in", sha(s).hexdigest() + while 1: + if self.closed: + return + i = self.next_len - self.buffer.tell() + if i > len(s): + self.buffer.write(s) + return + self.buffer.write(s[:i]) + s = s[i:] + m = self.buffer.getvalue() + self.buffer.reset() + self.buffer.truncate() + try: + x = 
self.next_func(m) + except: + self.next_len, self.next_func = 1, self.read_dead + raise + if x is None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","NewSocketHandler:",self.next_func,"returned None" + self.close() + return + if x == True: # ready to process + if self.protocol == 'HTTP': + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","NewSocketHandler: Reporting HTTP connection" + self.multihandler.httphandler.external_connection_made(self.connection) + self.multihandler.httphandler.data_came_in(self.connection,self.firstbyte) + self.multihandler.httphandler.data_came_in(self.connection,s) + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","NewSocketHandler: Reporting connection via",self.multihandler.singlerawservers[m]._external_connection_made + self.multihandler.singlerawservers[m]._external_connection_made(self.connection, self.options, s) + self.complete = True + return + self.next_len, self.next_func = x + + def connection_flushed(self, ss): + pass + + def connection_lost(self, ss): + self.closed = True + +class MultiHandler: + def __init__(self, rawserver, doneflag): + self.rawserver = rawserver + self.masterdoneflag = doneflag + self.singlerawservers = {} + self.connections = {} + self.taskqueues = {} + self.httphandler = None + + def newRawServer(self, info_hash, doneflag, protocol=protocol_name): + new = SingleRawServer(info_hash, self, doneflag, protocol) + self.singlerawservers[info_hash] = new + return new + + def shutdown_torrent(self, info_hash): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","MultiHandler: shutdown_torrent",`info_hash` + self.singlerawservers[info_hash]._shutdown() + del self.singlerawservers[info_hash] + + def listen_forever(self): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","MultiHandler: listen_forever()" + self.rawserver.listen_forever(self) + for srs in self.singlerawservers.values(): + srs.finished = True + srs.running = False + srs.doneflag.set() + + def set_httphandler(self,httphandler): + self.httphandler = httphandler + + ### RawServer handler functions ### + # be wary of name collisions + + def external_connection_made(self, ss): + # ss: SingleSocket + NewSocketHandler(self, ss) diff --git a/tribler-mod/Tribler/Core/BitTornado/ServerPortHandler.py.bak b/tribler-mod/Tribler/Core/BitTornado/ServerPortHandler.py.bak new file mode 100644 index 0000000..d69e592 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/ServerPortHandler.py.bak @@ -0,0 +1,238 @@ +# Written by John Hoffman +# see LICENSE.txt for license information + +import sys +from cStringIO import StringIO +from binascii import b2a_hex +#from RawServer import RawServer +try: + True +except: + True = 1 + False = 0 + +# 2fastbt_ +from BT1.Encrypter import protocol_name +# _2fastbt + +def toint(s): + return long(b2a_hex(s), 16) + +default_task_id = [] + +DEBUG = False + +def show(s): + for i in xrange(len(s)): + print ord(s[i]), + print + +class SingleRawServer: + def __init__(self, info_hash, multihandler, doneflag, protocol): + self.info_hash = info_hash + self.doneflag = doneflag + self.protocol = protocol + self.multihandler = multihandler + self.rawserver = multihandler.rawserver + self.finished = False + self.running = False + self.handler = None + self.taskqueue = [] + + def shutdown(self): + if not self.finished: + self.multihandler.shutdown_torrent(self.info_hash) + + def _shutdown(self): + if DEBUG: + 
print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SingleRawServer: _shutdown" + if not self.finished: + self.finished = True + self.running = False + self.rawserver.kill_tasks(self.info_hash) + if self.handler: + self.handler.close_all() + + def _external_connection_made(self, c, options, msg_remainder): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SingleRawServer: _external_conn_made, running?",self.running + if self.running: + c.set_handler(self.handler) + self.handler.externally_handshaked_connection_made( + c, options, msg_remainder) + + ### RawServer functions ### + + def add_task(self, func, delay=0, id = default_task_id): + if id is default_task_id: + id = self.info_hash + if not self.finished: + self.rawserver.add_task(func, delay, id) + +# def bind(self, port, bind = '', reuse = False): +# pass # not handled here + + def start_connection(self, dns, handler = None): + if not handler: + handler = self.handler + c = self.rawserver.start_connection(dns, handler) + return c + +# def listen_forever(self, handler): +# pass # don't call with this + + def start_listening(self, handler): + self.handler = handler # Encoder + self.running = True + return self.shutdown # obviously, doesn't listen forever + + def is_finished(self): + return self.finished + + def get_exception_flag(self): + return self.rawserver.get_exception_flag() + +class NewSocketHandler: # hand a new socket off where it belongs + def __init__(self, multihandler, connection): # connection: SingleSocket + self.multihandler = multihandler + self.connection = connection + connection.set_handler(self) + self.closed = False + self.buffer = StringIO() + self.complete = False + self.next_len, self.next_func = 1, self.read_header_len + self.multihandler.rawserver.add_task(self._auto_close, 15) + + def _auto_close(self): + if not self.complete: + self.close() + + def close(self): + if not self.closed: + self.connection.close() + self.closed = True + +# header format: +# connection.write(chr(len(protocol_name)) + protocol_name + +# (chr(0) * 8) + self.encrypter.download_id + self.encrypter.my_id) + + # copied from Encrypter and modified + + def read_header_len(self, s): + if s == 'G': + self.protocol = 'HTTP' + self.firstbyte = s + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","NewSocketHandler: Got HTTP connection" + return True + else: + l = ord(s) + return l, self.read_header + + def read_header(self, s): + self.protocol = s + return 8, self.read_reserved + + def read_reserved(self, s): + self.options = s + return 20, self.read_download_id + + def read_download_id(self, s): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","NewSocketHandler: Swarm id is",`s`,self.connection.socket.getpeername() + if self.multihandler.singlerawservers.has_key(s): + if self.multihandler.singlerawservers[s].protocol == self.protocol: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","NewSocketHandler: Found rawserver for swarm id" + return True + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","NewSocketHandler: No rawserver found for swarm id",`s` + return None + + def read_dead(self, s): + return None + + def data_came_in(self, garbage, s): +# if DEBUG: +# print "NewSocketHandler data came in", sha(s).hexdigest() + while 1: + if self.closed: + return + i = self.next_len - self.buffer.tell() + if i > len(s): + self.buffer.write(s) + return + self.buffer.write(s[:i]) + s = 
s[i:] + m = self.buffer.getvalue() + self.buffer.reset() + self.buffer.truncate() + try: + x = self.next_func(m) + except: + self.next_len, self.next_func = 1, self.read_dead + raise + if x is None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","NewSocketHandler:",self.next_func,"returned None" + self.close() + return + if x == True: # ready to process + if self.protocol == 'HTTP': + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","NewSocketHandler: Reporting HTTP connection" + self.multihandler.httphandler.external_connection_made(self.connection) + self.multihandler.httphandler.data_came_in(self.connection,self.firstbyte) + self.multihandler.httphandler.data_came_in(self.connection,s) + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","NewSocketHandler: Reporting connection via",self.multihandler.singlerawservers[m]._external_connection_made + self.multihandler.singlerawservers[m]._external_connection_made(self.connection, self.options, s) + self.complete = True + return + self.next_len, self.next_func = x + + def connection_flushed(self, ss): + pass + + def connection_lost(self, ss): + self.closed = True + +class MultiHandler: + def __init__(self, rawserver, doneflag): + self.rawserver = rawserver + self.masterdoneflag = doneflag + self.singlerawservers = {} + self.connections = {} + self.taskqueues = {} + self.httphandler = None + + def newRawServer(self, info_hash, doneflag, protocol=protocol_name): + new = SingleRawServer(info_hash, self, doneflag, protocol) + self.singlerawservers[info_hash] = new + return new + + def shutdown_torrent(self, info_hash): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","MultiHandler: shutdown_torrent",`info_hash` + self.singlerawservers[info_hash]._shutdown() + del self.singlerawservers[info_hash] + + def listen_forever(self): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","MultiHandler: listen_forever()" + self.rawserver.listen_forever(self) + for srs in self.singlerawservers.values(): + srs.finished = True + srs.running = False + srs.doneflag.set() + + def set_httphandler(self,httphandler): + self.httphandler = httphandler + + ### RawServer handler functions ### + # be wary of name collisions + + def external_connection_made(self, ss): + # ss: SingleSocket + NewSocketHandler(self, ss) diff --git a/tribler-mod/Tribler/Core/BitTornado/SocketHandler.py b/tribler-mod/Tribler/Core/BitTornado/SocketHandler.py new file mode 100644 index 0000000..562a3ca --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/SocketHandler.py @@ -0,0 +1,551 @@ +from time import localtime, strftime +# Written by Bram Cohen +# see LICENSE.txt for license information + +import socket +import errno +try: + from select import poll, POLLIN, POLLOUT, POLLERR, POLLHUP + timemult = 1000 +except ImportError: + from selectpoll import poll, POLLIN, POLLOUT, POLLERR, POLLHUP + timemult = 1 +from time import sleep +from clock import clock +import sys +from random import shuffle, randrange +from traceback import print_exc + +try: + True +except: + True = 1 + False = 0 + +DEBUG = False + +all = POLLIN | POLLOUT + +if sys.platform == 'win32': + SOCKET_BLOCK_ERRORCODE=10035 # WSAEWOULDBLOCK +else: + SOCKET_BLOCK_ERRORCODE=errno.EWOULDBLOCK + + +class SingleSocket: + """ + There are two places to create SingleSocket: + incoming connection -- SocketHandler.handle_events + outgoing connection -- 
SocketHandler.start_connection_raw + """ + + def __init__(self, socket_handler, sock, handler, ip = None): + self.socket_handler = socket_handler + self.socket = sock + self.handler = handler + self.buffer = [] + self.last_hit = clock() + self.fileno = sock.fileno() + self.connected = False + self.skipped = 0 +# self.check = StreamCheck() + self.myip = None + self.myport = -1 + self.ip = None + self.port = -1 + try: + (self.myip,self.myport) = self.socket.getsockname() + (self.ip,self.port) = self.socket.getpeername() + except: + if ip is None: + self.ip = 'unknown' + else: + self.ip = ip + + def get_ip(self, real=False): + if real: + try: + (self.ip,self.port) = self.socket.getpeername() + except: + pass + return self.ip + + def get_port(self, real=False): + if real: + self.get_ip(True) + return self.port + + def get_myip(self, real=False): + if real: + try: + (self.myip,self.myport) = self.socket.getsockname() + except: + pass + return self.myip + + def get_myport(self, real=False): + if real: + self.get_myip(True) + return self.myport + + def close(self): + ''' + for x in xrange(5,0,-1): + try: + f = inspect.currentframe(x).f_code + print (f.co_filename,f.co_firstlineno,f.co_name) + del f + except: + pass + print '' + ''' + assert self.socket + self.connected = False + sock = self.socket + self.socket = None + self.buffer = [] + del self.socket_handler.single_sockets[self.fileno] + self.socket_handler.poll.unregister(sock) + sock.close() + + def shutdown(self, val): + self.socket.shutdown(val) + + def is_flushed(self): + return not self.buffer + + def write(self, s): +# self.check.write(s) + # Arno: fishy concurrency problem, sometimes self.socket is None + if self.socket is None: + return + #assert self.socket is not None + self.buffer.append(s) + if len(self.buffer) == 1: + self.try_write() + + def try_write(self): + + if self.connected: + dead = False + try: + while self.buffer: + buf = self.buffer[0] + amount = self.socket.send(buf) + if amount == 0: + self.skipped += 1 + break + self.skipped = 0 + if amount != len(buf): + self.buffer[0] = buf[amount:] + break + del self.buffer[0] + except socket.error, e: + #if DEBUG: + # print_exc(file=sys.stderr) + blocked=False + try: + blocked = (e[0] == SOCKET_BLOCK_ERRORCODE) + dead = not blocked + except: + dead = True + if not blocked: + self.skipped += 1 + if self.skipped >= 5: + dead = True + if dead: + self.socket_handler.dead_from_write.append(self) + return + if self.buffer: + self.socket_handler.poll.register(self.socket, all) + else: + self.socket_handler.poll.register(self.socket, POLLIN) + + def set_handler(self, handler): # can be: NewSocketHandler, Encoder, En_Connection + self.handler = handler + + +class SocketHandler: + def __init__(self, timeout, ipv6_enable, readsize = 100000): + self.timeout = timeout + self.ipv6_enable = ipv6_enable + self.readsize = readsize + self.poll = poll() + # {socket: SingleSocket} + self.single_sockets = {} + self.dead_from_write = [] + self.max_connects = 1000 + self.servers = {} + self.btengine_said_reachable = False + + def scan_for_timeouts(self): + t = clock() - self.timeout + tokill = [] + for s in self.single_sockets.values(): + if s.last_hit < t: + tokill.append(s) + for k in tokill: + if k.socket is not None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: scan_timeout closing connection",k.get_ip() + self._close_socket(k) + + def bind(self, port, bind = [], reuse = False, ipv6_socket_style = 1): + port = int(port) + addrinfos = [] + 
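SingleSocket.write/try_write above queue outgoing data, push out as much as the non-blocking socket accepts, keep the unsent tail at the head of the buffer, and fall back to waiting for POLLOUT when the kernel buffer is full. Below is a condensed sketch of that queue-and-drain pattern; buffered_send is an assumed helper name, not Tribler API.

# Sketch of the queue-and-drain write pattern used by SingleSocket.try_write.
# buffered_send is a made-up helper; EWOULDBLOCK/EAGAIN mean "retry on POLLOUT".
import errno
import socket

def buffered_send(sock, buffers):
    """Drain a list of pending byte strings on a non-blocking socket.
    Returns True if everything was flushed, False if data is still queued."""
    while buffers:
        try:
            sent = sock.send(buffers[0])
        except socket.error as e:
            if e.errno in (errno.EWOULDBLOCK, errno.EAGAIN):
                return False         # kernel send buffer full; wait for POLLOUT
            raise                    # real error; caller closes the connection
        if sent < len(buffers[0]):
            buffers[0] = buffers[0][sent:]   # keep the unsent tail, stop for now
            return False
        del buffers[0]
    return True                      # flushed; caller can go back to POLLIN only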
self.servers = {} + self.interfaces = [] + # if bind != [] bind to all specified addresses (can be IPs or hostnames) + # else bind to default ipv6 and ipv4 address + if bind: + if self.ipv6_enable: + socktype = socket.AF_UNSPEC + else: + socktype = socket.AF_INET + for addr in bind: + if sys.version_info < (2, 2): + addrinfos.append((socket.AF_INET, None, None, None, (addr, port))) + else: + addrinfos.extend(socket.getaddrinfo(addr, port, + socktype, socket.SOCK_STREAM)) + else: + if self.ipv6_enable: + addrinfos.append([socket.AF_INET6, None, None, None, ('', port)]) + if not addrinfos or ipv6_socket_style != 0: + addrinfos.append([socket.AF_INET, None, None, None, ('', port)]) + for addrinfo in addrinfos: + try: + server = socket.socket(addrinfo[0], socket.SOCK_STREAM) + if reuse: + server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + server.setblocking(0) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: Try to bind socket on", addrinfo[4], "..." + server.bind(addrinfo[4]) + self.servers[server.fileno()] = server + if bind: + self.interfaces.append(server.getsockname()[0]) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: OK" + server.listen(64) + self.poll.register(server, POLLIN) + except socket.error, e: + for server in self.servers.values(): + try: + server.close() + except: + pass + if self.ipv6_enable and ipv6_socket_style == 0 and self.servers: + raise socket.error('blocked port (may require ipv6_binds_v4 to be set)') + raise socket.error(str(e)) + if not self.servers: + raise socket.error('unable to open server port') + self.port = port + + def find_and_bind(self, first_try, minport, maxport, bind = '', reuse = False, + ipv6_socket_style = 1, randomizer = False): + e = 'maxport less than minport - no ports to check' + if maxport-minport < 50 or not randomizer: + portrange = range(minport, maxport+1) + if randomizer: + shuffle(portrange) + portrange = portrange[:20] # check a maximum of 20 ports + else: + portrange = [] + while len(portrange) < 20: + listen_port = randrange(minport, maxport+1) + if not listen_port in portrange: + portrange.append(listen_port) + if first_try != 0: # try 22 first, because TU only opens port 22 for SSH... + try: + self.bind(first_try, bind, reuse = reuse, + ipv6_socket_style = ipv6_socket_style) + return first_try + except socket.error, e: + pass + for listen_port in portrange: + try: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", listen_port, bind, reuse + self.bind(listen_port, bind, reuse = reuse, + ipv6_socket_style = ipv6_socket_style) + return listen_port + except socket.error, e: + raise + raise socket.error(str(e)) + + + def set_handler(self, handler): + self.handler = handler + + + def start_connection_raw(self, dns, socktype = socket.AF_INET, handler = None): + # handler = Encoder, self.handler = Multihandler + if handler is None: + handler = self.handler + sock = socket.socket(socktype, socket.SOCK_STREAM) + sock.setblocking(0) + try: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: Initiate connection to",dns,"with socket #",sock.fileno() + # Arno,2007-01-23: http://docs.python.org/lib/socket-objects.html + # says that connect_ex returns an error code (and can still throw + # exceptions). The original code never checked the return code. 
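As the comment above notes, connect_ex on a non-blocking socket reports progress through its return value: 0 means connected, EINPROGRESS (or WSAEWOULDBLOCK, 10035, on win32) means the handshake is still running, and anything else is a genuine failure. A small self-contained illustration of that check, not the patched start_connection_raw itself:

# Sketch: interpreting connect_ex() on a non-blocking socket.
import errno
import socket
import sys

def nonblocking_connect(addr):
    """Start a non-blocking connect and check the connect_ex return code.
    Illustrative only; nonblocking_connect is an assumed name."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setblocking(0)
    err = sock.connect_ex(addr)
    ok = (0, errno.EINPROGRESS)
    if sys.platform == 'win32':
        ok += (10035,)               # WSAEWOULDBLOCK
    if err not in ok:
        sock.close()
        raise socket.error(err, errno.errorcode.get(err, 'unknown error'))
    return sock   # outcome is learned later, when poll reports the socket writable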
+ # + err = sock.connect_ex(dns) + if DEBUG: + if err == 0: + msg = 'No error' + else: + msg = errno.errorcode[err] + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: connect_ex on socket #",sock.fileno(),"returned",err,msg + if err != 0: + if sys.platform == 'win32' and err == 10035: + # Arno, 2007-02-23: win32 always returns WSAEWOULDBLOCK, whether + # the connect is to a live peer or not. Win32's version + # of EINPROGRESS + pass + elif err == errno.EINPROGRESS: # or err == errno.EALREADY or err == errno.EWOULDBLOCK: + # [Stevens98] says that UNICES return EINPROGRESS when the connect + # does not immediately succeed, which is almost always the case. + pass + else: + raise socket.error((err,errno.errorcode[err])) + except socket.error, e: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: SocketError in connect_ex",str(e) + raise + except Exception, e: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: Exception in connect_ex",str(e) + raise socket.error(str(e)) + self.poll.register(sock, POLLIN) + s = SingleSocket(self, sock, handler, dns[0]) # create socket to connect the peers obtained from tracker + self.single_sockets[sock.fileno()] = s + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: Created Socket" + return s + + + def start_connection(self, dns, handler = None, randomize = False): + if handler is None: + handler = self.handler + if sys.version_info < (2, 2): + s = self.start_connection_raw(dns, socket.AF_INET, handler) + else: +# if self.ipv6_enable: +# socktype = socket.AF_UNSPEC +# else: +# socktype = socket.AF_INET + try: + try: + """ + Arno: When opening a new connection, the network thread calls the + getaddrinfo() function (=DNS resolve), as apparently the input + sometimes is a hostname. At the same time the tracker thread uses + this same function to resolve the tracker name to an IP address. + However, on Python for Windows this method has concurrency control + protection that allows only 1 request at a time. + + In some cases resolving the tracker name takes a very long time, + meanwhile blocking the network thread!!!! And that only wanted to + resolve some IP address to some IP address, i.e., do nothing!!! + + Sol: don't call getaddrinfo() is the input is an IP address, and + submit a bug to python that it shouldn't lock when the op is + a null op + """ + socket.inet_aton(dns[0]) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SockHand: start_conn: after inet_aton",dns[0],"<",dns,">" + addrinfos=[(socket.AF_INET, None, None, None, (dns[0], dns[1]))] + except: + #print_exc() + try: + # Jie: we attempt to use this socktype to connect ipv6 addresses. + socktype = socket.AF_UNSPEC + addrinfos = socket.getaddrinfo(dns[0], int(dns[1]), + socktype, socket.SOCK_STREAM) + except: + socktype = socket.AF_INET + addrinfos = socket.getaddrinfo(dns[0], int(dns[1]), + socktype, socket.SOCK_STREAM) + except socket.error, e: + raise + except Exception, e: + raise socket.error(str(e)) + if randomize: + shuffle(addrinfos) + for addrinfo in addrinfos: + try: + s = self.start_connection_raw(addrinfo[4], addrinfo[0], handler) + break + except Exception,e: + print_exc() + pass # FIXME Arno: ???? 
raise e + else: + raise socket.error('unable to connect') + return s + + + def _sleep(self): + sleep(1) + + def handle_events(self, events): + for sock, event in events: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: event on sock#",sock + s = self.servers.get(sock) # socket.socket + if s: + if event & (POLLHUP | POLLERR) != 0: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: Got event, close server socket" + self.poll.unregister(s) + if not is_udp_socket(s): + s.close() + del self.servers[sock] + elif is_udp_socket(s): + try: + (data,addr) = s.recvfrom(8192) + if not data: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: UDP no-data",addr + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: Got UDP data",addr,"len",len(data) + self.handlerudp.data_came_in(addr, data) + + except socket.error, e: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: UDP Socket error",str(e) + elif len(self.single_sockets) < self.max_connects: + try: + newsock, addr = s.accept() + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: Got connection from",newsock.getpeername() + if not self.btengine_said_reachable: + from Tribler.Core.NATFirewall.DialbackMsgHandler import DialbackMsgHandler + dmh = DialbackMsgHandler.getInstance() + dmh.network_btengine_reachable_callback() + self.btengine_said_reachable = True + + newsock.setblocking(0) + nss = SingleSocket(self, newsock, self.handler) # create socket for incoming peers and tracker + self.single_sockets[newsock.fileno()] = nss + self.poll.register(newsock, POLLIN) + self.handler.external_connection_made(nss) + + except socket.error,e: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: SocketError while accepting new connection",str(e) + self._sleep() +# 2fastbt_ + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: too many connects" +# _2fastbt + else: + s = self.single_sockets.get(sock) + if not s: + continue + if (event & (POLLHUP | POLLERR)): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: Got event, connect socket got error" + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: Got event, connect socket got error",s.ip,s.port + self._close_socket(s) + continue + if (event & POLLIN): + try: + s.last_hit = clock() + data = s.socket.recv(100000) + if not data: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: no-data closing connection",s.get_ip(),s.get_port() + self._close_socket(s) + else: + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: Got data",s.get_ip(),s.get_port(),"len",len(data) + + # btlaunchmany: NewSocketHandler, btdownloadheadless: Encrypter.Connection + s.handler.data_came_in(s, data) + except socket.error, e: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: Socket error",str(e) + code, msg = e + if code != SOCKET_BLOCK_ERRORCODE: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: closing connection because not WOULDBLOCK",s.get_ip(),"error",code + self._close_socket(s) + continue + if (event & POLLOUT) and s.socket and not 
s.is_flushed(): + s.connected = True + s.try_write() + if s.is_flushed(): + s.handler.connection_flushed(s) + + def close_dead(self): + while self.dead_from_write: + old = self.dead_from_write + self.dead_from_write = [] + for s in old: + if s.socket: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: close_dead closing connection",s.get_ip() + self._close_socket(s) + + def _close_socket(self, s): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: closing connection to ",s.get_ip() + s.close() + s.handler.connection_lost(s) + + def do_poll(self, t): + r = self.poll.poll(t*timemult) + if r is None: + connects = len(self.single_sockets) + to_close = int(connects*0.05)+1 # close 5% of sockets + self.max_connects = connects-to_close + closelist = self.single_sockets.values() + shuffle(closelist) + closelist = closelist[:to_close] + for sock in closelist: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: do_poll closing connection",sock.get_ip() + self._close_socket(sock) + return [] + return r + + def get_stats(self): + return { 'interfaces': self.interfaces, + 'port': self.port } + + + def shutdown(self): + for ss in self.single_sockets.values(): + try: + ss.close() + except: + pass + for server in self.servers.values(): + try: + server.close() + except: + pass + + # + # Interface for Khasmir, called from RawServer + # + # + def create_udpsocket(self,port,host): + server = socket.socket(socket.AF_INET,socket.SOCK_DGRAM) + server.bind((host,port)) + self.servers[server.fileno()] = server + server.setblocking(0) + return server + + def start_listening_udp(self,serversocket,handler): + self.handlerudp = handler + self.poll.register(serversocket, POLLIN) + + def stop_listening_udp(self,serversocket): + del self.servers[serversocket.fileno()] + + +def is_udp_socket(sock): + return sock.getsockopt(socket.SOL_SOCKET,socket.SO_TYPE) == socket.SOCK_DGRAM diff --git a/tribler-mod/Tribler/Core/BitTornado/SocketHandler.py.bak b/tribler-mod/Tribler/Core/BitTornado/SocketHandler.py.bak new file mode 100644 index 0000000..a25bf64 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/SocketHandler.py.bak @@ -0,0 +1,550 @@ +# Written by Bram Cohen +# see LICENSE.txt for license information + +import socket +import errno +try: + from select import poll, POLLIN, POLLOUT, POLLERR, POLLHUP + timemult = 1000 +except ImportError: + from selectpoll import poll, POLLIN, POLLOUT, POLLERR, POLLHUP + timemult = 1 +from time import sleep +from clock import clock +import sys +from random import shuffle, randrange +from traceback import print_exc + +try: + True +except: + True = 1 + False = 0 + +DEBUG = False + +all = POLLIN | POLLOUT + +if sys.platform == 'win32': + SOCKET_BLOCK_ERRORCODE=10035 # WSAEWOULDBLOCK +else: + SOCKET_BLOCK_ERRORCODE=errno.EWOULDBLOCK + + +class SingleSocket: + """ + There are two places to create SingleSocket: + incoming connection -- SocketHandler.handle_events + outgoing connection -- SocketHandler.start_connection_raw + """ + + def __init__(self, socket_handler, sock, handler, ip = None): + self.socket_handler = socket_handler + self.socket = sock + self.handler = handler + self.buffer = [] + self.last_hit = clock() + self.fileno = sock.fileno() + self.connected = False + self.skipped = 0 +# self.check = StreamCheck() + self.myip = None + self.myport = -1 + self.ip = None + self.port = -1 + try: + (self.myip,self.myport) = self.socket.getsockname() + 
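handle_events above fans out the poll() results: a readable listening socket accepts a new peer, POLLHUP/POLLERR drops the connection, POLLIN hands received bytes to the connection's handler, and POLLOUT drains queued writes via try_write. A skeletal version of that dispatch, with the four callbacks as assumed hooks rather than Tribler names:

# Skeleton of the event dispatch done by SocketHandler.handle_events (roughly).
from select import POLLIN, POLLOUT, POLLERR, POLLHUP

def dispatch(events, servers, connections,
             accept_new, read_ready, write_ready, close_conn):
    """Dispatch (fd, eventmask) pairs; the callbacks are assumed hooks."""
    for fd, event in events:
        if fd in servers:
            if not (event & (POLLHUP | POLLERR)):
                accept_new(servers[fd])        # POLLIN on a listening socket
            continue
        conn = connections.get(fd)
        if conn is None:
            continue
        if event & (POLLHUP | POLLERR):
            close_conn(conn)                   # error or hangup: drop the connection
        else:
            if event & POLLIN:
                read_ready(conn)               # recv() and hand bytes to the handler
            if event & POLLOUT:
                write_ready(conn)              # drain queued writes (try_write)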
(self.ip,self.port) = self.socket.getpeername() + except: + if ip is None: + self.ip = 'unknown' + else: + self.ip = ip + + def get_ip(self, real=False): + if real: + try: + (self.ip,self.port) = self.socket.getpeername() + except: + pass + return self.ip + + def get_port(self, real=False): + if real: + self.get_ip(True) + return self.port + + def get_myip(self, real=False): + if real: + try: + (self.myip,self.myport) = self.socket.getsockname() + except: + pass + return self.myip + + def get_myport(self, real=False): + if real: + self.get_myip(True) + return self.myport + + def close(self): + ''' + for x in xrange(5,0,-1): + try: + f = inspect.currentframe(x).f_code + print (f.co_filename,f.co_firstlineno,f.co_name) + del f + except: + pass + print '' + ''' + assert self.socket + self.connected = False + sock = self.socket + self.socket = None + self.buffer = [] + del self.socket_handler.single_sockets[self.fileno] + self.socket_handler.poll.unregister(sock) + sock.close() + + def shutdown(self, val): + self.socket.shutdown(val) + + def is_flushed(self): + return not self.buffer + + def write(self, s): +# self.check.write(s) + # Arno: fishy concurrency problem, sometimes self.socket is None + if self.socket is None: + return + #assert self.socket is not None + self.buffer.append(s) + if len(self.buffer) == 1: + self.try_write() + + def try_write(self): + + if self.connected: + dead = False + try: + while self.buffer: + buf = self.buffer[0] + amount = self.socket.send(buf) + if amount == 0: + self.skipped += 1 + break + self.skipped = 0 + if amount != len(buf): + self.buffer[0] = buf[amount:] + break + del self.buffer[0] + except socket.error, e: + #if DEBUG: + # print_exc(file=sys.stderr) + blocked=False + try: + blocked = (e[0] == SOCKET_BLOCK_ERRORCODE) + dead = not blocked + except: + dead = True + if not blocked: + self.skipped += 1 + if self.skipped >= 5: + dead = True + if dead: + self.socket_handler.dead_from_write.append(self) + return + if self.buffer: + self.socket_handler.poll.register(self.socket, all) + else: + self.socket_handler.poll.register(self.socket, POLLIN) + + def set_handler(self, handler): # can be: NewSocketHandler, Encoder, En_Connection + self.handler = handler + + +class SocketHandler: + def __init__(self, timeout, ipv6_enable, readsize = 100000): + self.timeout = timeout + self.ipv6_enable = ipv6_enable + self.readsize = readsize + self.poll = poll() + # {socket: SingleSocket} + self.single_sockets = {} + self.dead_from_write = [] + self.max_connects = 1000 + self.servers = {} + self.btengine_said_reachable = False + + def scan_for_timeouts(self): + t = clock() - self.timeout + tokill = [] + for s in self.single_sockets.values(): + if s.last_hit < t: + tokill.append(s) + for k in tokill: + if k.socket is not None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: scan_timeout closing connection",k.get_ip() + self._close_socket(k) + + def bind(self, port, bind = [], reuse = False, ipv6_socket_style = 1): + port = int(port) + addrinfos = [] + self.servers = {} + self.interfaces = [] + # if bind != [] bind to all specified addresses (can be IPs or hostnames) + # else bind to default ipv6 and ipv4 address + if bind: + if self.ipv6_enable: + socktype = socket.AF_UNSPEC + else: + socktype = socket.AF_INET + for addr in bind: + if sys.version_info < (2, 2): + addrinfos.append((socket.AF_INET, None, None, None, (addr, port))) + else: + addrinfos.extend(socket.getaddrinfo(addr, port, + socktype, socket.SOCK_STREAM)) + 
else: + if self.ipv6_enable: + addrinfos.append([socket.AF_INET6, None, None, None, ('', port)]) + if not addrinfos or ipv6_socket_style != 0: + addrinfos.append([socket.AF_INET, None, None, None, ('', port)]) + for addrinfo in addrinfos: + try: + server = socket.socket(addrinfo[0], socket.SOCK_STREAM) + if reuse: + server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + server.setblocking(0) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: Try to bind socket on", addrinfo[4], "..." + server.bind(addrinfo[4]) + self.servers[server.fileno()] = server + if bind: + self.interfaces.append(server.getsockname()[0]) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: OK" + server.listen(64) + self.poll.register(server, POLLIN) + except socket.error, e: + for server in self.servers.values(): + try: + server.close() + except: + pass + if self.ipv6_enable and ipv6_socket_style == 0 and self.servers: + raise socket.error('blocked port (may require ipv6_binds_v4 to be set)') + raise socket.error(str(e)) + if not self.servers: + raise socket.error('unable to open server port') + self.port = port + + def find_and_bind(self, first_try, minport, maxport, bind = '', reuse = False, + ipv6_socket_style = 1, randomizer = False): + e = 'maxport less than minport - no ports to check' + if maxport-minport < 50 or not randomizer: + portrange = range(minport, maxport+1) + if randomizer: + shuffle(portrange) + portrange = portrange[:20] # check a maximum of 20 ports + else: + portrange = [] + while len(portrange) < 20: + listen_port = randrange(minport, maxport+1) + if not listen_port in portrange: + portrange.append(listen_port) + if first_try != 0: # try 22 first, because TU only opens port 22 for SSH... + try: + self.bind(first_try, bind, reuse = reuse, + ipv6_socket_style = ipv6_socket_style) + return first_try + except socket.error, e: + pass + for listen_port in portrange: + try: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", listen_port, bind, reuse + self.bind(listen_port, bind, reuse = reuse, + ipv6_socket_style = ipv6_socket_style) + return listen_port + except socket.error, e: + raise + raise socket.error(str(e)) + + + def set_handler(self, handler): + self.handler = handler + + + def start_connection_raw(self, dns, socktype = socket.AF_INET, handler = None): + # handler = Encoder, self.handler = Multihandler + if handler is None: + handler = self.handler + sock = socket.socket(socktype, socket.SOCK_STREAM) + sock.setblocking(0) + try: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: Initiate connection to",dns,"with socket #",sock.fileno() + # Arno,2007-01-23: http://docs.python.org/lib/socket-objects.html + # says that connect_ex returns an error code (and can still throw + # exceptions). The original code never checked the return code. + # + err = sock.connect_ex(dns) + if DEBUG: + if err == 0: + msg = 'No error' + else: + msg = errno.errorcode[err] + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: connect_ex on socket #",sock.fileno(),"returned",err,msg + if err != 0: + if sys.platform == 'win32' and err == 10035: + # Arno, 2007-02-23: win32 always returns WSAEWOULDBLOCK, whether + # the connect is to a live peer or not. 
Win32's version + # of EINPROGRESS + pass + elif err == errno.EINPROGRESS: # or err == errno.EALREADY or err == errno.EWOULDBLOCK: + # [Stevens98] says that UNICES return EINPROGRESS when the connect + # does not immediately succeed, which is almost always the case. + pass + else: + raise socket.error((err,errno.errorcode[err])) + except socket.error, e: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: SocketError in connect_ex",str(e) + raise + except Exception, e: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: Exception in connect_ex",str(e) + raise socket.error(str(e)) + self.poll.register(sock, POLLIN) + s = SingleSocket(self, sock, handler, dns[0]) # create socket to connect the peers obtained from tracker + self.single_sockets[sock.fileno()] = s + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: Created Socket" + return s + + + def start_connection(self, dns, handler = None, randomize = False): + if handler is None: + handler = self.handler + if sys.version_info < (2, 2): + s = self.start_connection_raw(dns, socket.AF_INET, handler) + else: +# if self.ipv6_enable: +# socktype = socket.AF_UNSPEC +# else: +# socktype = socket.AF_INET + try: + try: + """ + Arno: When opening a new connection, the network thread calls the + getaddrinfo() function (=DNS resolve), as apparently the input + sometimes is a hostname. At the same time the tracker thread uses + this same function to resolve the tracker name to an IP address. + However, on Python for Windows this method has concurrency control + protection that allows only 1 request at a time. + + In some cases resolving the tracker name takes a very long time, + meanwhile blocking the network thread!!!! And that only wanted to + resolve some IP address to some IP address, i.e., do nothing!!! + + Sol: don't call getaddrinfo() is the input is an IP address, and + submit a bug to python that it shouldn't lock when the op is + a null op + """ + socket.inet_aton(dns[0]) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SockHand: start_conn: after inet_aton",dns[0],"<",dns,">" + addrinfos=[(socket.AF_INET, None, None, None, (dns[0], dns[1]))] + except: + #print_exc() + try: + # Jie: we attempt to use this socktype to connect ipv6 addresses. + socktype = socket.AF_UNSPEC + addrinfos = socket.getaddrinfo(dns[0], int(dns[1]), + socktype, socket.SOCK_STREAM) + except: + socktype = socket.AF_INET + addrinfos = socket.getaddrinfo(dns[0], int(dns[1]), + socktype, socket.SOCK_STREAM) + except socket.error, e: + raise + except Exception, e: + raise socket.error(str(e)) + if randomize: + shuffle(addrinfos) + for addrinfo in addrinfos: + try: + s = self.start_connection_raw(addrinfo[4], addrinfo[0], handler) + break + except Exception,e: + print_exc() + pass # FIXME Arno: ???? 
raise e + else: + raise socket.error('unable to connect') + return s + + + def _sleep(self): + sleep(1) + + def handle_events(self, events): + for sock, event in events: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: event on sock#",sock + s = self.servers.get(sock) # socket.socket + if s: + if event & (POLLHUP | POLLERR) != 0: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: Got event, close server socket" + self.poll.unregister(s) + if not is_udp_socket(s): + s.close() + del self.servers[sock] + elif is_udp_socket(s): + try: + (data,addr) = s.recvfrom(8192) + if not data: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: UDP no-data",addr + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: Got UDP data",addr,"len",len(data) + self.handlerudp.data_came_in(addr, data) + + except socket.error, e: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: UDP Socket error",str(e) + elif len(self.single_sockets) < self.max_connects: + try: + newsock, addr = s.accept() + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: Got connection from",newsock.getpeername() + if not self.btengine_said_reachable: + from Tribler.Core.NATFirewall.DialbackMsgHandler import DialbackMsgHandler + dmh = DialbackMsgHandler.getInstance() + dmh.network_btengine_reachable_callback() + self.btengine_said_reachable = True + + newsock.setblocking(0) + nss = SingleSocket(self, newsock, self.handler) # create socket for incoming peers and tracker + self.single_sockets[newsock.fileno()] = nss + self.poll.register(newsock, POLLIN) + self.handler.external_connection_made(nss) + + except socket.error,e: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: SocketError while accepting new connection",str(e) + self._sleep() +# 2fastbt_ + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: too many connects" +# _2fastbt + else: + s = self.single_sockets.get(sock) + if not s: + continue + if (event & (POLLHUP | POLLERR)): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: Got event, connect socket got error" + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: Got event, connect socket got error",s.ip,s.port + self._close_socket(s) + continue + if (event & POLLIN): + try: + s.last_hit = clock() + data = s.socket.recv(100000) + if not data: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: no-data closing connection",s.get_ip(),s.get_port() + self._close_socket(s) + else: + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: Got data",s.get_ip(),s.get_port(),"len",len(data) + + # btlaunchmany: NewSocketHandler, btdownloadheadless: Encrypter.Connection + s.handler.data_came_in(s, data) + except socket.error, e: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: Socket error",str(e) + code, msg = e + if code != SOCKET_BLOCK_ERRORCODE: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: closing connection because not WOULDBLOCK",s.get_ip(),"error",code + self._close_socket(s) + continue + if (event & POLLOUT) and s.socket and not 
s.is_flushed(): + s.connected = True + s.try_write() + if s.is_flushed(): + s.handler.connection_flushed(s) + + def close_dead(self): + while self.dead_from_write: + old = self.dead_from_write + self.dead_from_write = [] + for s in old: + if s.socket: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: close_dead closing connection",s.get_ip() + self._close_socket(s) + + def _close_socket(self, s): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: closing connection to ",s.get_ip() + s.close() + s.handler.connection_lost(s) + + def do_poll(self, t): + r = self.poll.poll(t*timemult) + if r is None: + connects = len(self.single_sockets) + to_close = int(connects*0.05)+1 # close 5% of sockets + self.max_connects = connects-to_close + closelist = self.single_sockets.values() + shuffle(closelist) + closelist = closelist[:to_close] + for sock in closelist: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SocketHandler: do_poll closing connection",sock.get_ip() + self._close_socket(sock) + return [] + return r + + def get_stats(self): + return { 'interfaces': self.interfaces, + 'port': self.port } + + + def shutdown(self): + for ss in self.single_sockets.values(): + try: + ss.close() + except: + pass + for server in self.servers.values(): + try: + server.close() + except: + pass + + # + # Interface for Khasmir, called from RawServer + # + # + def create_udpsocket(self,port,host): + server = socket.socket(socket.AF_INET,socket.SOCK_DGRAM) + server.bind((host,port)) + self.servers[server.fileno()] = server + server.setblocking(0) + return server + + def start_listening_udp(self,serversocket,handler): + self.handlerudp = handler + self.poll.register(serversocket, POLLIN) + + def stop_listening_udp(self,serversocket): + del self.servers[serversocket.fileno()] + + +def is_udp_socket(sock): + return sock.getsockopt(socket.SOL_SOCKET,socket.SO_TYPE) == socket.SOCK_DGRAM diff --git a/tribler-mod/Tribler/Core/BitTornado/__init__.py b/tribler-mod/Tribler/Core/BitTornado/__init__.py new file mode 100644 index 0000000..7489049 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/__init__.py @@ -0,0 +1,99 @@ +from time import localtime, strftime +# Written by BitTornado authors and Arno Bakker +# see LICENSE.txt for license information + +## Arno: FIXME _idprefix is also defined in BitTornado.__init__ and that's the one +## actually used in connections, so make sure they are defined in one place +## (here) and correct. 
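+## For example, with version_id '5.1.2' the code below builds
+## _idprefix = 'R' + '5' + '1' + '2', padded with '-' to six characters
+## ('R512--'), so createPeerID('---') returns a 20-byte peer ID of the form
+## 'R512--' + '---' + <11 random mapbase64 characters>.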
+## + +version_id = '5.1.2' +product_name = 'Tribler' +version_short = 'Tribler-' + version_id + +version = version_short + ' (' + product_name + ')' +report_email = 'triblersoft@gmail.com' + +from types import StringType +from sha import sha +from time import time, clock +from string import strip +import socket +import random +try: + from os import getpid +except ImportError: + def getpid(): + return 1 +from base64 import decodestring +import sys +from traceback import print_exc + +mapbase64 = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz.-' + +# Arno: looking at Azureus BTPeerIDByteDecoder this letter is free +# 'T' is BitTornado, 'A' is ABC, 'TR' is Transmission +TRIBLER_PEERID_LETTER='R' +_idprefix = TRIBLER_PEERID_LETTER + +#for subver in version_short[2:].split('.'): +for subver in version_short.split('-')[1].split('.'): + try: + subver = int(subver) + except: + subver = 0 + _idprefix += mapbase64[subver] +_idprefix += ('-' * (6-len(_idprefix))) +_idrandom = [None] + + + + +def resetPeerIDs(): + try: + f = open('/dev/urandom', 'rb') + x = f.read(20) + f.close() + except: + # Arno: make sure there is some randomization when on win32 + random.seed() + x = '' + while len(x) < 20: + #r = random.randint(0,sys.maxint) + r = random.randint(0,255) + x += chr(r) + x = x[:20] + + s = '' + for i in x: + s += mapbase64[ord(i) & 0x3F] + _idrandom[0] = s[:11] # peer id = iprefix (6) + ins (3) + random + +def createPeerID(ins = '---'): + assert type(ins) is StringType + assert len(ins) == 3 + resetPeerIDs() + return _idprefix + ins + _idrandom[0] + +def decodePeerID(id): + client = None + version = None + try: + if id[0] == '-': + # Azureus type ID: + client = id[1:3] + encversion = id[3:7] + else: + # Shadow type ID: + client = id[0] + encversion = id[1:4] + version = '' + for i in range(len(encversion)): + for j in range(len(mapbase64)): + if mapbase64[j] == encversion[i]: + if len(version) > 0: + version += '.' + version += str(j) + except: + print_exc(file=sys.stderr) + return [client,version] diff --git a/tribler-mod/Tribler/Core/BitTornado/__init__.py.bak b/tribler-mod/Tribler/Core/BitTornado/__init__.py.bak new file mode 100644 index 0000000..ada64ea --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/__init__.py.bak @@ -0,0 +1,98 @@ +# Written by BitTornado authors and Arno Bakker +# see LICENSE.txt for license information + +## Arno: FIXME _idprefix is also defined in BitTornado.__init__ and that's the one +## actually used in connections, so make sure they are defined in one place +## (here) and correct. 
+## + +version_id = '5.1.2' +product_name = 'Tribler' +version_short = 'Tribler-' + version_id + +version = version_short + ' (' + product_name + ')' +report_email = 'triblersoft@gmail.com' + +from types import StringType +from sha import sha +from time import time, clock +from string import strip +import socket +import random +try: + from os import getpid +except ImportError: + def getpid(): + return 1 +from base64 import decodestring +import sys +from traceback import print_exc + +mapbase64 = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz.-' + +# Arno: looking at Azureus BTPeerIDByteDecoder this letter is free +# 'T' is BitTornado, 'A' is ABC, 'TR' is Transmission +TRIBLER_PEERID_LETTER='R' +_idprefix = TRIBLER_PEERID_LETTER + +#for subver in version_short[2:].split('.'): +for subver in version_short.split('-')[1].split('.'): + try: + subver = int(subver) + except: + subver = 0 + _idprefix += mapbase64[subver] +_idprefix += ('-' * (6-len(_idprefix))) +_idrandom = [None] + + + + +def resetPeerIDs(): + try: + f = open('/dev/urandom', 'rb') + x = f.read(20) + f.close() + except: + # Arno: make sure there is some randomization when on win32 + random.seed() + x = '' + while len(x) < 20: + #r = random.randint(0,sys.maxint) + r = random.randint(0,255) + x += chr(r) + x = x[:20] + + s = '' + for i in x: + s += mapbase64[ord(i) & 0x3F] + _idrandom[0] = s[:11] # peer id = iprefix (6) + ins (3) + random + +def createPeerID(ins = '---'): + assert type(ins) is StringType + assert len(ins) == 3 + resetPeerIDs() + return _idprefix + ins + _idrandom[0] + +def decodePeerID(id): + client = None + version = None + try: + if id[0] == '-': + # Azureus type ID: + client = id[1:3] + encversion = id[3:7] + else: + # Shadow type ID: + client = id[0] + encversion = id[1:4] + version = '' + for i in range(len(encversion)): + for j in range(len(mapbase64)): + if mapbase64[j] == encversion[i]: + if len(version) > 0: + version += '.' 
+ version += str(j) + except: + print_exc(file=sys.stderr) + return [client,version] diff --git a/tribler-mod/Tribler/Core/BitTornado/bencode.py b/tribler-mod/Tribler/Core/BitTornado/bencode.py new file mode 100644 index 0000000..df0fe70 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/bencode.py @@ -0,0 +1,345 @@ +from time import localtime, strftime +# Written by Petru Paler, Uoti Urpala, Ross Cohen and John Hoffman +# see LICENSE.txt for license information + +from types import IntType, LongType, StringType, ListType, TupleType, DictType +try: + from types import BooleanType +except ImportError: + BooleanType = None +try: + from types import UnicodeType +except ImportError: + UnicodeType = None + +from traceback import print_exc,print_stack +import sys + +DEBUG = False + +def decode_int(x, f): + f += 1 + newf = x.index('e', f) + try: + n = int(x[f:newf]) + except: + n = long(x[f:newf]) + if x[f] == '-': + if x[f + 1] == '0': + raise ValueError + elif x[f] == '0' and newf != f+1: + raise ValueError + return (n, newf+1) + +def decode_string(x, f): + colon = x.index(':', f) + try: + n = int(x[f:colon]) + except (OverflowError, ValueError): + n = long(x[f:colon]) + if x[f] == '0' and colon != f+1: + raise ValueError + colon += 1 + return (x[colon:colon+n], colon+n) + +def decode_unicode(x, f): + s, f = decode_string(x, f+1) + return (s.decode('UTF-8'), f) + +def decode_list(x, f): + r, f = [], f+1 + while x[f] != 'e': + v, f = decode_func[x[f]](x, f) + r.append(v) + return (r, f + 1) + +def decode_dict(x, f): + r, f = {}, f+1 + lastkey = None + while x[f] != 'e': + k, f = decode_string(x, f) + # Arno, 2008-09-12: uTorrent 1.8 violates the bencoding spec, its keys + # in an EXTEND handshake message are not sorted. Be liberal in what we + # receive: + ##if lastkey >= k: + ## raise ValueError + lastkey = k + r[k], f = decode_func[x[f]](x, f) + return (r, f + 1) + +decode_func = {} +decode_func['l'] = decode_list +decode_func['d'] = decode_dict +decode_func['i'] = decode_int +decode_func['0'] = decode_string +decode_func['1'] = decode_string +decode_func['2'] = decode_string +decode_func['3'] = decode_string +decode_func['4'] = decode_string +decode_func['5'] = decode_string +decode_func['6'] = decode_string +decode_func['7'] = decode_string +decode_func['8'] = decode_string +decode_func['9'] = decode_string +#decode_func['u'] = decode_unicode + +def bdecode(x, sloppy = 0): + try: + r, l = decode_func[x[0]](x, 0) +# except (IndexError, KeyError): + except (IndexError, KeyError, ValueError): + if DEBUG: + print_exc() + raise ValueError, "bad bencoded data" + if not sloppy and l != len(x): + raise ValueError, "bad bencoded data" + return r + +def test_bdecode(): + try: + bdecode('0:0:') + assert 0 + except ValueError: + pass + try: + bdecode('ie') + assert 0 + except ValueError: + pass + try: + bdecode('i341foo382e') + assert 0 + except ValueError: + pass + assert bdecode('i4e') == 4L + assert bdecode('i0e') == 0L + assert bdecode('i123456789e') == 123456789L + assert bdecode('i-10e') == -10L + try: + bdecode('i-0e') + assert 0 + except ValueError: + pass + try: + bdecode('i123') + assert 0 + except ValueError: + pass + try: + bdecode('') + assert 0 + except ValueError: + pass + try: + bdecode('i6easd') + assert 0 + except ValueError: + pass + try: + bdecode('35208734823ljdahflajhdf') + assert 0 + except ValueError: + pass + try: + bdecode('2:abfdjslhfld') + assert 0 + except ValueError: + pass + assert bdecode('0:') == '' + assert bdecode('3:abc') == 'abc' + assert 
bdecode('10:1234567890') == '1234567890' + try: + bdecode('02:xy') + assert 0 + except ValueError: + pass + try: + bdecode('l') + assert 0 + except ValueError: + pass + assert bdecode('le') == [] + try: + bdecode('leanfdldjfh') + assert 0 + except ValueError: + pass + assert bdecode('l0:0:0:e') == ['', '', ''] + try: + bdecode('relwjhrlewjh') + assert 0 + except ValueError: + pass + assert bdecode('li1ei2ei3ee') == [1, 2, 3] + assert bdecode('l3:asd2:xye') == ['asd', 'xy'] + assert bdecode('ll5:Alice3:Bobeli2ei3eee') == [['Alice', 'Bob'], [2, 3]] + try: + bdecode('d') + assert 0 + except ValueError: + pass + try: + bdecode('defoobar') + assert 0 + except ValueError: + pass + assert bdecode('de') == {} + assert bdecode('d3:agei25e4:eyes4:bluee') == {'age': 25, 'eyes': 'blue'} + assert bdecode('d8:spam.mp3d6:author5:Alice6:lengthi100000eee') == {'spam.mp3': {'author': 'Alice', 'length': 100000}} + try: + bdecode('d3:fooe') + assert 0 + except ValueError: + pass + try: + bdecode('di1e0:e') + assert 0 + except ValueError: + pass + try: + bdecode('d1:b0:1:a0:e') + assert 0 + except ValueError: + pass + try: + bdecode('d1:a0:1:a0:e') + assert 0 + except ValueError: + pass + try: + bdecode('i03e') + assert 0 + except ValueError: + pass + try: + bdecode('l01:ae') + assert 0 + except ValueError: + pass + try: + bdecode('9999:x') + assert 0 + except ValueError: + pass + try: + bdecode('l0:') + assert 0 + except ValueError: + pass + try: + bdecode('d0:0:') + assert 0 + except ValueError: + pass + try: + bdecode('d0:') + assert 0 + except ValueError: + pass + +bencached_marker = [] + +class Bencached: + def __init__(self, s): + self.marker = bencached_marker + self.bencoded = s + +BencachedType = type(Bencached('')) # insufficient, but good as a filter + +def encode_bencached(x, r): + assert x.marker == bencached_marker + r.append(x.bencoded) + +def encode_int(x, r): + r.extend(('i', str(x), 'e')) + +def encode_bool(x, r): + encode_int(int(x), r) + +def encode_string(x, r): + r.extend((str(len(x)), ':', x)) + +def encode_unicode(x, r): + #r.append('u') + encode_string(x.encode('UTF-8'), r) + +def encode_list(x, r): + r.append('l') + for e in x: + encode_func[type(e)](e, r) + r.append('e') + +def encode_dict(x, r): + r.append('d') + ilist = x.items() + ilist.sort() + for k, v in ilist: + try: + r.extend((str(len(k)), ':', k)) + except: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "k: %s" % k + raise + + encode_func[type(v)](v, r) + r.append('e') + +encode_func = {} +encode_func[BencachedType] = encode_bencached +encode_func[IntType] = encode_int +encode_func[LongType] = encode_int +encode_func[StringType] = encode_string +encode_func[ListType] = encode_list +encode_func[TupleType] = encode_list +encode_func[DictType] = encode_dict +if BooleanType: + encode_func[BooleanType] = encode_bool +if UnicodeType: + encode_func[UnicodeType] = encode_unicode + +def bencode(x): + r = [] + try: + encode_func[type(x)](x, r) + except: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bencode: *** error *** could not encode type %s (value: %s)" % (type(x), x) + print_stack() + + print_exc() + assert 0 + try: + return ''.join(r) + except: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bencode: join error",x + for elem in r: + print "elem",elem,"has type",type(elem) + print_exc() + return '' + +def test_bencode(): + assert bencode(4) == 'i4e' + assert bencode(0) == 'i0e' + assert bencode(-10) == 'i-10e' + assert 
bencode(12345678901234567890L) == 'i12345678901234567890e' + assert bencode('') == '0:' + assert bencode('abc') == '3:abc' + assert bencode('1234567890') == '10:1234567890' + assert bencode([]) == 'le' + assert bencode([1, 2, 3]) == 'li1ei2ei3ee' + assert bencode([['Alice', 'Bob'], [2, 3]]) == 'll5:Alice3:Bobeli2ei3eee' + assert bencode({}) == 'de' + assert bencode({'age': 25, 'eyes': 'blue'}) == 'd3:agei25e4:eyes4:bluee' + assert bencode({'spam.mp3': {'author': 'Alice', 'length': 100000}}) == 'd8:spam.mp3d6:author5:Alice6:lengthi100000eee' + try: + bencode({1: 'foo'}) + assert 0 + except AssertionError: + pass + + +try: + import psyco + psyco.bind(bdecode) + psyco.bind(bencode) +except ImportError: + pass diff --git a/tribler-mod/Tribler/Core/BitTornado/bencode.py.bak b/tribler-mod/Tribler/Core/BitTornado/bencode.py.bak new file mode 100644 index 0000000..152442e --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/bencode.py.bak @@ -0,0 +1,344 @@ +# Written by Petru Paler, Uoti Urpala, Ross Cohen and John Hoffman +# see LICENSE.txt for license information + +from types import IntType, LongType, StringType, ListType, TupleType, DictType +try: + from types import BooleanType +except ImportError: + BooleanType = None +try: + from types import UnicodeType +except ImportError: + UnicodeType = None + +from traceback import print_exc,print_stack +import sys + +DEBUG = False + +def decode_int(x, f): + f += 1 + newf = x.index('e', f) + try: + n = int(x[f:newf]) + except: + n = long(x[f:newf]) + if x[f] == '-': + if x[f + 1] == '0': + raise ValueError + elif x[f] == '0' and newf != f+1: + raise ValueError + return (n, newf+1) + +def decode_string(x, f): + colon = x.index(':', f) + try: + n = int(x[f:colon]) + except (OverflowError, ValueError): + n = long(x[f:colon]) + if x[f] == '0' and colon != f+1: + raise ValueError + colon += 1 + return (x[colon:colon+n], colon+n) + +def decode_unicode(x, f): + s, f = decode_string(x, f+1) + return (s.decode('UTF-8'), f) + +def decode_list(x, f): + r, f = [], f+1 + while x[f] != 'e': + v, f = decode_func[x[f]](x, f) + r.append(v) + return (r, f + 1) + +def decode_dict(x, f): + r, f = {}, f+1 + lastkey = None + while x[f] != 'e': + k, f = decode_string(x, f) + # Arno, 2008-09-12: uTorrent 1.8 violates the bencoding spec, its keys + # in an EXTEND handshake message are not sorted. 
Be liberal in what we + # receive: + ##if lastkey >= k: + ## raise ValueError + lastkey = k + r[k], f = decode_func[x[f]](x, f) + return (r, f + 1) + +decode_func = {} +decode_func['l'] = decode_list +decode_func['d'] = decode_dict +decode_func['i'] = decode_int +decode_func['0'] = decode_string +decode_func['1'] = decode_string +decode_func['2'] = decode_string +decode_func['3'] = decode_string +decode_func['4'] = decode_string +decode_func['5'] = decode_string +decode_func['6'] = decode_string +decode_func['7'] = decode_string +decode_func['8'] = decode_string +decode_func['9'] = decode_string +#decode_func['u'] = decode_unicode + +def bdecode(x, sloppy = 0): + try: + r, l = decode_func[x[0]](x, 0) +# except (IndexError, KeyError): + except (IndexError, KeyError, ValueError): + if DEBUG: + print_exc() + raise ValueError, "bad bencoded data" + if not sloppy and l != len(x): + raise ValueError, "bad bencoded data" + return r + +def test_bdecode(): + try: + bdecode('0:0:') + assert 0 + except ValueError: + pass + try: + bdecode('ie') + assert 0 + except ValueError: + pass + try: + bdecode('i341foo382e') + assert 0 + except ValueError: + pass + assert bdecode('i4e') == 4L + assert bdecode('i0e') == 0L + assert bdecode('i123456789e') == 123456789L + assert bdecode('i-10e') == -10L + try: + bdecode('i-0e') + assert 0 + except ValueError: + pass + try: + bdecode('i123') + assert 0 + except ValueError: + pass + try: + bdecode('') + assert 0 + except ValueError: + pass + try: + bdecode('i6easd') + assert 0 + except ValueError: + pass + try: + bdecode('35208734823ljdahflajhdf') + assert 0 + except ValueError: + pass + try: + bdecode('2:abfdjslhfld') + assert 0 + except ValueError: + pass + assert bdecode('0:') == '' + assert bdecode('3:abc') == 'abc' + assert bdecode('10:1234567890') == '1234567890' + try: + bdecode('02:xy') + assert 0 + except ValueError: + pass + try: + bdecode('l') + assert 0 + except ValueError: + pass + assert bdecode('le') == [] + try: + bdecode('leanfdldjfh') + assert 0 + except ValueError: + pass + assert bdecode('l0:0:0:e') == ['', '', ''] + try: + bdecode('relwjhrlewjh') + assert 0 + except ValueError: + pass + assert bdecode('li1ei2ei3ee') == [1, 2, 3] + assert bdecode('l3:asd2:xye') == ['asd', 'xy'] + assert bdecode('ll5:Alice3:Bobeli2ei3eee') == [['Alice', 'Bob'], [2, 3]] + try: + bdecode('d') + assert 0 + except ValueError: + pass + try: + bdecode('defoobar') + assert 0 + except ValueError: + pass + assert bdecode('de') == {} + assert bdecode('d3:agei25e4:eyes4:bluee') == {'age': 25, 'eyes': 'blue'} + assert bdecode('d8:spam.mp3d6:author5:Alice6:lengthi100000eee') == {'spam.mp3': {'author': 'Alice', 'length': 100000}} + try: + bdecode('d3:fooe') + assert 0 + except ValueError: + pass + try: + bdecode('di1e0:e') + assert 0 + except ValueError: + pass + try: + bdecode('d1:b0:1:a0:e') + assert 0 + except ValueError: + pass + try: + bdecode('d1:a0:1:a0:e') + assert 0 + except ValueError: + pass + try: + bdecode('i03e') + assert 0 + except ValueError: + pass + try: + bdecode('l01:ae') + assert 0 + except ValueError: + pass + try: + bdecode('9999:x') + assert 0 + except ValueError: + pass + try: + bdecode('l0:') + assert 0 + except ValueError: + pass + try: + bdecode('d0:0:') + assert 0 + except ValueError: + pass + try: + bdecode('d0:') + assert 0 + except ValueError: + pass + +bencached_marker = [] + +class Bencached: + def __init__(self, s): + self.marker = bencached_marker + self.bencoded = s + +BencachedType = type(Bencached('')) # insufficient, but good as a filter + 
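+# Usage sketch (illustrative): Bencached lets a caller splice an already
+# bencoded blob into a larger structure without re-encoding it, e.g.
+#   cached = Bencached(bencode({'interval': 1800}))
+#   bencode({'files': cached})   # -> 'd5:filesd8:intervali1800eee'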
+def encode_bencached(x, r): + assert x.marker == bencached_marker + r.append(x.bencoded) + +def encode_int(x, r): + r.extend(('i', str(x), 'e')) + +def encode_bool(x, r): + encode_int(int(x), r) + +def encode_string(x, r): + r.extend((str(len(x)), ':', x)) + +def encode_unicode(x, r): + #r.append('u') + encode_string(x.encode('UTF-8'), r) + +def encode_list(x, r): + r.append('l') + for e in x: + encode_func[type(e)](e, r) + r.append('e') + +def encode_dict(x, r): + r.append('d') + ilist = x.items() + ilist.sort() + for k, v in ilist: + try: + r.extend((str(len(k)), ':', k)) + except: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "k: %s" % k + raise + + encode_func[type(v)](v, r) + r.append('e') + +encode_func = {} +encode_func[BencachedType] = encode_bencached +encode_func[IntType] = encode_int +encode_func[LongType] = encode_int +encode_func[StringType] = encode_string +encode_func[ListType] = encode_list +encode_func[TupleType] = encode_list +encode_func[DictType] = encode_dict +if BooleanType: + encode_func[BooleanType] = encode_bool +if UnicodeType: + encode_func[UnicodeType] = encode_unicode + +def bencode(x): + r = [] + try: + encode_func[type(x)](x, r) + except: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bencode: *** error *** could not encode type %s (value: %s)" % (type(x), x) + print_stack() + + print_exc() + assert 0 + try: + return ''.join(r) + except: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bencode: join error",x + for elem in r: + print "elem",elem,"has type",type(elem) + print_exc() + return '' + +def test_bencode(): + assert bencode(4) == 'i4e' + assert bencode(0) == 'i0e' + assert bencode(-10) == 'i-10e' + assert bencode(12345678901234567890L) == 'i12345678901234567890e' + assert bencode('') == '0:' + assert bencode('abc') == '3:abc' + assert bencode('1234567890') == '10:1234567890' + assert bencode([]) == 'le' + assert bencode([1, 2, 3]) == 'li1ei2ei3ee' + assert bencode([['Alice', 'Bob'], [2, 3]]) == 'll5:Alice3:Bobeli2ei3eee' + assert bencode({}) == 'de' + assert bencode({'age': 25, 'eyes': 'blue'}) == 'd3:agei25e4:eyes4:bluee' + assert bencode({'spam.mp3': {'author': 'Alice', 'length': 100000}}) == 'd8:spam.mp3d6:author5:Alice6:lengthi100000eee' + try: + bencode({1: 'foo'}) + assert 0 + except AssertionError: + pass + + +try: + import psyco + psyco.bind(bdecode) + psyco.bind(bencode) +except ImportError: + pass diff --git a/tribler-mod/Tribler/Core/BitTornado/bitfield.py b/tribler-mod/Tribler/Core/BitTornado/bitfield.py new file mode 100644 index 0000000..b209070 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/bitfield.py @@ -0,0 +1,172 @@ +from time import localtime, strftime +# Written by Bram Cohen, Uoti Urpala, and John Hoffman +# see LICENSE.txt for license information + +try: + True +except: + True = 1 + False = 0 + bool = lambda x: not not x + +try: + sum([1]) + negsum = lambda a: len(a) - sum(a) +except: + negsum = lambda a: reduce(lambda x, y: x + (not y), a, 0) + +def _int_to_booleans(x): + r = [] + for i in range(8): + r.append(bool(x & 0x80)) + x <<= 1 + return tuple(r) + +lookup_table = [] +reverse_lookup_table = {} +for i in xrange(256): + x = _int_to_booleans(i) + lookup_table.append(x) + reverse_lookup_table[x] = chr(i) + + +class Bitfield: + def __init__(self, length = None, bitstring = None, copyfrom = None): + if copyfrom is not None: + self.length = copyfrom.length + self.array = copyfrom.array[:] + self.numfalse = copyfrom.numfalse + return + if 
length is None: + raise ValueError, "length must be provided unless copying from another array" + self.length = length + if bitstring is not None: + extra = len(bitstring) * 8 - length + if extra < 0 or extra >= 8: + raise ValueError + t = lookup_table + r = [] + for c in bitstring: + r.extend(t[ord(c)]) + if extra > 0: + if r[-extra:] != [0] * extra: + raise ValueError + del r[-extra:] + self.array = r + self.numfalse = negsum(r) + else: + self.array = [False] * length + self.numfalse = length + + def __setitem__(self, index, val): + val = bool(val) + self.numfalse += self.array[index]-val + self.array[index] = val + + def __getitem__(self, index): + return self.array[index] + + def __len__(self): + return self.length + + def tostring(self): + booleans = self.array + t = reverse_lookup_table + s = len(booleans) % 8 + r = [ t[tuple(booleans[x:x+8])] for x in xrange(0, len(booleans)-s, 8) ] + if s: + r += t[tuple(booleans[-s:] + ([0] * (8-s)))] + return ''.join(r) + + def complete(self): + return not self.numfalse + + def copy(self): + return self.array[:self.length] + + def toboollist(self): + bools = [False] * self.length + for piece in range(0,self.length): + bools[piece] = self.array[piece] + return bools + + +def test_bitfield(): + try: + x = Bitfield(7, 'ab') + assert False + except ValueError: + pass + try: + x = Bitfield(7, 'ab') + assert False + except ValueError: + pass + try: + x = Bitfield(9, 'abc') + assert False + except ValueError: + pass + try: + x = Bitfield(0, 'a') + assert False + except ValueError: + pass + try: + x = Bitfield(1, '') + assert False + except ValueError: + pass + try: + x = Bitfield(7, '') + assert False + except ValueError: + pass + try: + x = Bitfield(8, '') + assert False + except ValueError: + pass + try: + x = Bitfield(9, 'a') + assert False + except ValueError: + pass + try: + x = Bitfield(7, chr(1)) + assert False + except ValueError: + pass + try: + x = Bitfield(9, chr(0) + chr(0x40)) + assert False + except ValueError: + pass + assert Bitfield(0, '').tostring() == '' + assert Bitfield(1, chr(0x80)).tostring() == chr(0x80) + assert Bitfield(7, chr(0x02)).tostring() == chr(0x02) + assert Bitfield(8, chr(0xFF)).tostring() == chr(0xFF) + assert Bitfield(9, chr(0) + chr(0x80)).tostring() == chr(0) + chr(0x80) + x = Bitfield(1) + assert x.numfalse == 1 + x[0] = 1 + assert x.numfalse == 0 + x[0] = 1 + assert x.numfalse == 0 + assert x.tostring() == chr(0x80) + x = Bitfield(7) + assert len(x) == 7 + x[6] = 1 + assert x.numfalse == 6 + assert x.tostring() == chr(0x02) + x = Bitfield(8) + x[7] = 1 + assert x.tostring() == chr(1) + x = Bitfield(9) + x[8] = 1 + assert x.numfalse == 8 + assert x.tostring() == chr(0) + chr(0x80) + x = Bitfield(8, chr(0xC4)) + assert len(x) == 8 + assert x.numfalse == 5 + assert x.tostring() == chr(0xC4) diff --git a/tribler-mod/Tribler/Core/BitTornado/bitfield.py.bak b/tribler-mod/Tribler/Core/BitTornado/bitfield.py.bak new file mode 100644 index 0000000..0b2255f --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/bitfield.py.bak @@ -0,0 +1,171 @@ +# Written by Bram Cohen, Uoti Urpala, and John Hoffman +# see LICENSE.txt for license information + +try: + True +except: + True = 1 + False = 0 + bool = lambda x: not not x + +try: + sum([1]) + negsum = lambda a: len(a) - sum(a) +except: + negsum = lambda a: reduce(lambda x, y: x + (not y), a, 0) + +def _int_to_booleans(x): + r = [] + for i in range(8): + r.append(bool(x & 0x80)) + x <<= 1 + return tuple(r) + +lookup_table = [] +reverse_lookup_table = {} +for i in xrange(256): + 
x = _int_to_booleans(i) + lookup_table.append(x) + reverse_lookup_table[x] = chr(i) + + +class Bitfield: + def __init__(self, length = None, bitstring = None, copyfrom = None): + if copyfrom is not None: + self.length = copyfrom.length + self.array = copyfrom.array[:] + self.numfalse = copyfrom.numfalse + return + if length is None: + raise ValueError, "length must be provided unless copying from another array" + self.length = length + if bitstring is not None: + extra = len(bitstring) * 8 - length + if extra < 0 or extra >= 8: + raise ValueError + t = lookup_table + r = [] + for c in bitstring: + r.extend(t[ord(c)]) + if extra > 0: + if r[-extra:] != [0] * extra: + raise ValueError + del r[-extra:] + self.array = r + self.numfalse = negsum(r) + else: + self.array = [False] * length + self.numfalse = length + + def __setitem__(self, index, val): + val = bool(val) + self.numfalse += self.array[index]-val + self.array[index] = val + + def __getitem__(self, index): + return self.array[index] + + def __len__(self): + return self.length + + def tostring(self): + booleans = self.array + t = reverse_lookup_table + s = len(booleans) % 8 + r = [ t[tuple(booleans[x:x+8])] for x in xrange(0, len(booleans)-s, 8) ] + if s: + r += t[tuple(booleans[-s:] + ([0] * (8-s)))] + return ''.join(r) + + def complete(self): + return not self.numfalse + + def copy(self): + return self.array[:self.length] + + def toboollist(self): + bools = [False] * self.length + for piece in range(0,self.length): + bools[piece] = self.array[piece] + return bools + + +def test_bitfield(): + try: + x = Bitfield(7, 'ab') + assert False + except ValueError: + pass + try: + x = Bitfield(7, 'ab') + assert False + except ValueError: + pass + try: + x = Bitfield(9, 'abc') + assert False + except ValueError: + pass + try: + x = Bitfield(0, 'a') + assert False + except ValueError: + pass + try: + x = Bitfield(1, '') + assert False + except ValueError: + pass + try: + x = Bitfield(7, '') + assert False + except ValueError: + pass + try: + x = Bitfield(8, '') + assert False + except ValueError: + pass + try: + x = Bitfield(9, 'a') + assert False + except ValueError: + pass + try: + x = Bitfield(7, chr(1)) + assert False + except ValueError: + pass + try: + x = Bitfield(9, chr(0) + chr(0x40)) + assert False + except ValueError: + pass + assert Bitfield(0, '').tostring() == '' + assert Bitfield(1, chr(0x80)).tostring() == chr(0x80) + assert Bitfield(7, chr(0x02)).tostring() == chr(0x02) + assert Bitfield(8, chr(0xFF)).tostring() == chr(0xFF) + assert Bitfield(9, chr(0) + chr(0x80)).tostring() == chr(0) + chr(0x80) + x = Bitfield(1) + assert x.numfalse == 1 + x[0] = 1 + assert x.numfalse == 0 + x[0] = 1 + assert x.numfalse == 0 + assert x.tostring() == chr(0x80) + x = Bitfield(7) + assert len(x) == 7 + x[6] = 1 + assert x.numfalse == 6 + assert x.tostring() == chr(0x02) + x = Bitfield(8) + x[7] = 1 + assert x.tostring() == chr(1) + x = Bitfield(9) + x[8] = 1 + assert x.numfalse == 8 + assert x.tostring() == chr(0) + chr(0x80) + x = Bitfield(8, chr(0xC4)) + assert len(x) == 8 + assert x.numfalse == 5 + assert x.tostring() == chr(0xC4) diff --git a/tribler-mod/Tribler/Core/BitTornado/clock.py b/tribler-mod/Tribler/Core/BitTornado/clock.py new file mode 100644 index 0000000..3a7afce --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/clock.py @@ -0,0 +1,31 @@ +from time import localtime, strftime +# Written by John Hoffman +# see LICENSE.txt for license information + +import sys + +from time import time + +_MAXFORWARD = 100 +_FUDGE = 1 + +class 
RelativeTime: + def __init__(self): + self.time = time() + self.offset = 0 + + def get_time(self): + t = time() + self.offset + if t < self.time or t > self.time + _MAXFORWARD: + self.time += _FUDGE + self.offset += self.time - t + return self.time + self.time = t + return t + +if sys.platform != 'win32': + _RTIME = RelativeTime() + def clock(): + return _RTIME.get_time() +else: + from time import clock \ No newline at end of file diff --git a/tribler-mod/Tribler/Core/BitTornado/clock.py.bak b/tribler-mod/Tribler/Core/BitTornado/clock.py.bak new file mode 100644 index 0000000..459e1ea --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/clock.py.bak @@ -0,0 +1,30 @@ +# Written by John Hoffman +# see LICENSE.txt for license information + +import sys + +from time import time + +_MAXFORWARD = 100 +_FUDGE = 1 + +class RelativeTime: + def __init__(self): + self.time = time() + self.offset = 0 + + def get_time(self): + t = time() + self.offset + if t < self.time or t > self.time + _MAXFORWARD: + self.time += _FUDGE + self.offset += self.time - t + return self.time + self.time = t + return t + +if sys.platform != 'win32': + _RTIME = RelativeTime() + def clock(): + return _RTIME.get_time() +else: + from time import clock \ No newline at end of file diff --git a/tribler-mod/Tribler/Core/BitTornado/download_bt1.py b/tribler-mod/Tribler/Core/BitTornado/download_bt1.py new file mode 100644 index 0000000..69abe99 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/download_bt1.py @@ -0,0 +1,761 @@ +from time import localtime, strftime +# Written by Bram Cohen and Pawel Garbacki +# see LICENSE.txt for license information + +import os +from zurllib import urlopen +from urlparse import urlparse +from BT1.btformats import check_message +from BT1.Choker import Choker +from BT1.Storage import Storage +from BT1.StorageWrapper import StorageWrapper +from BT1.FileSelector import FileSelector +from BT1.Uploader import Upload +from BT1.Downloader import Downloader +from BT1.HTTPDownloader import HTTPDownloader +from BT1.Connecter import Connecter +from RateLimiter import RateLimiter +from BT1.Encrypter import Encoder +from RawServer import RawServer, autodetect_socket_style +from BT1.Rerequester import Rerequester +from BT1.DownloaderFeedback import DownloaderFeedback +from RateMeasure import RateMeasure +from CurrentRateMeasure import Measure +from BT1.PiecePicker import PiecePicker +from BT1.Statistics import Statistics +from bencode import bencode, bdecode +from sha import sha +from os import path, makedirs, listdir +from parseargs import parseargs, formatDefinitions, defaultargs +from socket import error as socketerror +from random import seed +from threading import Event +from clock import clock +import re + +from Tribler.Core.simpledefs import TRIBLER_TORRENT_EXT, VODEVENT_START +from Tribler.Core.Merkle.merkle import create_fake_hashes +from Tribler.Core.Utilities.unicode import bin2unicode, dunno2unicode +from Tribler.Core.Video.PiecePickerStreaming import PiecePickerVOD +from Tribler.Core.Video.VideoOnDemand import MovieOnDemandTransporter +from Tribler.Core.Video.VideoSource import VideoSourceTransporter,RateLimitedVideoSourceTransporter,PiecePickerSource,ECDSAAuthenticator +from Tribler.Core.APIImplementation.maketorrent import torrentfilerec2savefilename,savefilenames2finaldest + +# 2fastbt_ +from Tribler.Core.CoopDownload.Coordinator import Coordinator +from Tribler.Core.CoopDownload.Helper import Helper +from Tribler.Core.CoopDownload.RatePredictor import ExpSmoothRatePredictor +import sys 
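+# 2fast cooperative download: a peer in COOPDL_ROLE_HELPER reserves pieces
+# with its COOPDL_ROLE_COORDINATOR (see the RESERVE_PIECE note in
+# BT1Download.__init__ below) and helps fetch them; the Coordinator and
+# Helper classes imported above implement the two roles.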
+from traceback import print_exc,print_stack +# _2fastbt + +from Tribler.Core.simpledefs import * + +try: + True +except: + True = 1 + False = 0 + +DEBUG = False + +class BT1Download: + def __init__(self, statusfunc, finfunc, errorfunc, excfunc, logerrorfunc, doneflag, + config, response, infohash, id, rawserver, get_extip_func, port, + videoanalyserpath): + self.statusfunc = statusfunc + self.finfunc = finfunc + self.errorfunc = errorfunc + self.excfunc = excfunc + self.logerrorfunc = logerrorfunc + self.doneflag = doneflag + self.config = config + self.response = response + self.infohash = infohash + self.myid = id + self.rawserver = rawserver + self.get_extip_func = get_extip_func + self.port = port + + self.info = self.response['info'] + #self.infohash = sha(bencode(self.info)).digest() + # Merkle: Create list of fake hashes. This will be filled if we're an + # initial seeder + if self.info.has_key('root hash') or self.info.has_key('live'): + self.pieces = create_fake_hashes(self.info) + else: + self.pieces = [self.info['pieces'][x:x+20] + for x in xrange(0, len(self.info['pieces']), 20)] + self.len_pieces = len(self.pieces) + self.piecesize = self.info['piece length'] + self.unpauseflag = Event() + self.unpauseflag.set() + self.downloader = None + self.storagewrapper = None + self.fileselector = None + self.super_seeding_active = False + self.filedatflag = Event() + self.spewflag = Event() + self.superseedflag = Event() + self.whenpaused = None + self.finflag = Event() + self.rerequest = None + self.tcp_ack_fudge = config['tcp_ack_fudge'] + + self.play_video = (config['mode'] == DLMODE_VOD) + self.am_video_source = bool(config['video_source']) + # i.e. if VOD then G2G, if live then BT + self.use_g2g = self.play_video and not ('live' in response['info']) + self.videoinfo = None + self.videoanalyserpath = videoanalyserpath + self.voddownload = None + + self.selector_enabled = config['selector_enabled'] + + self.excflag = self.rawserver.get_exception_flag() + self.failed = False + self.checking = False + self.started = False + +# 2fastbt_ + try: + self.helper = None + self.coordinator = None + self.rate_predictor = None + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","BT1Download: coopdl_role is",self.config['coopdl_role'],`self.config['coopdl_coordinator_permid']` + + if self.config['coopdl_role'] == COOPDL_ROLE_COORDINATOR: + self.coordinator = Coordinator(self.infohash, self.len_pieces) + #if self.config['coopdl_role'] == COOPDL_ROLE_COORDINATOR or self.config['coopdl_role'] == COOPDL_ROLE_HELPER: + # Arno, 2008-05-20: removed Helper when coordinator, shouldn't need it. + # Reason to remove it is because it messes up PiecePicking: when a + # helper, it calls _next() again after it returned None, probably + # to provoke a RESERVE_PIECE request to the coordinator. 
+ # This change passes test_dlhelp.py + # + if self.config['coopdl_role'] == COOPDL_ROLE_HELPER: + self.helper = Helper(self.infohash, self.len_pieces, self.config['coopdl_coordinator_permid'], coordinator = self.coordinator) + self.config['coopdl_role'] = '' + self.config['coopdl_coordinator_permid'] = '' + + if self.am_video_source: + self.picker = PiecePickerSource(self.len_pieces, config['rarest_first_cutoff'], + config['rarest_first_priority_cutoff'], helper = self.helper) + elif self.play_video: + # Jan-David: Start video-on-demand service + self.picker = PiecePickerVOD(self.len_pieces, config['rarest_first_cutoff'], + config['rarest_first_priority_cutoff'], helper = self.helper, piecesize=self.piecesize) + else: + self.picker = PiecePicker(self.len_pieces, config['rarest_first_cutoff'], + config['rarest_first_priority_cutoff'], helper = self.helper) + except: + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","BT1Download: EXCEPTION in __init__ :'" + str(sys.exc_info()) + "' '" +# _2fastbt + + self.choker = Choker(config, rawserver.add_task, + self.picker, self.finflag.isSet) + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","download_bt1.BT1Download: play_video is",self.play_video + + def set_videoinfo(self,videoinfo,videostatus): + self.videoinfo = videoinfo + self.videostatus = videostatus + + if self.play_video: + self.picker.set_videostatus( self.videostatus ) + + def checkSaveLocation(self, loc): + if self.info.has_key('length'): + return path.exists(loc) + for x in self.info['files']: + if path.exists(path.join(loc, x['path'][0])): + return True + return False + + + def saveAs(self, filefunc, pathfunc = None): + """ Now throws Exceptions """ + def make(f, forcedir = False): + if not forcedir: + f = path.split(f)[0] + if f != '' and not path.exists(f): + makedirs(f) + + if self.info.has_key('length'): + file_length = self.info['length'] + file = filefunc(self.info['name'], file_length, + self.config['saveas'], False) + # filefunc throws exc if filename gives IOError + + make(file) + files = [(file, file_length)] + else: + file_length = 0L + for x in self.info['files']: + file_length += x['length'] + file = filefunc(self.info['name'], file_length, + self.config['saveas'], True) + # filefunc throws exc if filename gives IOError + + # if this path exists, and no files from the info dict exist, we assume it's a new download and + # the user wants to create a new directory with the default name + existing = 0 + if path.exists(file): + if not path.isdir(file): + raise IOError(file + 'is not a dir') + if listdir(file): # if it's not empty + for x in self.info['files']: + savepath1 = torrentfilerec2savefilename(x,1) + if path.exists(path.join(file, savepath1)): + existing = 1 + if not existing: + try: + file = path.join(file, self.info['name']) + except UnicodeDecodeError: + file = path.join(file, dunno2unicode(self.info['name'])) + if path.exists(file) and not path.isdir(file): + if file.endswith('.torrent') or file.endswith(TRIBLER_TORRENT_EXT): + (prefix,ext) = os.path.splitext(file) + file = prefix + if path.exists(file) and not path.isdir(file): + raise IOError("Can't create dir - " + self.info['name']) + make(file, True) + + # alert the UI to any possible change in path + if pathfunc != None: + pathfunc(file) + + files = [] + for x in self.info['files']: + savepath = torrentfilerec2savefilename(x) + full = savefilenames2finaldest(file,savepath) + # Arno: TODO: this sometimes gives too long filenames for + # Windows. 
When fixing this take into account that + # Download.get_dest_files() should still produce the same + # filenames as your modifications here. + files.append((full, x['length'])) + make(full) + + self.filename = file + self.files = files + self.datalength = file_length + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","BT1Download: saveas returning ",`file`,"self.files is",`self.files` + + return file + + def getFilename(self): + return self.filename + + def get_dest(self,index): + return self.files[index][0] + + def get_datalength(self): + return self.datalength + + def _finished(self): + self.finflag.set() + try: + self.storage.set_readonly() + except (IOError, OSError), e: + self.errorfunc('trouble setting readonly at end - ' + str(e)) + if self.superseedflag.isSet(): + self._set_super_seed() + self.choker.set_round_robin_period( + max( self.config['round_robin_period'], + self.config['round_robin_period'] * + self.info['piece length'] / 200000 ) ) + self.rerequest_complete() + self.finfunc() + + def _data_flunked(self, amount, index): + self.ratemeasure_datarejected(amount) + if not self.doneflag.isSet(): + self.logerrorfunc('piece %d failed hash check, re-downloading it' % index) + + def _piece_from_live_source(self,index,data): + if self.videostatus.live_streaming and self.voddownload is not None: + return self.voddownload.piece_from_live_source(index,data) + else: + return True + + def _failed(self, reason): + self.failed = True + self.doneflag.set() + if reason is not None: + self.errorfunc(reason) + + + def initFiles(self, old_style = False, statusfunc = None, resumedata = None): + """ Now throws exceptions """ + if self.doneflag.isSet(): + return None + if not statusfunc: + statusfunc = self.statusfunc + + disabled_files = None + if self.selector_enabled: + self.priority = self.config['priority'] + if self.priority: + try: + self.priority = self.priority.split(',') + assert len(self.priority) == len(self.files) + self.priority = [int(p) for p in self.priority] + for p in self.priority: + assert p >= -1 + assert p <= 2 + except: + raise ValueError('bad priority list given, ignored') + self.priority = None + try: + disabled_files = [x == -1 for x in self.priority] + except: + pass + + self.storage = Storage(self.files, self.info['piece length'], + self.doneflag, self.config, disabled_files) + + # Merkle: Are we dealing with a Merkle torrent y/n? 
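+        # Merkle torrents store a single 'root hash' in the info dict instead
+        # of the flat per-piece 'pieces' string; that root hash is handed to
+        # StorageWrapper below so received pieces can be checked against it,
+        # while self.pieces holds the fake hashes created in __init__.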
+ if self.info.has_key('root hash'): + root_hash = self.info['root hash'] + else: + root_hash = None + self.storagewrapper = StorageWrapper(self.videoinfo, self.storage, self.config['download_slice_size'], + self.pieces, self.info['piece length'], root_hash, + self._finished, self._failed, + statusfunc, self.doneflag, self.config['check_hashes'], + self._data_flunked, self._piece_from_live_source, self.rawserver.add_task, + self.config, self.unpauseflag) + + if self.selector_enabled: + self.fileselector = FileSelector(self.files, self.info['piece length'], + None, + self.storage, self.storagewrapper, + self.rawserver.add_task, + self._failed) + + if resumedata: + self.fileselector.unpickle(resumedata) + + self.checking = True + if old_style: + return self.storagewrapper.old_style_init() + return self.storagewrapper.initialize + + + def _make_upload(self, connection, ratelimiter, totalup): + return Upload(connection, ratelimiter, totalup, + self.choker, self.storagewrapper, self.picker, + self.config) + + def _kick_peer(self, connection): + def k(connection = connection): + connection.close() + self.rawserver.add_task(k, 0) + + def _ban_peer(self, ip): + self.encoder_ban(ip) + + def _received_raw_data(self, x): + if self.tcp_ack_fudge: + x = int(x*self.tcp_ack_fudge) + self.ratelimiter.adjust_sent(x) +# self.upmeasure.update_rate(x) + + def _received_data(self, x): + self.downmeasure.update_rate(x) + self.ratemeasure.data_came_in(x) + + def _received_http_data(self, x): + self.downmeasure.update_rate(x) + self.ratemeasure.data_came_in(x) + self.downloader.external_data_received(x) + + def _cancelfunc(self, pieces): + self.downloader.cancel_piece_download(pieces) + self.httpdownloader.cancel_piece_download(pieces) + def _reqmorefunc(self, pieces): + self.downloader.requeue_piece_download(pieces) + + def startEngine(self, ratelimiter = None, vodeventfunc = None): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","BT1Download: startEngine",`self.info['name']` + + if self.doneflag.isSet(): + return + + self.checking = False + + for i in xrange(self.len_pieces): + if self.storagewrapper.do_I_have(i): + self.picker.complete(i) + self.upmeasure = Measure(self.config['max_rate_period'], + self.config['upload_rate_fudge']) + self.downmeasure = Measure(self.config['max_rate_period']) + + if ratelimiter: + self.ratelimiter = ratelimiter + else: + self.ratelimiter = RateLimiter(self.rawserver.add_task, + self.config['upload_unit_size'], + self.setConns) + self.ratelimiter.set_upload_rate(self.config['max_upload_rate']) + + self.ratemeasure = RateMeasure() + self.ratemeasure_datarejected = self.ratemeasure.data_rejected + + self.downloader = Downloader(self.storagewrapper, self.picker, + self.config['request_backlog'], self.config['max_rate_period'], + self.len_pieces, self.config['download_slice_size'], + self._received_data, self.config['snub_time'], self.config['auto_kick'], + self._kick_peer, self._ban_peer, scheduler = self.rawserver.add_task) + self.downloader.set_download_rate(self.config['max_download_rate']) + + self.picker.set_downloader(self.downloader) +# 2fastbt_ + self.connecter = Connecter(self._make_upload, self.downloader, self.choker, + self.len_pieces, self.piecesize, self.upmeasure, self.config, + self.ratelimiter, self.info.has_key('root hash'), + self.rawserver.add_task, self.coordinator, self.helper, self.get_extip_func, self.port, self.use_g2g,self.infohash,self.response.get('announce',None)) +# _2fastbt + self.encoder = Encoder(self.connecter, 
self.rawserver, + self.myid, self.config['max_message_length'], self.rawserver.add_task, + self.config['keepalive_interval'], self.infohash, + self._received_raw_data, self.config) + self.encoder_ban = self.encoder.ban +#--- 2fastbt_ + if DEBUG: + print str(self.config['exclude_ips']) + for ip in self.config['exclude_ips']: + if DEBUG: + print "Banning ip: " + str(ip) + self.encoder_ban(ip) + + if self.helper is not None: + self.helper.set_encoder(self.encoder) + self.rate_predictor = ExpSmoothRatePredictor(self.rawserver, + self.downmeasure, self.config['max_download_rate']) + self.picker.set_rate_predictor(self.rate_predictor) + self.rate_predictor.update() +# _2fastbt + + self.httpdownloader = HTTPDownloader(self.storagewrapper, self.picker, + self.rawserver, self.finflag, self.logerrorfunc, self.downloader, + self.config['max_rate_period'], self.infohash, self._received_http_data, + self.connecter.got_piece) + if self.response.has_key('httpseeds') and not self.finflag.isSet(): + for u in self.response['httpseeds']: + self.httpdownloader.make_download(u) + + if self.selector_enabled: + self.fileselector.tie_in(self.picker, self._cancelfunc, self._reqmorefunc) + if self.priority: + self.fileselector.set_priorities_now(self.priority) + # erase old data once you've started modifying it + + if self.play_video: + if self.picker.am_I_complete(): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","BT1Download: startEngine: VOD requested, but file complete on disk",self.videoinfo + vodeventfunc( self.videoinfo, VODEVENT_START, { + "complete": True, + "filename": self.videoinfo["outpath"], + "mimetype": self.videoinfo["mimetype"], + "stream": None, + "length": self.videostatus.selected_movie["size"], + } ) + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","BT1Download: startEngine: Going into VOD mode",self.videoinfo + self.voddownload = MovieOnDemandTransporter(self,self.videostatus,self.videoinfo,self.videoanalyserpath,vodeventfunc) + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","BT1Download: startEngine: Going into standard mode" + + if self.am_video_source: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","BT1Download: startEngine: Acting as VideoSource" + if self.config['video_ratelimit']: + self.videosourcetransporter = RateLimitedVideoSourceTransporter(self.config['video_ratelimit'],self.config['video_source'],self,self.config['video_source_authconfig']) + else: + self.videosourcetransporter = VideoSourceTransporter(self.config['video_source'],self,self.config['video_source_authconfig']) + self.videosourcetransporter.start() + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","BT1Download: startEngine: Not a VideoSource" + + if not self.doneflag.isSet(): + self.started = True + + def rerequest_complete(self): + if self.rerequest: + self.rerequest.announce(1) + + def rerequest_stopped(self): + if self.rerequest: + self.rerequest.announce(2) + + def rerequest_lastfailed(self): + if self.rerequest: + return self.rerequest.last_failed + return False + + def startRerequester(self): + if self.response.has_key ('announce-list'): + trackerlist = self.response['announce-list'] + for tier in range(len(trackerlist)): + for t in range(len(trackerlist[tier])): + trackerlist[tier][t] = bin2unicode(trackerlist[tier][t]) + else: + tracker = bin2unicode(self.response.get('announce', '')) + if tracker: + trackerlist = [[tracker]] + else: + 
trackerlist = [[]] + + self.rerequest = Rerequester(trackerlist, self.config['rerequest_interval'], + self.rawserver.add_task,self.connecter.how_many_connections, + self.config['min_peers'], self.encoder.start_connections, + self.rawserver.add_task, self.storagewrapper.get_amount_left, + self.upmeasure.get_total, self.downmeasure.get_total, self.port, self.config['ip'], + self.myid, self.infohash, self.config['http_timeout'], + self.logerrorfunc, self.excfunc, self.config['max_initiate'], + self.doneflag, self.upmeasure.get_rate, self.downmeasure.get_rate, + self.unpauseflag,self.config) + + self.encoder.set_rerequester(self.rerequest) + self.rerequest.start() + + + def _init_stats(self): + self.statistics = Statistics(self.upmeasure, self.downmeasure, + self.connecter, self.httpdownloader, self.ratelimiter, + self.rerequest_lastfailed, self.filedatflag) + if self.info.has_key('files'): + self.statistics.set_dirstats(self.files, self.info['piece length']) + + def autoStats(self, displayfunc = None): + if not displayfunc: + displayfunc = self.statusfunc + + self._init_stats() + DownloaderFeedback(self.choker, self.httpdownloader, self.rawserver.add_task, + self.upmeasure.get_rate, self.downmeasure.get_rate, + self.ratemeasure, self.storagewrapper.get_stats, + self.datalength, self.finflag, self.spewflag, self.statistics, + displayfunc, self.config['display_interval'], + infohash = self.infohash,voddownload=self.voddownload) + + def startStats(self): + self._init_stats() + self.spewflag.set() # start collecting peer cache + d = DownloaderFeedback(self.choker, self.httpdownloader, self.rawserver.add_task, + self.upmeasure.get_rate, self.downmeasure.get_rate, + self.ratemeasure, self.storagewrapper.get_stats, + self.datalength, self.finflag, self.spewflag, self.statistics, + infohash = self.infohash,voddownload=self.voddownload) + return d.gather + + + def getPortHandler(self): + return self.encoder + + + def checkpoint(self): # Added by Arno + """ Called by network thread """ + if self.fileselector and self.started: + # self.fileselector.finish() does nothing at the moment, so as + # long as the network thread calls this, it should be OK. 
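+            # The pickled FileSelector state (piece priorities and related storage
+            # state) is the same structure that shutdown() returns as resume data
+            # and that initFiles() later feeds back in via FileSelector.unpickle().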
+ return self.fileselector.pickle() + else: + return None + + def shutdown(self): + if self.checking or self.started: + self.storagewrapper.sync() + self.storage.close() + self.rerequest_stopped() + resumedata = None + if self.fileselector and self.started: + if not self.failed: + self.fileselector.finish() + resumedata = self.fileselector.pickle() + if self.voddownload is not None: + self.voddownload.stop() + return resumedata + + + def setUploadRate(self, rate, networkcalling=False): + try: + def s(self = self, rate = rate): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","BT1Download: set max upload to",rate + self.config['max_upload_rate'] = rate + self.ratelimiter.set_upload_rate(rate) + if networkcalling: + s() + else: + self.rawserver.add_task(s) + except AttributeError: + pass + + def setConns(self, conns, conns2 = None,networkcalling=False): + if not conns2: + conns2 = conns + try: + def s(self = self, conns = conns, conns2 = conns2): + self.config['min_uploads'] = conns + self.config['max_uploads'] = conns2 + if (conns > 30): + self.config['max_initiate'] = conns + 10 + if networkcalling: + s() + else: + self.rawserver.add_task(s) + except AttributeError: + pass + + def setDownloadRate(self, rate,networkcalling=False): + try: + def s(self = self, rate = rate): + self.config['max_download_rate'] = rate + self.downloader.set_download_rate(rate) + if networkcalling: + s() + else: + self.rawserver.add_task(s) + except AttributeError: + pass + + def startConnection(self, ip, port, id): + self.encoder._start_connection((ip, port), id) + + def _startConnection(self, ipandport, id): + self.encoder._start_connection(ipandport, id) + + def setInitiate(self, initiate,networkcalling=False): + try: + def s(self = self, initiate = initiate): + self.config['max_initiate'] = initiate + if networkcalling: + s() + else: + self.rawserver.add_task(s) + except AttributeError: + pass + + def setMaxConns(self,nconns,networkcalling=False): + try: + def s(self = self, nconns = nconns): + self.config['max_connections'] = nconns + if networkcalling: + s() + else: + self.rawserver.add_task(s) + except AttributeError: + pass + + + def getConfig(self): + return self.config + + def getDefaults(self): + return defaultargs(defaults) + + def reannounce(self, special = None): + try: + def r(self = self, special = special): + if special is None: + self.rerequest.announce() + else: + self.rerequest.announce(specialurl = special) + self.rawserver.add_task(r) + except AttributeError: + pass + + def getResponse(self): + try: + return self.response + except: + return None + +# def Pause(self): +# try: +# if self.storagewrapper: +# self.rawserver.add_task(self._pausemaker, 0) +# except: +# return False +# self.unpauseflag.clear() +# return True +# +# def _pausemaker(self): +# self.whenpaused = clock() +# self.unpauseflag.wait() # sticks a monkey wrench in the main thread +# +# def Unpause(self): +# self.unpauseflag.set() +# if self.whenpaused and clock()-self.whenpaused > 60: +# def r(self = self): +# self.rerequest.announce(3) # rerequest automatically if paused for >60 seconds +# self.rawserver.add_task(r) + + def Pause(self): + if not self.storagewrapper: + return False + self.unpauseflag.clear() + self.rawserver.add_task(self.onPause) + return True + + def onPause(self): + self.whenpaused = clock() + if not self.downloader: + return + self.downloader.pause(True) + self.encoder.pause(True) + self.choker.pause(True) + + def Unpause(self): + self.unpauseflag.set() + 
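+        # The downloader, encoder and choker are actually resumed on the network
+        # thread in onUnpause(), scheduled below.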
self.rawserver.add_task(self.onUnpause) + + def onUnpause(self): + if not self.downloader: + return + self.downloader.pause(False) + self.encoder.pause(False) + self.choker.pause(False) + if self.rerequest and self.whenpaused and clock()-self.whenpaused > 60: + self.rerequest.announce(3) # rerequest automatically if paused for >60 seconds + + def set_super_seed(self,networkcalling=False): + self.superseedflag.set() + if networkcalling: + self._set_super_seed() + else: + self.rawserver.add_task(self._set_super_seed) + + def _set_super_seed(self): + if not self.super_seeding_active and self.finflag.isSet(): + self.super_seeding_active = True + self.logerrorfunc(' ** SUPER-SEED OPERATION ACTIVE **\n' + + ' please set Max uploads so each peer gets 6-8 kB/s') + def s(self = self): + self.downloader.set_super_seed() + self.choker.set_super_seed() + self.rawserver.add_task(s) + if self.finflag.isSet(): # mode started when already finished + def r(self = self): + self.rerequest.announce(3) # so after kicking everyone off, reannounce + self.rawserver.add_task(r) + + def am_I_finished(self): + return self.finflag.isSet() + + def get_transfer_stats(self): + return self.upmeasure.get_total(), self.downmeasure.get_total() + + def get_moviestreamtransport(self): + return self.voddownload diff --git a/tribler-mod/Tribler/Core/BitTornado/download_bt1.py.bak b/tribler-mod/Tribler/Core/BitTornado/download_bt1.py.bak new file mode 100644 index 0000000..0f0a46c --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/download_bt1.py.bak @@ -0,0 +1,760 @@ +# Written by Bram Cohen and Pawel Garbacki +# see LICENSE.txt for license information + +import os +from zurllib import urlopen +from urlparse import urlparse +from BT1.btformats import check_message +from BT1.Choker import Choker +from BT1.Storage import Storage +from BT1.StorageWrapper import StorageWrapper +from BT1.FileSelector import FileSelector +from BT1.Uploader import Upload +from BT1.Downloader import Downloader +from BT1.HTTPDownloader import HTTPDownloader +from BT1.Connecter import Connecter +from RateLimiter import RateLimiter +from BT1.Encrypter import Encoder +from RawServer import RawServer, autodetect_socket_style +from BT1.Rerequester import Rerequester +from BT1.DownloaderFeedback import DownloaderFeedback +from RateMeasure import RateMeasure +from CurrentRateMeasure import Measure +from BT1.PiecePicker import PiecePicker +from BT1.Statistics import Statistics +from bencode import bencode, bdecode +from sha import sha +from os import path, makedirs, listdir +from parseargs import parseargs, formatDefinitions, defaultargs +from socket import error as socketerror +from random import seed +from threading import Event +from clock import clock +import re + +from Tribler.Core.simpledefs import TRIBLER_TORRENT_EXT, VODEVENT_START +from Tribler.Core.Merkle.merkle import create_fake_hashes +from Tribler.Core.Utilities.unicode import bin2unicode, dunno2unicode +from Tribler.Core.Video.PiecePickerStreaming import PiecePickerVOD +from Tribler.Core.Video.VideoOnDemand import MovieOnDemandTransporter +from Tribler.Core.Video.VideoSource import VideoSourceTransporter,RateLimitedVideoSourceTransporter,PiecePickerSource,ECDSAAuthenticator +from Tribler.Core.APIImplementation.maketorrent import torrentfilerec2savefilename,savefilenames2finaldest + +# 2fastbt_ +from Tribler.Core.CoopDownload.Coordinator import Coordinator +from Tribler.Core.CoopDownload.Helper import Helper +from Tribler.Core.CoopDownload.RatePredictor import ExpSmoothRatePredictor +import 
sys +from traceback import print_exc,print_stack +# _2fastbt + +from Tribler.Core.simpledefs import * + +try: + True +except: + True = 1 + False = 0 + +DEBUG = False + +class BT1Download: + def __init__(self, statusfunc, finfunc, errorfunc, excfunc, logerrorfunc, doneflag, + config, response, infohash, id, rawserver, get_extip_func, port, + videoanalyserpath): + self.statusfunc = statusfunc + self.finfunc = finfunc + self.errorfunc = errorfunc + self.excfunc = excfunc + self.logerrorfunc = logerrorfunc + self.doneflag = doneflag + self.config = config + self.response = response + self.infohash = infohash + self.myid = id + self.rawserver = rawserver + self.get_extip_func = get_extip_func + self.port = port + + self.info = self.response['info'] + #self.infohash = sha(bencode(self.info)).digest() + # Merkle: Create list of fake hashes. This will be filled if we're an + # initial seeder + if self.info.has_key('root hash') or self.info.has_key('live'): + self.pieces = create_fake_hashes(self.info) + else: + self.pieces = [self.info['pieces'][x:x+20] + for x in xrange(0, len(self.info['pieces']), 20)] + self.len_pieces = len(self.pieces) + self.piecesize = self.info['piece length'] + self.unpauseflag = Event() + self.unpauseflag.set() + self.downloader = None + self.storagewrapper = None + self.fileselector = None + self.super_seeding_active = False + self.filedatflag = Event() + self.spewflag = Event() + self.superseedflag = Event() + self.whenpaused = None + self.finflag = Event() + self.rerequest = None + self.tcp_ack_fudge = config['tcp_ack_fudge'] + + self.play_video = (config['mode'] == DLMODE_VOD) + self.am_video_source = bool(config['video_source']) + # i.e. if VOD then G2G, if live then BT + self.use_g2g = self.play_video and not ('live' in response['info']) + self.videoinfo = None + self.videoanalyserpath = videoanalyserpath + self.voddownload = None + + self.selector_enabled = config['selector_enabled'] + + self.excflag = self.rawserver.get_exception_flag() + self.failed = False + self.checking = False + self.started = False + +# 2fastbt_ + try: + self.helper = None + self.coordinator = None + self.rate_predictor = None + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","BT1Download: coopdl_role is",self.config['coopdl_role'],`self.config['coopdl_coordinator_permid']` + + if self.config['coopdl_role'] == COOPDL_ROLE_COORDINATOR: + self.coordinator = Coordinator(self.infohash, self.len_pieces) + #if self.config['coopdl_role'] == COOPDL_ROLE_COORDINATOR or self.config['coopdl_role'] == COOPDL_ROLE_HELPER: + # Arno, 2008-05-20: removed Helper when coordinator, shouldn't need it. + # Reason to remove it is because it messes up PiecePicking: when a + # helper, it calls _next() again after it returned None, probably + # to provoke a RESERVE_PIECE request to the coordinator. 
+ # This change passes test_dlhelp.py + # + if self.config['coopdl_role'] == COOPDL_ROLE_HELPER: + self.helper = Helper(self.infohash, self.len_pieces, self.config['coopdl_coordinator_permid'], coordinator = self.coordinator) + self.config['coopdl_role'] = '' + self.config['coopdl_coordinator_permid'] = '' + + if self.am_video_source: + self.picker = PiecePickerSource(self.len_pieces, config['rarest_first_cutoff'], + config['rarest_first_priority_cutoff'], helper = self.helper) + elif self.play_video: + # Jan-David: Start video-on-demand service + self.picker = PiecePickerVOD(self.len_pieces, config['rarest_first_cutoff'], + config['rarest_first_priority_cutoff'], helper = self.helper, piecesize=self.piecesize) + else: + self.picker = PiecePicker(self.len_pieces, config['rarest_first_cutoff'], + config['rarest_first_priority_cutoff'], helper = self.helper) + except: + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","BT1Download: EXCEPTION in __init__ :'" + str(sys.exc_info()) + "' '" +# _2fastbt + + self.choker = Choker(config, rawserver.add_task, + self.picker, self.finflag.isSet) + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","download_bt1.BT1Download: play_video is",self.play_video + + def set_videoinfo(self,videoinfo,videostatus): + self.videoinfo = videoinfo + self.videostatus = videostatus + + if self.play_video: + self.picker.set_videostatus( self.videostatus ) + + def checkSaveLocation(self, loc): + if self.info.has_key('length'): + return path.exists(loc) + for x in self.info['files']: + if path.exists(path.join(loc, x['path'][0])): + return True + return False + + + def saveAs(self, filefunc, pathfunc = None): + """ Now throws Exceptions """ + def make(f, forcedir = False): + if not forcedir: + f = path.split(f)[0] + if f != '' and not path.exists(f): + makedirs(f) + + if self.info.has_key('length'): + file_length = self.info['length'] + file = filefunc(self.info['name'], file_length, + self.config['saveas'], False) + # filefunc throws exc if filename gives IOError + + make(file) + files = [(file, file_length)] + else: + file_length = 0L + for x in self.info['files']: + file_length += x['length'] + file = filefunc(self.info['name'], file_length, + self.config['saveas'], True) + # filefunc throws exc if filename gives IOError + + # if this path exists, and no files from the info dict exist, we assume it's a new download and + # the user wants to create a new directory with the default name + existing = 0 + if path.exists(file): + if not path.isdir(file): + raise IOError(file + 'is not a dir') + if listdir(file): # if it's not empty + for x in self.info['files']: + savepath1 = torrentfilerec2savefilename(x,1) + if path.exists(path.join(file, savepath1)): + existing = 1 + if not existing: + try: + file = path.join(file, self.info['name']) + except UnicodeDecodeError: + file = path.join(file, dunno2unicode(self.info['name'])) + if path.exists(file) and not path.isdir(file): + if file.endswith('.torrent') or file.endswith(TRIBLER_TORRENT_EXT): + (prefix,ext) = os.path.splitext(file) + file = prefix + if path.exists(file) and not path.isdir(file): + raise IOError("Can't create dir - " + self.info['name']) + make(file, True) + + # alert the UI to any possible change in path + if pathfunc != None: + pathfunc(file) + + files = [] + for x in self.info['files']: + savepath = torrentfilerec2savefilename(x) + full = savefilenames2finaldest(file,savepath) + # Arno: TODO: this sometimes gives too long filenames for + # Windows. 
When fixing this take into account that + # Download.get_dest_files() should still produce the same + # filenames as your modifications here. + files.append((full, x['length'])) + make(full) + + self.filename = file + self.files = files + self.datalength = file_length + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","BT1Download: saveas returning ",`file`,"self.files is",`self.files` + + return file + + def getFilename(self): + return self.filename + + def get_dest(self,index): + return self.files[index][0] + + def get_datalength(self): + return self.datalength + + def _finished(self): + self.finflag.set() + try: + self.storage.set_readonly() + except (IOError, OSError), e: + self.errorfunc('trouble setting readonly at end - ' + str(e)) + if self.superseedflag.isSet(): + self._set_super_seed() + self.choker.set_round_robin_period( + max( self.config['round_robin_period'], + self.config['round_robin_period'] * + self.info['piece length'] / 200000 ) ) + self.rerequest_complete() + self.finfunc() + + def _data_flunked(self, amount, index): + self.ratemeasure_datarejected(amount) + if not self.doneflag.isSet(): + self.logerrorfunc('piece %d failed hash check, re-downloading it' % index) + + def _piece_from_live_source(self,index,data): + if self.videostatus.live_streaming and self.voddownload is not None: + return self.voddownload.piece_from_live_source(index,data) + else: + return True + + def _failed(self, reason): + self.failed = True + self.doneflag.set() + if reason is not None: + self.errorfunc(reason) + + + def initFiles(self, old_style = False, statusfunc = None, resumedata = None): + """ Now throws exceptions """ + if self.doneflag.isSet(): + return None + if not statusfunc: + statusfunc = self.statusfunc + + disabled_files = None + if self.selector_enabled: + self.priority = self.config['priority'] + if self.priority: + try: + self.priority = self.priority.split(',') + assert len(self.priority) == len(self.files) + self.priority = [int(p) for p in self.priority] + for p in self.priority: + assert p >= -1 + assert p <= 2 + except: + raise ValueError('bad priority list given, ignored') + self.priority = None + try: + disabled_files = [x == -1 for x in self.priority] + except: + pass + + self.storage = Storage(self.files, self.info['piece length'], + self.doneflag, self.config, disabled_files) + + # Merkle: Are we dealing with a Merkle torrent y/n? 
+ if self.info.has_key('root hash'): + root_hash = self.info['root hash'] + else: + root_hash = None + self.storagewrapper = StorageWrapper(self.videoinfo, self.storage, self.config['download_slice_size'], + self.pieces, self.info['piece length'], root_hash, + self._finished, self._failed, + statusfunc, self.doneflag, self.config['check_hashes'], + self._data_flunked, self._piece_from_live_source, self.rawserver.add_task, + self.config, self.unpauseflag) + + if self.selector_enabled: + self.fileselector = FileSelector(self.files, self.info['piece length'], + None, + self.storage, self.storagewrapper, + self.rawserver.add_task, + self._failed) + + if resumedata: + self.fileselector.unpickle(resumedata) + + self.checking = True + if old_style: + return self.storagewrapper.old_style_init() + return self.storagewrapper.initialize + + + def _make_upload(self, connection, ratelimiter, totalup): + return Upload(connection, ratelimiter, totalup, + self.choker, self.storagewrapper, self.picker, + self.config) + + def _kick_peer(self, connection): + def k(connection = connection): + connection.close() + self.rawserver.add_task(k, 0) + + def _ban_peer(self, ip): + self.encoder_ban(ip) + + def _received_raw_data(self, x): + if self.tcp_ack_fudge: + x = int(x*self.tcp_ack_fudge) + self.ratelimiter.adjust_sent(x) +# self.upmeasure.update_rate(x) + + def _received_data(self, x): + self.downmeasure.update_rate(x) + self.ratemeasure.data_came_in(x) + + def _received_http_data(self, x): + self.downmeasure.update_rate(x) + self.ratemeasure.data_came_in(x) + self.downloader.external_data_received(x) + + def _cancelfunc(self, pieces): + self.downloader.cancel_piece_download(pieces) + self.httpdownloader.cancel_piece_download(pieces) + def _reqmorefunc(self, pieces): + self.downloader.requeue_piece_download(pieces) + + def startEngine(self, ratelimiter = None, vodeventfunc = None): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","BT1Download: startEngine",`self.info['name']` + + if self.doneflag.isSet(): + return + + self.checking = False + + for i in xrange(self.len_pieces): + if self.storagewrapper.do_I_have(i): + self.picker.complete(i) + self.upmeasure = Measure(self.config['max_rate_period'], + self.config['upload_rate_fudge']) + self.downmeasure = Measure(self.config['max_rate_period']) + + if ratelimiter: + self.ratelimiter = ratelimiter + else: + self.ratelimiter = RateLimiter(self.rawserver.add_task, + self.config['upload_unit_size'], + self.setConns) + self.ratelimiter.set_upload_rate(self.config['max_upload_rate']) + + self.ratemeasure = RateMeasure() + self.ratemeasure_datarejected = self.ratemeasure.data_rejected + + self.downloader = Downloader(self.storagewrapper, self.picker, + self.config['request_backlog'], self.config['max_rate_period'], + self.len_pieces, self.config['download_slice_size'], + self._received_data, self.config['snub_time'], self.config['auto_kick'], + self._kick_peer, self._ban_peer, scheduler = self.rawserver.add_task) + self.downloader.set_download_rate(self.config['max_download_rate']) + + self.picker.set_downloader(self.downloader) +# 2fastbt_ + self.connecter = Connecter(self._make_upload, self.downloader, self.choker, + self.len_pieces, self.piecesize, self.upmeasure, self.config, + self.ratelimiter, self.info.has_key('root hash'), + self.rawserver.add_task, self.coordinator, self.helper, self.get_extip_func, self.port, self.use_g2g,self.infohash,self.response.get('announce',None)) +# _2fastbt + self.encoder = Encoder(self.connecter, 
self.rawserver, + self.myid, self.config['max_message_length'], self.rawserver.add_task, + self.config['keepalive_interval'], self.infohash, + self._received_raw_data, self.config) + self.encoder_ban = self.encoder.ban +#--- 2fastbt_ + if DEBUG: + print str(self.config['exclude_ips']) + for ip in self.config['exclude_ips']: + if DEBUG: + print "Banning ip: " + str(ip) + self.encoder_ban(ip) + + if self.helper is not None: + self.helper.set_encoder(self.encoder) + self.rate_predictor = ExpSmoothRatePredictor(self.rawserver, + self.downmeasure, self.config['max_download_rate']) + self.picker.set_rate_predictor(self.rate_predictor) + self.rate_predictor.update() +# _2fastbt + + self.httpdownloader = HTTPDownloader(self.storagewrapper, self.picker, + self.rawserver, self.finflag, self.logerrorfunc, self.downloader, + self.config['max_rate_period'], self.infohash, self._received_http_data, + self.connecter.got_piece) + if self.response.has_key('httpseeds') and not self.finflag.isSet(): + for u in self.response['httpseeds']: + self.httpdownloader.make_download(u) + + if self.selector_enabled: + self.fileselector.tie_in(self.picker, self._cancelfunc, self._reqmorefunc) + if self.priority: + self.fileselector.set_priorities_now(self.priority) + # erase old data once you've started modifying it + + if self.play_video: + if self.picker.am_I_complete(): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","BT1Download: startEngine: VOD requested, but file complete on disk",self.videoinfo + vodeventfunc( self.videoinfo, VODEVENT_START, { + "complete": True, + "filename": self.videoinfo["outpath"], + "mimetype": self.videoinfo["mimetype"], + "stream": None, + "length": self.videostatus.selected_movie["size"], + } ) + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","BT1Download: startEngine: Going into VOD mode",self.videoinfo + self.voddownload = MovieOnDemandTransporter(self,self.videostatus,self.videoinfo,self.videoanalyserpath,vodeventfunc) + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","BT1Download: startEngine: Going into standard mode" + + if self.am_video_source: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","BT1Download: startEngine: Acting as VideoSource" + if self.config['video_ratelimit']: + self.videosourcetransporter = RateLimitedVideoSourceTransporter(self.config['video_ratelimit'],self.config['video_source'],self,self.config['video_source_authconfig']) + else: + self.videosourcetransporter = VideoSourceTransporter(self.config['video_source'],self,self.config['video_source_authconfig']) + self.videosourcetransporter.start() + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","BT1Download: startEngine: Not a VideoSource" + + if not self.doneflag.isSet(): + self.started = True + + def rerequest_complete(self): + if self.rerequest: + self.rerequest.announce(1) + + def rerequest_stopped(self): + if self.rerequest: + self.rerequest.announce(2) + + def rerequest_lastfailed(self): + if self.rerequest: + return self.rerequest.last_failed + return False + + def startRerequester(self): + if self.response.has_key ('announce-list'): + trackerlist = self.response['announce-list'] + for tier in range(len(trackerlist)): + for t in range(len(trackerlist[tier])): + trackerlist[tier][t] = bin2unicode(trackerlist[tier][t]) + else: + tracker = bin2unicode(self.response.get('announce', '')) + if tracker: + trackerlist = [[tracker]] + else: + 
trackerlist = [[]] + + self.rerequest = Rerequester(trackerlist, self.config['rerequest_interval'], + self.rawserver.add_task,self.connecter.how_many_connections, + self.config['min_peers'], self.encoder.start_connections, + self.rawserver.add_task, self.storagewrapper.get_amount_left, + self.upmeasure.get_total, self.downmeasure.get_total, self.port, self.config['ip'], + self.myid, self.infohash, self.config['http_timeout'], + self.logerrorfunc, self.excfunc, self.config['max_initiate'], + self.doneflag, self.upmeasure.get_rate, self.downmeasure.get_rate, + self.unpauseflag,self.config) + + self.encoder.set_rerequester(self.rerequest) + self.rerequest.start() + + + def _init_stats(self): + self.statistics = Statistics(self.upmeasure, self.downmeasure, + self.connecter, self.httpdownloader, self.ratelimiter, + self.rerequest_lastfailed, self.filedatflag) + if self.info.has_key('files'): + self.statistics.set_dirstats(self.files, self.info['piece length']) + + def autoStats(self, displayfunc = None): + if not displayfunc: + displayfunc = self.statusfunc + + self._init_stats() + DownloaderFeedback(self.choker, self.httpdownloader, self.rawserver.add_task, + self.upmeasure.get_rate, self.downmeasure.get_rate, + self.ratemeasure, self.storagewrapper.get_stats, + self.datalength, self.finflag, self.spewflag, self.statistics, + displayfunc, self.config['display_interval'], + infohash = self.infohash,voddownload=self.voddownload) + + def startStats(self): + self._init_stats() + self.spewflag.set() # start collecting peer cache + d = DownloaderFeedback(self.choker, self.httpdownloader, self.rawserver.add_task, + self.upmeasure.get_rate, self.downmeasure.get_rate, + self.ratemeasure, self.storagewrapper.get_stats, + self.datalength, self.finflag, self.spewflag, self.statistics, + infohash = self.infohash,voddownload=self.voddownload) + return d.gather + + + def getPortHandler(self): + return self.encoder + + + def checkpoint(self): # Added by Arno + """ Called by network thread """ + if self.fileselector and self.started: + # self.fileselector.finish() does nothing at the moment, so as + # long as the network thread calls this, it should be OK. 
+ return self.fileselector.pickle() + else: + return None + + def shutdown(self): + if self.checking or self.started: + self.storagewrapper.sync() + self.storage.close() + self.rerequest_stopped() + resumedata = None + if self.fileselector and self.started: + if not self.failed: + self.fileselector.finish() + resumedata = self.fileselector.pickle() + if self.voddownload is not None: + self.voddownload.stop() + return resumedata + + + def setUploadRate(self, rate, networkcalling=False): + try: + def s(self = self, rate = rate): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","BT1Download: set max upload to",rate + self.config['max_upload_rate'] = rate + self.ratelimiter.set_upload_rate(rate) + if networkcalling: + s() + else: + self.rawserver.add_task(s) + except AttributeError: + pass + + def setConns(self, conns, conns2 = None,networkcalling=False): + if not conns2: + conns2 = conns + try: + def s(self = self, conns = conns, conns2 = conns2): + self.config['min_uploads'] = conns + self.config['max_uploads'] = conns2 + if (conns > 30): + self.config['max_initiate'] = conns + 10 + if networkcalling: + s() + else: + self.rawserver.add_task(s) + except AttributeError: + pass + + def setDownloadRate(self, rate,networkcalling=False): + try: + def s(self = self, rate = rate): + self.config['max_download_rate'] = rate + self.downloader.set_download_rate(rate) + if networkcalling: + s() + else: + self.rawserver.add_task(s) + except AttributeError: + pass + + def startConnection(self, ip, port, id): + self.encoder._start_connection((ip, port), id) + + def _startConnection(self, ipandport, id): + self.encoder._start_connection(ipandport, id) + + def setInitiate(self, initiate,networkcalling=False): + try: + def s(self = self, initiate = initiate): + self.config['max_initiate'] = initiate + if networkcalling: + s() + else: + self.rawserver.add_task(s) + except AttributeError: + pass + + def setMaxConns(self,nconns,networkcalling=False): + try: + def s(self = self, nconns = nconns): + self.config['max_connections'] = nconns + if networkcalling: + s() + else: + self.rawserver.add_task(s) + except AttributeError: + pass + + + def getConfig(self): + return self.config + + def getDefaults(self): + return defaultargs(defaults) + + def reannounce(self, special = None): + try: + def r(self = self, special = special): + if special is None: + self.rerequest.announce() + else: + self.rerequest.announce(specialurl = special) + self.rawserver.add_task(r) + except AttributeError: + pass + + def getResponse(self): + try: + return self.response + except: + return None + +# def Pause(self): +# try: +# if self.storagewrapper: +# self.rawserver.add_task(self._pausemaker, 0) +# except: +# return False +# self.unpauseflag.clear() +# return True +# +# def _pausemaker(self): +# self.whenpaused = clock() +# self.unpauseflag.wait() # sticks a monkey wrench in the main thread +# +# def Unpause(self): +# self.unpauseflag.set() +# if self.whenpaused and clock()-self.whenpaused > 60: +# def r(self = self): +# self.rerequest.announce(3) # rerequest automatically if paused for >60 seconds +# self.rawserver.add_task(r) + + def Pause(self): + if not self.storagewrapper: + return False + self.unpauseflag.clear() + self.rawserver.add_task(self.onPause) + return True + + def onPause(self): + self.whenpaused = clock() + if not self.downloader: + return + self.downloader.pause(True) + self.encoder.pause(True) + self.choker.pause(True) + + def Unpause(self): + self.unpauseflag.set() + 
self.rawserver.add_task(self.onUnpause) + + def onUnpause(self): + if not self.downloader: + return + self.downloader.pause(False) + self.encoder.pause(False) + self.choker.pause(False) + if self.rerequest and self.whenpaused and clock()-self.whenpaused > 60: + self.rerequest.announce(3) # rerequest automatically if paused for >60 seconds + + def set_super_seed(self,networkcalling=False): + self.superseedflag.set() + if networkcalling: + self._set_super_seed() + else: + self.rawserver.add_task(self._set_super_seed) + + def _set_super_seed(self): + if not self.super_seeding_active and self.finflag.isSet(): + self.super_seeding_active = True + self.logerrorfunc(' ** SUPER-SEED OPERATION ACTIVE **\n' + + ' please set Max uploads so each peer gets 6-8 kB/s') + def s(self = self): + self.downloader.set_super_seed() + self.choker.set_super_seed() + self.rawserver.add_task(s) + if self.finflag.isSet(): # mode started when already finished + def r(self = self): + self.rerequest.announce(3) # so after kicking everyone off, reannounce + self.rawserver.add_task(r) + + def am_I_finished(self): + return self.finflag.isSet() + + def get_transfer_stats(self): + return self.upmeasure.get_total(), self.downmeasure.get_total() + + def get_moviestreamtransport(self): + return self.voddownload diff --git a/tribler-mod/Tribler/Core/BitTornado/inifile.py b/tribler-mod/Tribler/Core/BitTornado/inifile.py new file mode 100644 index 0000000..14634b4 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/inifile.py @@ -0,0 +1,170 @@ +from time import localtime, strftime +# Written by John Hoffman +# see LICENSE.txt for license information + +__fool_epydoc = 481 +''' +reads/writes a Windows-style INI file +format: + + aa = "bb" + cc = 11 + + [eee] + ff = "gg" + +decodes to: +d = { '': {'aa':'bb','cc':'11'}, 'eee': {'ff':'gg'} } + +the encoder can also take this as input: + +d = { 'aa': 'bb, 'cc': 11, 'eee': {'ff':'gg'} } + +though it will only decode in the above format. Keywords must be strings. +Values that are strings are written surrounded by quotes, and the decoding +routine automatically strips any. +Booleans are written as integers. Anything else aside from string/int/float +may have unpredictable results. 
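+
+ini_write() returns True/False to indicate success; ini_read() returns the
+dictionary described above, or None if the file could not be read or parsed.
+A custom errfunc(lineno, line, message) may be passed to ini_read() to be told
+about malformed lines and duplicate keys or sections.
+
+A minimal usage sketch (the filename is only an example):
+
+    ini_write('session.ini', {'aa': 'bb', 'cc': 11}, comment='example')
+    cfg = ini_read('session.ini')    # -> {'': {'aa': 'bb', 'cc': '11'}}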
+''' + +from traceback import print_exc +from types import DictType, StringType +try: + from types import BooleanType +except ImportError: + BooleanType = None + +try: + True +except: + True = 1 + False = 0 + +DEBUG = False + +def ini_write(f, d, comment=''): + try: + a = {'':{}} + for k, v in d.items(): + assert type(k) == StringType + k = k.lower() + if type(v) == DictType: + if DEBUG: + print 'new section:' +k + if k: + assert not a.has_key(k) + a[k] = {} + aa = a[k] + for kk, vv in v: + assert type(kk) == StringType + kk = kk.lower() + assert not aa.has_key(kk) + if type(vv) == BooleanType: + vv = int(vv) + if type(vv) == StringType: + vv = '"'+vv+'"' + aa[kk] = str(vv) + if DEBUG: + print 'a['+k+']['+kk+'] = '+str(vv) + else: + aa = a[''] + assert not aa.has_key(k) + if type(v) == BooleanType: + v = int(v) + if type(v) == StringType: + v = '"'+v+'"' + aa[k] = str(v) + if DEBUG: + print 'a[\'\']['+k+'] = '+str(v) + r = open(f, 'w') + if comment: + for c in comment.split('\n'): + r.write('# '+c+'\n') + r.write('\n') + l = a.keys() + l.sort() + for k in l: + if k: + r.write('\n['+k+']\n') + aa = a[k] + ll = aa.keys() + ll.sort() + for kk in ll: + r.write(kk+' = '+aa[kk]+'\n') + success = True + except: + if DEBUG: + print_exc() + success = False + try: + r.close() + except: + pass + return success + + +if DEBUG: + def errfunc(lineno, line, err): + print '('+str(lineno)+') '+err+': '+line +else: + errfunc = lambda lineno, line, err: None + +def ini_read(f, errfunc = errfunc): + try: + r = open(f, 'r') + ll = r.readlines() + d = {} + dd = {'':d} + for i in xrange(len(ll)): + l = ll[i] + l = l.strip() + if not l: + continue + if l[0] == '#': + continue + if l[0] == '[': + if l[-1] != ']': + errfunc(i, l, 'syntax error') + continue + l1 = l[1:-1].strip().lower() + if not l1: + errfunc(i, l, 'syntax error') + continue + if dd.has_key(l1): + errfunc(i, l, 'duplicate section') + d = dd[l1] + continue + d = {} + dd[l1] = d + continue + try: + k, v = l.split('=', 1) + except: + try: + k, v = l.split(':', 1) + except: + errfunc(i, l, 'syntax error') + continue + k = k.strip().lower() + v = v.strip() + if len(v) > 1 and ( (v[0] == '"' and v[-1] == '"') or + (v[0] == "'" and v[-1] == "'") ): + v = v[1:-1] + if not k: + errfunc(i, l, 'syntax error') + continue + if d.has_key(k): + errfunc(i, l, 'duplicate entry') + continue + d[k] = v + if DEBUG: + print dd + except: + if DEBUG: + print_exc() + dd = None + try: + r.close() + except: + pass + return dd diff --git a/tribler-mod/Tribler/Core/BitTornado/inifile.py.bak b/tribler-mod/Tribler/Core/BitTornado/inifile.py.bak new file mode 100644 index 0000000..4802f0e --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/inifile.py.bak @@ -0,0 +1,169 @@ +# Written by John Hoffman +# see LICENSE.txt for license information + +__fool_epydoc = 481 +''' +reads/writes a Windows-style INI file +format: + + aa = "bb" + cc = 11 + + [eee] + ff = "gg" + +decodes to: +d = { '': {'aa':'bb','cc':'11'}, 'eee': {'ff':'gg'} } + +the encoder can also take this as input: + +d = { 'aa': 'bb, 'cc': 11, 'eee': {'ff':'gg'} } + +though it will only decode in the above format. Keywords must be strings. +Values that are strings are written surrounded by quotes, and the decoding +routine automatically strips any. +Booleans are written as integers. Anything else aside from string/int/float +may have unpredictable results. 
+''' + +from traceback import print_exc +from types import DictType, StringType +try: + from types import BooleanType +except ImportError: + BooleanType = None + +try: + True +except: + True = 1 + False = 0 + +DEBUG = False + +def ini_write(f, d, comment=''): + try: + a = {'':{}} + for k, v in d.items(): + assert type(k) == StringType + k = k.lower() + if type(v) == DictType: + if DEBUG: + print 'new section:' +k + if k: + assert not a.has_key(k) + a[k] = {} + aa = a[k] + for kk, vv in v: + assert type(kk) == StringType + kk = kk.lower() + assert not aa.has_key(kk) + if type(vv) == BooleanType: + vv = int(vv) + if type(vv) == StringType: + vv = '"'+vv+'"' + aa[kk] = str(vv) + if DEBUG: + print 'a['+k+']['+kk+'] = '+str(vv) + else: + aa = a[''] + assert not aa.has_key(k) + if type(v) == BooleanType: + v = int(v) + if type(v) == StringType: + v = '"'+v+'"' + aa[k] = str(v) + if DEBUG: + print 'a[\'\']['+k+'] = '+str(v) + r = open(f, 'w') + if comment: + for c in comment.split('\n'): + r.write('# '+c+'\n') + r.write('\n') + l = a.keys() + l.sort() + for k in l: + if k: + r.write('\n['+k+']\n') + aa = a[k] + ll = aa.keys() + ll.sort() + for kk in ll: + r.write(kk+' = '+aa[kk]+'\n') + success = True + except: + if DEBUG: + print_exc() + success = False + try: + r.close() + except: + pass + return success + + +if DEBUG: + def errfunc(lineno, line, err): + print '('+str(lineno)+') '+err+': '+line +else: + errfunc = lambda lineno, line, err: None + +def ini_read(f, errfunc = errfunc): + try: + r = open(f, 'r') + ll = r.readlines() + d = {} + dd = {'':d} + for i in xrange(len(ll)): + l = ll[i] + l = l.strip() + if not l: + continue + if l[0] == '#': + continue + if l[0] == '[': + if l[-1] != ']': + errfunc(i, l, 'syntax error') + continue + l1 = l[1:-1].strip().lower() + if not l1: + errfunc(i, l, 'syntax error') + continue + if dd.has_key(l1): + errfunc(i, l, 'duplicate section') + d = dd[l1] + continue + d = {} + dd[l1] = d + continue + try: + k, v = l.split('=', 1) + except: + try: + k, v = l.split(':', 1) + except: + errfunc(i, l, 'syntax error') + continue + k = k.strip().lower() + v = v.strip() + if len(v) > 1 and ( (v[0] == '"' and v[-1] == '"') or + (v[0] == "'" and v[-1] == "'") ): + v = v[1:-1] + if not k: + errfunc(i, l, 'syntax error') + continue + if d.has_key(k): + errfunc(i, l, 'duplicate entry') + continue + d[k] = v + if DEBUG: + print dd + except: + if DEBUG: + print_exc() + dd = None + try: + r.close() + except: + pass + return dd diff --git a/tribler-mod/Tribler/Core/BitTornado/iprangeparse.py b/tribler-mod/Tribler/Core/BitTornado/iprangeparse.py new file mode 100644 index 0000000..f41bf23 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/iprangeparse.py @@ -0,0 +1,195 @@ +from time import localtime, strftime +# Written by John Hoffman +# see LICENSE.txt for license information + +from bisect import bisect, insort + +try: + True +except: + True = 1 + False = 0 + bool = lambda x: not not x + + +def to_long_ipv4(ip): + ip = ip.split('.') + if len(ip) != 4: + raise ValueError, "bad address" + b = 0L + for n in ip: + b *= 256 + b += int(n) + return b + + +def to_long_ipv6(ip): + if not ip: + raise ValueError, "bad address" + if ip == '::': # boundary handling + ip = '' + elif ip[:2] == '::': + ip = ip[1:] + elif ip[0] == ':': + raise ValueError, "bad address" + elif ip[-2:] == '::': + ip = ip[:-1] + elif ip[-1] == ':': + raise ValueError, "bad address" + + b = [] + doublecolon = False + for n in ip.split(':'): + if n == '': # double-colon + if doublecolon: + raise ValueError, 
"bad address" + doublecolon = True + b.append(None) + continue + if n.find('.') >= 0: # IPv4 + n = n.split('.') + if len(n) != 4: + raise ValueError, "bad address" + for i in n: + b.append(int(i)) + continue + n = ('0'*(4-len(n))) + n + b.append(int(n[:2], 16)) + b.append(int(n[2:], 16)) + bb = 0L + for n in b: + if n is None: + for i in xrange(17-len(b)): + bb *= 256 + continue + bb *= 256 + bb += n + return bb + +ipv4addrmask = 65535L*256*256*256*256 + +class IP_List: + def __init__(self): + self.ipv4list = [] # starts of ranges + self.ipv4dict = {} # start: end of ranges + self.ipv6list = [] # " + self.ipv6dict = {} # " + + def __nonzero__(self): + return bool(self.ipv4list or self.ipv6list) + + + def append(self, ip_beg, ip_end = None): + if ip_end is None: + ip_end = ip_beg + else: + assert ip_beg <= ip_end + if ip_beg.find(':') < 0: # IPv4 + ip_beg = to_long_ipv4(ip_beg) + ip_end = to_long_ipv4(ip_end) + l = self.ipv4list + d = self.ipv4dict + else: + ip_beg = to_long_ipv6(ip_beg) + ip_end = to_long_ipv6(ip_end) + bb = ip_beg % (256*256*256*256) + if bb == ipv4addrmask: + ip_beg -= bb + ip_end -= bb + l = self.ipv4list + d = self.ipv4dict + else: + l = self.ipv6list + d = self.ipv6dict + + pos = bisect(l, ip_beg)-1 + done = pos < 0 + while not done: + p = pos + while p < len(l): + range_beg = l[p] + if range_beg > ip_end+1: + done = True + break + range_end = d[range_beg] + if range_end < ip_beg-1: + p += 1 + if p == len(l): + done = True + break + continue + # if neither of the above conditions is true, the ranges overlap + ip_beg = min(ip_beg, range_beg) + ip_end = max(ip_end, range_end) + del l[p] + del d[range_beg] + break + + insort(l, ip_beg) + d[ip_beg] = ip_end + + + def includes(self, ip): + if not (self.ipv4list or self.ipv6list): + return False + if ip.find(':') < 0: # IPv4 + ip = to_long_ipv4(ip) + l = self.ipv4list + d = self.ipv4dict + else: + ip = to_long_ipv6(ip) + bb = ip % (256*256*256*256) + if bb == ipv4addrmask: + ip -= bb + l = self.ipv4list + d = self.ipv4dict + else: + l = self.ipv6list + d = self.ipv6dict + for ip_beg in l[bisect(l, ip)-1:]: + if ip == ip_beg: + return True + ip_end = d[ip_beg] + if ip > ip_beg and ip <= ip_end: + return True + return False + + + # reads a list from a file in the format 'whatever:whatever:ip-ip' + # (not IPv6 compatible at all) + def read_rangelist(self, file): + f = open(file, 'r') + while 1: + line = f.readline() + if not line: + break + line = line.strip() + if not line or line[0] == '#': + continue + line = line.split(':')[-1] + try: + ip1, ip2 = line.split('-') + except: + ip1 = line + ip2 = line + try: + self.append(ip1.strip(), ip2.strip()) + except: + print '*** WARNING *** could not parse IP range: '+line + f.close() + +def is_ipv4(ip): + return ip.find(':') < 0 + +def is_valid_ip(ip): + try: + if is_ipv4(ip): + a = ip.split('.') + assert len(a) == 4 + for i in a: + chr(int(i)) + return True + to_long_ipv6(ip) + return True + except: + return False diff --git a/tribler-mod/Tribler/Core/BitTornado/iprangeparse.py.bak b/tribler-mod/Tribler/Core/BitTornado/iprangeparse.py.bak new file mode 100644 index 0000000..9304c39 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/iprangeparse.py.bak @@ -0,0 +1,194 @@ +# Written by John Hoffman +# see LICENSE.txt for license information + +from bisect import bisect, insort + +try: + True +except: + True = 1 + False = 0 + bool = lambda x: not not x + + +def to_long_ipv4(ip): + ip = ip.split('.') + if len(ip) != 4: + raise ValueError, "bad address" + b = 0L + for n in ip: + b *= 
256 + b += int(n) + return b + + +def to_long_ipv6(ip): + if not ip: + raise ValueError, "bad address" + if ip == '::': # boundary handling + ip = '' + elif ip[:2] == '::': + ip = ip[1:] + elif ip[0] == ':': + raise ValueError, "bad address" + elif ip[-2:] == '::': + ip = ip[:-1] + elif ip[-1] == ':': + raise ValueError, "bad address" + + b = [] + doublecolon = False + for n in ip.split(':'): + if n == '': # double-colon + if doublecolon: + raise ValueError, "bad address" + doublecolon = True + b.append(None) + continue + if n.find('.') >= 0: # IPv4 + n = n.split('.') + if len(n) != 4: + raise ValueError, "bad address" + for i in n: + b.append(int(i)) + continue + n = ('0'*(4-len(n))) + n + b.append(int(n[:2], 16)) + b.append(int(n[2:], 16)) + bb = 0L + for n in b: + if n is None: + for i in xrange(17-len(b)): + bb *= 256 + continue + bb *= 256 + bb += n + return bb + +ipv4addrmask = 65535L*256*256*256*256 + +class IP_List: + def __init__(self): + self.ipv4list = [] # starts of ranges + self.ipv4dict = {} # start: end of ranges + self.ipv6list = [] # " + self.ipv6dict = {} # " + + def __nonzero__(self): + return bool(self.ipv4list or self.ipv6list) + + + def append(self, ip_beg, ip_end = None): + if ip_end is None: + ip_end = ip_beg + else: + assert ip_beg <= ip_end + if ip_beg.find(':') < 0: # IPv4 + ip_beg = to_long_ipv4(ip_beg) + ip_end = to_long_ipv4(ip_end) + l = self.ipv4list + d = self.ipv4dict + else: + ip_beg = to_long_ipv6(ip_beg) + ip_end = to_long_ipv6(ip_end) + bb = ip_beg % (256*256*256*256) + if bb == ipv4addrmask: + ip_beg -= bb + ip_end -= bb + l = self.ipv4list + d = self.ipv4dict + else: + l = self.ipv6list + d = self.ipv6dict + + pos = bisect(l, ip_beg)-1 + done = pos < 0 + while not done: + p = pos + while p < len(l): + range_beg = l[p] + if range_beg > ip_end+1: + done = True + break + range_end = d[range_beg] + if range_end < ip_beg-1: + p += 1 + if p == len(l): + done = True + break + continue + # if neither of the above conditions is true, the ranges overlap + ip_beg = min(ip_beg, range_beg) + ip_end = max(ip_end, range_end) + del l[p] + del d[range_beg] + break + + insort(l, ip_beg) + d[ip_beg] = ip_end + + + def includes(self, ip): + if not (self.ipv4list or self.ipv6list): + return False + if ip.find(':') < 0: # IPv4 + ip = to_long_ipv4(ip) + l = self.ipv4list + d = self.ipv4dict + else: + ip = to_long_ipv6(ip) + bb = ip % (256*256*256*256) + if bb == ipv4addrmask: + ip -= bb + l = self.ipv4list + d = self.ipv4dict + else: + l = self.ipv6list + d = self.ipv6dict + for ip_beg in l[bisect(l, ip)-1:]: + if ip == ip_beg: + return True + ip_end = d[ip_beg] + if ip > ip_beg and ip <= ip_end: + return True + return False + + + # reads a list from a file in the format 'whatever:whatever:ip-ip' + # (not IPv6 compatible at all) + def read_rangelist(self, file): + f = open(file, 'r') + while 1: + line = f.readline() + if not line: + break + line = line.strip() + if not line or line[0] == '#': + continue + line = line.split(':')[-1] + try: + ip1, ip2 = line.split('-') + except: + ip1 = line + ip2 = line + try: + self.append(ip1.strip(), ip2.strip()) + except: + print '*** WARNING *** could not parse IP range: '+line + f.close() + +def is_ipv4(ip): + return ip.find(':') < 0 + +def is_valid_ip(ip): + try: + if is_ipv4(ip): + a = ip.split('.') + assert len(a) == 4 + for i in a: + chr(int(i)) + return True + to_long_ipv6(ip) + return True + except: + return False diff --git a/tribler-mod/Tribler/Core/BitTornado/natpunch.py b/tribler-mod/Tribler/Core/BitTornado/natpunch.py new 
file mode 100644 index 0000000..986e36c --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/natpunch.py @@ -0,0 +1,382 @@ +from time import localtime, strftime +# Written by John Hoffman, Arno Bakker +# derived from NATPortMapping.py by Yejun Yang +# and from example code by Myers Carpenter +# see LICENSE.txt for license information + +import sys +import socket +from traceback import print_exc +from subnetparse import IP_List +from clock import clock +from __init__ import createPeerID + +from Tribler.Core.NATFirewall.upnp import UPnPPlatformIndependent,UPnPError +from Tribler.Core.NATFirewall.guessip import get_my_wan_ip +try: + True +except: + True = 1 + False = 0 + +DEBUG = False + +EXPIRE_CACHE = 30 # seconds +ID = "BT-"+createPeerID()[-4:] + +try: + import pythoncom, win32com.client + win32_imported = 1 +except ImportError: + if DEBUG and (sys.platform == 'win32'): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","natpunch: ERROR: pywin32 package not installed, UPnP mode 2 won't work now" + win32_imported = 0 + +UPnPError = UPnPError + +class _UPnP1: # derived from Myers Carpenter's code + # seems to use the machine's local UPnP + # system for its operation. Runs fairly fast + + def __init__(self): + self.map = None + self.last_got_map = -10e10 + + def _get_map(self): + if self.last_got_map + EXPIRE_CACHE < clock(): + try: + dispatcher = win32com.client.Dispatch("HNetCfg.NATUPnP") + self.map = dispatcher.StaticPortMappingCollection + self.last_got_map = clock() + except: + if DEBUG: + print_exc() + self.map = None + return self.map + + def test(self): + try: + assert self._get_map() # make sure a map was found + success = True + except: + if DEBUG: + print_exc() + success = False + return success + + + def open(self, ip, p, iproto='TCP'): + map = self._get_map() + try: + map.Add(p, iproto, p, ip, True, ID) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'upnp1: succesfully opened port: '+ip+':'+str(p) + success = True + except: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","upnp1: COULDN'T OPEN "+str(p) + print_exc() + success = False + return success + + + def close(self, p, iproto='TCP'): + map = self._get_map() + try: + map.Remove(p, iproto) + success = True + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'upnp1: succesfully closed port: '+str(p) + except: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","upnp1: COULDN'T CLOSE "+str(p) + print_exc() + success = False + return success + + + def clean(self, retry = False, iproto='TCP'): + if not win32_imported: + return + try: + map = self._get_map() + ports_in_use = [] + for i in xrange(len(map)): + try: + mapping = map[i] + port = mapping.ExternalPort + prot = str(mapping.Protocol).lower() + desc = str(mapping.Description).lower() + except: + port = None + if port and prot == iproto.lower() and desc[:3] == 'bt-': + ports_in_use.append(port) + success = True + for port in ports_in_use: + try: + map.Remove(port, iproto) + except: + success = False + if not success and not retry: + self.clean(retry = True) + except: + pass + + def get_ext_ip(self): + return None + + +class _UPnP2: # derived from Yejun Yang's code + # apparently does a direct search for UPnP hardware + # may work in some cases where _UPnP1 won't, but is slow + # still need to implement "clean" method + + def __init__(self): + self.services = None + self.last_got_services = -10e10 + + def _get_services(self): + if 
not self.services or self.last_got_services + EXPIRE_CACHE < clock(): + self.services = [] + try: + f=win32com.client.Dispatch("UPnP.UPnPDeviceFinder") + for t in ( "urn:schemas-upnp-org:service:WANIPConnection:1", + "urn:schemas-upnp-org:service:WANPPPConnection:1" ): + try: + conns = f.FindByType(t, 0) + for c in xrange(len(conns)): + try: + svcs = conns[c].Services + for s in xrange(len(svcs)): + try: + self.services.append(svcs[s]) + except: + if DEBUG: + print_exc() + except: + if DEBUG: + print_exc() + except: + if DEBUG: + print_exc() + except: + if DEBUG: + print_exc() + self.last_got_services = clock() + return self.services + + def test(self): + try: + assert self._get_services() # make sure some services can be found + success = True + except: + success = False + return success + + + def open(self, ip, p, iproto='TCP'): + svcs = self._get_services() + success = False + for s in svcs: + try: + s.InvokeAction('AddPortMapping', ['', p, iproto, p, ip, True, ID, 0], '') + success = True + except: + if DEBUG: + print_exc() + if DEBUG and not success: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","upnp2: COULDN'T OPEN "+str(p) + print_exc() + return success + + + def close(self, p, iproto='TCP'): + svcs = self._get_services() + success = False + for s in svcs: + try: + s.InvokeAction('DeletePortMapping', ['', p, iproto], '') + success = True + except: + if DEBUG: + print_exc() + if DEBUG and not success: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","upnp2: COULDN'T CLOSE "+str(p) + print_exc() + return success + + + def get_ext_ip(self): + svcs = self._get_services() + success = None + for s in svcs: + try: + ret = s.InvokeAction('GetExternalIPAddress',[],'') + # With MS Internet Connection Sharing: + # - Good reply is: (None, (u'130.37.168.199',)) + # - When router disconnected from Internet: (None, (u'',)) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","upnp2: GetExternapIPAddress returned",ret + dns = ret[1] + if str(dns[0]) != '': + success = str(dns[0]) + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","upnp2: RETURNED IP ADDRESS EMPTY" + except: + if DEBUG: + print_exc() + if DEBUG and not success: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","upnp2: COULDN'T GET EXT IP ADDR" + return success + +class _UPnP3: + def __init__(self): + self.u = UPnPPlatformIndependent() + + def test(self): + try: + self.u.discover() + return self.u.found_wanted_services() + except: + if DEBUG: + print_exc() + return False + + def open(self,ip,p,iproto='TCP'): + """ Return False in case of network failure, + Raises UPnPError in case of a properly reported error from the server + """ + try: + self.u.add_port_map(ip,p,iproto=iproto) + return True + except UPnPError,e: + if DEBUG: + print_exc() + raise e + except: + if DEBUG: + print_exc() + return False + + def close(self,p,iproto='TCP'): + """ Return False in case of network failure, + Raises UPnPError in case of a properly reported error from the server + """ + try: + self.u.del_port_map(p,iproto=iproto) + return True + except UPnPError,e: + if DEBUG: + print_exc() + raise e + except: + if DEBUG: + print_exc() + return False + + def get_ext_ip(self): + """ Return False in case of network failure, + Raises UPnPError in case of a properly reported error from the server + """ + try: + return self.u.get_ext_ip() + except UPnPError,e: + if DEBUG: + print_exc() + raise e + except: + if DEBUG: + print_exc() + return None 
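+
+# UPnPWrapper below hides the three strategies above behind one interface:
+# test(upnp_type) probes the requested implementation (1 = HNetCfg.NATUPnP via
+# win32com, 2 = UPnP.UPnPDeviceFinder via win32com, 3 = Tribler's
+# platform-independent UPnPPlatformIndependent) and caches it, after which
+# open(), close() and get_ext_ip() are forwarded to the chosen implementation.
+# Types 1 and 2 need pywin32 (see win32_imported); the __main__ block at the
+# bottom of this module shows the typical register/test/open/close sequence.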
+ +class UPnPWrapper: # master holding class + + __single = None + + def __init__(self): + if UPnPWrapper.__single: + raise RuntimeError, "UPnPWrapper is singleton" + UPnPWrapper.__single = self + + self.upnp1 = _UPnP1() + self.upnp2 = _UPnP2() + self.upnp3 = _UPnP3() + self.upnplist = (None, self.upnp1, self.upnp2, self.upnp3) + self.upnp = None + self.local_ip = None + self.last_got_ip = -10e10 + + def getInstance(*args, **kw): + if UPnPWrapper.__single is None: + UPnPWrapper(*args, **kw) + return UPnPWrapper.__single + getInstance = staticmethod(getInstance) + + def register(self,guessed_localip): + self.local_ip = guessed_localip + + def get_ip(self): + if self.last_got_ip + EXPIRE_CACHE < clock(): + if self.local_ip is None: + local_ips = IP_List() + local_ips.set_intranet_addresses() + try: + for info in socket.getaddrinfo(socket.gethostname(), 0, socket.AF_INET): + # exception if socket library isn't recent + self.local_ip = info[4][0] + if local_ips.includes(self.local_ip): + self.last_got_ip = clock() + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'upnpX: Local IP found: '+self.local_ip + break + else: + raise ValueError('upnpX: couldn\'t find intranet IP') + except: + self.local_ip = None + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'upnpX: Error finding local IP' + print_exc() + return self.local_ip + + def test(self, upnp_type): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'upnpX: testing UPnP type '+str(upnp_type) + if not upnp_type or self.get_ip() is None or (upnp_type <= 2 and not win32_imported): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'upnpX: UPnP not supported' + return 0 + if upnp_type != 3: + pythoncom.CoInitialize() # leave initialized + self.upnp = self.upnplist[upnp_type] # cache this + if self.upnp.test(): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'upnpX: ok' + return upnp_type + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'upnpX: tested bad' + return 0 + + def open(self, p, iproto='TCP'): + assert self.upnp, "upnpX: must run UPnP_test() with the desired UPnP access type first" + return self.upnp.open(self.get_ip(), p, iproto=iproto) + + def close(self, p, iproto='TCP'): + assert self.upnp, "upnpX: must run UPnP_test() with the desired UPnP access type first" + return self.upnp.close(p,iproto=iproto) + + def clean(self,iproto='TCP'): + return self.upnp1.clean(iproto=iproto) + + def get_ext_ip(self): + assert self.upnp, "upnpX: must run UPnP_test() with the desired UPnP access type first" + return self.upnp.get_ext_ip() + +if __name__ == '__main__': + ip = get_my_wan_ip() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","guessed ip",ip + u = UPnPWrapper() + u.register(ip) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TEST RETURNED",u.test(3) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","IGD says my external IP is",u.get_ext_ip() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","IGD open returned",u.open(6881) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","IGD close returned",u.close(6881) diff --git a/tribler-mod/Tribler/Core/BitTornado/natpunch.py.bak b/tribler-mod/Tribler/Core/BitTornado/natpunch.py.bak new file mode 100644 index 0000000..ec9a257 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/natpunch.py.bak @@ -0,0 +1,381 @@ +# 
Written by John Hoffman, Arno Bakker +# derived from NATPortMapping.py by Yejun Yang +# and from example code by Myers Carpenter +# see LICENSE.txt for license information + +import sys +import socket +from traceback import print_exc +from subnetparse import IP_List +from clock import clock +from __init__ import createPeerID + +from Tribler.Core.NATFirewall.upnp import UPnPPlatformIndependent,UPnPError +from Tribler.Core.NATFirewall.guessip import get_my_wan_ip +try: + True +except: + True = 1 + False = 0 + +DEBUG = False + +EXPIRE_CACHE = 30 # seconds +ID = "BT-"+createPeerID()[-4:] + +try: + import pythoncom, win32com.client + win32_imported = 1 +except ImportError: + if DEBUG and (sys.platform == 'win32'): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","natpunch: ERROR: pywin32 package not installed, UPnP mode 2 won't work now" + win32_imported = 0 + +UPnPError = UPnPError + +class _UPnP1: # derived from Myers Carpenter's code + # seems to use the machine's local UPnP + # system for its operation. Runs fairly fast + + def __init__(self): + self.map = None + self.last_got_map = -10e10 + + def _get_map(self): + if self.last_got_map + EXPIRE_CACHE < clock(): + try: + dispatcher = win32com.client.Dispatch("HNetCfg.NATUPnP") + self.map = dispatcher.StaticPortMappingCollection + self.last_got_map = clock() + except: + if DEBUG: + print_exc() + self.map = None + return self.map + + def test(self): + try: + assert self._get_map() # make sure a map was found + success = True + except: + if DEBUG: + print_exc() + success = False + return success + + + def open(self, ip, p, iproto='TCP'): + map = self._get_map() + try: + map.Add(p, iproto, p, ip, True, ID) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'upnp1: succesfully opened port: '+ip+':'+str(p) + success = True + except: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","upnp1: COULDN'T OPEN "+str(p) + print_exc() + success = False + return success + + + def close(self, p, iproto='TCP'): + map = self._get_map() + try: + map.Remove(p, iproto) + success = True + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'upnp1: succesfully closed port: '+str(p) + except: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","upnp1: COULDN'T CLOSE "+str(p) + print_exc() + success = False + return success + + + def clean(self, retry = False, iproto='TCP'): + if not win32_imported: + return + try: + map = self._get_map() + ports_in_use = [] + for i in xrange(len(map)): + try: + mapping = map[i] + port = mapping.ExternalPort + prot = str(mapping.Protocol).lower() + desc = str(mapping.Description).lower() + except: + port = None + if port and prot == iproto.lower() and desc[:3] == 'bt-': + ports_in_use.append(port) + success = True + for port in ports_in_use: + try: + map.Remove(port, iproto) + except: + success = False + if not success and not retry: + self.clean(retry = True) + except: + pass + + def get_ext_ip(self): + return None + + +class _UPnP2: # derived from Yejun Yang's code + # apparently does a direct search for UPnP hardware + # may work in some cases where _UPnP1 won't, but is slow + # still need to implement "clean" method + + def __init__(self): + self.services = None + self.last_got_services = -10e10 + + def _get_services(self): + if not self.services or self.last_got_services + EXPIRE_CACHE < clock(): + self.services = [] + try: + f=win32com.client.Dispatch("UPnP.UPnPDeviceFinder") + for t in ( 
"urn:schemas-upnp-org:service:WANIPConnection:1", + "urn:schemas-upnp-org:service:WANPPPConnection:1" ): + try: + conns = f.FindByType(t, 0) + for c in xrange(len(conns)): + try: + svcs = conns[c].Services + for s in xrange(len(svcs)): + try: + self.services.append(svcs[s]) + except: + if DEBUG: + print_exc() + except: + if DEBUG: + print_exc() + except: + if DEBUG: + print_exc() + except: + if DEBUG: + print_exc() + self.last_got_services = clock() + return self.services + + def test(self): + try: + assert self._get_services() # make sure some services can be found + success = True + except: + success = False + return success + + + def open(self, ip, p, iproto='TCP'): + svcs = self._get_services() + success = False + for s in svcs: + try: + s.InvokeAction('AddPortMapping', ['', p, iproto, p, ip, True, ID, 0], '') + success = True + except: + if DEBUG: + print_exc() + if DEBUG and not success: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","upnp2: COULDN'T OPEN "+str(p) + print_exc() + return success + + + def close(self, p, iproto='TCP'): + svcs = self._get_services() + success = False + for s in svcs: + try: + s.InvokeAction('DeletePortMapping', ['', p, iproto], '') + success = True + except: + if DEBUG: + print_exc() + if DEBUG and not success: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","upnp2: COULDN'T CLOSE "+str(p) + print_exc() + return success + + + def get_ext_ip(self): + svcs = self._get_services() + success = None + for s in svcs: + try: + ret = s.InvokeAction('GetExternalIPAddress',[],'') + # With MS Internet Connection Sharing: + # - Good reply is: (None, (u'130.37.168.199',)) + # - When router disconnected from Internet: (None, (u'',)) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","upnp2: GetExternapIPAddress returned",ret + dns = ret[1] + if str(dns[0]) != '': + success = str(dns[0]) + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","upnp2: RETURNED IP ADDRESS EMPTY" + except: + if DEBUG: + print_exc() + if DEBUG and not success: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","upnp2: COULDN'T GET EXT IP ADDR" + return success + +class _UPnP3: + def __init__(self): + self.u = UPnPPlatformIndependent() + + def test(self): + try: + self.u.discover() + return self.u.found_wanted_services() + except: + if DEBUG: + print_exc() + return False + + def open(self,ip,p,iproto='TCP'): + """ Return False in case of network failure, + Raises UPnPError in case of a properly reported error from the server + """ + try: + self.u.add_port_map(ip,p,iproto=iproto) + return True + except UPnPError,e: + if DEBUG: + print_exc() + raise e + except: + if DEBUG: + print_exc() + return False + + def close(self,p,iproto='TCP'): + """ Return False in case of network failure, + Raises UPnPError in case of a properly reported error from the server + """ + try: + self.u.del_port_map(p,iproto=iproto) + return True + except UPnPError,e: + if DEBUG: + print_exc() + raise e + except: + if DEBUG: + print_exc() + return False + + def get_ext_ip(self): + """ Return False in case of network failure, + Raises UPnPError in case of a properly reported error from the server + """ + try: + return self.u.get_ext_ip() + except UPnPError,e: + if DEBUG: + print_exc() + raise e + except: + if DEBUG: + print_exc() + return None + +class UPnPWrapper: # master holding class + + __single = None + + def __init__(self): + if UPnPWrapper.__single: + raise RuntimeError, "UPnPWrapper is singleton" 
+ UPnPWrapper.__single = self + + self.upnp1 = _UPnP1() + self.upnp2 = _UPnP2() + self.upnp3 = _UPnP3() + self.upnplist = (None, self.upnp1, self.upnp2, self.upnp3) + self.upnp = None + self.local_ip = None + self.last_got_ip = -10e10 + + def getInstance(*args, **kw): + if UPnPWrapper.__single is None: + UPnPWrapper(*args, **kw) + return UPnPWrapper.__single + getInstance = staticmethod(getInstance) + + def register(self,guessed_localip): + self.local_ip = guessed_localip + + def get_ip(self): + if self.last_got_ip + EXPIRE_CACHE < clock(): + if self.local_ip is None: + local_ips = IP_List() + local_ips.set_intranet_addresses() + try: + for info in socket.getaddrinfo(socket.gethostname(), 0, socket.AF_INET): + # exception if socket library isn't recent + self.local_ip = info[4][0] + if local_ips.includes(self.local_ip): + self.last_got_ip = clock() + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'upnpX: Local IP found: '+self.local_ip + break + else: + raise ValueError('upnpX: couldn\'t find intranet IP') + except: + self.local_ip = None + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'upnpX: Error finding local IP' + print_exc() + return self.local_ip + + def test(self, upnp_type): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'upnpX: testing UPnP type '+str(upnp_type) + if not upnp_type or self.get_ip() is None or (upnp_type <= 2 and not win32_imported): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'upnpX: UPnP not supported' + return 0 + if upnp_type != 3: + pythoncom.CoInitialize() # leave initialized + self.upnp = self.upnplist[upnp_type] # cache this + if self.upnp.test(): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'upnpX: ok' + return upnp_type + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'upnpX: tested bad' + return 0 + + def open(self, p, iproto='TCP'): + assert self.upnp, "upnpX: must run UPnP_test() with the desired UPnP access type first" + return self.upnp.open(self.get_ip(), p, iproto=iproto) + + def close(self, p, iproto='TCP'): + assert self.upnp, "upnpX: must run UPnP_test() with the desired UPnP access type first" + return self.upnp.close(p,iproto=iproto) + + def clean(self,iproto='TCP'): + return self.upnp1.clean(iproto=iproto) + + def get_ext_ip(self): + assert self.upnp, "upnpX: must run UPnP_test() with the desired UPnP access type first" + return self.upnp.get_ext_ip() + +if __name__ == '__main__': + ip = get_my_wan_ip() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","guessed ip",ip + u = UPnPWrapper() + u.register(ip) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TEST RETURNED",u.test(3) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","IGD says my external IP is",u.get_ext_ip() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","IGD open returned",u.open(6881) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","IGD close returned",u.close(6881) diff --git a/tribler-mod/Tribler/Core/BitTornado/parseargs.py b/tribler-mod/Tribler/Core/BitTornado/parseargs.py new file mode 100644 index 0000000..d402857 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/parseargs.py @@ -0,0 +1,141 @@ +from time import localtime, strftime +# Written by Bill Bumgarner and Bram Cohen +# see LICENSE.txt for license information + +from types import * +from cStringIO import 
StringIO + + +def splitLine(line, COLS=80, indent=10): + indent = " " * indent + width = COLS - (len(indent) + 1) + if indent and width < 15: + width = COLS - 2 + indent = " " + s = StringIO() + i = 0 + for word in line.split(): + if i == 0: + s.write(indent+word) + i = len(word) + continue + if i + len(word) >= width: + s.write('\n'+indent+word) + i = len(word) + continue + s.write(' '+word) + i += len(word) + 1 + return s.getvalue() + +def formatDefinitions(options, COLS, presets = {}): + s = StringIO() + for (longname, default, doc) in options: + s.write('--' + longname + ' \n') + default = presets.get(longname, default) + if type(default) in (IntType, LongType): + try: + default = int(default) + except: + pass + if default is not None: + doc += ' (defaults to ' + repr(default) + ')' + s.write(splitLine(doc, COLS, 10)) + s.write('\n\n') + return s.getvalue() + + +def usage(string): + raise ValueError(string) + + +def defaultargs(options): + l = {} + for (longname, default, doc) in options: + if default is not None: + l[longname] = default + return l + + +def parseargs(argv, options, minargs = None, maxargs = None, presets = {}): + config = {} + longkeyed = {} + for option in options: + longname, default, doc = option + longkeyed[longname] = option + config[longname] = default + for longname in presets.keys(): # presets after defaults but before arguments + config[longname] = presets[longname] + options = [] + args = [] + pos = 0 + while pos < len(argv): + if argv[pos][:2] != '--': + args.append(argv[pos]) + pos += 1 + else: + if pos == len(argv) - 1: + usage('parameter passed in at end with no value') + key, value = argv[pos][2:], argv[pos+1] + pos += 2 + if not longkeyed.has_key(key): + usage('unknown key --' + key) + longname, default, doc = longkeyed[key] + try: + t = type(config[longname]) + if t is NoneType or t is StringType: + config[longname] = value + elif t in (IntType, LongType): + config[longname] = long(value) + elif t is FloatType: + config[longname] = float(value) + elif t is BooleanType: + config[longname] = bool(value) + else: + print 'parseargs: unknown type is',t + assert 0 + except ValueError, e: + usage('wrong format of --%s - %s' % (key, str(e))) + for key, value in config.items(): + if value is None: + usage("Option --%s is required." % key) + if minargs is not None and len(args) < minargs: + usage("Must supply at least %d args." % minargs) + if maxargs is not None and len(args) > maxargs: + usage("Too many args - %d max." 
% maxargs) + return (config, args) + +def test_parseargs(): + assert parseargs(('d', '--a', 'pq', 'e', '--b', '3', '--c', '4.5', 'f'), (('a', 'x', ''), ('b', 1, ''), ('c', 2.3, ''))) == ({'a': 'pq', 'b': 3, 'c': 4.5}, ['d', 'e', 'f']) + assert parseargs([], [('a', 'x', '')]) == ({'a': 'x'}, []) + assert parseargs(['--a', 'x', '--a', 'y'], [('a', '', '')]) == ({'a': 'y'}, []) + try: + parseargs([], [('a', 'x', '')]) + except ValueError: + pass + try: + parseargs(['--a', 'x'], []) + except ValueError: + pass + try: + parseargs(['--a'], [('a', 'x', '')]) + except ValueError: + pass + try: + parseargs([], [], 1, 2) + except ValueError: + pass + assert parseargs(['x'], [], 1, 2) == ({}, ['x']) + assert parseargs(['x', 'y'], [], 1, 2) == ({}, ['x', 'y']) + try: + parseargs(['x', 'y', 'z'], [], 1, 2) + except ValueError: + pass + try: + parseargs(['--a', '2.0'], [('a', 3, '')]) + except ValueError: + pass + try: + parseargs(['--a', 'z'], [('a', 2.1, '')]) + except ValueError: + pass + diff --git a/tribler-mod/Tribler/Core/BitTornado/parseargs.py.bak b/tribler-mod/Tribler/Core/BitTornado/parseargs.py.bak new file mode 100644 index 0000000..3a37e42 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/parseargs.py.bak @@ -0,0 +1,140 @@ +# Written by Bill Bumgarner and Bram Cohen +# see LICENSE.txt for license information + +from types import * +from cStringIO import StringIO + + +def splitLine(line, COLS=80, indent=10): + indent = " " * indent + width = COLS - (len(indent) + 1) + if indent and width < 15: + width = COLS - 2 + indent = " " + s = StringIO() + i = 0 + for word in line.split(): + if i == 0: + s.write(indent+word) + i = len(word) + continue + if i + len(word) >= width: + s.write('\n'+indent+word) + i = len(word) + continue + s.write(' '+word) + i += len(word) + 1 + return s.getvalue() + +def formatDefinitions(options, COLS, presets = {}): + s = StringIO() + for (longname, default, doc) in options: + s.write('--' + longname + ' \n') + default = presets.get(longname, default) + if type(default) in (IntType, LongType): + try: + default = int(default) + except: + pass + if default is not None: + doc += ' (defaults to ' + repr(default) + ')' + s.write(splitLine(doc, COLS, 10)) + s.write('\n\n') + return s.getvalue() + + +def usage(string): + raise ValueError(string) + + +def defaultargs(options): + l = {} + for (longname, default, doc) in options: + if default is not None: + l[longname] = default + return l + + +def parseargs(argv, options, minargs = None, maxargs = None, presets = {}): + config = {} + longkeyed = {} + for option in options: + longname, default, doc = option + longkeyed[longname] = option + config[longname] = default + for longname in presets.keys(): # presets after defaults but before arguments + config[longname] = presets[longname] + options = [] + args = [] + pos = 0 + while pos < len(argv): + if argv[pos][:2] != '--': + args.append(argv[pos]) + pos += 1 + else: + if pos == len(argv) - 1: + usage('parameter passed in at end with no value') + key, value = argv[pos][2:], argv[pos+1] + pos += 2 + if not longkeyed.has_key(key): + usage('unknown key --' + key) + longname, default, doc = longkeyed[key] + try: + t = type(config[longname]) + if t is NoneType or t is StringType: + config[longname] = value + elif t in (IntType, LongType): + config[longname] = long(value) + elif t is FloatType: + config[longname] = float(value) + elif t is BooleanType: + config[longname] = bool(value) + else: + print 'parseargs: unknown type is',t + assert 0 + except ValueError, e: + usage('wrong 
format of --%s - %s' % (key, str(e))) + for key, value in config.items(): + if value is None: + usage("Option --%s is required." % key) + if minargs is not None and len(args) < minargs: + usage("Must supply at least %d args." % minargs) + if maxargs is not None and len(args) > maxargs: + usage("Too many args - %d max." % maxargs) + return (config, args) + +def test_parseargs(): + assert parseargs(('d', '--a', 'pq', 'e', '--b', '3', '--c', '4.5', 'f'), (('a', 'x', ''), ('b', 1, ''), ('c', 2.3, ''))) == ({'a': 'pq', 'b': 3, 'c': 4.5}, ['d', 'e', 'f']) + assert parseargs([], [('a', 'x', '')]) == ({'a': 'x'}, []) + assert parseargs(['--a', 'x', '--a', 'y'], [('a', '', '')]) == ({'a': 'y'}, []) + try: + parseargs([], [('a', 'x', '')]) + except ValueError: + pass + try: + parseargs(['--a', 'x'], []) + except ValueError: + pass + try: + parseargs(['--a'], [('a', 'x', '')]) + except ValueError: + pass + try: + parseargs([], [], 1, 2) + except ValueError: + pass + assert parseargs(['x'], [], 1, 2) == ({}, ['x']) + assert parseargs(['x', 'y'], [], 1, 2) == ({}, ['x', 'y']) + try: + parseargs(['x', 'y', 'z'], [], 1, 2) + except ValueError: + pass + try: + parseargs(['--a', '2.0'], [('a', 3, '')]) + except ValueError: + pass + try: + parseargs(['--a', 'z'], [('a', 2.1, '')]) + except ValueError: + pass + diff --git a/tribler-mod/Tribler/Core/BitTornado/parsedir.py b/tribler-mod/Tribler/Core/BitTornado/parsedir.py new file mode 100644 index 0000000..a414fb1 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/parsedir.py @@ -0,0 +1,151 @@ +from time import localtime, strftime +# Written by John Hoffman and Uoti Urpala +# see LICENSE.txt for license information +from bencode import bencode, bdecode +from BT1.btformats import check_info +from sha import sha +import os +from Tribler.Core.simpledefs import TRIBLER_TORRENT_EXT + +try: + True +except: + True = 1 + False = 0 + +NOISY = False + +def _errfunc(x): + print ":: "+x + +def parsedir(directory, parsed, files, blocked, + exts = ['.torrent', TRIBLER_TORRENT_EXT], return_metainfo = False, errfunc = _errfunc): + if NOISY: + errfunc('checking dir') + dirs_to_check = [directory] + new_files = {} + new_blocked = {} + torrent_type = {} + while dirs_to_check: # first, recurse directories and gather torrents + directory = dirs_to_check.pop() + newtorrents = False + for f in os.listdir(directory): + newtorrent = None + for ext in exts: + if f.endswith(ext): + newtorrent = ext[1:] + break + if newtorrent: + newtorrents = True + p = os.path.join(directory, f) + new_files[p] = [(int(os.path.getmtime(p)), os.path.getsize(p)), 0] + torrent_type[p] = newtorrent + if not newtorrents: + for f in os.listdir(directory): + p = os.path.join(directory, f) + if os.path.isdir(p): + dirs_to_check.append(p) + + new_parsed = {} + to_add = [] + added = {} + removed = {} + # files[path] = [(modification_time, size), hash], hash is 0 if the file + # has not been successfully parsed + for p, v in new_files.items(): # re-add old items and check for changes + oldval = files.get(p) + if not oldval: # new file + to_add.append(p) + continue + h = oldval[1] + if oldval[0] == v[0]: # file is unchanged from last parse + if h: + if blocked.has_key(p): # parseable + blocked means duplicate + to_add.append(p) # other duplicate may have gone away + else: + new_parsed[h] = parsed[h] + new_files[p] = oldval + else: + new_blocked[p] = 1 # same broken unparseable file + continue + if parsed.has_key(h) and not blocked.has_key(p): + if NOISY: + errfunc('removing '+p+' (will re-add)') + removed[h] 
= parsed[h] + to_add.append(p) + + to_add.sort() + for p in to_add: # then, parse new and changed torrents + new_file = new_files[p] + v, h = new_file + if new_parsed.has_key(h): # duplicate + if not blocked.has_key(p) or files[p][0] != v: + errfunc('**warning** '+ + p +' is a duplicate torrent for '+new_parsed[h]['path']) + new_blocked[p] = 1 + continue + + if NOISY: + errfunc('adding '+p) + try: + ff = open(p, 'rb') + d = bdecode(ff.read()) + check_info(d['info']) + h = sha(bencode(d['info'])).digest() + new_file[1] = h + if new_parsed.has_key(h): + errfunc('**warning** '+ + p +' is a duplicate torrent for '+new_parsed[h]['path']) + new_blocked[p] = 1 + continue + + a = {} + a['path'] = p + f = os.path.basename(p) + a['file'] = f + a['type'] = torrent_type[p] + i = d['info'] + l = 0 + nf = 0 + if i.has_key('length'): + l = i.get('length', 0) + nf = 1 + elif i.has_key('files'): + for li in i['files']: + nf += 1 + if li.has_key('length'): + l += li['length'] + a['numfiles'] = nf + a['length'] = l + a['name'] = i.get('name', f) + def setkey(k, d = d, a = a): + if d.has_key(k): + a[k] = d[k] + setkey('failure reason') + setkey('warning message') + setkey('announce-list') + if return_metainfo: + a['metainfo'] = d + except: + errfunc('**warning** '+p+' has errors') + new_blocked[p] = 1 + continue + try: + ff.close() + except: + pass + if NOISY: + errfunc('... successful') + new_parsed[h] = a + added[h] = a + + for p, v in files.items(): # and finally, mark removed torrents + if not new_files.has_key(p) and not blocked.has_key(p): + if NOISY: + errfunc('removing '+p) + removed[v[1]] = parsed[v[1]] + + if NOISY: + errfunc('done checking') + return (new_parsed, new_files, new_blocked, added, removed) + diff --git a/tribler-mod/Tribler/Core/BitTornado/parsedir.py.bak b/tribler-mod/Tribler/Core/BitTornado/parsedir.py.bak new file mode 100644 index 0000000..2564f85 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/parsedir.py.bak @@ -0,0 +1,150 @@ +# Written by John Hoffman and Uoti Urpala +# see LICENSE.txt for license information +from bencode import bencode, bdecode +from BT1.btformats import check_info +from sha import sha +import os +from Tribler.Core.simpledefs import TRIBLER_TORRENT_EXT + +try: + True +except: + True = 1 + False = 0 + +NOISY = False + +def _errfunc(x): + print ":: "+x + +def parsedir(directory, parsed, files, blocked, + exts = ['.torrent', TRIBLER_TORRENT_EXT], return_metainfo = False, errfunc = _errfunc): + if NOISY: + errfunc('checking dir') + dirs_to_check = [directory] + new_files = {} + new_blocked = {} + torrent_type = {} + while dirs_to_check: # first, recurse directories and gather torrents + directory = dirs_to_check.pop() + newtorrents = False + for f in os.listdir(directory): + newtorrent = None + for ext in exts: + if f.endswith(ext): + newtorrent = ext[1:] + break + if newtorrent: + newtorrents = True + p = os.path.join(directory, f) + new_files[p] = [(int(os.path.getmtime(p)), os.path.getsize(p)), 0] + torrent_type[p] = newtorrent + if not newtorrents: + for f in os.listdir(directory): + p = os.path.join(directory, f) + if os.path.isdir(p): + dirs_to_check.append(p) + + new_parsed = {} + to_add = [] + added = {} + removed = {} + # files[path] = [(modification_time, size), hash], hash is 0 if the file + # has not been successfully parsed + for p, v in new_files.items(): # re-add old items and check for changes + oldval = files.get(p) + if not oldval: # new file + to_add.append(p) + continue + h = oldval[1] + if oldval[0] == v[0]: # file is unchanged from 
last parse + if h: + if blocked.has_key(p): # parseable + blocked means duplicate + to_add.append(p) # other duplicate may have gone away + else: + new_parsed[h] = parsed[h] + new_files[p] = oldval + else: + new_blocked[p] = 1 # same broken unparseable file + continue + if parsed.has_key(h) and not blocked.has_key(p): + if NOISY: + errfunc('removing '+p+' (will re-add)') + removed[h] = parsed[h] + to_add.append(p) + + to_add.sort() + for p in to_add: # then, parse new and changed torrents + new_file = new_files[p] + v, h = new_file + if new_parsed.has_key(h): # duplicate + if not blocked.has_key(p) or files[p][0] != v: + errfunc('**warning** '+ + p +' is a duplicate torrent for '+new_parsed[h]['path']) + new_blocked[p] = 1 + continue + + if NOISY: + errfunc('adding '+p) + try: + ff = open(p, 'rb') + d = bdecode(ff.read()) + check_info(d['info']) + h = sha(bencode(d['info'])).digest() + new_file[1] = h + if new_parsed.has_key(h): + errfunc('**warning** '+ + p +' is a duplicate torrent for '+new_parsed[h]['path']) + new_blocked[p] = 1 + continue + + a = {} + a['path'] = p + f = os.path.basename(p) + a['file'] = f + a['type'] = torrent_type[p] + i = d['info'] + l = 0 + nf = 0 + if i.has_key('length'): + l = i.get('length', 0) + nf = 1 + elif i.has_key('files'): + for li in i['files']: + nf += 1 + if li.has_key('length'): + l += li['length'] + a['numfiles'] = nf + a['length'] = l + a['name'] = i.get('name', f) + def setkey(k, d = d, a = a): + if d.has_key(k): + a[k] = d[k] + setkey('failure reason') + setkey('warning message') + setkey('announce-list') + if return_metainfo: + a['metainfo'] = d + except: + errfunc('**warning** '+p+' has errors') + new_blocked[p] = 1 + continue + try: + ff.close() + except: + pass + if NOISY: + errfunc('... successful') + new_parsed[h] = a + added[h] = a + + for p, v in files.items(): # and finally, mark removed torrents + if not new_files.has_key(p) and not blocked.has_key(p): + if NOISY: + errfunc('removing '+p) + removed[v[1]] = parsed[v[1]] + + if NOISY: + errfunc('done checking') + return (new_parsed, new_files, new_blocked, added, removed) + diff --git a/tribler-mod/Tribler/Core/BitTornado/piecebuffer.py b/tribler-mod/Tribler/Core/BitTornado/piecebuffer.py new file mode 100644 index 0000000..03aa61e --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/piecebuffer.py @@ -0,0 +1,87 @@ +from time import localtime, strftime +# Written by John Hoffman +# see LICENSE.txt for license information + +from array import array +from threading import Lock +# import inspect +try: + True +except: + True = 1 + False = 0 + +DEBUG = False + +class SingleBuffer: + def __init__(self, pool): + self.pool = pool + self.buf = array('c') + + def init(self): + if DEBUG: + print self.pool.count + ''' + for x in xrange(6,1,-1): + try: + f = inspect.currentframe(x).f_code + print (f.co_filename,f.co_firstlineno,f.co_name) + del f + except: + pass + print '' + ''' + self.length = 0 + + def append(self, s): + l = self.length+len(s) + self.buf[self.length:l] = array('c', s) + self.length = l + + def __len__(self): + return self.length + + def __getslice__(self, a, b): + if b > self.length: + b = self.length + if b < 0: + b += self.length + if a == 0 and b == self.length and len(self.buf) == b: + return self.buf # optimization + return self.buf[a:b] + + def getarray(self): + return self.buf[:self.length] + + def release(self): + if DEBUG: + print -self.pool.count + self.pool.release(self) + + +class BufferPool: + def __init__(self): + self.pool = [] + self.lock = Lock() + if DEBUG: + 
self.count = 0 + + def new(self): + self.lock.acquire() + if self.pool: + x = self.pool.pop() + else: + x = SingleBuffer(self) + if DEBUG: + self.count += 1 + x.count = self.count + x.init() + self.lock.release() + return x + + def release(self, x): + self.pool.append(x) + + +_pool = BufferPool() +PieceBuffer = _pool.new diff --git a/tribler-mod/Tribler/Core/BitTornado/piecebuffer.py.bak b/tribler-mod/Tribler/Core/BitTornado/piecebuffer.py.bak new file mode 100644 index 0000000..75e3e07 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/piecebuffer.py.bak @@ -0,0 +1,86 @@ +# Written by John Hoffman +# see LICENSE.txt for license information + +from array import array +from threading import Lock +# import inspect +try: + True +except: + True = 1 + False = 0 + +DEBUG = False + +class SingleBuffer: + def __init__(self, pool): + self.pool = pool + self.buf = array('c') + + def init(self): + if DEBUG: + print self.pool.count + ''' + for x in xrange(6,1,-1): + try: + f = inspect.currentframe(x).f_code + print (f.co_filename,f.co_firstlineno,f.co_name) + del f + except: + pass + print '' + ''' + self.length = 0 + + def append(self, s): + l = self.length+len(s) + self.buf[self.length:l] = array('c', s) + self.length = l + + def __len__(self): + return self.length + + def __getslice__(self, a, b): + if b > self.length: + b = self.length + if b < 0: + b += self.length + if a == 0 and b == self.length and len(self.buf) == b: + return self.buf # optimization + return self.buf[a:b] + + def getarray(self): + return self.buf[:self.length] + + def release(self): + if DEBUG: + print -self.pool.count + self.pool.release(self) + + +class BufferPool: + def __init__(self): + self.pool = [] + self.lock = Lock() + if DEBUG: + self.count = 0 + + def new(self): + self.lock.acquire() + if self.pool: + x = self.pool.pop() + else: + x = SingleBuffer(self) + if DEBUG: + self.count += 1 + x.count = self.count + x.init() + self.lock.release() + return x + + def release(self, x): + self.pool.append(x) + + +_pool = BufferPool() +PieceBuffer = _pool.new diff --git a/tribler-mod/Tribler/Core/BitTornado/selectpoll.py b/tribler-mod/Tribler/Core/BitTornado/selectpoll.py new file mode 100644 index 0000000..d1ff358 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/selectpoll.py @@ -0,0 +1,129 @@ +from time import localtime, strftime + +# Written by Bram Cohen +# see LICENSE.txt for license information +# Arno,2007-02-23: this poll class is used on win32 + +import sys +from select import select +from time import sleep +from types import IntType +from bisect import bisect +from sets import Set +POLLIN = 1 +POLLOUT = 2 +POLLERR = 8 +POLLHUP = 16 + +DEBUG = True #False + +class poll: + def __init__(self): + self.rlist = [] + self.wlist = [] + + def register(self, f, t): + if type(f) != IntType: + f = f.fileno() + if (t & POLLIN): + insert(self.rlist, f) + else: + remove(self.rlist, f) + if (t & POLLOUT): + insert(self.wlist, f) + else: + remove(self.wlist, f) + + def unregister(self, f): + if type(f) != IntType: + f = f.fileno() + remove(self.rlist, f) + remove(self.wlist, f) + + def poll(self, timeout = None): + if self.rlist or self.wlist: + try: + # Arno, 2007-02-23: The original code never checked for errors + # on any file descriptors. 
+ elist = Set(self.rlist) + elist = elist.union(self.wlist) + elist = list(elist) # in Python2.3, elist must be a list type + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","selectpoll: elist = ",elist + r, w, e = select(self.rlist, self.wlist, elist, timeout) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","selectpoll: e = ",e + except ValueError: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","selectpoll: select: bad param" + return None + else: + sleep(timeout) + return [] + result = [] + for s in r: + result.append((s, POLLIN)) + for s in w: + result.append((s, POLLOUT)) + for s in e: + result.append((s, POLLERR)) + return result + +def remove(list, item): + i = bisect(list, item) + if i > 0 and list[i-1] == item: + del list[i-1] + +def insert(list, item): + i = bisect(list, item) + if i == 0 or list[i-1] != item: + list.insert(i, item) + +def test_remove(): + x = [2, 4, 6] + remove(x, 2) + assert x == [4, 6] + x = [2, 4, 6] + remove(x, 4) + assert x == [2, 6] + x = [2, 4, 6] + remove(x, 6) + assert x == [2, 4] + x = [2, 4, 6] + remove(x, 5) + assert x == [2, 4, 6] + x = [2, 4, 6] + remove(x, 1) + assert x == [2, 4, 6] + x = [2, 4, 6] + remove(x, 7) + assert x == [2, 4, 6] + x = [2, 4, 6] + remove(x, 5) + assert x == [2, 4, 6] + x = [] + remove(x, 3) + assert x == [] + +def test_insert(): + x = [2, 4] + insert(x, 1) + assert x == [1, 2, 4] + x = [2, 4] + insert(x, 3) + assert x == [2, 3, 4] + x = [2, 4] + insert(x, 5) + assert x == [2, 4, 5] + x = [2, 4] + insert(x, 2) + assert x == [2, 4] + x = [2, 4] + insert(x, 4) + assert x == [2, 4] + x = [2, 3, 4] + insert(x, 3) + assert x == [2, 3, 4] + x = [] + insert(x, 3) + assert x == [3] diff --git a/tribler-mod/Tribler/Core/BitTornado/selectpoll.py.bak b/tribler-mod/Tribler/Core/BitTornado/selectpoll.py.bak new file mode 100644 index 0000000..13ab6da --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/selectpoll.py.bak @@ -0,0 +1,128 @@ + +# Written by Bram Cohen +# see LICENSE.txt for license information +# Arno,2007-02-23: this poll class is used on win32 + +import sys +from select import select +from time import sleep +from types import IntType +from bisect import bisect +from sets import Set +POLLIN = 1 +POLLOUT = 2 +POLLERR = 8 +POLLHUP = 16 + +DEBUG = True #False + +class poll: + def __init__(self): + self.rlist = [] + self.wlist = [] + + def register(self, f, t): + if type(f) != IntType: + f = f.fileno() + if (t & POLLIN): + insert(self.rlist, f) + else: + remove(self.rlist, f) + if (t & POLLOUT): + insert(self.wlist, f) + else: + remove(self.wlist, f) + + def unregister(self, f): + if type(f) != IntType: + f = f.fileno() + remove(self.rlist, f) + remove(self.wlist, f) + + def poll(self, timeout = None): + if self.rlist or self.wlist: + try: + # Arno, 2007-02-23: The original code never checked for errors + # on any file descriptors. 
+ elist = Set(self.rlist) + elist = elist.union(self.wlist) + elist = list(elist) # in Python2.3, elist must be a list type + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","selectpoll: elist = ",elist + r, w, e = select(self.rlist, self.wlist, elist, timeout) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","selectpoll: e = ",e + except ValueError: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","selectpoll: select: bad param" + return None + else: + sleep(timeout) + return [] + result = [] + for s in r: + result.append((s, POLLIN)) + for s in w: + result.append((s, POLLOUT)) + for s in e: + result.append((s, POLLERR)) + return result + +def remove(list, item): + i = bisect(list, item) + if i > 0 and list[i-1] == item: + del list[i-1] + +def insert(list, item): + i = bisect(list, item) + if i == 0 or list[i-1] != item: + list.insert(i, item) + +def test_remove(): + x = [2, 4, 6] + remove(x, 2) + assert x == [4, 6] + x = [2, 4, 6] + remove(x, 4) + assert x == [2, 6] + x = [2, 4, 6] + remove(x, 6) + assert x == [2, 4] + x = [2, 4, 6] + remove(x, 5) + assert x == [2, 4, 6] + x = [2, 4, 6] + remove(x, 1) + assert x == [2, 4, 6] + x = [2, 4, 6] + remove(x, 7) + assert x == [2, 4, 6] + x = [2, 4, 6] + remove(x, 5) + assert x == [2, 4, 6] + x = [] + remove(x, 3) + assert x == [] + +def test_insert(): + x = [2, 4] + insert(x, 1) + assert x == [1, 2, 4] + x = [2, 4] + insert(x, 3) + assert x == [2, 3, 4] + x = [2, 4] + insert(x, 5) + assert x == [2, 4, 5] + x = [2, 4] + insert(x, 2) + assert x == [2, 4] + x = [2, 4] + insert(x, 4) + assert x == [2, 4] + x = [2, 3, 4] + insert(x, 3) + assert x == [2, 3, 4] + x = [] + insert(x, 3) + assert x == [3] diff --git a/tribler-mod/Tribler/Core/BitTornado/subnetparse.py b/tribler-mod/Tribler/Core/BitTornado/subnetparse.py new file mode 100644 index 0000000..ff1fd5e --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/subnetparse.py @@ -0,0 +1,219 @@ +from time import localtime, strftime +# Written by John Hoffman +# see LICENSE.txt for license information + +from bisect import bisect, insort + +try: + True +except: + True = 1 + False = 0 + bool = lambda x: not not x + +hexbinmap = { + '0': '0000', + '1': '0001', + '2': '0010', + '3': '0011', + '4': '0100', + '5': '0101', + '6': '0110', + '7': '0111', + '8': '1000', + '9': '1001', + 'a': '1010', + 'b': '1011', + 'c': '1100', + 'd': '1101', + 'e': '1110', + 'f': '1111', + 'x': '0000', +} + +chrbinmap = {} +for n in xrange(256): + b = [] + nn = n + for i in xrange(8): + if nn & 0x80: + b.append('1') + else: + b.append('0') + nn <<= 1 + chrbinmap[n] = ''.join(b) + + +def to_bitfield_ipv4(ip): + ip = ip.split('.') + if len(ip) != 4: + raise ValueError, "bad address" + b = [] + for i in ip: + b.append(chrbinmap[int(i)]) + return ''.join(b) + +def to_bitfield_ipv6(ip): + b = '' + doublecolon = False + + if not ip: + raise ValueError, "bad address" + if ip == '::': # boundary handling + ip = '' + elif ip[:2] == '::': + ip = ip[1:] + elif ip[0] == ':': + raise ValueError, "bad address" + elif ip[-2:] == '::': + ip = ip[:-1] + elif ip[-1] == ':': + raise ValueError, "bad address" + for n in ip.split(':'): + if n == '': # double-colon + if doublecolon: + raise ValueError, "bad address" + doublecolon = True + b += ':' + continue + if n.find('.') >= 0: # IPv4 + n = to_bitfield_ipv4(n) + b += n + '0'*(32-len(n)) + continue + n = ('x'*(4-len(n))) + n + for i in n: + b += hexbinmap[i] + if doublecolon: + pos = b.find(':') + b 
= b[:pos]+('0'*(129-len(b)))+b[pos+1:] + if len(b) != 128: # always check size + raise ValueError, "bad address" + return b + +ipv4addrmask = to_bitfield_ipv6('::ffff:0:0')[:96] + +class IP_List: + def __init__(self): + self.ipv4list = [] + self.ipv6list = [] + + def __nonzero__(self): + return bool(self.ipv4list or self.ipv6list) + + + def append(self, ip, depth = 256): + if ip.find(':') < 0: # IPv4 + insort(self.ipv4list, to_bitfield_ipv4(ip)[:depth]) + else: + b = to_bitfield_ipv6(ip) + if b.startswith(ipv4addrmask): + insort(self.ipv4list, b[96:][:depth-96]) + else: + insort(self.ipv6list, b[:depth]) + + + def includes(self, ip): + if not (self.ipv4list or self.ipv6list): + return False + if ip.find(':') < 0: # IPv4 + b = to_bitfield_ipv4(ip) + else: + b = to_bitfield_ipv6(ip) + if b.startswith(ipv4addrmask): + b = b[96:] + if len(b) > 32: + l = self.ipv6list + else: + l = self.ipv4list + for map in l[bisect(l, b)-1:]: + if b.startswith(map): + return True + if map > b: + return False + return False + + + def read_fieldlist(self, file): # reads a list from a file in the format 'ip/len ' + f = open(file, 'r') + while 1: + line = f.readline() + if not line: + break + line = line.strip().expandtabs() + if not line or line[0] == '#': + continue + try: + line, garbage = line.split(' ', 1) + except: + pass + try: + line, garbage = line.split('#', 1) + except: + pass + try: + ip, depth = line.split('/') + except: + ip = line + depth = None + try: + if depth is not None: + depth = int(depth) + self.append(ip, depth) + except: + print '*** WARNING *** could not parse IP range: '+line + f.close() + + + def set_intranet_addresses(self): + self.append('127.0.0.1', 8) + self.append('10.0.0.0', 8) + self.append('172.16.0.0', 12) + self.append('192.168.0.0', 16) + self.append('169.254.0.0', 16) + self.append('::1') + self.append('fe80::', 16) + self.append('fec0::', 16) + + def set_ipv4_addresses(self): + self.append('::ffff:0:0', 96) + +def ipv6_to_ipv4(ip): + ip = to_bitfield_ipv6(ip) + if not ip.startswith(ipv4addrmask): + raise ValueError, "not convertible to IPv4" + ip = ip[-32:] + x = '' + for i in range(4): + x += str(int(ip[:8], 2)) + if i < 3: + x += '.' 
+ ip = ip[8:] + return x + +def to_ipv4(ip): + if is_ipv4(ip): + _valid_ipv4(ip) + return ip + return ipv6_to_ipv4(ip) + +def is_ipv4(ip): + return ip.find(':') < 0 + +def _valid_ipv4(ip): + ip = ip.split('.') + if len(ip) != 4: + raise ValueError + for i in ip: + chr(int(i)) + +def is_valid_ip(ip): + try: + if not ip: + return False + if is_ipv4(ip): + _valid_ipv4(ip) + return True + to_bitfield_ipv6(ip) + return True + except: + return False diff --git a/tribler-mod/Tribler/Core/BitTornado/subnetparse.py.bak b/tribler-mod/Tribler/Core/BitTornado/subnetparse.py.bak new file mode 100644 index 0000000..18e4187 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/subnetparse.py.bak @@ -0,0 +1,218 @@ +# Written by John Hoffman +# see LICENSE.txt for license information + +from bisect import bisect, insort + +try: + True +except: + True = 1 + False = 0 + bool = lambda x: not not x + +hexbinmap = { + '0': '0000', + '1': '0001', + '2': '0010', + '3': '0011', + '4': '0100', + '5': '0101', + '6': '0110', + '7': '0111', + '8': '1000', + '9': '1001', + 'a': '1010', + 'b': '1011', + 'c': '1100', + 'd': '1101', + 'e': '1110', + 'f': '1111', + 'x': '0000', +} + +chrbinmap = {} +for n in xrange(256): + b = [] + nn = n + for i in xrange(8): + if nn & 0x80: + b.append('1') + else: + b.append('0') + nn <<= 1 + chrbinmap[n] = ''.join(b) + + +def to_bitfield_ipv4(ip): + ip = ip.split('.') + if len(ip) != 4: + raise ValueError, "bad address" + b = [] + for i in ip: + b.append(chrbinmap[int(i)]) + return ''.join(b) + +def to_bitfield_ipv6(ip): + b = '' + doublecolon = False + + if not ip: + raise ValueError, "bad address" + if ip == '::': # boundary handling + ip = '' + elif ip[:2] == '::': + ip = ip[1:] + elif ip[0] == ':': + raise ValueError, "bad address" + elif ip[-2:] == '::': + ip = ip[:-1] + elif ip[-1] == ':': + raise ValueError, "bad address" + for n in ip.split(':'): + if n == '': # double-colon + if doublecolon: + raise ValueError, "bad address" + doublecolon = True + b += ':' + continue + if n.find('.') >= 0: # IPv4 + n = to_bitfield_ipv4(n) + b += n + '0'*(32-len(n)) + continue + n = ('x'*(4-len(n))) + n + for i in n: + b += hexbinmap[i] + if doublecolon: + pos = b.find(':') + b = b[:pos]+('0'*(129-len(b)))+b[pos+1:] + if len(b) != 128: # always check size + raise ValueError, "bad address" + return b + +ipv4addrmask = to_bitfield_ipv6('::ffff:0:0')[:96] + +class IP_List: + def __init__(self): + self.ipv4list = [] + self.ipv6list = [] + + def __nonzero__(self): + return bool(self.ipv4list or self.ipv6list) + + + def append(self, ip, depth = 256): + if ip.find(':') < 0: # IPv4 + insort(self.ipv4list, to_bitfield_ipv4(ip)[:depth]) + else: + b = to_bitfield_ipv6(ip) + if b.startswith(ipv4addrmask): + insort(self.ipv4list, b[96:][:depth-96]) + else: + insort(self.ipv6list, b[:depth]) + + + def includes(self, ip): + if not (self.ipv4list or self.ipv6list): + return False + if ip.find(':') < 0: # IPv4 + b = to_bitfield_ipv4(ip) + else: + b = to_bitfield_ipv6(ip) + if b.startswith(ipv4addrmask): + b = b[96:] + if len(b) > 32: + l = self.ipv6list + else: + l = self.ipv4list + for map in l[bisect(l, b)-1:]: + if b.startswith(map): + return True + if map > b: + return False + return False + + + def read_fieldlist(self, file): # reads a list from a file in the format 'ip/len ' + f = open(file, 'r') + while 1: + line = f.readline() + if not line: + break + line = line.strip().expandtabs() + if not line or line[0] == '#': + continue + try: + line, garbage = line.split(' ', 1) + except: + pass + try: + line, 
garbage = line.split('#', 1) + except: + pass + try: + ip, depth = line.split('/') + except: + ip = line + depth = None + try: + if depth is not None: + depth = int(depth) + self.append(ip, depth) + except: + print '*** WARNING *** could not parse IP range: '+line + f.close() + + + def set_intranet_addresses(self): + self.append('127.0.0.1', 8) + self.append('10.0.0.0', 8) + self.append('172.16.0.0', 12) + self.append('192.168.0.0', 16) + self.append('169.254.0.0', 16) + self.append('::1') + self.append('fe80::', 16) + self.append('fec0::', 16) + + def set_ipv4_addresses(self): + self.append('::ffff:0:0', 96) + +def ipv6_to_ipv4(ip): + ip = to_bitfield_ipv6(ip) + if not ip.startswith(ipv4addrmask): + raise ValueError, "not convertible to IPv4" + ip = ip[-32:] + x = '' + for i in range(4): + x += str(int(ip[:8], 2)) + if i < 3: + x += '.' + ip = ip[8:] + return x + +def to_ipv4(ip): + if is_ipv4(ip): + _valid_ipv4(ip) + return ip + return ipv6_to_ipv4(ip) + +def is_ipv4(ip): + return ip.find(':') < 0 + +def _valid_ipv4(ip): + ip = ip.split('.') + if len(ip) != 4: + raise ValueError + for i in ip: + chr(int(i)) + +def is_valid_ip(ip): + try: + if not ip: + return False + if is_ipv4(ip): + _valid_ipv4(ip) + return True + to_bitfield_ipv6(ip) + return True + except: + return False diff --git a/tribler-mod/Tribler/Core/BitTornado/torrentlistparse.py b/tribler-mod/Tribler/Core/BitTornado/torrentlistparse.py new file mode 100644 index 0000000..9931635 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/torrentlistparse.py @@ -0,0 +1,39 @@ +from time import localtime, strftime +# Written by John Hoffman +# see LICENSE.txt for license information + +from binascii import unhexlify + +try: + True +except: + True = 1 + False = 0 + + +# parses a list of torrent hashes, in the format of one hash per line in hex format + +def parsetorrentlist(filename, parsed): + new_parsed = {} + added = {} + removed = parsed + f = open(filename, 'r') + while 1: + l = f.readline() + if not l: + break + l = l.strip() + try: + if len(l) != 40: + raise ValueError, 'bad line' + h = unhexlify(l) + except: + print '*** WARNING *** could not parse line in torrent list: '+l + if parsed.has_key(h): + del removed[h] + else: + added[h] = True + new_parsed[h] = True + f.close() + return (new_parsed, added, removed) + diff --git a/tribler-mod/Tribler/Core/BitTornado/torrentlistparse.py.bak b/tribler-mod/Tribler/Core/BitTornado/torrentlistparse.py.bak new file mode 100644 index 0000000..668c245 --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/torrentlistparse.py.bak @@ -0,0 +1,38 @@ +# Written by John Hoffman +# see LICENSE.txt for license information + +from binascii import unhexlify + +try: + True +except: + True = 1 + False = 0 + + +# parses a list of torrent hashes, in the format of one hash per line in hex format + +def parsetorrentlist(filename, parsed): + new_parsed = {} + added = {} + removed = parsed + f = open(filename, 'r') + while 1: + l = f.readline() + if not l: + break + l = l.strip() + try: + if len(l) != 40: + raise ValueError, 'bad line' + h = unhexlify(l) + except: + print '*** WARNING *** could not parse line in torrent list: '+l + if parsed.has_key(h): + del removed[h] + else: + added[h] = True + new_parsed[h] = True + f.close() + return (new_parsed, added, removed) + diff --git a/tribler-mod/Tribler/Core/BitTornado/zurllib.py b/tribler-mod/Tribler/Core/BitTornado/zurllib.py new file mode 100644 index 0000000..bbe485c --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/zurllib.py @@ -0,0 +1,101 @@ 
+from time import localtime, strftime +# Written by John Hoffman +# see LICENSE.txt for license information + +from httplib import HTTPConnection, HTTPSConnection, HTTPException +from urlparse import urlparse +from bencode import bdecode +from gzip import GzipFile +from StringIO import StringIO +from __init__ import product_name, version_short +from traceback import print_exc + +VERSION = product_name+'/'+version_short +MAX_REDIRECTS = 10 + + +class btHTTPcon(HTTPConnection): # attempt to add automatic connection timeout + def connect(self): + HTTPConnection.connect(self) + try: + self.sock.settimeout(30) + except: + pass + +class btHTTPScon(HTTPSConnection): # attempt to add automatic connection timeout + def connect(self): + HTTPSConnection.connect(self) + try: + self.sock.settimeout(30) + except: + pass + +class urlopen: + def __init__(self, url): + self.tries = 0 + self._open(url.strip()) + self.error_return = None + + def _open(self, url): + self.tries += 1 + if self.tries > MAX_REDIRECTS: + raise IOError, ('http error', 500, + "Internal Server Error: Redirect Recursion") + (scheme, netloc, path, pars, query, fragment) = urlparse(url) + if scheme != 'http' and scheme != 'https': + raise IOError, ('url error', 'unknown url type', scheme, url) + url = path + if pars: + url += ';'+pars + if query: + url += '?'+query +# if fragment: + try: + if scheme == 'http': + self.connection = btHTTPcon(netloc) + else: + self.connection = btHTTPScon(netloc) + self.connection.request('GET', url, None, + { 'User-Agent': VERSION, + 'Accept-Encoding': 'gzip' } ) + self.response = self.connection.getresponse() + except HTTPException, e: + print_exc() + raise IOError, ('http error', str(e)) + status = self.response.status + if status in (301, 302): + try: + self.connection.close() + except: + pass + self._open(self.response.getheader('Location')) + return + if status != 200: + try: + data = self._read() + d = bdecode(data) + if d.has_key('failure reason'): + self.error_return = data + return + except: + pass + raise IOError, ('http error', status, self.response.reason) + + def read(self): + if self.error_return: + return self.error_return + return self._read() + + def _read(self): + data = self.response.read() + if self.response.getheader('Content-Encoding', '').find('gzip') >= 0: + try: + compressed = StringIO(data) + f = GzipFile(fileobj = compressed) + data = f.read() + except: + raise IOError, ('http error', 'got corrupt response') + return data + + def close(self): + self.connection.close() diff --git a/tribler-mod/Tribler/Core/BitTornado/zurllib.py.bak b/tribler-mod/Tribler/Core/BitTornado/zurllib.py.bak new file mode 100644 index 0000000..2aa578d --- /dev/null +++ b/tribler-mod/Tribler/Core/BitTornado/zurllib.py.bak @@ -0,0 +1,100 @@ +# Written by John Hoffman +# see LICENSE.txt for license information + +from httplib import HTTPConnection, HTTPSConnection, HTTPException +from urlparse import urlparse +from bencode import bdecode +from gzip import GzipFile +from StringIO import StringIO +from __init__ import product_name, version_short +from traceback import print_exc + +VERSION = product_name+'/'+version_short +MAX_REDIRECTS = 10 + + +class btHTTPcon(HTTPConnection): # attempt to add automatic connection timeout + def connect(self): + HTTPConnection.connect(self) + try: + self.sock.settimeout(30) + except: + pass + +class btHTTPScon(HTTPSConnection): # attempt to add automatic connection timeout + def connect(self): + HTTPSConnection.connect(self) + try: + self.sock.settimeout(30) + except: + pass 
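
The urlopen class that follows wraps the tracker-request pattern used throughout BitTornado: issue a GET with 'Accept-Encoding: gzip', follow at most MAX_REDIRECTS redirects, and transparently gunzip the reply before handing it back to the caller. Below is a compact sketch of the same flow against the modern standard library, assuming plain http URLs and absolute Location headers; fetch() is illustrative only and not part of the patch.

    # Illustrative sketch (not part of the patch): bounded-redirect GET with
    # transparent gzip decoding, mirroring the urlopen class below.
    import gzip, io
    from http.client import HTTPConnection
    from urllib.parse import urlsplit

    def fetch(url, max_redirects=10, timeout=30):
        for _ in range(max_redirects):
            parts = urlsplit(url)
            conn = HTTPConnection(parts.netloc, timeout=timeout)
            path = parts.path or '/'
            if parts.query:
                path += '?' + parts.query
            conn.request('GET', path, headers={'Accept-Encoding': 'gzip'})
            resp = conn.getresponse()
            if resp.status in (301, 302):                 # follow a bounded number of redirects
                url = resp.getheader('Location')
                conn.close()
                continue
            data = resp.read()
            if 'gzip' in (resp.getheader('Content-Encoding') or ''):
                data = gzip.GzipFile(fileobj=io.BytesIO(data)).read()
            conn.close()
            return data
        raise IOError('redirect recursion')
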
+ +class urlopen: + def __init__(self, url): + self.tries = 0 + self._open(url.strip()) + self.error_return = None + + def _open(self, url): + self.tries += 1 + if self.tries > MAX_REDIRECTS: + raise IOError, ('http error', 500, + "Internal Server Error: Redirect Recursion") + (scheme, netloc, path, pars, query, fragment) = urlparse(url) + if scheme != 'http' and scheme != 'https': + raise IOError, ('url error', 'unknown url type', scheme, url) + url = path + if pars: + url += ';'+pars + if query: + url += '?'+query +# if fragment: + try: + if scheme == 'http': + self.connection = btHTTPcon(netloc) + else: + self.connection = btHTTPScon(netloc) + self.connection.request('GET', url, None, + { 'User-Agent': VERSION, + 'Accept-Encoding': 'gzip' } ) + self.response = self.connection.getresponse() + except HTTPException, e: + print_exc() + raise IOError, ('http error', str(e)) + status = self.response.status + if status in (301, 302): + try: + self.connection.close() + except: + pass + self._open(self.response.getheader('Location')) + return + if status != 200: + try: + data = self._read() + d = bdecode(data) + if d.has_key('failure reason'): + self.error_return = data + return + except: + pass + raise IOError, ('http error', status, self.response.reason) + + def read(self): + if self.error_return: + return self.error_return + return self._read() + + def _read(self): + data = self.response.read() + if self.response.getheader('Content-Encoding', '').find('gzip') >= 0: + try: + compressed = StringIO(data) + f = GzipFile(fileobj = compressed) + data = f.read() + except: + raise IOError, ('http error', 'got corrupt response') + return data + + def close(self): + self.connection.close() diff --git a/tribler-mod/Tribler/Core/BuddyCast/TorrentCollecting.py b/tribler-mod/Tribler/Core/BuddyCast/TorrentCollecting.py new file mode 100644 index 0000000..d841f4d --- /dev/null +++ b/tribler-mod/Tribler/Core/BuddyCast/TorrentCollecting.py @@ -0,0 +1,27 @@ +from time import localtime, strftime +# Written by Jie Yang +# see LICENSE.txt for license information + +DEBUG = False + +class SimpleTorrentCollecting: + """ + Simplest torrent collecting policy: randomly collect a torrent when received + a buddycast message + """ + + def __init__(self, metadata_handler, data_handler): + self.metadata_handler = metadata_handler + self.data_handler = data_handler + self.torrent_db = data_handler.torrent_db + self.pref_db = data_handler.pref_db + self.cache_pool = {} + + + def trigger(self, permid, selversion, collect_candidate=None): + infohash = self.torrent_db.selectTorrentToCollect(permid, collect_candidate) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '*****-----------***** trigger torrent collecting', `infohash` + if infohash and self.metadata_handler: + self.metadata_handler.send_metadata_request(permid, infohash, selversion) + + diff --git a/tribler-mod/Tribler/Core/BuddyCast/TorrentCollecting.py.bak b/tribler-mod/Tribler/Core/BuddyCast/TorrentCollecting.py.bak new file mode 100644 index 0000000..c9bf827 --- /dev/null +++ b/tribler-mod/Tribler/Core/BuddyCast/TorrentCollecting.py.bak @@ -0,0 +1,26 @@ +# Written by Jie Yang +# see LICENSE.txt for license information + +DEBUG = False + +class SimpleTorrentCollecting: + """ + Simplest torrent collecting policy: randomly collect a torrent when received + a buddycast message + """ + + def __init__(self, metadata_handler, data_handler): + self.metadata_handler = metadata_handler + self.data_handler = data_handler + self.torrent_db = 
data_handler.torrent_db + self.pref_db = data_handler.pref_db + self.cache_pool = {} + + + def trigger(self, permid, selversion, collect_candidate=None): + infohash = self.torrent_db.selectTorrentToCollect(permid, collect_candidate) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '*****-----------***** trigger torrent collecting', `infohash` + if infohash and self.metadata_handler: + self.metadata_handler.send_metadata_request(permid, infohash, selversion) + + diff --git a/tribler-mod/Tribler/Core/BuddyCast/__init__.py b/tribler-mod/Tribler/Core/BuddyCast/__init__.py new file mode 100644 index 0000000..b7ef832 --- /dev/null +++ b/tribler-mod/Tribler/Core/BuddyCast/__init__.py @@ -0,0 +1,3 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information diff --git a/tribler-mod/Tribler/Core/BuddyCast/__init__.py.bak b/tribler-mod/Tribler/Core/BuddyCast/__init__.py.bak new file mode 100644 index 0000000..395f8fb --- /dev/null +++ b/tribler-mod/Tribler/Core/BuddyCast/__init__.py.bak @@ -0,0 +1,2 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information diff --git a/tribler-mod/Tribler/Core/BuddyCast/bartercast.py b/tribler-mod/Tribler/Core/BuddyCast/bartercast.py new file mode 100644 index 0000000..066e45a --- /dev/null +++ b/tribler-mod/Tribler/Core/BuddyCast/bartercast.py @@ -0,0 +1,344 @@ +from time import localtime, strftime +# Written by Michel Meulpolder +# see LICENSE.txt for license information +import sys, os + +from Tribler.Core.BitTornado.bencode import bencode, bdecode +from Tribler.Core.Statistics.Logger import OverlayLogger +from Tribler.Core.BitTornado.BT1.MessageID import BARTERCAST #, KEEP_ALIVE +from Tribler.Core.CacheDB.CacheDBHandler import BarterCastDBHandler +from Tribler.Core.Utilities.utilities import * +from traceback import print_exc +from types import StringType, ListType, DictType +from time import time, gmtime, strftime, ctime + +from Tribler.Core.Overlay.permid import permid_for_user +from Tribler.Core.Overlay.SecureOverlay import OLPROTO_VER_FIFTH + + +MAX_BARTERCAST_LENGTH = 10 * 1024 * 1024 # TODO: give this length a reasonable value +NO_PEERS_IN_MSG = 10 +REFRESH_TOPN_INTERVAL = 30 * 60 + +DEBUG = False +LOG = False + +def now(): + return int(time()) + +class BarterCastCore: + + ################################ + def __init__(self, data_handler, overlay_bridge, log = '', dnsindb = None): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "=================Initializing bartercast core" + + self.data_handler = data_handler + self.dnsindb = dnsindb + self.log = log + self.overlay_bridge = overlay_bridge + self.bartercastdb = BarterCastDBHandler.getInstance() + + self.network_delay = 30 + self.send_block_list = {} + self.recv_block_list = {} + self.block_interval = 1*60*60 # block interval for a peer to barter cast + + self.topn = self.bartercastdb.getTopNPeers(NO_PEERS_IN_MSG, local_only = True)['top'] + self.overlay_bridge.add_task(self.refreshTopN, REFRESH_TOPN_INTERVAL) + + if self.log: + self.overlay_log = OverlayLogger.getInstance(self.log) + + if LOG: + self.logfile = '/Users/michel/packages/bartercast_dataset/bartercast42.log' + if not os.path.exists(self.logfile): + log = open(self.logfile, 'w') + log.close() + + + ################################ + def refreshTopN(self): + + self.topn = self.bartercastdb.getTopNPeers(NO_PEERS_IN_MSG, local_only = True)['top'] + self.overlay_bridge.add_task(self.refreshTopN, REFRESH_TOPN_INTERVAL) + + + + 
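    # Shape of the payload assembled by createBarterCastMessage() below, shown
    # with made-up permid strings and byte counts (illustrative only); the dict
    # is bencoded and sent with the BARTERCAST message id prepended:
    #
    #   {'data':   {'<permid of peer A>': {'u': 1048576, 'd': 524288},
    #               '<permid of peer B>': {'u': 0,       'd': 2097152}},
    #    'totals': (total_up, total_down)}    # as returned by bartercastdb.getTotals()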
################################ + def createAndSendBarterCastMessage(self, target_permid, selversion, active = False): + + + # for older versions of Tribler (non-BarterCast): do nothing + if selversion <= OLPROTO_VER_FIFTH: + return + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "===========bartercast: Sending BarterCast msg to ", self.bartercastdb.getName(target_permid) + + # create a new bartercast message + bartercast_data = self.createBarterCastMessage(target_permid) + + if LOG: + self.logMsg(bartercast_data, target_permid, 'out', logfile = self.logfile) + + try: + bartercast_msg = bencode(bartercast_data) + except: + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "error bartercast_data:", bartercast_data + return + + # send the message + self.overlay_bridge.send(target_permid, BARTERCAST+bartercast_msg, self.bartercastSendCallback) + + self.blockPeer(target_permid, self.send_block_list, self.block_interval) + + + + ################################ + def createBarterCastMessage(self, target_permid): + """ Create a bartercast message """ + + my_permid = self.bartercastdb.my_permid + local_top = self.topn + top_peers = map(lambda (permid, up, down): permid, local_top) + data = {} + totals = self.bartercastdb.getTotals() # (total_up, total_down) + + for permid in top_peers: + + item = self.bartercastdb.getItem((my_permid, permid)) + + if item is not None: + # retrieve what i have uploaded to permid + data_to = item['uploaded'] + # retrieve what i have downloaded from permid + data_from = item['downloaded'] + + data[permid] = {'u': data_to, 'd': data_from} + + bartercast_data = {'data': data, 'totals': totals} + + return bartercast_data + + + ################################ + def bartercastSendCallback(self, exc, target_permid, other=0): + if exc is None: + if DEBUG: + print "bartercast: %s *** msg was sent successfully to peer %s" % (ctime(now()), self.bartercastdb.getName(target_permid)) + else: + if DEBUG: + print "bartercast: %s *** warning - error in sending msg to %s" % (ctime(now()), self.bartercastdb.getName(target_permid)) + + + ################################ + def gotBarterCastMessage(self, recv_msg, sender_permid, selversion): + """ Received a bartercast message and handle it. 
Reply if needed """ + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'bartercast: %s Received a BarterCast msg from %s'% (ctime(now()), self.bartercastdb.getName(sender_permid)) + + if not sender_permid or sender_permid == self.bartercastdb.my_permid: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bartercast: error - got BarterCastMsg from a None peer", \ + sender_permid, recv_msg + return False + + if MAX_BARTERCAST_LENGTH > 0 and len(recv_msg) > MAX_BARTERCAST_LENGTH: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bartercast: warning - got large BarterCastMsg", len(t) + return False + + bartercast_data = {} + + try: + bartercast_data = bdecode(recv_msg) + except: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bartercast: warning, invalid bencoded data" + return False + + try: # check bartercast message + self.validBarterCastMsg(bartercast_data) + except RuntimeError, msg: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", msg + return False + + if LOG: + self.logMsg(bartercast_data, sender_permid, 'in', logfile = self.logfile) + + data = bartercast_data['data'] + + if 'totals' in bartercast_data: + totals = bartercast_data['totals'] + else: + totals = None + + if DEBUG: + st = time() + self.handleBarterCastMsg(sender_permid, data) + et = time() + diff = et - st + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bartercast: HANDLE took %.4f" % diff + else: + self.handleBarterCastMsg(sender_permid, data, totals) + + if not self.isBlocked(sender_permid, self.send_block_list): + self.replyBarterCast(sender_permid, selversion) + + return True + + + + ################################ + def validBarterCastMsg(self, bartercast_data): + + if not type(bartercast_data) == DictType: + raise RuntimeError, "bartercast: received data is not a dictionary" + return False + + if not bartercast_data.has_key('data'): + raise RuntimeError, "bartercast: 'data' key doesn't exist" + return False + + if not type(bartercast_data['data']) == DictType: + raise RuntimeError, "bartercast: 'data' value is not dictionary" + return False + + for permid in bartercast_data['data'].keys(): + + if not bartercast_data['data'][permid].has_key('u') or \ + not bartercast_data['data'][permid].has_key('d'): + raise RuntimeError, "bartercast: datafield doesn't contain 'u' or 'd' keys" + return False + + return True + + ################################ + def handleBarterCastMsg(self, sender_permid, data, totals = None): + """ process bartercast data in database """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bartercast: Processing bartercast msg from: ", self.bartercastdb.getName(sender_permid) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "totals: ", totals + + + permids = data.keys() + changed = False + + # 1. Add any unknown peers to the database in a single transaction + self.bartercastdb.addPeersBatch(permids) + + + # 2. Add totals to database (without committing) + if totals != None and len(totals) == 2: + up = int(totals[0]) + down = int(totals[1]) + self.bartercastdb.updateULDL((sender_permid, sender_permid), up, down, commit = False) + changed = True + + # 3. 
Add all the received records to the database in a single transaction + datalen = len(permids) + for i in range(0,datalen): + permid = permids[i] + + data_to = data[permid]['u'] + data_from = data[permid]['d'] + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bartercast: data: (%s, %s) up = %d down = %d" % (self.bartercastdb.getName(sender_permid), self.bartercastdb.getName(permid),\ + data_to, data_from) + + # update database sender->permid and permid->sender + #commit = (i == datalen-1) + self.bartercastdb.updateULDL((sender_permid, permid), data_to, data_from, commit = False) + changed = True + + if changed: + self.bartercastdb.commit() + + + # ARNODB: + # get rid of index on DB? See where used + + + ################################ + def replyBarterCast(self, target_permid, selversion): + """ Reply a bartercast message """ + + if DEBUG: + st = time() + self.createAndSendBarterCastMessage(target_permid, selversion) + et = time() + diff = et - st + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bartercast: CREATE took %.4f" % diff + else: + self.createAndSendBarterCastMessage(target_permid, selversion) + + + # Blocking functions (similar to BuddyCast): + + ################################ + def isBlocked(self, peer_permid, block_list): + if peer_permid not in block_list: + return False + unblock_time = block_list[peer_permid] + if now() >= unblock_time - self.network_delay: # 30 seconds for network delay + block_list.pop(peer_permid) + return False + return True + + + + ################################ + def blockPeer(self, peer_permid, block_list, block_interval=None): + """ Add a peer to a block list """ + + if block_interval is None: + block_interval = self.block_interval + unblock_time = now() + block_interval + block_list[peer_permid] = unblock_time + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'bartercast: %s Blocked peer %s'% (ctime(now()), self.bartercastdb.getName(peer_permid)) + + + ################################ + def logMsg(self, msg_data, msg_permid, in_or_out, logfile): + + if in_or_out == 'in': + permid_from = permid_for_user(msg_permid) + + elif in_or_out == 'out': + permid_from = 'LOCAL' + + else: + return + + timestamp = now() + + log = open(logfile, 'a') + string = '%.1f %s %s' % (timestamp, in_or_out, permid_for_user(msg_permid)) + log.write(string + '\n') + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", string + + data = msg_data.get('data', []) + + for permid in data: + u = data[permid]['u'] + d = data[permid]['d'] + + string = '%.1f %s %s %d %d' % (timestamp, permid_from, permid_for_user(permid), u, d) + log.write(string + '\n') + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", string + + totals = msg_data.get('totals', None) + + if totals != None: + (u, d) = totals + + string = '%.1f TOT %s %d %d' % (timestamp, permid_from, u, d) + log.write(string + '\n') + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", string + + + log.close() diff --git a/tribler-mod/Tribler/Core/BuddyCast/bartercast.py.bak b/tribler-mod/Tribler/Core/BuddyCast/bartercast.py.bak new file mode 100644 index 0000000..dbff951 --- /dev/null +++ b/tribler-mod/Tribler/Core/BuddyCast/bartercast.py.bak @@ -0,0 +1,343 @@ +# Written by Michel Meulpolder +# see LICENSE.txt for license information +import sys, os + +from Tribler.Core.BitTornado.bencode import bencode, bdecode +from Tribler.Core.Statistics.Logger import OverlayLogger +from 
Tribler.Core.BitTornado.BT1.MessageID import BARTERCAST #, KEEP_ALIVE +from Tribler.Core.CacheDB.CacheDBHandler import BarterCastDBHandler +from Tribler.Core.Utilities.utilities import * +from traceback import print_exc +from types import StringType, ListType, DictType +from time import time, gmtime, strftime, ctime + +from Tribler.Core.Overlay.permid import permid_for_user +from Tribler.Core.Overlay.SecureOverlay import OLPROTO_VER_FIFTH + + +MAX_BARTERCAST_LENGTH = 10 * 1024 * 1024 # TODO: give this length a reasonable value +NO_PEERS_IN_MSG = 10 +REFRESH_TOPN_INTERVAL = 30 * 60 + +DEBUG = False +LOG = False + +def now(): + return int(time()) + +class BarterCastCore: + + ################################ + def __init__(self, data_handler, overlay_bridge, log = '', dnsindb = None): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "=================Initializing bartercast core" + + self.data_handler = data_handler + self.dnsindb = dnsindb + self.log = log + self.overlay_bridge = overlay_bridge + self.bartercastdb = BarterCastDBHandler.getInstance() + + self.network_delay = 30 + self.send_block_list = {} + self.recv_block_list = {} + self.block_interval = 1*60*60 # block interval for a peer to barter cast + + self.topn = self.bartercastdb.getTopNPeers(NO_PEERS_IN_MSG, local_only = True)['top'] + self.overlay_bridge.add_task(self.refreshTopN, REFRESH_TOPN_INTERVAL) + + if self.log: + self.overlay_log = OverlayLogger.getInstance(self.log) + + if LOG: + self.logfile = '/Users/michel/packages/bartercast_dataset/bartercast42.log' + if not os.path.exists(self.logfile): + log = open(self.logfile, 'w') + log.close() + + + ################################ + def refreshTopN(self): + + self.topn = self.bartercastdb.getTopNPeers(NO_PEERS_IN_MSG, local_only = True)['top'] + self.overlay_bridge.add_task(self.refreshTopN, REFRESH_TOPN_INTERVAL) + + + + ################################ + def createAndSendBarterCastMessage(self, target_permid, selversion, active = False): + + + # for older versions of Tribler (non-BarterCast): do nothing + if selversion <= OLPROTO_VER_FIFTH: + return + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "===========bartercast: Sending BarterCast msg to ", self.bartercastdb.getName(target_permid) + + # create a new bartercast message + bartercast_data = self.createBarterCastMessage(target_permid) + + if LOG: + self.logMsg(bartercast_data, target_permid, 'out', logfile = self.logfile) + + try: + bartercast_msg = bencode(bartercast_data) + except: + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "error bartercast_data:", bartercast_data + return + + # send the message + self.overlay_bridge.send(target_permid, BARTERCAST+bartercast_msg, self.bartercastSendCallback) + + self.blockPeer(target_permid, self.send_block_list, self.block_interval) + + + + ################################ + def createBarterCastMessage(self, target_permid): + """ Create a bartercast message """ + + my_permid = self.bartercastdb.my_permid + local_top = self.topn + top_peers = map(lambda (permid, up, down): permid, local_top) + data = {} + totals = self.bartercastdb.getTotals() # (total_up, total_down) + + for permid in top_peers: + + item = self.bartercastdb.getItem((my_permid, permid)) + + if item is not None: + # retrieve what i have uploaded to permid + data_to = item['uploaded'] + # retrieve what i have downloaded from permid + data_from = item['downloaded'] + + data[permid] = {'u': data_to, 'd': 
data_from} + + bartercast_data = {'data': data, 'totals': totals} + + return bartercast_data + + + ################################ + def bartercastSendCallback(self, exc, target_permid, other=0): + if exc is None: + if DEBUG: + print "bartercast: %s *** msg was sent successfully to peer %s" % (ctime(now()), self.bartercastdb.getName(target_permid)) + else: + if DEBUG: + print "bartercast: %s *** warning - error in sending msg to %s" % (ctime(now()), self.bartercastdb.getName(target_permid)) + + + ################################ + def gotBarterCastMessage(self, recv_msg, sender_permid, selversion): + """ Received a bartercast message and handle it. Reply if needed """ + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'bartercast: %s Received a BarterCast msg from %s'% (ctime(now()), self.bartercastdb.getName(sender_permid)) + + if not sender_permid or sender_permid == self.bartercastdb.my_permid: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bartercast: error - got BarterCastMsg from a None peer", \ + sender_permid, recv_msg + return False + + if MAX_BARTERCAST_LENGTH > 0 and len(recv_msg) > MAX_BARTERCAST_LENGTH: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bartercast: warning - got large BarterCastMsg", len(t) + return False + + bartercast_data = {} + + try: + bartercast_data = bdecode(recv_msg) + except: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bartercast: warning, invalid bencoded data" + return False + + try: # check bartercast message + self.validBarterCastMsg(bartercast_data) + except RuntimeError, msg: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", msg + return False + + if LOG: + self.logMsg(bartercast_data, sender_permid, 'in', logfile = self.logfile) + + data = bartercast_data['data'] + + if 'totals' in bartercast_data: + totals = bartercast_data['totals'] + else: + totals = None + + if DEBUG: + st = time() + self.handleBarterCastMsg(sender_permid, data) + et = time() + diff = et - st + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bartercast: HANDLE took %.4f" % diff + else: + self.handleBarterCastMsg(sender_permid, data, totals) + + if not self.isBlocked(sender_permid, self.send_block_list): + self.replyBarterCast(sender_permid, selversion) + + return True + + + + ################################ + def validBarterCastMsg(self, bartercast_data): + + if not type(bartercast_data) == DictType: + raise RuntimeError, "bartercast: received data is not a dictionary" + return False + + if not bartercast_data.has_key('data'): + raise RuntimeError, "bartercast: 'data' key doesn't exist" + return False + + if not type(bartercast_data['data']) == DictType: + raise RuntimeError, "bartercast: 'data' value is not dictionary" + return False + + for permid in bartercast_data['data'].keys(): + + if not bartercast_data['data'][permid].has_key('u') or \ + not bartercast_data['data'][permid].has_key('d'): + raise RuntimeError, "bartercast: datafield doesn't contain 'u' or 'd' keys" + return False + + return True + + ################################ + def handleBarterCastMsg(self, sender_permid, data, totals = None): + """ process bartercast data in database """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bartercast: Processing bartercast msg from: ", self.bartercastdb.getName(sender_permid) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "totals: ", totals + + + permids = 
data.keys() + changed = False + + # 1. Add any unknown peers to the database in a single transaction + self.bartercastdb.addPeersBatch(permids) + + + # 2. Add totals to database (without committing) + if totals != None and len(totals) == 2: + up = int(totals[0]) + down = int(totals[1]) + self.bartercastdb.updateULDL((sender_permid, sender_permid), up, down, commit = False) + changed = True + + # 3. Add all the received records to the database in a single transaction + datalen = len(permids) + for i in range(0,datalen): + permid = permids[i] + + data_to = data[permid]['u'] + data_from = data[permid]['d'] + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bartercast: data: (%s, %s) up = %d down = %d" % (self.bartercastdb.getName(sender_permid), self.bartercastdb.getName(permid),\ + data_to, data_from) + + # update database sender->permid and permid->sender + #commit = (i == datalen-1) + self.bartercastdb.updateULDL((sender_permid, permid), data_to, data_from, commit = False) + changed = True + + if changed: + self.bartercastdb.commit() + + + # ARNODB: + # get rid of index on DB? See where used + + + ################################ + def replyBarterCast(self, target_permid, selversion): + """ Reply a bartercast message """ + + if DEBUG: + st = time() + self.createAndSendBarterCastMessage(target_permid, selversion) + et = time() + diff = et - st + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bartercast: CREATE took %.4f" % diff + else: + self.createAndSendBarterCastMessage(target_permid, selversion) + + + # Blocking functions (similar to BuddyCast): + + ################################ + def isBlocked(self, peer_permid, block_list): + if peer_permid not in block_list: + return False + unblock_time = block_list[peer_permid] + if now() >= unblock_time - self.network_delay: # 30 seconds for network delay + block_list.pop(peer_permid) + return False + return True + + + + ################################ + def blockPeer(self, peer_permid, block_list, block_interval=None): + """ Add a peer to a block list """ + + if block_interval is None: + block_interval = self.block_interval + unblock_time = now() + block_interval + block_list[peer_permid] = unblock_time + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'bartercast: %s Blocked peer %s'% (ctime(now()), self.bartercastdb.getName(peer_permid)) + + + ################################ + def logMsg(self, msg_data, msg_permid, in_or_out, logfile): + + if in_or_out == 'in': + permid_from = permid_for_user(msg_permid) + + elif in_or_out == 'out': + permid_from = 'LOCAL' + + else: + return + + timestamp = now() + + log = open(logfile, 'a') + string = '%.1f %s %s' % (timestamp, in_or_out, permid_for_user(msg_permid)) + log.write(string + '\n') + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", string + + data = msg_data.get('data', []) + + for permid in data: + u = data[permid]['u'] + d = data[permid]['d'] + + string = '%.1f %s %s %d %d' % (timestamp, permid_from, permid_for_user(permid), u, d) + log.write(string + '\n') + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", string + + totals = msg_data.get('totals', None) + + if totals != None: + (u, d) = totals + + string = '%.1f TOT %s %d %d' % (timestamp, permid_from, u, d) + log.write(string + '\n') + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", string + + + log.close() diff --git a/tribler-mod/Tribler/Core/BuddyCast/buddycast.py 
b/tribler-mod/Tribler/Core/BuddyCast/buddycast.py new file mode 100644 index 0000000..2d5c945 --- /dev/null +++ b/tribler-mod/Tribler/Core/BuddyCast/buddycast.py @@ -0,0 +1,2482 @@ +from time import localtime, strftime +# Written by Jie Yang +# see LICENSE.txt for license information +# + +__fool_epydoc = 481 +""" + BuddyCast2 epidemic protocol for p2p recommendation and semantic clustering + +Algorithm in LaTeX format: + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%%% algorithm of the active peer %%% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +\begin{figure*}[ht] +\begin{center} +\begin{algorithmic}[1] + +\LOOP +\STATE wait($\Delta T$ time units) \COMMENT{15 seconds in current implementation} +\STATE remove any peer from $B_S$ and $B_R$ if its block time was expired. +\STATE keep connection with all peers in $C_T$, $C_R$ and $C_U$ +\IF{$idle\_loops > 0$} + \STATE $idle\_loops \leftarrow idle\_loops - 1$ \COMMENT{skip this loop for rate control} +\ELSE + \IF{$C_C$ is empty} + \STATE $C_C \leftarrow$ select 5 peers recently seen from Mega Cache + \ENDIF + \STATE $Q \leftarrow$ select a most similar taste buddy or a random online peer from $C_C$ + \STATE connectPeer($Q$) + \STATE block($Q$, $B_S$, 4hours) + \STATE remove $Q$ from $C_C$ + \IF{$Q$ is connected successfully} + \STATE buddycast\_msg\_send $\leftarrow$ \textbf{createBuddycastMsg}() + \STATE send buddycast\_msg\_send to $Q$ + \STATE receive buddycast\_msg\_recv from $Q$ + \STATE $C_C \leftarrow$ fillPeers(buddycast\_msg\_recv) + \STATE \textbf{addConnectedPeer}($Q$) \COMMENT{add $Q$ into $C_T$, $C_R$ or $C_U$ according to its similarity} + \STATE blockPeer($Q$, $B_R$, 4hours) + \ENDIF + +\ENDIF +\ENDLOOP + +\end{algorithmic} +\caption{The protocol of an active peer.} +\label{Fig:buddycast_algorithm} +\end{center} +\end{figure*} + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%%% algorithm of the passive peer %%% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +\begin{figure*}[ht] +\begin{center} +\begin{algorithmic}[1] + +\LOOP + \STATE receive buddycast\_msg\_recv from $Q$ + \STATE $C_C \leftarrow$ fillPeers(buddycast\_msg\_recv) + \STATE \textbf{addConnectedPeer}($Q$) + \STATE blockPeer($Q$, $B_R$, 4hours) + \STATE buddycast\_msg\_send $\leftarrow$ \textbf{createBuddycastMsg}() + \STATE send buddycast\_msg\_send to $Q$ + \STATE blockPeer($Q$, $B_S$, 4hours) + \STATE remove $Q$ from $C_C$ + \STATE $idle\_loops \leftarrow idle\_loops + 1$ \COMMENT{idle for a loop for + rate control} +\ENDLOOP + +\end{algorithmic} +\caption{The protocol of an passive peer.} +\label{Fig:buddycast_algorithm} +\end{center} +\end{figure*} + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%% algorithm of creating a buddycast message %% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +\begin{figure*}[ht] +\begin{center} +function \textbf{createBuddycastMsg}() +\begin{algorithmic} + \STATE $My\_Preferences \leftarrow$ the most recently 50 preferences of the active peer + \STATE $Taste\_Buddies \leftarrow$ all peers from $C_T$ + \STATE $Random\_Peers \leftarrow$ all peers from $C_R$ + \STATE $buddycast\_msg\_send \leftarrow$ create an empty message + \STATE $buddycast\_msg\_send$ attaches the active peer's address and $My\_Preferences$ + \STATE $buddycast\_msg\_send$ attaches addresses of $Taste\_Buddies$ + \STATE $buddycast\_msg\_send$ attaches at most 10 preferences of each peer in $Taste\_Buddies$ + \STATE $buddycast\_msg\_send$ attaches addresses of $Random\_Peers$ +\end{algorithmic} +\caption{The function of creating a buddycast message} 
+\label{Fig:buddycast_createBuddycastMsg} +\end{center} +\end{figure*} + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%% algorithm of adding a peer into C_T or C_R or C_U %% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +\begin{figure*}[ht] +\begin{center} +function \textbf{addConnectedPeer}($Q$) +\begin{algorithmic} + \IF{$Q$ is connectable} + \STATE $Sim_Q \leftarrow$ getSimilarity($Q$) \COMMENT{similarity between $Q$ and the active peer} + \STATE $Min_{Sim} \leftarrow$ similarity of the least similar peer in $C_T$ + \IF{$Sim_Q \geq Min_{Sim}$ \textbf{or} ($C_T$ is not full \textbf{and} $Sim_Q>0$)} + \STATE $C_T \leftarrow C_T + Q$ + \STATE move the least similar peer to $C_R$ if $C_T$ overloads + \ELSE + \STATE $C_R \leftarrow C_R + Q$ + \STATE remove the oldest peer to $C_R$ if $C_R$ overloads + \ENDIF + \ELSE + \STATE $C_U \leftarrow C_U + Q$ + \ENDIF + +\end{algorithmic} +\caption{The function of adding a peer into $C_T$ or $C_R$} +\label{Fig:buddycast_addConnectedPeer} +\end{center} +\end{figure*} + +""" +""" + +BuddyCast 3: + No preferences for taste buddies; + don't accept preferences of taste buddies from incoming message either + 50 recent my prefs + 50 recent collected torrents + 50 ratings + +Torrent info + preferences: Recently downloaded torrents by the user {'seeders','leechers','check time'} + collected torrents: Recently collected torrents (include Subscribed torrents) + #ratings: Recently rated torrents and their ratings (negative rating means this torrent was deleted) +Taste Buddies + permid + ip + port + similarity +Random Peers + permid + ip + port + similarity + +""" + +import sys +from random import sample, randint, shuffle +from time import time, gmtime, strftime +from traceback import print_exc +from sets import Set +from array import array +from bisect import insort +from copy import deepcopy +import gc +import socket + +from Tribler.Core.simpledefs import BCCOLPOLICY_SIMPLE +from Tribler.Core.BitTornado.bencode import bencode, bdecode +from Tribler.Core.BitTornado.BT1.MessageID import BUDDYCAST, BARTERCAST, KEEP_ALIVE, MODERATIONCAST_HAVE, MODERATIONCAST_REQUEST, MODERATIONCAST_REPLY, VOTECAST +from Tribler.Core.Utilities.utilities import show_permid_short, show_permid,validPermid,validIP,validPort,validInfohash,readableBuddyCastMsg, hostname_or_ip2ip +from Tribler.Core.Utilities.unicode import dunno2unicode +from Tribler.Core.simpledefs import NTFY_ACT_MEET, NTFY_ACT_RECOMMEND, NTFY_MYPREFERENCES, NTFY_INSERT, NTFY_DELETE +from Tribler.Core.NATFirewall.DialbackMsgHandler import DialbackMsgHandler +from Tribler.Core.Overlay.SecureOverlay import OLPROTO_VER_SECOND, OLPROTO_VER_THIRD, OLPROTO_VER_FOURTH, OLPROTO_VER_SIXTH, OLPROTO_VER_EIGHTH +from Tribler.Core.CacheDB.sqlitecachedb import bin2str, str2bin +from similarity import P2PSimLM +from TorrentCollecting import SimpleTorrentCollecting #, TiT4TaTTorrentCollecting +from Tribler.Core.Statistics.Logger import OverlayLogger +from Tribler.Core.Statistics.Crawler import Crawler + +from threading import currentThread + +from bartercast import BarterCastCore +from moderationcast import ModerationCastCore +from votecast import VoteCastCore + +DEBUG = False # for errors +debug = False # for status +debugnic = True # for my temporary outputs +unblock = 0 + +# Nicolas: 10 KByte -- I set this to 1024 KByte. +# The term_id->term dictionary can become almost arbitrarily long +# would be strange if buddycast stopped working once a user has done a lot of searches... 
+# +# Arno, 2009-03-06: Too big: we don't want every peer to send out 1 MB messages +# every 15 secs. Set to 100K +# +# Nicolas, 2009-03-06: Ok this was really old. 10k in fact is enough with the new constraints on clicklog data +MAX_BUDDYCAST_LENGTH = 10*1024 + +REMOTE_SEARCH_PEER_NTORRENTS_THRESHOLD = 100 # speedup finding >=4.1 peers in this version + +# used for datahandler.peers +PEER_SIM_POS = 0 +PEER_LASTSEEN_POS = 1 +PEER_PREF_POS = 2 + +def now(): + return int(time()) + +def ctime(t): + return strftime("%Y-%m-%d.%H:%M:%S", gmtime(t)) + +def validBuddyCastData(prefxchg, nmyprefs=50, nbuddies=10, npeers=10, nbuddyprefs=10): + + # Arno: TODO: make check version dependent + + def validPeer(peer): + validPermid(peer['permid']) + validIP(peer['ip']) + validPort(peer['port']) + + def validPref(pref, num): + if not (isinstance(prefxchg, list) or isinstance(prefxchg, dict)): + raise RuntimeError, "bc: invalid pref type " + str(type(prefxchg)) + if num > 0 and len(pref) > num: + raise RuntimeError, "bc: length of pref exceeds " + str((len(pref), num)) + for p in pref: + validInfohash(p) + + validPeer(prefxchg) + if not (isinstance(prefxchg['name'], str) or isinstance(prefxchg['name'], unicode)): + raise RuntimeError, "bc: invalid name type " + str(type(prefxchg['name'])) + + # Nicolas: create a validity check that doesn't have to know about the version + # just found out this function is not called anymore. well if it gets called one day, it should handle both + prefs = prefxchg['preferences'] + if prefs: + if type(prefs[0])==list: + # list of lists: this is the new wire protocol. entry 0 of each list contains infohash + validPref([pref[0] for pref in prefs], nmyprefs) + else: + # old style + validPref(prefs, nmyprefs) + + if len(prefxchg['taste buddies']) > nbuddies: + raise RuntimeError, "bc: length of prefxchg['taste buddies'] exceeds " + \ + str(len(prefxchg['taste buddies'])) + for b in prefxchg['taste buddies']: + validPeer(b) + #validPref(b['preferences'], nbuddyprefs) # not used from version 4 + + if len(prefxchg['random peers']) > npeers: + raise RuntimeError, "bc: length of random peers " + \ + str(len(prefxchg['random peers'])) + for b in prefxchg['random peers']: + validPeer(b) + + # ARNOCOMMENT: missing test for 'collected torrents' field + + return True + + +class BuddyCastFactory: + __single = None + + def __init__(self, superpeer=False, log=''): + if BuddyCastFactory.__single: + raise RuntimeError, "BuddyCastFactory is singleton" + BuddyCastFactory.__single = self + self.registered = False + self.buddycast_core = None + self.buddycast_interval = 15 # MOST IMPORTANT PARAMETER + self.superpeer = superpeer + self.log = log + self.running = False + self.data_handler = None + self.started = False # did call do_buddycast() at least once + self.max_peers = 2500 # was 2500 + self.ranonce = False # Nicolas: had the impression that BuddyCast can be tested more reliably if I wait until it has gone through buddycast_core.work() successfully once + if self.superpeer: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bc: Starting in SuperPeer mode" + + def getInstance(*args, **kw): + if BuddyCastFactory.__single is None: + BuddyCastFactory(*args, **kw) + return BuddyCastFactory.__single + getInstance = staticmethod(getInstance) + + def register(self, overlay_bridge, launchmany, errorfunc, + metadata_handler, torrent_collecting_solution, running, + max_peers=2500): + if self.registered: + return + self.overlay_bridge = overlay_bridge + self.launchmany = launchmany + 
self.metadata_handler = metadata_handler + self.torrent_collecting_solution = torrent_collecting_solution + self.errorfunc = errorfunc + + # BuddyCast is always started, but only active when this var is set. + self.running = bool(running) + self.max_peers = max_peers + + self.registered = True + + def register2(self): + # Arno: only start using overlay thread when normal init is finished to + # prevent concurrencty on singletons + if self.registered: + if debug: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: Register BuddyCast", currentThread().getName() + self.overlay_bridge.add_task(self.olthread_register, 0) + + def olthread_register(self, start=True): + if debug: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: OlThread Register", currentThread().getName() + + self.data_handler = DataHandler(self.launchmany, self.overlay_bridge, max_num_peers=self.max_peers) + + # ARNOCOMMENT: get rid of this dnsindb / get_dns_from_peerdb abuse off SecureOverlay + self.bartercast_core = BarterCastCore(self.data_handler, self.overlay_bridge, self.log, self.launchmany.secure_overlay.get_dns_from_peerdb) + + self.moderationcast_core = ModerationCastCore(self.data_handler, self.overlay_bridge, self.launchmany.session, self.getCurrrentInterval, self.log, self.launchmany.secure_overlay.get_dns_from_peerdb) + self.votecast_core = VoteCastCore(self.data_handler, self.overlay_bridge, self.launchmany.session, self.getCurrrentInterval, self.log, self.launchmany.secure_overlay.get_dns_from_peerdb) + + self.buddycast_core = BuddyCastCore(self.overlay_bridge, self.launchmany, + self.data_handler, self.buddycast_interval, self.superpeer, + self.metadata_handler, self.torrent_collecting_solution, self.bartercast_core, self.moderationcast_core, self.votecast_core, self.log) + + self.data_handler.register_buddycast_core(self.buddycast_core) + + self.moderationcast_core.showAllModerations() + self.votecast_core.showAllVotes() + + if start: + self.start_time = now() + # Arno, 2007-02-28: BC is now started self.buddycast_interval after client + # startup. This is assumed to give enough time for UPnP to open the firewall + # if any. So when you change this time, make sure it allows for UPnP to + # do its thing, or add explicit coordination between UPnP and BC. + # See BitTornado/launchmany.py + self.overlay_bridge.add_task(self.data_handler.postInit, 0) + self.overlay_bridge.add_task(self.doBuddyCast, 0.1) + # Arno: HYPOTHESIS: if set to small, we'll only ask superpeers at clean start. + if self.data_handler.torrent_db.size() > 0: + waitt = 1.0 + else: + waitt = 3.0 + self.overlay_bridge.add_task(self.data_handler.initRemoteSearchPeers,waitt) + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "BuddyCast starts up",waitt + + def doBuddyCast(self): + if not self.running: + return + + if debug: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bc: doBuddyCast!", currentThread().getName() + + # Reschedule ourselves for next round + buddycast_interval = self.getCurrrentInterval() + self.overlay_bridge.add_task(self.doBuddyCast, buddycast_interval) + if not self.started: + self.started = True + + # Do our thang. 
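        # One call to work() is one round: expired entries are dropped from the
        # send-block list, connections are refreshed every check_period seconds,
        # the connection-candidate list is re-bootstrapped when it runs dry, and
        # a target (alternately the most similar taste buddy or a random
        # candidate) is picked for the next exchange; see BuddyCastCore.work().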
+ self.buddycast_core.work() + self.ranonce = True # Nicolas: now we can start testing and stuff works better + + def pauseBuddyCast(self): + self.running = False + + def restartBuddyCast(self): + if self.registered and not self.running: + self.running = True + self.doBuddyCast() + + def getCurrrentInterval(self): + """ + install [#(peers - superpeers)==0] & start < 2min: interval = 1 + start < 30min: interval = 5 + start > 24hour: interval = 60 + other: interval = 15 + """ + + #return 3 ### DEBUG, remove it before release!! + + past = now() - self.start_time + if past < 2*60: + if self.data_handler.get_npeers() < 20: + interval = 2 + else: + interval = 5 + elif past < 30*60: + interval = 5 + elif past > 24*60*60: + interval = 60 + else: + interval = 15 + return interval + + + def handleMessage(self, permid, selversion, message): + + if not self.registered or not self.running: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: handleMessage got message, but we're not enabled or running" + return False + + t = message[0] + + if t == BUDDYCAST: + return self.gotBuddyCastMessage(message[1:], permid, selversion) + elif t == KEEP_ALIVE: + if message[1:] == '': + return self.gotKeepAliveMessage(permid) + else: + return False + + elif t == MODERATIONCAST_HAVE: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Received moderationcast_have message" + if self.moderationcast_core != None: + return self.moderationcast_core.gotModerationCastHaveMessage(message[1:], permid, selversion) + + elif t == MODERATIONCAST_REQUEST: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Received moderation_request message" + if self.moderationcast_core != None: + return self.moderationcast_core.gotModerationCastRequestMessage(message[1:], permid, selversion) + + elif t == MODERATIONCAST_REPLY: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Received moderation_reply message" + if self.moderationcast_core != None: + return self.moderationcast_core.gotModerationCastReplyMessage(message[1:], permid, selversion) + + elif t == VOTECAST: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: Received votecast message" + if self.votecast_core != None: + return self.votecast_core.gotVoteCastMessage(message[1:], permid, selversion) + + + elif t == BARTERCAST: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: Received bartercast message" + if self.bartercast_core != None: + return self.bartercast_core.gotBarterCastMessage(message[1:], permid, selversion) + + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: wrong message to buddycast", ord(t), "Round", self.buddycast_core.round + return False + + def gotBuddyCastMessage(self, msg, permid, selversion): + if self.registered and self.running: + return self.buddycast_core.gotBuddyCastMessage(msg, permid, selversion) + else: + return False + + def gotKeepAliveMessage(self, permid): + if self.registered and self.running: + return self.buddycast_core.gotKeepAliveMessage(permid) + else: + return False + + def handleConnection(self,exc,permid,selversion,locally_initiated): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: handleConnection",exc,show_permid_short(permid),selversion,locally_initiated,currentThread().getName() + + if not self.registered: + return + + if DEBUG: + nconn = 0 + conns = 
self.buddycast_core.connections + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "\nbc: conn in buddycast", len(conns) + for peer_permid in conns: + _permid = show_permid_short(peer_permid) + nconn += 1 + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: ", nconn, _permid, conns[peer_permid] + + if self.running or exc is not None: # if not running, only close connection + self.buddycast_core.handleConnection(exc,permid,selversion,locally_initiated) + + def addMyPref(self, torrent): + """ Called by OverlayThread (as should be everything) """ + if self.registered: + self.data_handler.addMyPref(torrent) + + def delMyPref(self, torrent): + if self.registered: + self.data_handler.delMyPref(torrent) + + +class BuddyCastCore: + + TESTASSERVER = False # for unit testing + + def __init__(self, overlay_bridge, launchmany, data_handler, + buddycast_interval, superpeer, + metadata_handler, torrent_collecting_solution, bartercast_core, moderationcast_core, votecast_core, log=None): + self.overlay_bridge = overlay_bridge + self.launchmany = launchmany + self.data_handler = data_handler + self.buddycast_interval = buddycast_interval + self.superpeer = superpeer + #print_stack() + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'debug buddycast' + #superpeer # change it for superpeers + #self.superpeer_set = Set(self.data_handler.getSuperPeers()) + self.log = log + self.dialback = DialbackMsgHandler.getInstance() + + self.ip = self.data_handler.getMyIp() + self.port = self.data_handler.getMyPort() + self.permid = self.data_handler.getMyPermid() + # Jie: we must trainsfer my name to unicode here before sent out + # because the receiver might not be able to transfer the name to unicode, + # but the receiver might be able to display the unicode str correctly + # in that he installed the character set and therefore unicode can map it + self.name = dunno2unicode(self.data_handler.getMyName()) # encode it to unicode + + # --- parameters --- + #self.timeout = 5*60 + self.block_interval = 4*60*60 # block interval for a peer to buddycast + self.short_block_interval = 4*60*60 # block interval if failed to connect the peer + self.num_myprefs = 50 # num of my preferences in buddycast msg + self.max_collected_torrents = 50 # num of recently collected torrents (from BuddyCast 3) + self.num_tbs = 10 # num of taste buddies in buddycast msg + self.num_tb_prefs = 10 # num of taset buddy's preferences in buddycast msg + self.num_rps = 10 # num of random peers in buddycast msg + # time to check connection and send keep alive message + #self.check_connection_round = max(1, 120/self.buddycast_interval) + self.max_conn_cand = 100 # max number of connection candidates + self.max_conn_tb = 10 # max number of connectable taste buddies + self.max_conn_rp = 10 # max number of connectable random peers + self.max_conn_up = 10 # max number of unconnectable peers + self.bootstrap_num = 10 # max number of peers to fill when bootstrapping + self.bootstrap_interval = 5*60 # 5 min + self.network_delay = self.buddycast_interval*2 # 30 seconds + self.check_period = 120 # how many seconds to send keep alive message and check updates + self.num_search_cand = 10 # max number of remote search peer candidates + self.num_remote_peers_in_msg = 2 # number of remote search peers in msg + + # --- memory --- + self.send_block_list = {} # permid:unlock_time + self.recv_block_list = {} + self.connections = {} # permid: overlay_version + self.connected_taste_buddies = [] # [permid] + 
self.connected_random_peers = [] # [permid] + self.connected_connectable_peers = {} # permid: {'connect_time', 'ip', 'port', 'similarity', 'oversion', 'num_torrents'} + self.connected_unconnectable_peers = {} # permid: connect_time + self.connection_candidates = {} # permid: last_seen + self.remote_search_peer_candidates = [] # [last_seen,permid], sorted, the first one in the list is the oldest one + + # --- stats --- + self.target_type = 0 + self.next_initiate = 0 + self.round = 0 # every call to work() is a round + self.bootstrapped = False # bootstrap once every 1 hours + self.bootstrap_time = 0 # number of times to bootstrap + self.total_bootstrapped_time = 0 + self.last_bootstrapped = now() # bootstrap time of the last time + self.start_time = now() + self.last_check_time = 0 + + # --- dependent modules --- + self.metadata_handler = metadata_handler + self.torrent_collecting = None + if torrent_collecting_solution == BCCOLPOLICY_SIMPLE: + self.torrent_collecting = SimpleTorrentCollecting(metadata_handler, data_handler) + + # -- misc --- + self.dnsindb = launchmany.secure_overlay.get_dns_from_peerdb + if self.log: + self.overlay_log = OverlayLogger.getInstance(self.log) + + # Bartercast + self.bartercast_core = bartercast_core + #self.bartercast_core.buddycast_core = self + + self.moderationcast_core = moderationcast_core + self.votecast_core = votecast_core + + # Crawler + crawler = Crawler.get_instance() + self.crawler = crawler.am_crawler() + + + def get_peer_info(self, target_permid, include_permid=True): + if not target_permid: + return ' None ' + dns = self.dnsindb(target_permid) + if not dns: + return ' None ' + try: + ip = dns[0] + port = dns[1] + sim = self.data_handler.getPeerSim(target_permid) + if include_permid: + s_pid = show_permid_short(target_permid) + return ' %s %s:%s %.3f ' % (s_pid, ip, port, sim) + else: + return ' %s:%s %.3f' % (ip, port, sim) + except: + return ' ' + repr(dns) + ' ' + + def work(self): + """ + The engineer of buddycast empidemic protocol. + In every round, it selects a target and initates a buddycast exchange, + or idels due to replying messages in the last rounds. 
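        A positive next_initiate counter (built up while replying to incoming
        messages) makes the peer skip initiating an exchange for that many
        rounds; this corresponds to the idle_loops rate control in the
        algorithm description at the top of this module.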
+ """ + + try: + self.round += 1 + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'bc: ************ working buddycast', currentThread().getName() + self.print_debug_info('Active', 2) + if self.log: + nPeer, nPref, nCc, nBs, nBr, nSO, nCo, nCt, nCr, nCu = self.get_stats() + self.overlay_log('BUCA_STA', self.round, (nPeer,nPref,nCc), (nBs,nBr), (nSO,nCo), (nCt,nCr,nCu)) + + self.print_debug_info('Active', 3) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'bc: ************ working buddycast 2' + self.updateSendBlockList() + + _now = now() + if _now - self.last_check_time >= self.check_period: + self.print_debug_info('Active', 4) + self.keepConnections() + #self.data_handler.checkUpdate() + gc.collect() + self.last_check_time = _now + + if self.next_initiate > 0: + # It replied some meesages in the last rounds, so it doesn't initiate Buddycast + self.print_debug_info('Active', 6) + self.next_initiate -= 1 + else: + if len(self.connection_candidates) == 0: + self.booted = self._bootstrap(self.bootstrap_num) + self.print_debug_info('Active', 9) + + # It didn't reply any message in the last rounds, so it can initiate BuddyCast + if len(self.connection_candidates) > 0: + r, target_permid = self.selectTarget() + self.print_debug_info('Active', 11, target_permid, r=r) + self.startBuddyCast(target_permid) + + if debug: + print + except: + print_exc() + + # -------------- bootstrap -------------- # + def _bootstrap(self, number): + """ Select a number of peers from recent online peers which are not + in send_block_list to fill connection_candidates. + When to call this function is an issue to study. + """ + + _now = now() + # bootstrapped recently, so wait for a while + if self.bootstrapped and _now - self.last_bootstrapped < self.bootstrap_interval: + self.bootstrap_time = 0 # let it read the most recent peers next time + return -1 + + #ARNODB: self.data_handler.peers is a map from peer_id to something, i.e., not + # permid. 
send_block_list is a list of permids + send_block_list_ids = [] + for permid in self.send_block_list: + peer_id = self.data_handler.getPeerID(permid) + send_block_list_ids.append(peer_id) + + target_cands_ids = Set(self.data_handler.peers) - Set(send_block_list_ids) + recent_peers_ids = self.selectRecentPeers(target_cands_ids, number, + startfrom=self.bootstrap_time*number) + + for peer_id in recent_peers_ids: + last_seen = self.data_handler.getPeerIDLastSeen(peer_id) + self.addConnCandidate(self.data_handler.getPeerPermid(peer_id), last_seen) + self.limitConnCandidate() + + self.bootstrap_time += 1 + self.total_bootstrapped_time += 1 + self.last_bootstrapped = _now + if len(self.connection_candidates) < self.bootstrap_num: + self.bootstrapped = True # don't reboot until self.bootstrap_interval later + else: + self.bootstrapped = False # reset it to allow read more peers if needed + return 1 + + def selectRecentPeers(self, cand_ids, number, startfrom=0): + """ select a number of most recently online peers + @return a list of peer_ids + """ + + if not cand_ids: + return [] + peerids = [] + last_seens = [] + for peer_id in cand_ids: + peerids.append(peer_id) + last_seens.append(self.data_handler.getPeerIDLastSeen(peer_id)) + npeers = len(peerids) + if npeers == 0: + return [] + aux = zip(last_seens, peerids) + aux.sort() + aux.reverse() + peers = [] + i = 0 + + # roll back when startfrom is bigger than npeers + startfrom = startfrom % npeers + endat = startfrom + number + for _, peerid in aux[startfrom:endat]: + peers.append(peerid) + return peers + + def addConnCandidate(self, peer_permid, last_seen): + """ add a peer to connection_candidates, and only keep a number of + the most fresh peers inside. + """ + + if self.isBlocked(peer_permid, self.send_block_list) or peer_permid == self.permid: + return + self.connection_candidates[peer_permid] = last_seen + + def limitConnCandidate(self): + if len(self.connection_candidates) > self.max_conn_cand: + tmp_list = zip(self.connection_candidates.values(),self.connection_candidates.keys()) + tmp_list.sort() + while len(self.connection_candidates) > self.max_conn_cand: + ls,peer_permid = tmp_list.pop(0) + self.removeConnCandidate(peer_permid) + + def removeConnCandidate(self, peer_permid): + if peer_permid in self.connection_candidates: + self.connection_candidates.pop(peer_permid) + + # -------------- routines in each round -------------- # + def updateSendBlockList(self): + """ Remove expired peers in send block list """ + + _now = now() + for p in self.send_block_list.keys(): # don't call isBlocked() for performance reason + if _now >= self.send_block_list[p] - self.network_delay: + if debug: + print "bc: *** unblock peer in send block list" + self.get_peer_info(p) + \ + "expiration:", ctime(self.send_block_list[p]) + self.send_block_list.pop(p) + + def keepConnections(self): + """ Close expired connections, and extend the expiration of + peers in connection lists + """ + + timeout_list = [] + for peer_permid in self.connections: + # we don't close connection here, because if no incoming msg, + # sockethandler will close connection in 5-6 min. + + if (peer_permid in self.connected_connectable_peers or \ + peer_permid in self.connected_unconnectable_peers): + timeout_list.append(peer_permid) + + if self.crawler: + # since we are crawling, we are not interested in + # retaining connections for a long time. 
+ for peer_permid in timeout_list: + self.closeConnection(peer_permid, "a crawler does not retain connections for long") + else: + for peer_permid in timeout_list: + self.sendKeepAliveMsg(peer_permid) + + def sendKeepAliveMsg(self, peer_permid): + """ Send keep alive message to a peer, and extend its expiration """ + + if self.isConnected(peer_permid): + overlay_protocol_version = self.connections[peer_permid] + if overlay_protocol_version >= OLPROTO_VER_THIRD: + # From this version, support KEEP_ALIVE message in secure overlay + keepalive_msg = '' + self.overlay_bridge.send(peer_permid, KEEP_ALIVE+keepalive_msg, + self.keepaliveSendCallback) + if debug: + print "*** Send keep alive to peer", self.get_peer_info(peer_permid), \ + "overlay version", overlay_protocol_version + + def isConnected(self, peer_permid): + return peer_permid in self.connections + + def keepaliveSendCallback(self, exc, peer_permid, other=0): + if exc is None: + pass + else: + if debug: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: error - send keep alive msg", exc, \ + self.get_peer_info(peer_permid), "Round", self.round + self.closeConnection(peer_permid, 'keepalive:'+str(exc)) + + def gotKeepAliveMessage(self, peer_permid): + if self.isConnected(peer_permid): + if debug: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: Got keep alive from", self.get_peer_info(peer_permid) + if self.crawler: + # since we are crawling, we are not interested in + # retaining connections for a long time. + if debug: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: Got keep alive from", self.get_peer_info(peer_permid), "closing connection because we are a crawler" + return False + return True + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: error - got keep alive from a not connected peer. Round", \ + self.round + return False + + # -------------- initiate buddycast, active thread -------------- # + # ------ select buddycast target ------ # + def selectTarget(self): + """ select a most similar taste buddy or a most likely online random peer + from connection candidates list by 50/50 chance to initate buddycast exchange. 
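        In the code below the choice is a strict alternation rather than a coin
        flip: self.target_type flips between 0 and 1 every round, so taste-buddy
        and random-peer targets are tried in turn.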
+ """ + + def selectTBTarget(): + # Select the most similar taste buddy + max_sim = (-1, None) + for permid in self.connection_candidates: + peer_id = self.data_handler.getPeerID(permid) + if peer_id: + sim = self.data_handler.getPeerSim(permid) + max_sim = max(max_sim, (sim, permid)) + selected_permid = max_sim[1] + if selected_permid is None: + return None + else: + return selected_permid + + def selectRPTarget(): + # Randomly select a random peer + selected_permid = None + while len(self.connection_candidates) > 0: + selected_permid = sample(self.connection_candidates, 1)[0] + selected_peer_id = self.data_handler.getPeerID(selected_permid) + if selected_peer_id is None: + self.removeConnCandidate(selected_permid) + selected_permid = None + elif selected_peer_id: + break + + return selected_permid + + self.target_type = 1 - self.target_type + if self.target_type == 0: # select a taste buddy + target_permid = selectTBTarget() + else: # select a random peer + target_permid = selectRPTarget() + + return self.target_type, target_permid + + # ------ start buddycast exchange ------ # + def startBuddyCast(self, target_permid): + """ Connect to a peer, create a buddycast message and send it """ + + if not target_permid or target_permid == self.permid: + return + + if not self.isBlocked(target_permid, self.send_block_list): + if debug: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'bc: connect a peer', show_permid_short(target_permid), currentThread().getName() + self.overlay_bridge.connect(target_permid, self.buddycastConnectCallback) + + self.print_debug_info('Active', 12, target_permid) + if self.log: + dns = self.dnsindb(target_permid) + if dns: + ip,port = dns + self.overlay_log('CONN_TRY', ip, port, show_permid(target_permid)) + + # always block the target for a while not matter succeeded or not + #self.blockPeer(target_permid, self.send_block_list, self.short_block_interval) + self.print_debug_info('Active', 13, target_permid) + + # remove it from candidates no matter if it has been connected + self.removeConnCandidate(target_permid) + self.print_debug_info('Active', 14, target_permid) + + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'buddycast: peer', self.get_peer_info(target_permid), \ + 'is blocked while starting buddycast to it.', "Round", self.round + + def buddycastConnectCallback(self, exc, dns, target_permid, selversion): + if exc is None: + ## Create message depending on selected protocol version + try: + if not self.isConnected(target_permid): + if debug: + raise RuntimeError, 'buddycast: not connected while calling connect_callback' + return + + self.print_debug_info('Active', 15, target_permid, selversion) + + self.createAndSendBuddyCastMessage(target_permid, selversion, active=True) + + except: + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: error in reply buddycast msg",\ + exc, dns, show_permid_short(target_permid), selversion, "Round", self.round, + + else: + if debug: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: warning - connecting to",\ + show_permid_short(target_permid),exc,dns, ctime(now()) + + def createAndSendBuddyCastMessage(self, target_permid, selversion, active): + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bc: SENDING BC to",show_permid_short(target_permid) + + buddycast_data = self.createBuddyCastMessage(target_permid, selversion) + if debug: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", 
localtime())," ", "bc: createAndSendBuddyCastMessage", len(buddycast_data), currentThread().getName() + try: + buddycast_data['permid'] = self.permid + validBuddyCastData(buddycast_data, self.num_myprefs, + self.num_tbs, self.num_rps, self.num_tb_prefs) + buddycast_data.pop('permid') + buddycast_msg = bencode(buddycast_data) + except: + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "error buddycast_data:", buddycast_data + return + + if active: + self.print_debug_info('Active', 16, target_permid) + else: + self.print_debug_info('Passive', 6, target_permid) + + self.overlay_bridge.send(target_permid, BUDDYCAST+buddycast_msg, self.buddycastSendCallback) + self.blockPeer(target_permid, self.send_block_list, self.short_block_interval) + self.removeConnCandidate(target_permid) + + if debug: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '****************--------------'*2 + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'sent buddycast message to', show_permid_short(target_permid), len(buddycast_msg) + + if active: + self.print_debug_info('Active', 17, target_permid) + else: + self.print_debug_info('Passive', 7, target_permid) + + # Bartercast + if self.bartercast_core != None and active: + self.bartercast_core.createAndSendBarterCastMessage(target_permid, selversion, active) + + # As of March 5, 2009, ModerationCastHave Messages and VoteCast Messages + # are sent in lock-step with BuddyCast. (only if there are any + # moderations and/or votes to send.) + # + if self.moderationcast_core != None: + self.moderationcast_core.createAndSendModerationCastHaveMessage(target_permid, selversion) + + if self.votecast_core != None: + self.votecast_core.createAndSendVoteCastMessage(target_permid, selversion) + + if self.log: + dns = self.dnsindb(target_permid) + if dns: + ip,port = dns + if active: + MSG_ID = 'ACTIVE_BC' + else: + MSG_ID = 'PASSIVE_BC' + msg = repr(readableBuddyCastMsg(buddycast_data,selversion)) # from utilities + self.overlay_log('SEND_MSG', ip, port, show_permid(target_permid), selversion, MSG_ID, msg) + return buddycast_data # Nicolas: for testing + + def createBuddyCastMessage(self, target_permid, selversion, target_ip=None, target_port=None): + """ Create a buddycast message for a target peer on selected protocol version """ + # Nicolas: added manual target_ip, target_port parameters for testing + try: + target_ip,target_port = self.dnsindb(target_permid) + except: + if not self.TESTASSERVER: + raise # allow manual ips during unit-testing if dnsindb fails + if not target_ip or not target_port: + return {} + my_pref = self.data_handler.getMyLivePreferences(selversion, self.num_myprefs) #[pref] + taste_buddies = self.getTasteBuddies(self.num_tbs, self.num_tb_prefs, target_permid, target_ip, target_port, selversion) + random_peers = self.getRandomPeers(self.num_rps, target_permid, target_ip, target_port, selversion) #{peer:last_seen} + buddycast_data = {'ip':self.ip, + 'port':self.port, + 'name':self.name, + 'preferences':my_pref, + 'taste buddies':taste_buddies, + 'random peers':random_peers} + + if selversion >= OLPROTO_VER_THIRD: + # From this version, add 'connectable' entry in buddycast message + connectable = self.isConnectable() + buddycast_data['connectable'] = connectable + + if selversion >= OLPROTO_VER_FOURTH: + recent_collect = self.metadata_handler.getRecentlyCollectedTorrents(self.max_collected_torrents) + buddycast_data['collected torrents'] = recent_collect + + if selversion >= 
OLPROTO_VER_SIXTH: + npeers = self.data_handler.get_npeers() + ntorrents = self.data_handler.get_ntorrents() + nmyprefs = self.data_handler.get_nmyprefs() + buddycast_data['npeers'] = npeers + buddycast_data['nfiles'] = ntorrents + buddycast_data['ndls'] = nmyprefs + + + return buddycast_data + + def getTasteBuddies(self, ntbs, ntbprefs, target_permid, target_ip, target_port, selversion): + """ Randomly select a number of peers from connected_taste_buddies. """ + + if not self.connected_taste_buddies: + return [] + tb_list = self.connected_taste_buddies[:] + if target_permid in tb_list: + tb_list.remove(target_permid) + + peers = [] + for permid in tb_list: + # keys = ('ip', 'port', 'oversion', 'num_torrents') + peer = deepcopy(self.connected_connectable_peers[permid]) + if peer['ip'] == target_ip and peer['port'] == target_port: + continue + peer['similarity'] = self.data_handler.getPeerSim(permid) + peer['permid'] = permid + peers.append(peer) + +# peers = self.data_handler.getPeers(tb_list, ['permid', 'ip', 'port', 'similarity', 'oversion', 'num_torrents']) +# # filter peers with the same ip and port +# peers = filter(lambda p:p['ip']!=target_ip or int(p['port'])!=target_port, peers) +# +# for i in range(len(peers)): +# peers[i]['port'] = int(peers[i]['port']) + + # In overlay version 2, buddycast has 'age' field + if selversion <= OLPROTO_VER_SECOND: + for i in range(len(peers)): + peers[i]['age'] = 0 + + # In overlay version 2 and 3, buddycast doesn't have similarity field, and taste buddy has preferences + if selversion <= OLPROTO_VER_THIRD: + for i in range(len(peers)): + peers[i].pop('similarity') + peers[i]['preferences'] = [] # don't support from now on + + # From overlay version 4, buddycast includes similarity for peers + if selversion >= OLPROTO_VER_FOURTH: + for i in range(len(peers)): + peers[i]['similarity'] = int(peers[i]['similarity']+0.5) # bencode doesn't accept float type + + + + # Every peer >= 6 in message attachs nfiles and oversion for remote search from version 6 + for i in range(len(peers)): + oversion = peers[i].pop('oversion') + nfiles = peers[i].pop('num_torrents') + if selversion >= OLPROTO_VER_SIXTH and oversion >= OLPROTO_VER_SIXTH and nfiles >= REMOTE_SEARCH_PEER_NTORRENTS_THRESHOLD: + peers[i]['oversion'] = oversion + # ascribe it to the inconsistent name of the same concept in msg and db + peers[i]['nfiles'] = nfiles + + return peers + + def getRandomPeers(self, nrps, target_permid, target_ip, target_port, selversion): + """ Randomly select a number of peers from connected_random_peers. 
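+ From OLPROTO_VER_SIXTH onwards the list is first topped up with a few (possibly offline) remote-search peers before sampling, as implemented below.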
""" + + if not self.connected_random_peers: + return [] + rp_list = self.connected_random_peers[:] + + # From version 6, two (might be offline) remote-search-peers must be included in msg + if selversion >= OLPROTO_VER_SIXTH: + remote_search_peers = self.getRemoteSearchPeers(self.num_remote_peers_in_msg) + rp_list += remote_search_peers + if len(rp_list) > nrps: + rp_list = sample(rp_list, nrps) + + if target_permid in rp_list: + rp_list.remove(target_permid) + + peers = [] + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'bc: ******** rplist nconn', len(rp_list), len(self.connected_connectable_peers) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", rp_list, self.connected_connectable_peers + for permid in rp_list: + # keys = ('ip', 'port', 'oversion', 'num_torrents') + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '**************', `self.connected_connectable_peers`, `rp_list` + # TODO: Fix this bug: not consisitent + if permid not in self.connected_connectable_peers: + continue + peer = deepcopy(self.connected_connectable_peers[permid]) + if peer['ip'] == target_ip and peer['port'] == target_port: + continue + peer['similarity'] = self.data_handler.getPeerSim(permid) + peer['permid'] = permid + peers.append(peer) + +# peers = self.data_handler.getPeers(rp_list, ['permid', 'ip', 'port', 'similarity', 'oversion', 'num_torrents']) +# peers = filter(lambda p:p['ip']!=target_ip or int(p['port'])!=target_port, peers) +# +# for i in range(len(peers)): +# peers[i]['port'] = int(peers[i]['port']) + + if selversion <= OLPROTO_VER_SECOND: + for i in range(len(peers)): + peers[i]['age'] = 0 + + # random peer also attachs similarity from 4 + if selversion <= OLPROTO_VER_THIRD: + for i in range(len(peers)): + peers[i].pop('similarity') + + if selversion >= OLPROTO_VER_FOURTH: + for i in range(len(peers)): + old_sim = peers[i]['similarity'] + if old_sim is None: + old_sim = 0.0 + peers[i]['similarity'] = int(old_sim+0.5) + + # Every peer >= 6 in message attachs nfiles and oversion for remote search from version 6 + for i in range(len(peers)): + oversion = peers[i].pop('oversion') + nfiles = peers[i].pop('num_torrents') + # only include remote-search-peers + if selversion >= OLPROTO_VER_SIXTH and oversion >= OLPROTO_VER_SIXTH and nfiles >= REMOTE_SEARCH_PEER_NTORRENTS_THRESHOLD: + peers[i]['oversion'] = oversion + # ascribe it to the inconsistent name of the same concept in msg and db + peers[i]['nfiles'] = nfiles + + return peers + + def isConnectable(self): + return bool(self.dialback.isConnectable()) + + def buddycastSendCallback(self, exc, target_permid, other=0): + if exc is None: + if debug: + print "bc: *** msg was sent successfully to peer", \ + self.get_peer_info(target_permid) + else: + if debug: + print "bc: *** warning - error in sending msg to",\ + self.get_peer_info(target_permid), exc + self.closeConnection(target_permid, 'buddycast:'+str(exc)) + + def blockPeer(self, peer_permid, block_list, block_interval=None): + """ Add a peer to a block list """ + + peer_id = peer_permid # ARNODB: confusing! 
+ if block_interval is None: + block_interval = self.block_interval + unblock_time = now() + block_interval + block_list[peer_id] = unblock_time + + + + def isBlocked(self, peer_permid, block_list): + if self.TESTASSERVER: + return False # we do not want to be blocked when sending various messages + + peer_id = peer_permid + if peer_id not in block_list: + return False + + unblock_time = block_list[peer_id] + if now() >= unblock_time - self.network_delay: # 30 seconds for network delay + block_list.pop(peer_id) + return False + return True + + + + # ------ receive a buddycast message, for both active and passive thread ------ # + def gotBuddyCastMessage(self, recv_msg, sender_permid, selversion): + """ Received a buddycast message and handle it. Reply if needed """ + + if debug: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: got and handle buddycast msg", currentThread().getName() + + if not sender_permid or sender_permid == self.permid: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: error - got BuddyCastMsg from a None peer", \ + sender_permid, recv_msg, "Round", self.round + return False + + blocked = self.isBlocked(sender_permid, self.recv_block_list) + if blocked: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: warning - got BuddyCastMsg from a recv blocked peer", \ + show_permid(sender_permid), "Round", self.round + return True # allow the connection to be kept. That peer may have restarted in 4 hours + + # Jie: Because buddycast message is implemented as a dictionary, anybody can + # insert any content in the message. It isn't secure if someone puts + # some fake contents inside and make the message very large. The same + # secure issue could happen in other protocols over the secure overlay layer. + # Therefore, I'd like to set a limitation of the length of buddycast message. + # The receiver should close the connection if the length of the message + # exceeds the limitation. According to my experience, the biggest + # buddycast message should be around 6~7KBytes. So the reasonable + # length limitation might be 10KB for buddycast message. 
+ if MAX_BUDDYCAST_LENGTH > 0 and len(recv_msg) > MAX_BUDDYCAST_LENGTH: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: warning - got large BuddyCastMsg", len(recv_msg), "Round", self.round + return False + + active = self.isBlocked(sender_permid, self.send_block_list) + + + if active: + self.print_debug_info('Active', 18, sender_permid) + else: + self.print_debug_info('Passive', 2, sender_permid) + + buddycast_data = {} + try: + try: + buddycast_data = bdecode(recv_msg) + except ValueError, msg: + try: + errmsg = str(msg) + except: + errmsg = repr(msg) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: warning, got invalid BuddyCastMsg:", errmsg, \ + "Round", self.round # ipv6 + return False + buddycast_data.update({'permid':sender_permid}) + try: # check buddycast message + validBuddyCastData(buddycast_data, 0, + self.num_tbs, self.num_rps, self.num_tb_prefs) # RCP 2 + except RuntimeError, msg: + try: + errmsg = str(msg) + except: + errmsg = repr(msg) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: warning, got invalid BuddyCastMsg:", errmsg, \ + "Round", self.round # ipv6 + return False + + + # update sender's ip and port in buddycast + dns = self.dnsindb(sender_permid) + if dns != None: + sender_ip = dns[0] + sender_port = dns[1] + buddycast_data.update({'ip':sender_ip}) + buddycast_data.update({'port':sender_port}) + + if self.log: + if active: + MSG_ID = 'ACTIVE_BC' + else: + MSG_ID = 'PASSIVE_BC' + msg = repr(readableBuddyCastMsg(buddycast_data,selversion)) # from utilities + self.overlay_log('RECV_MSG', sender_ip, sender_port, show_permid(sender_permid), selversion, MSG_ID, msg) + + # store discovered peers/preferences/torrents to cache and db + conn = buddycast_data.get('connectable', 0) # 0 - unknown + + self.handleBuddyCastMessage(sender_permid, buddycast_data, selversion) + if active: + conn = 1 + + if active: + self.print_debug_info('Active', 19, sender_permid) + else: + self.print_debug_info('Passive', 3, sender_permid) + + # update sender and other peers in connection list + addto = self.addPeerToConnList(sender_permid, conn) + + if active: + self.print_debug_info('Active', 20, sender_permid) + else: + self.print_debug_info('Passive', 4, sender_permid) + + except Exception, msg: + print_exc() + raise Exception, msg + return True # don't close connection, maybe my problem in handleBuddyCastMessage + + self.blockPeer(sender_permid, self.recv_block_list) + + # update torrent collecting module + #self.data_handler.checkUpdate() + collected_infohashes = buddycast_data.get('collected torrents', []) + if self.torrent_collecting and not self.superpeer: + collected_infohashes += self.getPreferenceHashes(buddycast_data) + self.torrent_collecting.trigger(sender_permid, selversion, collected_infohashes) + + if active: + self.print_debug_info('Active', 21, sender_permid) + else: + self.print_debug_info('Passive', 5, sender_permid) + + if not active: + self.replyBuddyCast(sender_permid, selversion) + + # show activity + buf = dunno2unicode('"'+buddycast_data['name']+'"') + self.launchmany.set_activity(NTFY_ACT_RECOMMEND, buf) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '*****************************************************************************************************' + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "* bc: Got BuddyCast Message from",self.get_peer_info(sender_permid),active + print >>sys.stderr, 
strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '******************************** Yahoo! *************************************************************' + + return True + + + def createPreferenceDictionaryList(self, buddycast_data): + """as of OL 8, preferences are no longer lists of infohashes, but lists of lists containing + infohashes and associated metadata. this method checks which overlay version has been used + and replaces either format by a list of dictionaries, such that the rest of the code can remain + version-agnostic and additional information like torrent ids can be stored along the way""" + + prefs = buddycast_data.get('preferences',[]) + # assume at least one entry below here + if len(prefs) == 0: + return [] + d = [] + + try: + + if not type(prefs[0])==list: + # pre-OLPROTO_VER_EIGHTH + # create dictionary from list of info hashes, extended fields simply aren't set + + d = [dict({'infohash': pref}) for pref in prefs] + + # we shouldn't receive these lists if the peer says he's OL 8. + # let's accept it but complain + if buddycast_data['oversion'] >= OLPROTO_VER_EIGHTH: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'buddycast: received OLPROTO_VER_EIGHTH buddycast data containing old style preferences. only ok if talking to an earlier non-release version' + return d + + # if the single prefs entries are lists, we have a more modern wire format + # currently, there is only one possibility + if buddycast_data['oversion'] >= OLPROTO_VER_EIGHTH: + # create dictionary from list of lists + d = [dict({'infohash': pref[0], + 'search_terms': pref[1], + 'position': pref[2], + 'reranking_strategy': pref[3]}) + for pref in prefs] + else: + raise RuntimeError, 'buddycast: unknown preference protocol, pref entries are lists but oversion= %s:\n%s' % (buddycast_data['oversion'], prefs) + + return d + + except Exception, msg: + print_exc() + raise Exception, msg + return d + + + + + + def getPreferenceHashes(self, buddycast_data): + """convenience function returning the infohashes from the preferences. + returns a list of infohashes, i.e. 
replaces old calls to buddycast_data.get('preferences')""" + return [preference.get('infohash',"") for preference in buddycast_data.get('preferences', [])] + + def handleBuddyCastMessage(self, sender_permid, buddycast_data, selversion): + """ Handle received buddycast message + Add peers, torrents and preferences into database and update last seen + Add fresh peers to candidate list + All database updates caused by buddycast msg should be handled here + """ + + _now = now() + + cache_db_data = {'peer':{},'infohash':Set(),'pref':[]} # peer, updates / pref, pairs + cache_peer_data = {} + + tbs = buddycast_data.pop('taste buddies') + rps = buddycast_data.pop('random peers') + buddycast_data['oversion'] = selversion + + max_tb_sim = 1 + + # include sender itself + bc_data = [buddycast_data] + tbs + rps + for peer in bc_data: + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bc: Learned about peer",peer['ip'] + + peer_permid = peer['permid'] + if peer_permid == self.permid: + continue + age = max(peer.get('age', 0), 0) # From secure overlay version 3, it doesn't include 'age' + last_seen = _now - age + old_last_seen = self.data_handler.getPeerLastSeen(peer_permid) + last_seen = min(max(old_last_seen, last_seen), _now) + oversion = peer.get('oversion', 0) + nfiles = peer.get('nfiles', 0) + self.addRemoteSearchPeer(peer_permid, oversion, nfiles, last_seen) + + cache_peer_data[peer_permid] = {} + cache_peer_data[peer_permid]['last_seen'] = last_seen + #self.data_handler._addPeerToCache(peer_permid, last_seen) + #if selversion >= OLPROTO_VER_FOURTH: + sim = peer.get('similarity', 0) + max_tb_sim = max(max_tb_sim, sim) + if sim > 0: + cache_peer_data[peer_permid]['sim'] = sim + #self.data_handler.addRelativeSim(sender_permid, peer_permid, sim, max_tb_sim) + + if peer_permid != sender_permid: + self.addConnCandidate(peer_permid, last_seen) + + new_peer_data = {} + #new_peer_data['permid'] = peer['permid'] + new_peer_data['ip'] = hostname_or_ip2ip(peer['ip']) + new_peer_data['port'] = peer['port'] + new_peer_data['last_seen'] = last_seen + if peer.has_key('name'): + new_peer_data['name'] = dunno2unicode(peer['name']) # store in db as unicode + cache_db_data['peer'][peer_permid] = new_peer_data + #self.data_handler.addPeer(peer_permid, last_seen, new_peer_data, commit=True) # new peer + + self.limitConnCandidate() + if len(self.connection_candidates) > self.bootstrap_num: + self.bootstrapped = True + + # database stuff + if selversion >= OLPROTO_VER_SIXTH: + stats = {'num_peers':buddycast_data['npeers'],'num_torrents':buddycast_data['nfiles'],'num_prefs':buddycast_data['ndls']} + cache_db_data['peer'][sender_permid].update(stats) + + cache_db_data['peer'][sender_permid]['last_buddycast'] = _now + + prefs = self.createPreferenceDictionaryList(buddycast_data) + buddycast_data['preferences'] = prefs # Nicolas: store this back into buddycast_data because it's used later on gotBuddyCastMessage again + + infohashes = Set(buddycast_data.get('collected torrents', [])) + prefhashes = Set(self.getPreferenceHashes(buddycast_data)) # only accept sender's preference, to avoid pollution + infohashes = infohashes.union(prefhashes) + + cache_db_data['infohash'] = infohashes + #self.data_handler.addInfohashes(infohashes, commit=True) + if prefs: + cache_db_data['pref'] = prefs + #self.data_handler.addPeerPreferences(sender_permid, prefs) + #self.data_handler.increaseBuddyCastTimes(sender_permid, commit=True) + + self.data_handler.handleBCData(cache_db_data, cache_peer_data, sender_permid, 
max_tb_sim) + + def removeFromConnList(self, peer_permid): + removed = 0 + if peer_permid in self.connected_connectable_peers: # Ct + self.connected_connectable_peers.pop(peer_permid) + try: + self.connected_taste_buddies.remove(peer_permid) + except ValueError: + pass + try: + self.connected_random_peers.remove(peer_permid) + except ValueError: + pass + removed = 1 + if peer_permid in self.connected_unconnectable_peers: # Cu + self.connected_unconnectable_peers.pop(peer_permid) + removed = 2 + return removed + + def addPeerToConnList(self, peer_permid, connectable=0): + """ Add the peer to Ct, Cr or Cu """ + + # remove the existing peer from lists so that its status can be updated later + self.removeFromConnList(peer_permid) + + if not self.isConnected(peer_permid): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: cannot add a unconnected peer to conn list", "Round", self.round + return + + _now = now() + + if connectable == 1: + self.addPeerToConnCP(peer_permid, _now) + addto = '(reachable peer)' + else: + self.addPeerToConnUP(peer_permid, _now) + addto = '(peer deemed unreachable)' + + return addto + + def updateTBandRPList(self): + """ Select the top 10 most similar (sim>0) peer to TB and others to RP """ + + nconnpeers = len(self.connected_connectable_peers) + if nconnpeers == 0: + self.connected_taste_buddies = [] + self.connected_random_peers = [] + return + + tmplist = [] + tbs = [] + rps = [] + for permid in self.connected_connectable_peers: + sim = self.data_handler.getPeerSim(permid) + if sim > 0: + tmplist.append([sim, permid]) + else: + rps.append(permid) + tmplist.sort() + tmplist.reverse() + + #ntb = self.max_conn_tb # 10 tb & 10 rp + ntb = min((nconnpeers+1)/2, self.max_conn_tb) # half tb and half rp + if len(tmplist) > 0: + for sim,permid in tmplist[:ntb]: + tbs.append(permid) + ntb = len(tbs) + if len(tmplist) > ntb: + rps = [permid for sim,permid in tmplist[ntb:]] + rps + + # remove the oldest peer from both random peer list and connected_connectable_peers + if len(rps) > self.max_conn_rp: + tmplist = [] + for permid in rps: + connect_time = self.connected_connectable_peers[permid]['connect_time'] + tmplist.append([connect_time, permid]) + tmplist.sort() + tmplist.reverse() + rps = [] + i = 0 + for last_seen,permid in tmplist: + if i < self.max_conn_rp: + rps.append(permid) + else: + self.connected_connectable_peers.pop(permid) + i += 1 + + self.connected_taste_buddies = tbs + self.connected_random_peers = rps + + for p in self.connected_taste_buddies: + assert p in self.connected_connectable_peers + for p in self.connected_random_peers: + assert p in self.connected_connectable_peers + assert len(self.connected_taste_buddies) + len(self.connected_random_peers) <= len(self.connected_connectable_peers) + + + def addPeerToConnCP(self, peer_permid, conn_time): + keys = ('ip', 'port', 'oversion', 'num_torrents') + res = self.data_handler.getPeer(peer_permid, keys) + peer = dict(zip(keys,res)) + peer['connect_time'] = conn_time + self.connected_connectable_peers[peer_permid] = peer + self.updateTBandRPList() + + def addNewPeerToConnList(self, conn_list, max_num, peer_permid, conn_time): + """ Add a peer to a connection list, and pop the oldest peer out """ + + if max_num <= 0 or len(conn_list) < max_num: + conn_list[peer_permid] = conn_time + return None + + else: + oldest_peer = (conn_time+1, None) + initial = 'abcdefghijklmnopqrstuvwxyz' + separator = ':-)' + for p in conn_list: + _conn_time = conn_list[p] + r = randint(0, self.max_conn_tb) + name 
= initial[r] + separator + p + to_cmp = (_conn_time, name) + oldest_peer = min(oldest_peer, to_cmp) + + if conn_time >= oldest_peer[0]: # add it + out_peer = oldest_peer[1].split(separator)[1] + conn_list.pop(out_peer) + conn_list[peer_permid] = conn_time + return out_peer + return peer_permid + + def addPeerToConnUP(self, peer_permid, conn_time): + ups = self.connected_unconnectable_peers + if peer_permid not in ups: + out_peer = self.addNewPeerToConnList(ups, + self.max_conn_up, peer_permid, conn_time) + if out_peer != peer_permid: + return True + return False + + # -------------- reply buddycast, passive thread -------------- # + def replyBuddyCast(self, target_permid, selversion): + """ Reply a buddycast message """ + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '*************** replay buddycast message', show_permid_short(target_permid), self.isConnected(target_permid) + + if not self.isConnected(target_permid): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'buddycast: lost connection while replying buddycast', \ + # "Round", self.round + return + + self.createAndSendBuddyCastMessage(target_permid, selversion, active=False) + + self.print_debug_info('Passive', 8, target_permid) + self.print_debug_info('Passive', 9, target_permid) + + self.next_initiate += 1 # Be idel in next round + self.print_debug_info('Passive', 10) + + + # -------------- handle overlay connections from SecureOverlay ---------- # + def handleConnection(self,exc,permid,selversion,locally_initiated): + if exc is None and permid != self.permid: # add a connection + self.addConnection(permid, selversion, locally_initiated) + else: + self.closeConnection(permid, 'overlayswarm:'+str(exc)) + + if debug: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: handle conn from overlay", exc, \ + self.get_peer_info(permid), "selversion:", selversion, \ + "local_init:", locally_initiated, ctime(now()) + + def addConnection(self, peer_permid, selversion, locally_initiated): + # add connection to connection list + _now = now() + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: addConnection", self.isConnected(peer_permid) + if not self.isConnected(peer_permid): + # SecureOverlay has already added the peer to db + self.connections[peer_permid] = selversion # add a new connection + addto = self.addPeerToConnList(peer_permid, locally_initiated) + + dns = self.get_peer_info(peer_permid, include_permid=False) + buf = '%s %s'%(dns, addto) + self.launchmany.set_activity(NTFY_ACT_MEET, buf) # notify user interface + + if self.torrent_collecting and not self.superpeer: + self.torrent_collecting.trigger(peer_permid, selversion) + + if debug: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: add connection", \ + self.get_peer_info(peer_permid), "to", addto + if self.log: + dns = self.dnsindb(peer_permid) + if dns: + ip,port = dns + self.overlay_log('CONN_ADD', ip, port, show_permid(peer_permid), selversion) + + def closeConnection(self, peer_permid, reason): + """ Close connection with a peer, and remove it from connection lists """ + + if debug: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: close connection:", self.get_peer_info(peer_permid) + + if self.isConnected(peer_permid): + self.connections.pop(peer_permid) + removed = self.removeFromConnList(peer_permid) + if removed == 1: + self.updateTBandRPList() + + if self.log: + dns = self.dnsindb(peer_permid) + if dns: + ip,port = 
dns + self.overlay_log('CONN_DEL', ip, port, show_permid(peer_permid), reason) + + # -------------- print debug info ---------- # + def get_stats(self): + nPeer = len(self.data_handler.peers) + nPref = nPeer #len(self.data_handler.preferences) + nCc = len(self.connection_candidates) + nBs = len(self.send_block_list) + nBr = len(self.recv_block_list) + nSO = -1 # TEMP ARNO len(self.overlay_bridge.debug_get_live_connections()) + nCo = len(self.connections) + nCt = len(self.connected_taste_buddies) + nCr = len(self.connected_random_peers) + nCu = len(self.connected_unconnectable_peers) + return nPeer, nPref, nCc, nBs, nBr, nSO, nCo, nCt, nCr, nCu + + def print_debug_info(self, thread, step, target_permid=None, selversion=0, r=0, addto=''): + if not debug: + return + if DEBUG: + print "bc: *****", thread, str(step), "-", + if thread == 'Active': + if step == 2: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Working:", now() - self.start_time, \ + "seconds since start. Round", self.round, "Time:", ctime(now()) + nPeer, nPref, nCc, nBs, nBr, nSO, nCo, nCt, nCr, nCu = self.get_stats() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: *** Status: nPeer nPref nCc: %d %d %d nBs nBr: %d %d nSO nCo nCt nCr nCu: %d %d %d %d %d" % \ + (nPeer,nPref,nCc, nBs,nBr, nSO,nCo, nCt,nCr,nCu) + if nSO != nCo: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: warning - nSo and nCo is inconsistent" + if nCc > self.max_conn_cand or nCt > self.max_conn_tb or nCr > self.max_conn_rp or nCu > self.max_conn_up: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: warning - nCC or nCt or nCr or nCu overloads" + _now = now() + buf = "" + i = 1 + for p in self.connected_taste_buddies: + buf += "bc: %d taste buddies: "%i + self.get_peer_info(p) + str(_now-self.connected_connectable_peers[p]['connect_time']) + " version: " + str(self.connections[p]) + "\n" + i += 1 + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", buf + + buf = "" + i = 1 + for p in self.connected_random_peers: + buf += "bc: %d random peers: "%i + self.get_peer_info(p) + str(_now-self.connected_connectable_peers[p]['connect_time']) + " version: " + str(self.connections[p]) + "\n" + i += 1 + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", buf + + buf = "" + i = 1 + for p in self.connected_unconnectable_peers: + buf += "bc: %d unconnectable peers: "%i + self.get_peer_info(p) + str(_now-self.connected_unconnectable_peers[p]) + " version: " + str(self.connections[p]) + "\n" + i += 1 + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", buf + buf = "" + totalsim = 0 + nsimpeers = 0 + minsim = 1e10 + maxsim = 0 + sims = [] + for p in self.data_handler.peers: + sim = self.data_handler.peers[p][PEER_SIM_POS] + if sim > 0: + sims.append(sim) + if sims: + minsim = min(sims) + maxsim = max(sims) + nsimpeers = len(sims) + totalsim = sum(sims) + if nsimpeers > 0: + meansim = totalsim/nsimpeers + else: + meansim = 0 + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: * sim peer: %d %.3f %.3f %.3f %.3f\n" % (nsimpeers, totalsim, meansim, minsim, maxsim) + + elif step == 3: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "check blocked peers: Round", self.round + + elif step == 4: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "keep connections with peers: Round", self.round + + elif step == 6: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", 
localtime())," ", "idle loop:", self.next_initiate + + elif step == 9: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bootstrapping: select", self.bootstrap_num, \ + "peers recently seen from Mega Cache" + if self.booted < 0: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: *** bootstrapped recently, so wait for a while" + elif self.booted == 0: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: *** no peers to bootstrap. Try next time" + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: *** bootstrapped, got", len(self.connection_candidates), \ + "peers in Cc. Times of bootstrapped", self.total_bootstrapped_time + buf = "" + for p in self.connection_candidates: + buf += "bc: * cand:" + `p` + "\n" + buf += "\nbc: Remote Search Peer Candidates:\n" + for p in self.remote_search_peer_candidates: + buf += "bc: * remote: %d "%p[0] + self.get_peer_info(p[1]) + "\n" + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", buf + + elif step == 11: + buf = "select " + if r == 0: + buf += "a most similar taste buddy" + else: + buf += "a most likely online random peer" + buf += " from Cc for buddycast out\n" + + if target_permid: + buf += "bc: *** got target %s sim: %s last_seen: %s" % \ + (self.get_peer_info(target_permid), + self.data_handler.getPeerSim(target_permid), + ctime(self.data_handler.getPeerLastSeen(target_permid))) + else: + buf += "bc: *** no target to select. Skip this round" + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", buf + + elif step == 12: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "connect a peer to start buddycast", self.get_peer_info(target_permid) + + elif step == 13: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "block connected peer in send block list", \ + self.get_peer_info(target_permid)#, self.send_block_list[target_permid] + + elif step == 14: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "remove connected peer from Cc", \ + self.get_peer_info(target_permid)#, "removed?", target_permid not in self.connection_candidates + + elif step == 15: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "peer is connected", \ + self.get_peer_info(target_permid), "overlay version", selversion, currentThread().getName() + + elif step == 16: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "create buddycast to send to", self.get_peer_info(target_permid) + + elif step == 17: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "send buddycast msg to", self.get_peer_info(target_permid) + + elif step == 18: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "receive buddycast message from peer %s" % self.get_peer_info(target_permid) + + elif step == 19: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "store peers from incoming msg to cache and db" + + elif step == 20: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "add connected peer %s to connection list %s" % (self.get_peer_info(target_permid), addto) + + elif step == 21: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "block connected peer in recv block list", \ + self.get_peer_info(target_permid), self.recv_block_list[target_permid] + + if thread == 'Passive': + if step == 2: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "receive buddycast message from peer 
%s" % self.get_peer_info(target_permid) + + elif step == 3: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "store peers from incoming msg to cache and db" + + elif step == 4: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "add connected peer %s to connection list %s" % (self.get_peer_info(target_permid), addto) + + elif step == 5: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "block connected peer in recv block list", \ + self.get_peer_info(target_permid), self.recv_block_list[target_permid] + + elif step == 6: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "create buddycast to reply to", self.get_peer_info(target_permid) + + elif step == 7: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "reply buddycast msg to", self.get_peer_info(target_permid) + + elif step == 8: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "block connected peer in send block list", \ + self.get_peer_info(target_permid), self.send_block_list[target_permid] + + elif step == 9: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "remove connected peer from Cc", \ + self.get_peer_info(target_permid)#, "removed?", target_permid not in self.connection_candidates + + elif step == 10: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "add idle loops", self.next_initiate + sys.stdout.flush() + sys.stderr.flush() + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: *****", thread, str(step), "-", + + def getAllTasteBuddies(self): + return self.connected_taste_buddies + + def addRemoteSearchPeer(self, permid, oversion, ntorrents, last_seen): + if oversion >= OLPROTO_VER_SIXTH and ntorrents >= REMOTE_SEARCH_PEER_NTORRENTS_THRESHOLD: + insort(self.remote_search_peer_candidates, [last_seen,permid]) + if len(self.remote_search_peer_candidates) > self.num_search_cand: + self.remote_search_peer_candidates.pop(0) + + def getRemoteSearchPeers(self, npeers): + if len(self.remote_search_peer_candidates) > npeers: + _peers = sample(self.remote_search_peer_candidates, npeers) # randomly select + else: + _peers = self.remote_search_peer_candidates + peers = [permid for last_seen,permid in _peers] + return peers + + +class DataHandler: + def __init__(self, launchmany, overlay_bridge, max_num_peers=2500): + self.launchmany = launchmany + self.overlay_bridge = overlay_bridge + self.config = self.launchmany.session.sessconfig # should be safe at startup + # --- database handlers --- + self.peer_db = launchmany.peer_db + self.superpeer_db = launchmany.superpeer_db + self.torrent_db = launchmany.torrent_db + self.mypref_db = launchmany.mypref_db + self.pref_db = launchmany.pref_db + # self.term_db = launchmany.term_db + self.friend_db = launchmany.friend_db + self.myfriends = Set() # FIXME: implement friends + self.myprefs = [] # torrent ids + self.peers = {} # peer_id: [similarity, last_seen, prefs(array('l',[torrent_id])] + self.default_peer = [0, 0, None] + self.owners = {} # torrent_ids_of_mine: Set(peer_id) + self.permid = self.getMyPermid() + self.nprefs = 0 + self.ntorrents = 0 + self.last_check_ntorrents = 0 + #self.total_pref_changed = 0 + # how many peers to load into cache from db + #self.max_peer_in_db = max_num_peers + self.max_num_peers = min(max(max_num_peers, 100), 2500) # at least 100, at most 2500 + #self.time_sim_weight = 4*60*60 # every 4 hours equals to a point of similarity + # after added some many (user, item) 
pairs, update sim of item to item + #self.update_i2i_threshold = 100 + #self.npeers = self.peer_db.size() - self.superpeer_db.size() + self.old_peer_num = 0 + self.buddycast_core = None + self.all_peer_list = None + self.num_peers_ui = None + self.num_torrents_ui = None + self.cached_updates = {'peer':{},'torrent':{}} + + # Subscribe BC to updates to MyPreferences, such that we can add/remove + # them from our download history that we send to other peers. + self.launchmany.session.add_observer(self.sesscb_ntfy_myprefs,NTFY_MYPREFERENCES,[NTFY_INSERT,NTFY_DELETE]) + + def commit(self): + self.peer_db.commit() + + def register_buddycast_core(self, buddycast_core): + self.buddycast_core = buddycast_core + + def getMyName(self, name=''): + return self.config.get('nickname', name) + + def getMyIp(self, ip=''): + return self.launchmany.get_ext_ip() + + def getMyPort(self, port=0): + return self.launchmany.listen_port + + def getMyPermid(self, permid=''): + return self.launchmany.session.get_permid() + + def getPeerID(self, permid): + if isinstance(permid, int) and permid > 0: + return permid + else: + return self.peer_db.getPeerID(permid) + + def getTorrentID(self, infohash): + if isinstance(infohash, int) and infohash > 0: + return infohash + else: + return self.peer_db.getPeerID(permid) + + def getPeerPermid(self, peer_id): + return self.peer_db.getPermid(peer_id) + + def updatePort(self, port): + self.my_db.put('port', port) + + def postInit(self, delay=4, batch=50, update_interval=10, npeers=None, updatesim=True): + # build up a cache layer between app and db + if npeers is None: + npeers = self.max_num_peers + self.updateMyPreferences() + self.loadAllPeers(npeers) + if updatesim: + self.updateAllSim(delay, batch, update_interval) + + def updateMyPreferences(self, num_pref=None): + # get most recent preferences, and sort by torrent id + res = self.mypref_db.getAll('torrent_id', order_by='creation_time desc', limit=num_pref) + self.myprefs = [p[0] for p in res] + + for torrent_id in self.myprefs: + self.updateOwners(torrent_id) + + def updateOwners(self, torrent_id): + res = self.pref_db.getAll('peer_id', where='torrent_id=%d'%torrent_id) + self.owners[torrent_id] = Set([p[0] for p in res]) + + def loadAllPeers(self, num_peers=None): + """ Read peers from db and put them in self.peers. + At most num_peers (=self.max_num_peers) recently seen peers can be cached. 
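+ Entries are keyed by peer_id, ordered by last_connected, and each value holds the peer's similarity, last_seen time and an (initially empty) array of preferred torrent_ids which is then filled from getRecentPeersPrefs().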
+ + """ + peer_values = self.peer_db.getAll(['peer_id','similarity','last_seen'], order_by='last_connected desc', limit=num_peers) + self.peers = dict(zip([p[0] for p in peer_values], [[p[1],p[2],array('l', [])] for p in peer_values])) + + user_item_pairs = self.pref_db.getRecentPeersPrefs('last_connected',num_peers) + self.nprefs = len(user_item_pairs) + + for pid,tid in user_item_pairs: + self.peers[pid][PEER_PREF_POS].append(tid) + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '**************** loadAllPeers', len(self.peers) + +# for pid in self.peers: +# self.peers[pid][PEER_PREF_POS].sort() # keep in order + + def updateAllSim(self, delay=4, batch=50, update_interval=10): + self._updateAllPeerSim(delay, batch, update_interval) # 0.156 second + self._updateAllItemRel(delay, batch, update_interval) # 0.875 second + # Tuning batch (without index relevance) + + # batch = 25: 0.00 0.22 0.58 + # batch = 50: min/avg/max execution time: 0.09 0.29 0.63 second + # batch = 100: 0.16 0.47 0.95 + # update_interval=10 + # 50000 updates take: 50000 / 50 * (10+0.3) / 3600 = 3 hours + # cpu load: 0.3/10 = 3% + + # With index relevance: + # batch = 50: min/avg/max execution time: 0.08 0.62 1.39 second + # batch = 25: 0.00 0.41 1.67 + # update_interval=5, batch=25 + # 50000 updates take: 50000 / 25 * (5+0.4) / 3600 = 3 hours + # cpu load: 0.4/5 = 8% + + def cacheSimUpdates(self, update_table, updates, delay, batch, update_interval): + self.cached_updates[update_table].update(updates) + self.overlay_bridge.add_task(lambda:self.checkSimUpdates(batch, update_interval), delay, 'checkSimUpdates') + + def checkSimUpdates(self, batch, update_interval): + last_update = 0 + if self.cached_updates['peer']: + updates = [] + update_peers = self.cached_updates['peer'] + keys = update_peers.keys() + shuffle(keys) # to avoid always update the same items when cacheSimUpdates is called frequently + for key in keys[:batch]: + updates.append((update_peers.pop(key), key)) + self.overlay_bridge.add_task(lambda:self.peer_db.updatePeerSims(updates), last_update + update_interval, 'updatePeerSims') + last_update += update_interval + + if self.cached_updates['torrent']: + updates = [] + update_peers = self.cached_updates['torrent'] + keys = update_peers.keys() + shuffle(keys) + for key in keys[:batch]: + updates.append((update_peers.pop(key), key)) + self.overlay_bridge.add_task(lambda:self.torrent_db.updateTorrentRelevances(updates), last_update + update_interval, 'updateTorrentRelevances') + last_update += update_interval + + if self.cached_updates['peer'] or self.cached_updates['torrent']: + self.overlay_bridge.add_task(lambda:self.checkSimUpdates(batch, update_interval), last_update+0.001, 'checkSimUpdates') + + def _updateAllPeerSim(self, delay, batch, update_interval): + # update similarity to all peers to keep consistent + + if self.old_peer_num == len(self.peers): # if no new peers, don't update + return + starttime = time() + self.nprefs = 0 # total nprefs must be updated before compute similarity + for peer_id in self.peers: + self.nprefs += len(self.peers[peer_id][PEER_PREF_POS]) + + updates = {} + for peer_id in self.peers: + oldsim = self.peers[peer_id][PEER_SIM_POS] + if not self.peers[peer_id][PEER_PREF_POS]: + continue + self.updateSimilarity(peer_id, False) + sim = self.peers[peer_id][PEER_SIM_POS] + if abs(sim - oldsim) > oldsim*0.05: + updates[peer_id] = sim + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '****************** update peer sim', len(updates), 
len(self.peers) + if updates: + self.cacheSimUpdates('peer', updates, delay, batch, update_interval) + + def _updateAllItemRel(self, delay, batch, update_interval): + # update all item's relevance + # Relevance of I = Sum(Sim(Users who have I)) + Poplarity(I) + # warning: this function may take 5 seconds to commit to the database + if len(self.peers) == 0: + return + tids = {} + nsimpeers = 0 + for peer_id in self.peers: + if self.peers[peer_id][PEER_PREF_POS]: + sim = self.peers[peer_id][PEER_SIM_POS] + if sim > 0: + nsimpeers += 1 + prefs = self.peers[peer_id][PEER_PREF_POS] + for tid in prefs: + if tid not in tids: + tids[tid] = [0,0] + tids[tid][0] += sim + tids[tid][1] += 1 + + if len(tids) == 1: + return + + res = self.torrent_db.getTorrentRelevances(tids) + if res: + old_rels = dict(res) + else: + old_rels = {} + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '********* update all item rel', len(old_rels), len(tids) #, old_rels[:10] + + for tid in tids.keys(): + tids[tid] = tids[tid][0]/tids[tid][1] + tids[tid][1] + old_rel = old_rels.get(tid, None) + if old_rel != None and abs(old_rel - tids[tid]) <= old_rel*0.05: + tids.pop(tid) # don't update db + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '**************--- update all item rel', len(tids), len(old_rels) #, len(self.peers), nsimpeers, tids.items()[:10] # 37307 2500 + if tids: + self.cacheSimUpdates('torrent', tids, delay, batch, update_interval) + + + def sesscb_ntfy_myprefs(self,subject,changeType,objectID,*args): + """ Called by SessionCallback thread """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bc: sesscb_ntfy_myprefs:",subject,changeType,`objectID` + if subject == NTFY_MYPREFERENCES: + infohash = objectID + if changeType == NTFY_INSERT: + op_my_pref_lambda = lambda:self.addMyPref(infohash) + elif changeType == NTFY_DELETE: + op_my_pref_lambda = lambda:self.delMyPref(infohash) + # Execute on OverlayThread + self.overlay_bridge.add_task(op_my_pref_lambda, 0) + + + def addMyPref(self, infohash): + infohash_str=bin2str(infohash) + torrentdata = self.torrent_db.getOne(('secret', 'torrent_id'), infohash=infohash_str) + if not torrentdata: + return + + secret = torrentdata[0] + torrent_id = torrentdata[1] + if secret: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'bc: Omitting secret download: %s' % torrentdata.get('info', {}).get('name', 'unknown') + return # do not buddycast secret downloads + + if torrent_id not in self.myprefs: + insort(self.myprefs, torrent_id) + self.updateOwners(torrent_id) + self.old_peer_num = 0 + self.updateAllSim() # time-consuming + #self.total_pref_changed += self.update_i2i_threshold + + def delMyPref(self, infohash): + torrent_id = self.torrent_db.getTorrentID(infohash) + if torrent_id in self.myprefs: + self.myprefs.remove(torrent_id) + self.owners.pop(torrent_id) + self.old_peer_num = 0 + self.updateAllSim() + #self.total_pref_changed += self.update_i2i_threshold + + def initRemoteSearchPeers(self, num_peers=10): + peer_values = self.peer_db.getAll(['permid','oversion','num_torrents','last_seen'], order_by='last_seen desc', limit=num_peers) + for p in peer_values: + p = list(p) + p[0] = str2bin(p[0]) + self.buddycast_core.addRemoteSearchPeer(*tuple(p)) + pass + + + def updatePeerPref(self, peer_permid, cur_prefs): + peer_id = self.getPeerID(peer_permid) + cur_prefs_array = array('l', cur_prefs) + self.peers[peer_id][PEER_PREF_POS] = cur_prefs_array + + overlap = 
Set(self.owners).intersection(Set(self.peers[peer_id][PEER_PREF_POS])) + if len(overlap) > 0: + for torrent_id in overlap: + self.owners[torrent_id].add(peer_id) + + def getMyLivePreferences(self, selversion, num=0): + """ Get a number of my preferences. Get all if num==0 """ + if selversion>=OLPROTO_VER_EIGHTH: + return self.mypref_db.getRecentLivePrefListWithClicklog(num) + else: + return self.mypref_db.getRecentLivePrefList(num) + + def getPeerSim(self, peer_permid, read_db=False, raw=False): + if read_db: + sim = self.peer_db.getPeerSim(peer_permid) + else: + peer_id = self.getPeerID(peer_permid) + if peer_id is None or peer_id not in self.peers: + sim = 0 + else: + sim = self.peers[peer_id][PEER_SIM_POS] + if sim is None: + sim = 0 + if not raw: + # negative value means it is calculated from other peers, + # not itself. See addRelativeSim() + return abs(sim) + else: + return sim + + def getPeerLastSeen(self, peer_permid): + peer_id = self.getPeerID(peer_permid) + return self.getPeerIDLastSeen(peer_id) + + def getPeerIDLastSeen(self, peer_id): + if not peer_id or peer_id not in self.peers: + return 0 + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '***** getPeerLastSeen', self.peers[pefer_permid], `peer_permid` + return self.peers[peer_id][PEER_LASTSEEN_POS] + + def getPeerPrefList(self, peer_permid): + """ Get a number of peer's preference list. Get all if num==0. + If live==True, dead torrents won't include + """ + peer_id = self.getPeerID(peer_permid) + if peer_id not in self.peers: + return self.pref_db.getPrefList(peer_permid) + else: + return self.peers[peer_id][PEER_PREF_POS] + +# def addPeer(self, peer_permid, last_seen, peer_data=None, commit=True): +# """ add a peer from buddycast message to both cache and db """ +# +# if peer_permid != self.permid: +# if peer_data is not None: +# self._addPeerToDB(peer_permid, last_seen, peer_data, commit=commit) +# self._addPeerToCache(peer_permid, last_seen) + + def _addPeerToCache(self, peer_permid, last_seen): + """ add a peer to cache """ + # Secure Overlay should have added this peer to database. 
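+ # Only the in-memory self.peers cache is updated here; the corresponding database row is written
+ # separately (see _addPeerToDB and handleBCData). Entries are read back elsewhere through the
+ # PEER_SIM_POS / PEER_LASTSEEN_POS / PEER_PREF_POS indices used throughout this class.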
+ if peer_permid == self.permid: + return + peer_id = self.getPeerID(peer_permid) + assert peer_id != None, `peer_permid` + if peer_id not in self.peers: + sim = self.peer_db.getPeerSim(peer_permid) + peerprefs = self.pref_db.getPrefList(peer_permid) # [torrent_id] + self.peers[peer_id] = [last_seen, sim, array('l', peerprefs)] # last_seen, similarity, pref + else: + self.peers[peer_id][PEER_LASTSEEN_POS] = last_seen + + def _addPeerToDB(self, peer_permid, peer_data, commit=True): + + if peer_permid == self.permid: + return + new_peer_data = {} + try: + new_peer_data['permid'] = peer_data['permid'] + new_peer_data['ip'] = hostname_or_ip2ip(peer_data['ip']) + new_peer_data['port'] = peer_data['port'] + new_peer_data['last_seen'] = peer_data['last_seen'] + if peer_data.has_key('name'): + new_peer_data['name'] = dunno2unicode(peer_data['name']) # store in db as unicode + + self.peer_db.addPeer(peer_permid, new_peer_data, update_dns=True, commit=commit) + + except KeyError: + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: _addPeerToDB has KeyError" + except socket.gaierror: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: _addPeerToDB cannot find host by name", peer_data['ip'] + except: + print_exc() + + def addInfohashes(self, infohash_list, commit=True): + for infohash in infohash_list: + self.torrent_db.addInfohash(infohash, commit=False) # it the infohash already exists, it will skip it + if commit: + self.torrent_db.commit() + + def addPeerPreferences(self, peer_permid, prefs, commit=True): + """ add a peer's preferences to both cache and db """ + + if peer_permid == self.permid: + return 0 + + cur_prefs = self.getPeerPrefList(peer_permid) + if not cur_prefs: + cur_prefs = [] + prefs2add = [] + for pref in prefs: + infohash = pref['infohash'] # Nicolas: new dictionary format of OL 8 preferences + torrent_id = self.torrent_db.getTorrentID(infohash) + if not torrent_id: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "buddycast: DB Warning: infohash", bin2str(infohash), "should have been inserted into db, but was not found" + continue + pref['torrent_id'] = torrent_id + if torrent_id not in cur_prefs: + prefs2add.append(pref) + cur_prefs.append(torrent_id) + + if len(prefs2add) > 0: + self.pref_db.addPreferences(peer_permid, prefs2add, is_torrent_id=True, commit=commit) + self.updatePeerPref(peer_permid, cur_prefs) + self.nprefs += len(prefs2add) + peer_id = self.getPeerID(peer_permid) + self.updateSimilarity(peer_id, commit=commit) + + def updateSimilarity(self, peer_id, update_db=True, commit=True): + """ update a peer's similarity """ + + if len(self.myprefs) == 0: + return + sim = self.LMP2PSimilarity(peer_id) + self.peers[peer_id][PEER_SIM_POS] = sim + if update_db and sim>0: + self.peer_db.updatePeerSims([(sim,peer_id)], commit=commit) + + def LMP2PSimilarity(self, peer_id): + peer_pref = self.peers[peer_id][PEER_PREF_POS] + sim = P2PSimLM(peer_id, self.myprefs, peer_pref, self.owners, self.nprefs, mu=1.0) + return sim + +# def increaseBuddyCastTimes(self, peer_permid, commit=True): +# self.peer_db.updateTimes(peer_permid, 'buddycast_times', 1, commit=False) +# self.peer_db.updatePeer(peer_permid, commit=commit, last_buddycast=now()) + + def getPeer(self, permid, keys=None): + return self.peer_db.getPeer(permid, keys) + + def addRelativeSim(self, sender_permid, peer_permid, sim, max_sim): + # Given Sim(I, A) and Sim(A, B), predict Sim(I, B) + # Sim(I, B) = Sim(I, A)*Sim(A, B)/Max(Sim(A,B)) for all 
B + old_sim = self.getPeerSim(peer_permid, raw=True) + if old_sim > 0: # its similarity has been calculated based on its preferences + return + old_sim = abs(old_sim) + sender_sim = self.getPeerSim(sender_permid) + new_sim = sender_sim*sim/max_sim + if old_sim == 0: + peer_sim = new_sim + else: + peer_sim = (new_sim + old_sim)/2 + peer_sim = -1*peer_sim + # using negative value to indicate this sim comes from others + peer_id = self.getPeerID(peer_permid) + self.peers[peer_id][PEER_SIM_POS] = peer_sim + + def get_npeers(self): + if self.num_peers_ui is None: + return len(self.peers) # changed to this according to Maarten's suggestion + else: + return self.num_peers_ui + + def get_ntorrents(self): + if self.num_torrents_ui is None: + _now = now() + if _now - self.last_check_ntorrents > 5*60: + self.ntorrents = self.torrent_db.getNumberCollectedTorrents() + self.last_check_ntorrents = _now + return self.ntorrents + else: + return self.num_torrents_ui + + def get_nmyprefs(self): + return len(self.myprefs) + +# def updatePeerLevelStats(self,permid,npeers,ntorrents,nprefs,commit=True): +# d = {'num_peers':npeers,'num_torrents':ntorrents,'num_prefs':nprefs} +# self.peer_db.updatePeer(permid, commit=commit, **d) + +# def getAllPeerList(self): +# return self.all_peer_list +# +# def removeAllPeerList(self): +# self.all_peer_list = None +# +# def setNumPeersFromUI(self, num): +# self.num_peers_ui = num +# +# def setNumTorrentsFromUI(self, num): # not thread safe +# self.num_torrents_ui = num + + def handleBCData(self, cache_db_data, cache_peer_data, sender_permid, max_tb_sim): + #self.data_handler.addPeer(peer_permid, last_seen, new_peer_data, commit=True) # new peer + #self.data_handler.increaseBuddyCastTimes(sender_permid, commit=True) + #self.data_handler.addInfohashes(infohashes, commit=True) + + #self.data_handler._addPeerToCache(peer_permid, last_seen) + #self.data_handler.addRelativeSim(sender_permid, peer_permid, sim, max_tb_sim) + + #self.data_handler.addPeerPreferences(sender_permid, prefs) + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bc: handleBCData:",`cache_db_data` + + + ADD_PEER = 1 + UPDATE_PEER = 2 + ADD_INFOHASH = 3 + + peer_data = cache_db_data['peer'] + db_writes = [] + for permid in peer_data: + new_peer = peer_data[permid] + old_peer = self.peer_db.getPeer(permid) + if not old_peer: + if permid == sender_permid: + new_peer['buddycast_times'] = 1 + db_writes.append((ADD_PEER, permid, new_peer)) + else: + #print old_peer + old_last_seen = old_peer['last_seen'] + new_last_seen = new_peer['last_seen'] + if permid == sender_permid: + if not old_peer['buddycast_times']: + new_peer['buddycast_times'] = 1 + else: + new_peer['buddycast_times'] = + 1 + if not old_last_seen or new_last_seen > old_last_seen + 4*60*60: + # don't update if it was updated in 4 hours + for k in new_peer.keys(): + if old_peer[k] == new_peer[k]: + new_peer.pop(k) + if new_peer: + db_writes.append((UPDATE_PEER, permid, new_peer)) + + for infohash in cache_db_data['infohash']: + tid = self.torrent_db.getTorrentID(infohash) + if tid is None: + db_writes.append((ADD_INFOHASH, infohash)) + + for item in db_writes: + if item[0] == ADD_PEER: + permid = item[1] + new_peer = item[2] + # Arno, 2008-09-17: Don't use IP data from BC message, network info gets precedence + updateDNS = (permid != sender_permid) + self.peer_db.addPeer(permid, new_peer, update_dns=updateDNS, commit=False) + elif item[0] == UPDATE_PEER: + permid = item[1] + new_peer = item[2] + # Arno, 2008-09-17: Don't use IP data 
from BC message, network info gets precedence + updateDNS = (permid != sender_permid) + if not updateDNS: + if 'ip' in new_peer: + del new_peer['ip'] + if 'port' in new_peer: + del new_peer['port'] + self.peer_db.updatePeer(permid, commit=False, **new_peer) + elif item[0] == ADD_INFOHASH: + infohash = item[1] + self.torrent_db.addInfohash(infohash, commit=False) + + #self.torrent_db._db.show_sql(1) + self.torrent_db.commit() + #self.torrent_db._db.show_sql(0) + + for item in db_writes: + if item[0] == ADD_PEER or item[0] == UPDATE_PEER: + permid = item[1] + new_peer = item[2] + last_seen = new_peer['last_seen'] + self._addPeerToCache(permid, last_seen) + + for permid in peer_data: + if 'sim' in peer_data[permid]: + sim = peer_data[permid]['sim'] + self.addRelativeSim(sender_permid, permid, sim, max_tb_sim) + + #self.torrent_db._db.show_sql(1) + self.torrent_db.commit() + #self.torrent_db._db.show_sql(0) + + # Nicolas: moved this block *before* the call to addPeerPreferences because with the clicklog, + # this in fact writes to several different databases, so it's easier to tell it to commit + # right away. hope this is ok + + # Nicolas 2009-03-30: thing is that we need to create terms and their generated ids, forcing at least one commit in-between + # have to see later how this might be optimized. right now, there's three commits: + # before addPeerPreferences, after bulk_insert, and after storing clicklog data + + if cache_db_data['pref']: + self.addPeerPreferences(sender_permid, + cache_db_data['pref'], + commit=True) + + + #print hash(k), peer_data[k] + #cache_db_data['infohash'] + #cache_db_data['pref'] diff --git a/tribler-mod/Tribler/Core/BuddyCast/buddycast.py.bak b/tribler-mod/Tribler/Core/BuddyCast/buddycast.py.bak new file mode 100644 index 0000000..353fe19 --- /dev/null +++ b/tribler-mod/Tribler/Core/BuddyCast/buddycast.py.bak @@ -0,0 +1,2481 @@ +# Written by Jie Yang +# see LICENSE.txt for license information +# + +__fool_epydoc = 481 +""" + BuddyCast2 epidemic protocol for p2p recommendation and semantic clustering + +Algorithm in LaTeX format: + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%%% algorithm of the active peer %%% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +\begin{figure*}[ht] +\begin{center} +\begin{algorithmic}[1] + +\LOOP +\STATE wait($\Delta T$ time units) \COMMENT{15 seconds in current implementation} +\STATE remove any peer from $B_S$ and $B_R$ if its block time was expired. 
+\STATE keep connection with all peers in $C_T$, $C_R$ and $C_U$ +\IF{$idle\_loops > 0$} + \STATE $idle\_loops \leftarrow idle\_loops - 1$ \COMMENT{skip this loop for rate control} +\ELSE + \IF{$C_C$ is empty} + \STATE $C_C \leftarrow$ select 5 peers recently seen from Mega Cache + \ENDIF + \STATE $Q \leftarrow$ select a most similar taste buddy or a random online peer from $C_C$ + \STATE connectPeer($Q$) + \STATE block($Q$, $B_S$, 4hours) + \STATE remove $Q$ from $C_C$ + \IF{$Q$ is connected successfully} + \STATE buddycast\_msg\_send $\leftarrow$ \textbf{createBuddycastMsg}() + \STATE send buddycast\_msg\_send to $Q$ + \STATE receive buddycast\_msg\_recv from $Q$ + \STATE $C_C \leftarrow$ fillPeers(buddycast\_msg\_recv) + \STATE \textbf{addConnectedPeer}($Q$) \COMMENT{add $Q$ into $C_T$, $C_R$ or $C_U$ according to its similarity} + \STATE blockPeer($Q$, $B_R$, 4hours) + \ENDIF + +\ENDIF +\ENDLOOP + +\end{algorithmic} +\caption{The protocol of an active peer.} +\label{Fig:buddycast_algorithm} +\end{center} +\end{figure*} + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%%% algorithm of the passive peer %%% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +\begin{figure*}[ht] +\begin{center} +\begin{algorithmic}[1] + +\LOOP + \STATE receive buddycast\_msg\_recv from $Q$ + \STATE $C_C \leftarrow$ fillPeers(buddycast\_msg\_recv) + \STATE \textbf{addConnectedPeer}($Q$) + \STATE blockPeer($Q$, $B_R$, 4hours) + \STATE buddycast\_msg\_send $\leftarrow$ \textbf{createBuddycastMsg}() + \STATE send buddycast\_msg\_send to $Q$ + \STATE blockPeer($Q$, $B_S$, 4hours) + \STATE remove $Q$ from $C_C$ + \STATE $idle\_loops \leftarrow idle\_loops + 1$ \COMMENT{idle for a loop for + rate control} +\ENDLOOP + +\end{algorithmic} +\caption{The protocol of an passive peer.} +\label{Fig:buddycast_algorithm} +\end{center} +\end{figure*} + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%% algorithm of creating a buddycast message %% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +\begin{figure*}[ht] +\begin{center} +function \textbf{createBuddycastMsg}() +\begin{algorithmic} + \STATE $My\_Preferences \leftarrow$ the most recently 50 preferences of the active peer + \STATE $Taste\_Buddies \leftarrow$ all peers from $C_T$ + \STATE $Random\_Peers \leftarrow$ all peers from $C_R$ + \STATE $buddycast\_msg\_send \leftarrow$ create an empty message + \STATE $buddycast\_msg\_send$ attaches the active peer's address and $My\_Preferences$ + \STATE $buddycast\_msg\_send$ attaches addresses of $Taste\_Buddies$ + \STATE $buddycast\_msg\_send$ attaches at most 10 preferences of each peer in $Taste\_Buddies$ + \STATE $buddycast\_msg\_send$ attaches addresses of $Random\_Peers$ +\end{algorithmic} +\caption{The function of creating a buddycast message} +\label{Fig:buddycast_createBuddycastMsg} +\end{center} +\end{figure*} + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%% algorithm of adding a peer into C_T or C_R or C_U %% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +\begin{figure*}[ht] +\begin{center} +function \textbf{addConnectedPeer}($Q$) +\begin{algorithmic} + \IF{$Q$ is connectable} + \STATE $Sim_Q \leftarrow$ getSimilarity($Q$) \COMMENT{similarity between $Q$ and the active peer} + \STATE $Min_{Sim} \leftarrow$ similarity of the least similar peer in $C_T$ + \IF{$Sim_Q \geq Min_{Sim}$ \textbf{or} ($C_T$ is not full \textbf{and} $Sim_Q>0$)} + \STATE $C_T \leftarrow C_T + Q$ + \STATE move the least similar peer to $C_R$ if $C_T$ overloads + \ELSE + \STATE $C_R \leftarrow C_R + Q$ + 
\STATE remove the oldest peer to $C_R$ if $C_R$ overloads + \ENDIF + \ELSE + \STATE $C_U \leftarrow C_U + Q$ + \ENDIF + +\end{algorithmic} +\caption{The function of adding a peer into $C_T$ or $C_R$} +\label{Fig:buddycast_addConnectedPeer} +\end{center} +\end{figure*} + +""" +""" + +BuddyCast 3: + No preferences for taste buddies; + don't accept preferences of taste buddies from incoming message either + 50 recent my prefs + 50 recent collected torrents + 50 ratings + +Torrent info + preferences: Recently downloaded torrents by the user {'seeders','leechers','check time'} + collected torrents: Recently collected torrents (include Subscribed torrents) + #ratings: Recently rated torrents and their ratings (negative rating means this torrent was deleted) +Taste Buddies + permid + ip + port + similarity +Random Peers + permid + ip + port + similarity + +""" + +import sys +from random import sample, randint, shuffle +from time import time, gmtime, strftime +from traceback import print_exc +from sets import Set +from array import array +from bisect import insort +from copy import deepcopy +import gc +import socket + +from Tribler.Core.simpledefs import BCCOLPOLICY_SIMPLE +from Tribler.Core.BitTornado.bencode import bencode, bdecode +from Tribler.Core.BitTornado.BT1.MessageID import BUDDYCAST, BARTERCAST, KEEP_ALIVE, MODERATIONCAST_HAVE, MODERATIONCAST_REQUEST, MODERATIONCAST_REPLY, VOTECAST +from Tribler.Core.Utilities.utilities import show_permid_short, show_permid,validPermid,validIP,validPort,validInfohash,readableBuddyCastMsg, hostname_or_ip2ip +from Tribler.Core.Utilities.unicode import dunno2unicode +from Tribler.Core.simpledefs import NTFY_ACT_MEET, NTFY_ACT_RECOMMEND, NTFY_MYPREFERENCES, NTFY_INSERT, NTFY_DELETE +from Tribler.Core.NATFirewall.DialbackMsgHandler import DialbackMsgHandler +from Tribler.Core.Overlay.SecureOverlay import OLPROTO_VER_SECOND, OLPROTO_VER_THIRD, OLPROTO_VER_FOURTH, OLPROTO_VER_SIXTH, OLPROTO_VER_EIGHTH +from Tribler.Core.CacheDB.sqlitecachedb import bin2str, str2bin +from similarity import P2PSimLM +from TorrentCollecting import SimpleTorrentCollecting #, TiT4TaTTorrentCollecting +from Tribler.Core.Statistics.Logger import OverlayLogger +from Tribler.Core.Statistics.Crawler import Crawler + +from threading import currentThread + +from bartercast import BarterCastCore +from moderationcast import ModerationCastCore +from votecast import VoteCastCore + +DEBUG = False # for errors +debug = False # for status +debugnic = True # for my temporary outputs +unblock = 0 + +# Nicolas: 10 KByte -- I set this to 1024 KByte. +# The term_id->term dictionary can become almost arbitrarily long +# would be strange if buddycast stopped working once a user has done a lot of searches... +# +# Arno, 2009-03-06: Too big: we don't want every peer to send out 1 MB messages +# every 15 secs. Set to 100K +# +# Nicolas, 2009-03-06: Ok this was really old. 
10k in fact is enough with the new constraints on clicklog data +MAX_BUDDYCAST_LENGTH = 10*1024 + +REMOTE_SEARCH_PEER_NTORRENTS_THRESHOLD = 100 # speedup finding >=4.1 peers in this version + +# used for datahandler.peers +PEER_SIM_POS = 0 +PEER_LASTSEEN_POS = 1 +PEER_PREF_POS = 2 + +def now(): + return int(time()) + +def ctime(t): + return strftime("%Y-%m-%d.%H:%M:%S", gmtime(t)) + +def validBuddyCastData(prefxchg, nmyprefs=50, nbuddies=10, npeers=10, nbuddyprefs=10): + + # Arno: TODO: make check version dependent + + def validPeer(peer): + validPermid(peer['permid']) + validIP(peer['ip']) + validPort(peer['port']) + + def validPref(pref, num): + if not (isinstance(prefxchg, list) or isinstance(prefxchg, dict)): + raise RuntimeError, "bc: invalid pref type " + str(type(prefxchg)) + if num > 0 and len(pref) > num: + raise RuntimeError, "bc: length of pref exceeds " + str((len(pref), num)) + for p in pref: + validInfohash(p) + + validPeer(prefxchg) + if not (isinstance(prefxchg['name'], str) or isinstance(prefxchg['name'], unicode)): + raise RuntimeError, "bc: invalid name type " + str(type(prefxchg['name'])) + + # Nicolas: create a validity check that doesn't have to know about the version + # just found out this function is not called anymore. well if it gets called one day, it should handle both + prefs = prefxchg['preferences'] + if prefs: + if type(prefs[0])==list: + # list of lists: this is the new wire protocol. entry 0 of each list contains infohash + validPref([pref[0] for pref in prefs], nmyprefs) + else: + # old style + validPref(prefs, nmyprefs) + + if len(prefxchg['taste buddies']) > nbuddies: + raise RuntimeError, "bc: length of prefxchg['taste buddies'] exceeds " + \ + str(len(prefxchg['taste buddies'])) + for b in prefxchg['taste buddies']: + validPeer(b) + #validPref(b['preferences'], nbuddyprefs) # not used from version 4 + + if len(prefxchg['random peers']) > npeers: + raise RuntimeError, "bc: length of random peers " + \ + str(len(prefxchg['random peers'])) + for b in prefxchg['random peers']: + validPeer(b) + + # ARNOCOMMENT: missing test for 'collected torrents' field + + return True + + +class BuddyCastFactory: + __single = None + + def __init__(self, superpeer=False, log=''): + if BuddyCastFactory.__single: + raise RuntimeError, "BuddyCastFactory is singleton" + BuddyCastFactory.__single = self + self.registered = False + self.buddycast_core = None + self.buddycast_interval = 15 # MOST IMPORTANT PARAMETER + self.superpeer = superpeer + self.log = log + self.running = False + self.data_handler = None + self.started = False # did call do_buddycast() at least once + self.max_peers = 2500 # was 2500 + self.ranonce = False # Nicolas: had the impression that BuddyCast can be tested more reliably if I wait until it has gone through buddycast_core.work() successfully once + if self.superpeer: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bc: Starting in SuperPeer mode" + + def getInstance(*args, **kw): + if BuddyCastFactory.__single is None: + BuddyCastFactory(*args, **kw) + return BuddyCastFactory.__single + getInstance = staticmethod(getInstance) + + def register(self, overlay_bridge, launchmany, errorfunc, + metadata_handler, torrent_collecting_solution, running, + max_peers=2500): + if self.registered: + return + self.overlay_bridge = overlay_bridge + self.launchmany = launchmany + self.metadata_handler = metadata_handler + self.torrent_collecting_solution = torrent_collecting_solution + self.errorfunc = errorfunc + + # BuddyCast is always 
started, but only active when this var is set. + self.running = bool(running) + self.max_peers = max_peers + + self.registered = True + + def register2(self): + # Arno: only start using overlay thread when normal init is finished to + # prevent concurrencty on singletons + if self.registered: + if debug: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: Register BuddyCast", currentThread().getName() + self.overlay_bridge.add_task(self.olthread_register, 0) + + def olthread_register(self, start=True): + if debug: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: OlThread Register", currentThread().getName() + + self.data_handler = DataHandler(self.launchmany, self.overlay_bridge, max_num_peers=self.max_peers) + + # ARNOCOMMENT: get rid of this dnsindb / get_dns_from_peerdb abuse off SecureOverlay + self.bartercast_core = BarterCastCore(self.data_handler, self.overlay_bridge, self.log, self.launchmany.secure_overlay.get_dns_from_peerdb) + + self.moderationcast_core = ModerationCastCore(self.data_handler, self.overlay_bridge, self.launchmany.session, self.getCurrrentInterval, self.log, self.launchmany.secure_overlay.get_dns_from_peerdb) + self.votecast_core = VoteCastCore(self.data_handler, self.overlay_bridge, self.launchmany.session, self.getCurrrentInterval, self.log, self.launchmany.secure_overlay.get_dns_from_peerdb) + + self.buddycast_core = BuddyCastCore(self.overlay_bridge, self.launchmany, + self.data_handler, self.buddycast_interval, self.superpeer, + self.metadata_handler, self.torrent_collecting_solution, self.bartercast_core, self.moderationcast_core, self.votecast_core, self.log) + + self.data_handler.register_buddycast_core(self.buddycast_core) + + self.moderationcast_core.showAllModerations() + self.votecast_core.showAllVotes() + + if start: + self.start_time = now() + # Arno, 2007-02-28: BC is now started self.buddycast_interval after client + # startup. This is assumed to give enough time for UPnP to open the firewall + # if any. So when you change this time, make sure it allows for UPnP to + # do its thing, or add explicit coordination between UPnP and BC. + # See BitTornado/launchmany.py + self.overlay_bridge.add_task(self.data_handler.postInit, 0) + self.overlay_bridge.add_task(self.doBuddyCast, 0.1) + # Arno: HYPOTHESIS: if set to small, we'll only ask superpeers at clean start. + if self.data_handler.torrent_db.size() > 0: + waitt = 1.0 + else: + waitt = 3.0 + self.overlay_bridge.add_task(self.data_handler.initRemoteSearchPeers,waitt) + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "BuddyCast starts up",waitt + + def doBuddyCast(self): + if not self.running: + return + + if debug: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bc: doBuddyCast!", currentThread().getName() + + # Reschedule ourselves for next round + buddycast_interval = self.getCurrrentInterval() + self.overlay_bridge.add_task(self.doBuddyCast, buddycast_interval) + if not self.started: + self.started = True + + # Do our thang. 
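+            # work() runs one complete BuddyCast round: pick a target from the
+            # connection candidates, exchange BUDDYCAST messages and update the
+            # peer/preference caches (see BuddyCastCore.work() below). The next
+            # round was already scheduled above, so a slow or failing round does
+            # not break the periodic loop.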
+ self.buddycast_core.work() + self.ranonce = True # Nicolas: now we can start testing and stuff works better + + def pauseBuddyCast(self): + self.running = False + + def restartBuddyCast(self): + if self.registered and not self.running: + self.running = True + self.doBuddyCast() + + def getCurrrentInterval(self): + """ + install [#(peers - superpeers)==0] & start < 2min: interval = 1 + start < 30min: interval = 5 + start > 24hour: interval = 60 + other: interval = 15 + """ + + #return 3 ### DEBUG, remove it before release!! + + past = now() - self.start_time + if past < 2*60: + if self.data_handler.get_npeers() < 20: + interval = 2 + else: + interval = 5 + elif past < 30*60: + interval = 5 + elif past > 24*60*60: + interval = 60 + else: + interval = 15 + return interval + + + def handleMessage(self, permid, selversion, message): + + if not self.registered or not self.running: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: handleMessage got message, but we're not enabled or running" + return False + + t = message[0] + + if t == BUDDYCAST: + return self.gotBuddyCastMessage(message[1:], permid, selversion) + elif t == KEEP_ALIVE: + if message[1:] == '': + return self.gotKeepAliveMessage(permid) + else: + return False + + elif t == MODERATIONCAST_HAVE: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Received moderationcast_have message" + if self.moderationcast_core != None: + return self.moderationcast_core.gotModerationCastHaveMessage(message[1:], permid, selversion) + + elif t == MODERATIONCAST_REQUEST: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Received moderation_request message" + if self.moderationcast_core != None: + return self.moderationcast_core.gotModerationCastRequestMessage(message[1:], permid, selversion) + + elif t == MODERATIONCAST_REPLY: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Received moderation_reply message" + if self.moderationcast_core != None: + return self.moderationcast_core.gotModerationCastReplyMessage(message[1:], permid, selversion) + + elif t == VOTECAST: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: Received votecast message" + if self.votecast_core != None: + return self.votecast_core.gotVoteCastMessage(message[1:], permid, selversion) + + + elif t == BARTERCAST: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: Received bartercast message" + if self.bartercast_core != None: + return self.bartercast_core.gotBarterCastMessage(message[1:], permid, selversion) + + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: wrong message to buddycast", ord(t), "Round", self.buddycast_core.round + return False + + def gotBuddyCastMessage(self, msg, permid, selversion): + if self.registered and self.running: + return self.buddycast_core.gotBuddyCastMessage(msg, permid, selversion) + else: + return False + + def gotKeepAliveMessage(self, permid): + if self.registered and self.running: + return self.buddycast_core.gotKeepAliveMessage(permid) + else: + return False + + def handleConnection(self,exc,permid,selversion,locally_initiated): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: handleConnection",exc,show_permid_short(permid),selversion,locally_initiated,currentThread().getName() + + if not self.registered: + return + + if DEBUG: + nconn = 0 + conns = 
self.buddycast_core.connections + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "\nbc: conn in buddycast", len(conns) + for peer_permid in conns: + _permid = show_permid_short(peer_permid) + nconn += 1 + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: ", nconn, _permid, conns[peer_permid] + + if self.running or exc is not None: # if not running, only close connection + self.buddycast_core.handleConnection(exc,permid,selversion,locally_initiated) + + def addMyPref(self, torrent): + """ Called by OverlayThread (as should be everything) """ + if self.registered: + self.data_handler.addMyPref(torrent) + + def delMyPref(self, torrent): + if self.registered: + self.data_handler.delMyPref(torrent) + + +class BuddyCastCore: + + TESTASSERVER = False # for unit testing + + def __init__(self, overlay_bridge, launchmany, data_handler, + buddycast_interval, superpeer, + metadata_handler, torrent_collecting_solution, bartercast_core, moderationcast_core, votecast_core, log=None): + self.overlay_bridge = overlay_bridge + self.launchmany = launchmany + self.data_handler = data_handler + self.buddycast_interval = buddycast_interval + self.superpeer = superpeer + #print_stack() + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'debug buddycast' + #superpeer # change it for superpeers + #self.superpeer_set = Set(self.data_handler.getSuperPeers()) + self.log = log + self.dialback = DialbackMsgHandler.getInstance() + + self.ip = self.data_handler.getMyIp() + self.port = self.data_handler.getMyPort() + self.permid = self.data_handler.getMyPermid() + # Jie: we must trainsfer my name to unicode here before sent out + # because the receiver might not be able to transfer the name to unicode, + # but the receiver might be able to display the unicode str correctly + # in that he installed the character set and therefore unicode can map it + self.name = dunno2unicode(self.data_handler.getMyName()) # encode it to unicode + + # --- parameters --- + #self.timeout = 5*60 + self.block_interval = 4*60*60 # block interval for a peer to buddycast + self.short_block_interval = 4*60*60 # block interval if failed to connect the peer + self.num_myprefs = 50 # num of my preferences in buddycast msg + self.max_collected_torrents = 50 # num of recently collected torrents (from BuddyCast 3) + self.num_tbs = 10 # num of taste buddies in buddycast msg + self.num_tb_prefs = 10 # num of taset buddy's preferences in buddycast msg + self.num_rps = 10 # num of random peers in buddycast msg + # time to check connection and send keep alive message + #self.check_connection_round = max(1, 120/self.buddycast_interval) + self.max_conn_cand = 100 # max number of connection candidates + self.max_conn_tb = 10 # max number of connectable taste buddies + self.max_conn_rp = 10 # max number of connectable random peers + self.max_conn_up = 10 # max number of unconnectable peers + self.bootstrap_num = 10 # max number of peers to fill when bootstrapping + self.bootstrap_interval = 5*60 # 5 min + self.network_delay = self.buddycast_interval*2 # 30 seconds + self.check_period = 120 # how many seconds to send keep alive message and check updates + self.num_search_cand = 10 # max number of remote search peer candidates + self.num_remote_peers_in_msg = 2 # number of remote search peers in msg + + # --- memory --- + self.send_block_list = {} # permid:unlock_time + self.recv_block_list = {} + self.connections = {} # permid: overlay_version + self.connected_taste_buddies = [] # [permid] + 
self.connected_random_peers = [] # [permid] + self.connected_connectable_peers = {} # permid: {'connect_time', 'ip', 'port', 'similarity', 'oversion', 'num_torrents'} + self.connected_unconnectable_peers = {} # permid: connect_time + self.connection_candidates = {} # permid: last_seen + self.remote_search_peer_candidates = [] # [last_seen,permid], sorted, the first one in the list is the oldest one + + # --- stats --- + self.target_type = 0 + self.next_initiate = 0 + self.round = 0 # every call to work() is a round + self.bootstrapped = False # bootstrap once every hour + self.bootstrap_time = 0 # number of times to bootstrap + self.total_bootstrapped_time = 0 + self.last_bootstrapped = now() # time of the last bootstrap + self.start_time = now() + self.last_check_time = 0 + + # --- dependent modules --- + self.metadata_handler = metadata_handler + self.torrent_collecting = None + if torrent_collecting_solution == BCCOLPOLICY_SIMPLE: + self.torrent_collecting = SimpleTorrentCollecting(metadata_handler, data_handler) + + # -- misc --- + self.dnsindb = launchmany.secure_overlay.get_dns_from_peerdb + if self.log: + self.overlay_log = OverlayLogger.getInstance(self.log) + + # Bartercast + self.bartercast_core = bartercast_core + #self.bartercast_core.buddycast_core = self + + self.moderationcast_core = moderationcast_core + self.votecast_core = votecast_core + + # Crawler + crawler = Crawler.get_instance() + self.crawler = crawler.am_crawler() + + + def get_peer_info(self, target_permid, include_permid=True): + if not target_permid: + return ' None ' + dns = self.dnsindb(target_permid) + if not dns: + return ' None ' + try: + ip = dns[0] + port = dns[1] + sim = self.data_handler.getPeerSim(target_permid) + if include_permid: + s_pid = show_permid_short(target_permid) + return ' %s %s:%s %.3f ' % (s_pid, ip, port, sim) + else: + return ' %s:%s %.3f' % (ip, port, sim) + except: + return ' ' + repr(dns) + ' ' + + def work(self): + """ + The engine of the buddycast epidemic protocol. + In every round, it selects a target and initiates a buddycast exchange, + or idles because it replied to messages in the last rounds.
+ """ + + try: + self.round += 1 + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'bc: ************ working buddycast', currentThread().getName() + self.print_debug_info('Active', 2) + if self.log: + nPeer, nPref, nCc, nBs, nBr, nSO, nCo, nCt, nCr, nCu = self.get_stats() + self.overlay_log('BUCA_STA', self.round, (nPeer,nPref,nCc), (nBs,nBr), (nSO,nCo), (nCt,nCr,nCu)) + + self.print_debug_info('Active', 3) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'bc: ************ working buddycast 2' + self.updateSendBlockList() + + _now = now() + if _now - self.last_check_time >= self.check_period: + self.print_debug_info('Active', 4) + self.keepConnections() + #self.data_handler.checkUpdate() + gc.collect() + self.last_check_time = _now + + if self.next_initiate > 0: + # It replied some meesages in the last rounds, so it doesn't initiate Buddycast + self.print_debug_info('Active', 6) + self.next_initiate -= 1 + else: + if len(self.connection_candidates) == 0: + self.booted = self._bootstrap(self.bootstrap_num) + self.print_debug_info('Active', 9) + + # It didn't reply any message in the last rounds, so it can initiate BuddyCast + if len(self.connection_candidates) > 0: + r, target_permid = self.selectTarget() + self.print_debug_info('Active', 11, target_permid, r=r) + self.startBuddyCast(target_permid) + + if debug: + print + except: + print_exc() + + # -------------- bootstrap -------------- # + def _bootstrap(self, number): + """ Select a number of peers from recent online peers which are not + in send_block_list to fill connection_candidates. + When to call this function is an issue to study. + """ + + _now = now() + # bootstrapped recently, so wait for a while + if self.bootstrapped and _now - self.last_bootstrapped < self.bootstrap_interval: + self.bootstrap_time = 0 # let it read the most recent peers next time + return -1 + + #ARNODB: self.data_handler.peers is a map from peer_id to something, i.e., not + # permid. 
send_block_list is a list of permids + send_block_list_ids = [] + for permid in self.send_block_list: + peer_id = self.data_handler.getPeerID(permid) + send_block_list_ids.append(peer_id) + + target_cands_ids = Set(self.data_handler.peers) - Set(send_block_list_ids) + recent_peers_ids = self.selectRecentPeers(target_cands_ids, number, + startfrom=self.bootstrap_time*number) + + for peer_id in recent_peers_ids: + last_seen = self.data_handler.getPeerIDLastSeen(peer_id) + self.addConnCandidate(self.data_handler.getPeerPermid(peer_id), last_seen) + self.limitConnCandidate() + + self.bootstrap_time += 1 + self.total_bootstrapped_time += 1 + self.last_bootstrapped = _now + if len(self.connection_candidates) < self.bootstrap_num: + self.bootstrapped = True # don't reboot until self.bootstrap_interval later + else: + self.bootstrapped = False # reset it to allow read more peers if needed + return 1 + + def selectRecentPeers(self, cand_ids, number, startfrom=0): + """ select a number of most recently online peers + @return a list of peer_ids + """ + + if not cand_ids: + return [] + peerids = [] + last_seens = [] + for peer_id in cand_ids: + peerids.append(peer_id) + last_seens.append(self.data_handler.getPeerIDLastSeen(peer_id)) + npeers = len(peerids) + if npeers == 0: + return [] + aux = zip(last_seens, peerids) + aux.sort() + aux.reverse() + peers = [] + i = 0 + + # roll back when startfrom is bigger than npeers + startfrom = startfrom % npeers + endat = startfrom + number + for _, peerid in aux[startfrom:endat]: + peers.append(peerid) + return peers + + def addConnCandidate(self, peer_permid, last_seen): + """ add a peer to connection_candidates, and only keep a number of + the most fresh peers inside. + """ + + if self.isBlocked(peer_permid, self.send_block_list) or peer_permid == self.permid: + return + self.connection_candidates[peer_permid] = last_seen + + def limitConnCandidate(self): + if len(self.connection_candidates) > self.max_conn_cand: + tmp_list = zip(self.connection_candidates.values(),self.connection_candidates.keys()) + tmp_list.sort() + while len(self.connection_candidates) > self.max_conn_cand: + ls,peer_permid = tmp_list.pop(0) + self.removeConnCandidate(peer_permid) + + def removeConnCandidate(self, peer_permid): + if peer_permid in self.connection_candidates: + self.connection_candidates.pop(peer_permid) + + # -------------- routines in each round -------------- # + def updateSendBlockList(self): + """ Remove expired peers in send block list """ + + _now = now() + for p in self.send_block_list.keys(): # don't call isBlocked() for performance reason + if _now >= self.send_block_list[p] - self.network_delay: + if debug: + print "bc: *** unblock peer in send block list" + self.get_peer_info(p) + \ + "expiration:", ctime(self.send_block_list[p]) + self.send_block_list.pop(p) + + def keepConnections(self): + """ Close expired connections, and extend the expiration of + peers in connection lists + """ + + timeout_list = [] + for peer_permid in self.connections: + # we don't close connection here, because if no incoming msg, + # sockethandler will close connection in 5-6 min. + + if (peer_permid in self.connected_connectable_peers or \ + peer_permid in self.connected_unconnectable_peers): + timeout_list.append(peer_permid) + + if self.crawler: + # since we are crawling, we are not interested in + # retaining connections for a long time. 
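+            # Close every connection in timeout_list right away; the non-crawler
+            # branch below keeps them open by sending KEEP_ALIVE messages instead.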
+ for peer_permid in timeout_list: + self.closeConnection(peer_permid, "a crawler does not retain connections for long") + else: + for peer_permid in timeout_list: + self.sendKeepAliveMsg(peer_permid) + + def sendKeepAliveMsg(self, peer_permid): + """ Send keep alive message to a peer, and extend its expiration """ + + if self.isConnected(peer_permid): + overlay_protocol_version = self.connections[peer_permid] + if overlay_protocol_version >= OLPROTO_VER_THIRD: + # From this version, support KEEP_ALIVE message in secure overlay + keepalive_msg = '' + self.overlay_bridge.send(peer_permid, KEEP_ALIVE+keepalive_msg, + self.keepaliveSendCallback) + if debug: + print "*** Send keep alive to peer", self.get_peer_info(peer_permid), \ + "overlay version", overlay_protocol_version + + def isConnected(self, peer_permid): + return peer_permid in self.connections + + def keepaliveSendCallback(self, exc, peer_permid, other=0): + if exc is None: + pass + else: + if debug: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: error - send keep alive msg", exc, \ + self.get_peer_info(peer_permid), "Round", self.round + self.closeConnection(peer_permid, 'keepalive:'+str(exc)) + + def gotKeepAliveMessage(self, peer_permid): + if self.isConnected(peer_permid): + if debug: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: Got keep alive from", self.get_peer_info(peer_permid) + if self.crawler: + # since we are crawling, we are not interested in + # retaining connections for a long time. + if debug: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: Got keep alive from", self.get_peer_info(peer_permid), "closing connection because we are a crawler" + return False + return True + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: error - got keep alive from a not connected peer. Round", \ + self.round + return False + + # -------------- initiate buddycast, active thread -------------- # + # ------ select buddycast target ------ # + def selectTarget(self): + """ select a most similar taste buddy or a most likely online random peer + from connection candidates list by 50/50 chance to initate buddycast exchange. 
+ """ + + def selectTBTarget(): + # Select the most similar taste buddy + max_sim = (-1, None) + for permid in self.connection_candidates: + peer_id = self.data_handler.getPeerID(permid) + if peer_id: + sim = self.data_handler.getPeerSim(permid) + max_sim = max(max_sim, (sim, permid)) + selected_permid = max_sim[1] + if selected_permid is None: + return None + else: + return selected_permid + + def selectRPTarget(): + # Randomly select a random peer + selected_permid = None + while len(self.connection_candidates) > 0: + selected_permid = sample(self.connection_candidates, 1)[0] + selected_peer_id = self.data_handler.getPeerID(selected_permid) + if selected_peer_id is None: + self.removeConnCandidate(selected_permid) + selected_permid = None + elif selected_peer_id: + break + + return selected_permid + + self.target_type = 1 - self.target_type + if self.target_type == 0: # select a taste buddy + target_permid = selectTBTarget() + else: # select a random peer + target_permid = selectRPTarget() + + return self.target_type, target_permid + + # ------ start buddycast exchange ------ # + def startBuddyCast(self, target_permid): + """ Connect to a peer, create a buddycast message and send it """ + + if not target_permid or target_permid == self.permid: + return + + if not self.isBlocked(target_permid, self.send_block_list): + if debug: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'bc: connect a peer', show_permid_short(target_permid), currentThread().getName() + self.overlay_bridge.connect(target_permid, self.buddycastConnectCallback) + + self.print_debug_info('Active', 12, target_permid) + if self.log: + dns = self.dnsindb(target_permid) + if dns: + ip,port = dns + self.overlay_log('CONN_TRY', ip, port, show_permid(target_permid)) + + # always block the target for a while not matter succeeded or not + #self.blockPeer(target_permid, self.send_block_list, self.short_block_interval) + self.print_debug_info('Active', 13, target_permid) + + # remove it from candidates no matter if it has been connected + self.removeConnCandidate(target_permid) + self.print_debug_info('Active', 14, target_permid) + + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'buddycast: peer', self.get_peer_info(target_permid), \ + 'is blocked while starting buddycast to it.', "Round", self.round + + def buddycastConnectCallback(self, exc, dns, target_permid, selversion): + if exc is None: + ## Create message depending on selected protocol version + try: + if not self.isConnected(target_permid): + if debug: + raise RuntimeError, 'buddycast: not connected while calling connect_callback' + return + + self.print_debug_info('Active', 15, target_permid, selversion) + + self.createAndSendBuddyCastMessage(target_permid, selversion, active=True) + + except: + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: error in reply buddycast msg",\ + exc, dns, show_permid_short(target_permid), selversion, "Round", self.round, + + else: + if debug: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: warning - connecting to",\ + show_permid_short(target_permid),exc,dns, ctime(now()) + + def createAndSendBuddyCastMessage(self, target_permid, selversion, active): + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bc: SENDING BC to",show_permid_short(target_permid) + + buddycast_data = self.createBuddyCastMessage(target_permid, selversion) + if debug: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", 
localtime())," ", "bc: createAndSendBuddyCastMessage", len(buddycast_data), currentThread().getName() + try: + buddycast_data['permid'] = self.permid + validBuddyCastData(buddycast_data, self.num_myprefs, + self.num_tbs, self.num_rps, self.num_tb_prefs) + buddycast_data.pop('permid') + buddycast_msg = bencode(buddycast_data) + except: + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "error buddycast_data:", buddycast_data + return + + if active: + self.print_debug_info('Active', 16, target_permid) + else: + self.print_debug_info('Passive', 6, target_permid) + + self.overlay_bridge.send(target_permid, BUDDYCAST+buddycast_msg, self.buddycastSendCallback) + self.blockPeer(target_permid, self.send_block_list, self.short_block_interval) + self.removeConnCandidate(target_permid) + + if debug: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '****************--------------'*2 + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'sent buddycast message to', show_permid_short(target_permid), len(buddycast_msg) + + if active: + self.print_debug_info('Active', 17, target_permid) + else: + self.print_debug_info('Passive', 7, target_permid) + + # Bartercast + if self.bartercast_core != None and active: + self.bartercast_core.createAndSendBarterCastMessage(target_permid, selversion, active) + + # As of March 5, 2009, ModerationCastHave Messages and VoteCast Messages + # are sent in lock-step with BuddyCast. (only if there are any + # moderations and/or votes to send.) + # + if self.moderationcast_core != None: + self.moderationcast_core.createAndSendModerationCastHaveMessage(target_permid, selversion) + + if self.votecast_core != None: + self.votecast_core.createAndSendVoteCastMessage(target_permid, selversion) + + if self.log: + dns = self.dnsindb(target_permid) + if dns: + ip,port = dns + if active: + MSG_ID = 'ACTIVE_BC' + else: + MSG_ID = 'PASSIVE_BC' + msg = repr(readableBuddyCastMsg(buddycast_data,selversion)) # from utilities + self.overlay_log('SEND_MSG', ip, port, show_permid(target_permid), selversion, MSG_ID, msg) + return buddycast_data # Nicolas: for testing + + def createBuddyCastMessage(self, target_permid, selversion, target_ip=None, target_port=None): + """ Create a buddycast message for a target peer on selected protocol version """ + # Nicolas: added manual target_ip, target_port parameters for testing + try: + target_ip,target_port = self.dnsindb(target_permid) + except: + if not self.TESTASSERVER: + raise # allow manual ips during unit-testing if dnsindb fails + if not target_ip or not target_port: + return {} + my_pref = self.data_handler.getMyLivePreferences(selversion, self.num_myprefs) #[pref] + taste_buddies = self.getTasteBuddies(self.num_tbs, self.num_tb_prefs, target_permid, target_ip, target_port, selversion) + random_peers = self.getRandomPeers(self.num_rps, target_permid, target_ip, target_port, selversion) #{peer:last_seen} + buddycast_data = {'ip':self.ip, + 'port':self.port, + 'name':self.name, + 'preferences':my_pref, + 'taste buddies':taste_buddies, + 'random peers':random_peers} + + if selversion >= OLPROTO_VER_THIRD: + # From this version, add 'connectable' entry in buddycast message + connectable = self.isConnectable() + buddycast_data['connectable'] = connectable + + if selversion >= OLPROTO_VER_FOURTH: + recent_collect = self.metadata_handler.getRecentlyCollectedTorrents(self.max_collected_torrents) + buddycast_data['collected torrents'] = recent_collect + + if selversion >= 
OLPROTO_VER_SIXTH: + npeers = self.data_handler.get_npeers() + ntorrents = self.data_handler.get_ntorrents() + nmyprefs = self.data_handler.get_nmyprefs() + buddycast_data['npeers'] = npeers + buddycast_data['nfiles'] = ntorrents + buddycast_data['ndls'] = nmyprefs + + + return buddycast_data + + def getTasteBuddies(self, ntbs, ntbprefs, target_permid, target_ip, target_port, selversion): + """ Randomly select a number of peers from connected_taste_buddies. """ + + if not self.connected_taste_buddies: + return [] + tb_list = self.connected_taste_buddies[:] + if target_permid in tb_list: + tb_list.remove(target_permid) + + peers = [] + for permid in tb_list: + # keys = ('ip', 'port', 'oversion', 'num_torrents') + peer = deepcopy(self.connected_connectable_peers[permid]) + if peer['ip'] == target_ip and peer['port'] == target_port: + continue + peer['similarity'] = self.data_handler.getPeerSim(permid) + peer['permid'] = permid + peers.append(peer) + +# peers = self.data_handler.getPeers(tb_list, ['permid', 'ip', 'port', 'similarity', 'oversion', 'num_torrents']) +# # filter peers with the same ip and port +# peers = filter(lambda p:p['ip']!=target_ip or int(p['port'])!=target_port, peers) +# +# for i in range(len(peers)): +# peers[i]['port'] = int(peers[i]['port']) + + # In overlay version 2, buddycast has 'age' field + if selversion <= OLPROTO_VER_SECOND: + for i in range(len(peers)): + peers[i]['age'] = 0 + + # In overlay version 2 and 3, buddycast doesn't have similarity field, and taste buddy has preferences + if selversion <= OLPROTO_VER_THIRD: + for i in range(len(peers)): + peers[i].pop('similarity') + peers[i]['preferences'] = [] # don't support from now on + + # From overlay version 4, buddycast includes similarity for peers + if selversion >= OLPROTO_VER_FOURTH: + for i in range(len(peers)): + peers[i]['similarity'] = int(peers[i]['similarity']+0.5) # bencode doesn't accept float type + + + + # Every peer >= 6 in message attachs nfiles and oversion for remote search from version 6 + for i in range(len(peers)): + oversion = peers[i].pop('oversion') + nfiles = peers[i].pop('num_torrents') + if selversion >= OLPROTO_VER_SIXTH and oversion >= OLPROTO_VER_SIXTH and nfiles >= REMOTE_SEARCH_PEER_NTORRENTS_THRESHOLD: + peers[i]['oversion'] = oversion + # ascribe it to the inconsistent name of the same concept in msg and db + peers[i]['nfiles'] = nfiles + + return peers + + def getRandomPeers(self, nrps, target_permid, target_ip, target_port, selversion): + """ Randomly select a number of peers from connected_random_peers. 
""" + + if not self.connected_random_peers: + return [] + rp_list = self.connected_random_peers[:] + + # From version 6, two (might be offline) remote-search-peers must be included in msg + if selversion >= OLPROTO_VER_SIXTH: + remote_search_peers = self.getRemoteSearchPeers(self.num_remote_peers_in_msg) + rp_list += remote_search_peers + if len(rp_list) > nrps: + rp_list = sample(rp_list, nrps) + + if target_permid in rp_list: + rp_list.remove(target_permid) + + peers = [] + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'bc: ******** rplist nconn', len(rp_list), len(self.connected_connectable_peers) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", rp_list, self.connected_connectable_peers + for permid in rp_list: + # keys = ('ip', 'port', 'oversion', 'num_torrents') + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '**************', `self.connected_connectable_peers`, `rp_list` + # TODO: Fix this bug: not consisitent + if permid not in self.connected_connectable_peers: + continue + peer = deepcopy(self.connected_connectable_peers[permid]) + if peer['ip'] == target_ip and peer['port'] == target_port: + continue + peer['similarity'] = self.data_handler.getPeerSim(permid) + peer['permid'] = permid + peers.append(peer) + +# peers = self.data_handler.getPeers(rp_list, ['permid', 'ip', 'port', 'similarity', 'oversion', 'num_torrents']) +# peers = filter(lambda p:p['ip']!=target_ip or int(p['port'])!=target_port, peers) +# +# for i in range(len(peers)): +# peers[i]['port'] = int(peers[i]['port']) + + if selversion <= OLPROTO_VER_SECOND: + for i in range(len(peers)): + peers[i]['age'] = 0 + + # random peer also attachs similarity from 4 + if selversion <= OLPROTO_VER_THIRD: + for i in range(len(peers)): + peers[i].pop('similarity') + + if selversion >= OLPROTO_VER_FOURTH: + for i in range(len(peers)): + old_sim = peers[i]['similarity'] + if old_sim is None: + old_sim = 0.0 + peers[i]['similarity'] = int(old_sim+0.5) + + # Every peer >= 6 in message attachs nfiles and oversion for remote search from version 6 + for i in range(len(peers)): + oversion = peers[i].pop('oversion') + nfiles = peers[i].pop('num_torrents') + # only include remote-search-peers + if selversion >= OLPROTO_VER_SIXTH and oversion >= OLPROTO_VER_SIXTH and nfiles >= REMOTE_SEARCH_PEER_NTORRENTS_THRESHOLD: + peers[i]['oversion'] = oversion + # ascribe it to the inconsistent name of the same concept in msg and db + peers[i]['nfiles'] = nfiles + + return peers + + def isConnectable(self): + return bool(self.dialback.isConnectable()) + + def buddycastSendCallback(self, exc, target_permid, other=0): + if exc is None: + if debug: + print "bc: *** msg was sent successfully to peer", \ + self.get_peer_info(target_permid) + else: + if debug: + print "bc: *** warning - error in sending msg to",\ + self.get_peer_info(target_permid), exc + self.closeConnection(target_permid, 'buddycast:'+str(exc)) + + def blockPeer(self, peer_permid, block_list, block_interval=None): + """ Add a peer to a block list """ + + peer_id = peer_permid # ARNODB: confusing! 
+ if block_interval is None: + block_interval = self.block_interval + unblock_time = now() + block_interval + block_list[peer_id] = unblock_time + + + + def isBlocked(self, peer_permid, block_list): + if self.TESTASSERVER: + return False # we do not want to be blocked when sending various messages + + peer_id = peer_permid + if peer_id not in block_list: + return False + + unblock_time = block_list[peer_id] + if now() >= unblock_time - self.network_delay: # 30 seconds for network delay + block_list.pop(peer_id) + return False + return True + + + + # ------ receive a buddycast message, for both active and passive thread ------ # + def gotBuddyCastMessage(self, recv_msg, sender_permid, selversion): + """ Received a buddycast message and handle it. Reply if needed """ + + if debug: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: got and handle buddycast msg", currentThread().getName() + + if not sender_permid or sender_permid == self.permid: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: error - got BuddyCastMsg from a None peer", \ + sender_permid, recv_msg, "Round", self.round + return False + + blocked = self.isBlocked(sender_permid, self.recv_block_list) + if blocked: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: warning - got BuddyCastMsg from a recv blocked peer", \ + show_permid(sender_permid), "Round", self.round + return True # allow the connection to be kept. That peer may have restarted in 4 hours + + # Jie: Because buddycast message is implemented as a dictionary, anybody can + # insert any content in the message. It isn't secure if someone puts + # some fake contents inside and make the message very large. The same + # secure issue could happen in other protocols over the secure overlay layer. + # Therefore, I'd like to set a limitation of the length of buddycast message. + # The receiver should close the connection if the length of the message + # exceeds the limitation. According to my experience, the biggest + # buddycast message should be around 6~7KBytes. So the reasonable + # length limitation might be 10KB for buddycast message. 
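+        # MAX_BUDDYCAST_LENGTH is defined as 10*1024 at the top of this module;
+        # a message larger than that is rejected by the check below and the
+        # handler returns False so that the connection can be dropped, as the
+        # comment above suggests.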
+ if MAX_BUDDYCAST_LENGTH > 0 and len(recv_msg) > MAX_BUDDYCAST_LENGTH: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: warning - got large BuddyCastMsg", len(recv_msg), "Round", self.round + return False + + active = self.isBlocked(sender_permid, self.send_block_list) + + + if active: + self.print_debug_info('Active', 18, sender_permid) + else: + self.print_debug_info('Passive', 2, sender_permid) + + buddycast_data = {} + try: + try: + buddycast_data = bdecode(recv_msg) + except ValueError, msg: + try: + errmsg = str(msg) + except: + errmsg = repr(msg) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: warning, got invalid BuddyCastMsg:", errmsg, \ + "Round", self.round # ipv6 + return False + buddycast_data.update({'permid':sender_permid}) + try: # check buddycast message + validBuddyCastData(buddycast_data, 0, + self.num_tbs, self.num_rps, self.num_tb_prefs) # RCP 2 + except RuntimeError, msg: + try: + errmsg = str(msg) + except: + errmsg = repr(msg) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: warning, got invalid BuddyCastMsg:", errmsg, \ + "Round", self.round # ipv6 + return False + + + # update sender's ip and port in buddycast + dns = self.dnsindb(sender_permid) + if dns != None: + sender_ip = dns[0] + sender_port = dns[1] + buddycast_data.update({'ip':sender_ip}) + buddycast_data.update({'port':sender_port}) + + if self.log: + if active: + MSG_ID = 'ACTIVE_BC' + else: + MSG_ID = 'PASSIVE_BC' + msg = repr(readableBuddyCastMsg(buddycast_data,selversion)) # from utilities + self.overlay_log('RECV_MSG', sender_ip, sender_port, show_permid(sender_permid), selversion, MSG_ID, msg) + + # store discovered peers/preferences/torrents to cache and db + conn = buddycast_data.get('connectable', 0) # 0 - unknown + + self.handleBuddyCastMessage(sender_permid, buddycast_data, selversion) + if active: + conn = 1 + + if active: + self.print_debug_info('Active', 19, sender_permid) + else: + self.print_debug_info('Passive', 3, sender_permid) + + # update sender and other peers in connection list + addto = self.addPeerToConnList(sender_permid, conn) + + if active: + self.print_debug_info('Active', 20, sender_permid) + else: + self.print_debug_info('Passive', 4, sender_permid) + + except Exception, msg: + print_exc() + raise Exception, msg + return True # don't close connection, maybe my problem in handleBuddyCastMessage + + self.blockPeer(sender_permid, self.recv_block_list) + + # update torrent collecting module + #self.data_handler.checkUpdate() + collected_infohashes = buddycast_data.get('collected torrents', []) + if self.torrent_collecting and not self.superpeer: + collected_infohashes += self.getPreferenceHashes(buddycast_data) + self.torrent_collecting.trigger(sender_permid, selversion, collected_infohashes) + + if active: + self.print_debug_info('Active', 21, sender_permid) + else: + self.print_debug_info('Passive', 5, sender_permid) + + if not active: + self.replyBuddyCast(sender_permid, selversion) + + # show activity + buf = dunno2unicode('"'+buddycast_data['name']+'"') + self.launchmany.set_activity(NTFY_ACT_RECOMMEND, buf) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '*****************************************************************************************************' + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "* bc: Got BuddyCast Message from",self.get_peer_info(sender_permid),active + print >>sys.stderr, 
strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '******************************** Yahoo! *************************************************************' + + return True + + + def createPreferenceDictionaryList(self, buddycast_data): + """as of OL 8, preferences are no longer lists of infohashes, but lists of lists containing + infohashes and associated metadata. this method checks which overlay version has been used + and replaces either format by a list of dictionaries, such that the rest of the code can remain + version-agnostic and additional information like torrent ids can be stored along the way""" + + prefs = buddycast_data.get('preferences',[]) + # assume at least one entry below here + if len(prefs) == 0: + return [] + d = [] + + try: + + if not type(prefs[0])==list: + # pre-OLPROTO_VER_EIGHTH + # create dictionary from list of info hashes, extended fields simply aren't set + + d = [dict({'infohash': pref}) for pref in prefs] + + # we shouldn't receive these lists if the peer says he's OL 8. + # let's accept it but complain + if buddycast_data['oversion'] >= OLPROTO_VER_EIGHTH: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'buddycast: received OLPROTO_VER_EIGHTH buddycast data containing old style preferences. only ok if talking to an earlier non-release version' + return d + + # if the single prefs entries are lists, we have a more modern wire format + # currently, there is only one possibility + if buddycast_data['oversion'] >= OLPROTO_VER_EIGHTH: + # create dictionary from list of lists + d = [dict({'infohash': pref[0], + 'search_terms': pref[1], + 'position': pref[2], + 'reranking_strategy': pref[3]}) + for pref in prefs] + else: + raise RuntimeError, 'buddycast: unknown preference protocol, pref entries are lists but oversion= %s:\n%s' % (buddycast_data['oversion'], prefs) + + return d + + except Exception, msg: + print_exc() + raise Exception, msg + return d + + + + + + def getPreferenceHashes(self, buddycast_data): + """convenience function returning the infohashes from the preferences. + returns a list of infohashes, i.e. 
replaces old calls to buddycast_data.get('preferences')""" + return [preference.get('infohash',"") for preference in buddycast_data.get('preferences', [])] + + def handleBuddyCastMessage(self, sender_permid, buddycast_data, selversion): + """ Handle received buddycast message + Add peers, torrents and preferences into database and update last seen + Add fresh peers to candidate list + All database updates caused by buddycast msg should be handled here + """ + + _now = now() + + cache_db_data = {'peer':{},'infohash':Set(),'pref':[]} # peer, updates / pref, pairs + cache_peer_data = {} + + tbs = buddycast_data.pop('taste buddies') + rps = buddycast_data.pop('random peers') + buddycast_data['oversion'] = selversion + + max_tb_sim = 1 + + # include sender itself + bc_data = [buddycast_data] + tbs + rps + for peer in bc_data: + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bc: Learned about peer",peer['ip'] + + peer_permid = peer['permid'] + if peer_permid == self.permid: + continue + age = max(peer.get('age', 0), 0) # From secure overlay version 3, it doesn't include 'age' + last_seen = _now - age + old_last_seen = self.data_handler.getPeerLastSeen(peer_permid) + last_seen = min(max(old_last_seen, last_seen), _now) + oversion = peer.get('oversion', 0) + nfiles = peer.get('nfiles', 0) + self.addRemoteSearchPeer(peer_permid, oversion, nfiles, last_seen) + + cache_peer_data[peer_permid] = {} + cache_peer_data[peer_permid]['last_seen'] = last_seen + #self.data_handler._addPeerToCache(peer_permid, last_seen) + #if selversion >= OLPROTO_VER_FOURTH: + sim = peer.get('similarity', 0) + max_tb_sim = max(max_tb_sim, sim) + if sim > 0: + cache_peer_data[peer_permid]['sim'] = sim + #self.data_handler.addRelativeSim(sender_permid, peer_permid, sim, max_tb_sim) + + if peer_permid != sender_permid: + self.addConnCandidate(peer_permid, last_seen) + + new_peer_data = {} + #new_peer_data['permid'] = peer['permid'] + new_peer_data['ip'] = hostname_or_ip2ip(peer['ip']) + new_peer_data['port'] = peer['port'] + new_peer_data['last_seen'] = last_seen + if peer.has_key('name'): + new_peer_data['name'] = dunno2unicode(peer['name']) # store in db as unicode + cache_db_data['peer'][peer_permid] = new_peer_data + #self.data_handler.addPeer(peer_permid, last_seen, new_peer_data, commit=True) # new peer + + self.limitConnCandidate() + if len(self.connection_candidates) > self.bootstrap_num: + self.bootstrapped = True + + # database stuff + if selversion >= OLPROTO_VER_SIXTH: + stats = {'num_peers':buddycast_data['npeers'],'num_torrents':buddycast_data['nfiles'],'num_prefs':buddycast_data['ndls']} + cache_db_data['peer'][sender_permid].update(stats) + + cache_db_data['peer'][sender_permid]['last_buddycast'] = _now + + prefs = self.createPreferenceDictionaryList(buddycast_data) + buddycast_data['preferences'] = prefs # Nicolas: store this back into buddycast_data because it's used later on gotBuddyCastMessage again + + infohashes = Set(buddycast_data.get('collected torrents', [])) + prefhashes = Set(self.getPreferenceHashes(buddycast_data)) # only accept sender's preference, to avoid pollution + infohashes = infohashes.union(prefhashes) + + cache_db_data['infohash'] = infohashes + #self.data_handler.addInfohashes(infohashes, commit=True) + if prefs: + cache_db_data['pref'] = prefs + #self.data_handler.addPeerPreferences(sender_permid, prefs) + #self.data_handler.increaseBuddyCastTimes(sender_permid, commit=True) + + self.data_handler.handleBCData(cache_db_data, cache_peer_data, sender_permid, 
max_tb_sim) + + def removeFromConnList(self, peer_permid): + removed = 0 + if peer_permid in self.connected_connectable_peers: # Ct + self.connected_connectable_peers.pop(peer_permid) + try: + self.connected_taste_buddies.remove(peer_permid) + except ValueError: + pass + try: + self.connected_random_peers.remove(peer_permid) + except ValueError: + pass + removed = 1 + if peer_permid in self.connected_unconnectable_peers: # Cu + self.connected_unconnectable_peers.pop(peer_permid) + removed = 2 + return removed + + def addPeerToConnList(self, peer_permid, connectable=0): + """ Add the peer to Ct, Cr or Cu """ + + # remove the existing peer from lists so that its status can be updated later + self.removeFromConnList(peer_permid) + + if not self.isConnected(peer_permid): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: cannot add a unconnected peer to conn list", "Round", self.round + return + + _now = now() + + if connectable == 1: + self.addPeerToConnCP(peer_permid, _now) + addto = '(reachable peer)' + else: + self.addPeerToConnUP(peer_permid, _now) + addto = '(peer deemed unreachable)' + + return addto + + def updateTBandRPList(self): + """ Select the top 10 most similar (sim>0) peer to TB and others to RP """ + + nconnpeers = len(self.connected_connectable_peers) + if nconnpeers == 0: + self.connected_taste_buddies = [] + self.connected_random_peers = [] + return + + tmplist = [] + tbs = [] + rps = [] + for permid in self.connected_connectable_peers: + sim = self.data_handler.getPeerSim(permid) + if sim > 0: + tmplist.append([sim, permid]) + else: + rps.append(permid) + tmplist.sort() + tmplist.reverse() + + #ntb = self.max_conn_tb # 10 tb & 10 rp + ntb = min((nconnpeers+1)/2, self.max_conn_tb) # half tb and half rp + if len(tmplist) > 0: + for sim,permid in tmplist[:ntb]: + tbs.append(permid) + ntb = len(tbs) + if len(tmplist) > ntb: + rps = [permid for sim,permid in tmplist[ntb:]] + rps + + # remove the oldest peer from both random peer list and connected_connectable_peers + if len(rps) > self.max_conn_rp: + tmplist = [] + for permid in rps: + connect_time = self.connected_connectable_peers[permid]['connect_time'] + tmplist.append([connect_time, permid]) + tmplist.sort() + tmplist.reverse() + rps = [] + i = 0 + for last_seen,permid in tmplist: + if i < self.max_conn_rp: + rps.append(permid) + else: + self.connected_connectable_peers.pop(permid) + i += 1 + + self.connected_taste_buddies = tbs + self.connected_random_peers = rps + + for p in self.connected_taste_buddies: + assert p in self.connected_connectable_peers + for p in self.connected_random_peers: + assert p in self.connected_connectable_peers + assert len(self.connected_taste_buddies) + len(self.connected_random_peers) <= len(self.connected_connectable_peers) + + + def addPeerToConnCP(self, peer_permid, conn_time): + keys = ('ip', 'port', 'oversion', 'num_torrents') + res = self.data_handler.getPeer(peer_permid, keys) + peer = dict(zip(keys,res)) + peer['connect_time'] = conn_time + self.connected_connectable_peers[peer_permid] = peer + self.updateTBandRPList() + + def addNewPeerToConnList(self, conn_list, max_num, peer_permid, conn_time): + """ Add a peer to a connection list, and pop the oldest peer out """ + + if max_num <= 0 or len(conn_list) < max_num: + conn_list[peer_permid] = conn_time + return None + + else: + oldest_peer = (conn_time+1, None) + initial = 'abcdefghijklmnopqrstuvwxyz' + separator = ':-)' + for p in conn_list: + _conn_time = conn_list[p] + r = randint(0, self.max_conn_tb) + name 
= initial[r] + separator + p + to_cmp = (_conn_time, name) + oldest_peer = min(oldest_peer, to_cmp) + + if conn_time >= oldest_peer[0]: # add it + out_peer = oldest_peer[1].split(separator)[1] + conn_list.pop(out_peer) + conn_list[peer_permid] = conn_time + return out_peer + return peer_permid + + def addPeerToConnUP(self, peer_permid, conn_time): + ups = self.connected_unconnectable_peers + if peer_permid not in ups: + out_peer = self.addNewPeerToConnList(ups, + self.max_conn_up, peer_permid, conn_time) + if out_peer != peer_permid: + return True + return False + + # -------------- reply buddycast, passive thread -------------- # + def replyBuddyCast(self, target_permid, selversion): + """ Reply a buddycast message """ + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '*************** replay buddycast message', show_permid_short(target_permid), self.isConnected(target_permid) + + if not self.isConnected(target_permid): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'buddycast: lost connection while replying buddycast', \ + # "Round", self.round + return + + self.createAndSendBuddyCastMessage(target_permid, selversion, active=False) + + self.print_debug_info('Passive', 8, target_permid) + self.print_debug_info('Passive', 9, target_permid) + + self.next_initiate += 1 # Be idel in next round + self.print_debug_info('Passive', 10) + + + # -------------- handle overlay connections from SecureOverlay ---------- # + def handleConnection(self,exc,permid,selversion,locally_initiated): + if exc is None and permid != self.permid: # add a connection + self.addConnection(permid, selversion, locally_initiated) + else: + self.closeConnection(permid, 'overlayswarm:'+str(exc)) + + if debug: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: handle conn from overlay", exc, \ + self.get_peer_info(permid), "selversion:", selversion, \ + "local_init:", locally_initiated, ctime(now()) + + def addConnection(self, peer_permid, selversion, locally_initiated): + # add connection to connection list + _now = now() + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: addConnection", self.isConnected(peer_permid) + if not self.isConnected(peer_permid): + # SecureOverlay has already added the peer to db + self.connections[peer_permid] = selversion # add a new connection + addto = self.addPeerToConnList(peer_permid, locally_initiated) + + dns = self.get_peer_info(peer_permid, include_permid=False) + buf = '%s %s'%(dns, addto) + self.launchmany.set_activity(NTFY_ACT_MEET, buf) # notify user interface + + if self.torrent_collecting and not self.superpeer: + self.torrent_collecting.trigger(peer_permid, selversion) + + if debug: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: add connection", \ + self.get_peer_info(peer_permid), "to", addto + if self.log: + dns = self.dnsindb(peer_permid) + if dns: + ip,port = dns + self.overlay_log('CONN_ADD', ip, port, show_permid(peer_permid), selversion) + + def closeConnection(self, peer_permid, reason): + """ Close connection with a peer, and remove it from connection lists """ + + if debug: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: close connection:", self.get_peer_info(peer_permid) + + if self.isConnected(peer_permid): + self.connections.pop(peer_permid) + removed = self.removeFromConnList(peer_permid) + if removed == 1: + self.updateTBandRPList() + + if self.log: + dns = self.dnsindb(peer_permid) + if dns: + ip,port = 
dns + self.overlay_log('CONN_DEL', ip, port, show_permid(peer_permid), reason) + + # -------------- print debug info ---------- # + def get_stats(self): + nPeer = len(self.data_handler.peers) + nPref = nPeer #len(self.data_handler.preferences) + nCc = len(self.connection_candidates) + nBs = len(self.send_block_list) + nBr = len(self.recv_block_list) + nSO = -1 # TEMP ARNO len(self.overlay_bridge.debug_get_live_connections()) + nCo = len(self.connections) + nCt = len(self.connected_taste_buddies) + nCr = len(self.connected_random_peers) + nCu = len(self.connected_unconnectable_peers) + return nPeer, nPref, nCc, nBs, nBr, nSO, nCo, nCt, nCr, nCu + + def print_debug_info(self, thread, step, target_permid=None, selversion=0, r=0, addto=''): + if not debug: + return + if DEBUG: + print "bc: *****", thread, str(step), "-", + if thread == 'Active': + if step == 2: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Working:", now() - self.start_time, \ + "seconds since start. Round", self.round, "Time:", ctime(now()) + nPeer, nPref, nCc, nBs, nBr, nSO, nCo, nCt, nCr, nCu = self.get_stats() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: *** Status: nPeer nPref nCc: %d %d %d nBs nBr: %d %d nSO nCo nCt nCr nCu: %d %d %d %d %d" % \ + (nPeer,nPref,nCc, nBs,nBr, nSO,nCo, nCt,nCr,nCu) + if nSO != nCo: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: warning - nSo and nCo is inconsistent" + if nCc > self.max_conn_cand or nCt > self.max_conn_tb or nCr > self.max_conn_rp or nCu > self.max_conn_up: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: warning - nCC or nCt or nCr or nCu overloads" + _now = now() + buf = "" + i = 1 + for p in self.connected_taste_buddies: + buf += "bc: %d taste buddies: "%i + self.get_peer_info(p) + str(_now-self.connected_connectable_peers[p]['connect_time']) + " version: " + str(self.connections[p]) + "\n" + i += 1 + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", buf + + buf = "" + i = 1 + for p in self.connected_random_peers: + buf += "bc: %d random peers: "%i + self.get_peer_info(p) + str(_now-self.connected_connectable_peers[p]['connect_time']) + " version: " + str(self.connections[p]) + "\n" + i += 1 + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", buf + + buf = "" + i = 1 + for p in self.connected_unconnectable_peers: + buf += "bc: %d unconnectable peers: "%i + self.get_peer_info(p) + str(_now-self.connected_unconnectable_peers[p]) + " version: " + str(self.connections[p]) + "\n" + i += 1 + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", buf + buf = "" + totalsim = 0 + nsimpeers = 0 + minsim = 1e10 + maxsim = 0 + sims = [] + for p in self.data_handler.peers: + sim = self.data_handler.peers[p][PEER_SIM_POS] + if sim > 0: + sims.append(sim) + if sims: + minsim = min(sims) + maxsim = max(sims) + nsimpeers = len(sims) + totalsim = sum(sims) + if nsimpeers > 0: + meansim = totalsim/nsimpeers + else: + meansim = 0 + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: * sim peer: %d %.3f %.3f %.3f %.3f\n" % (nsimpeers, totalsim, meansim, minsim, maxsim) + + elif step == 3: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "check blocked peers: Round", self.round + + elif step == 4: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "keep connections with peers: Round", self.round + + elif step == 6: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", 
localtime())," ", "idle loop:", self.next_initiate + + elif step == 9: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bootstrapping: select", self.bootstrap_num, \ + "peers recently seen from Mega Cache" + if self.booted < 0: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: *** bootstrapped recently, so wait for a while" + elif self.booted == 0: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: *** no peers to bootstrap. Try next time" + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: *** bootstrapped, got", len(self.connection_candidates), \ + "peers in Cc. Times of bootstrapped", self.total_bootstrapped_time + buf = "" + for p in self.connection_candidates: + buf += "bc: * cand:" + `p` + "\n" + buf += "\nbc: Remote Search Peer Candidates:\n" + for p in self.remote_search_peer_candidates: + buf += "bc: * remote: %d "%p[0] + self.get_peer_info(p[1]) + "\n" + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", buf + + elif step == 11: + buf = "select " + if r == 0: + buf += "a most similar taste buddy" + else: + buf += "a most likely online random peer" + buf += " from Cc for buddycast out\n" + + if target_permid: + buf += "bc: *** got target %s sim: %s last_seen: %s" % \ + (self.get_peer_info(target_permid), + self.data_handler.getPeerSim(target_permid), + ctime(self.data_handler.getPeerLastSeen(target_permid))) + else: + buf += "bc: *** no target to select. Skip this round" + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", buf + + elif step == 12: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "connect a peer to start buddycast", self.get_peer_info(target_permid) + + elif step == 13: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "block connected peer in send block list", \ + self.get_peer_info(target_permid)#, self.send_block_list[target_permid] + + elif step == 14: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "remove connected peer from Cc", \ + self.get_peer_info(target_permid)#, "removed?", target_permid not in self.connection_candidates + + elif step == 15: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "peer is connected", \ + self.get_peer_info(target_permid), "overlay version", selversion, currentThread().getName() + + elif step == 16: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "create buddycast to send to", self.get_peer_info(target_permid) + + elif step == 17: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "send buddycast msg to", self.get_peer_info(target_permid) + + elif step == 18: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "receive buddycast message from peer %s" % self.get_peer_info(target_permid) + + elif step == 19: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "store peers from incoming msg to cache and db" + + elif step == 20: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "add connected peer %s to connection list %s" % (self.get_peer_info(target_permid), addto) + + elif step == 21: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "block connected peer in recv block list", \ + self.get_peer_info(target_permid), self.recv_block_list[target_permid] + + if thread == 'Passive': + if step == 2: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "receive buddycast message from peer 
%s" % self.get_peer_info(target_permid) + + elif step == 3: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "store peers from incoming msg to cache and db" + + elif step == 4: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "add connected peer %s to connection list %s" % (self.get_peer_info(target_permid), addto) + + elif step == 5: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "block connected peer in recv block list", \ + self.get_peer_info(target_permid), self.recv_block_list[target_permid] + + elif step == 6: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "create buddycast to reply to", self.get_peer_info(target_permid) + + elif step == 7: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "reply buddycast msg to", self.get_peer_info(target_permid) + + elif step == 8: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "block connected peer in send block list", \ + self.get_peer_info(target_permid), self.send_block_list[target_permid] + + elif step == 9: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "remove connected peer from Cc", \ + self.get_peer_info(target_permid)#, "removed?", target_permid not in self.connection_candidates + + elif step == 10: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "add idle loops", self.next_initiate + sys.stdout.flush() + sys.stderr.flush() + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: *****", thread, str(step), "-", + + def getAllTasteBuddies(self): + return self.connected_taste_buddies + + def addRemoteSearchPeer(self, permid, oversion, ntorrents, last_seen): + if oversion >= OLPROTO_VER_SIXTH and ntorrents >= REMOTE_SEARCH_PEER_NTORRENTS_THRESHOLD: + insort(self.remote_search_peer_candidates, [last_seen,permid]) + if len(self.remote_search_peer_candidates) > self.num_search_cand: + self.remote_search_peer_candidates.pop(0) + + def getRemoteSearchPeers(self, npeers): + if len(self.remote_search_peer_candidates) > npeers: + _peers = sample(self.remote_search_peer_candidates, npeers) # randomly select + else: + _peers = self.remote_search_peer_candidates + peers = [permid for last_seen,permid in _peers] + return peers + + +class DataHandler: + def __init__(self, launchmany, overlay_bridge, max_num_peers=2500): + self.launchmany = launchmany + self.overlay_bridge = overlay_bridge + self.config = self.launchmany.session.sessconfig # should be safe at startup + # --- database handlers --- + self.peer_db = launchmany.peer_db + self.superpeer_db = launchmany.superpeer_db + self.torrent_db = launchmany.torrent_db + self.mypref_db = launchmany.mypref_db + self.pref_db = launchmany.pref_db + # self.term_db = launchmany.term_db + self.friend_db = launchmany.friend_db + self.myfriends = Set() # FIXME: implement friends + self.myprefs = [] # torrent ids + self.peers = {} # peer_id: [similarity, last_seen, prefs(array('l',[torrent_id])] + self.default_peer = [0, 0, None] + self.owners = {} # torrent_ids_of_mine: Set(peer_id) + self.permid = self.getMyPermid() + self.nprefs = 0 + self.ntorrents = 0 + self.last_check_ntorrents = 0 + #self.total_pref_changed = 0 + # how many peers to load into cache from db + #self.max_peer_in_db = max_num_peers + self.max_num_peers = min(max(max_num_peers, 100), 2500) # at least 100, at most 2500 + #self.time_sim_weight = 4*60*60 # every 4 hours equals to a point of similarity + # after added some many (user, item) 
pairs, update sim of item to item + #self.update_i2i_threshold = 100 + #self.npeers = self.peer_db.size() - self.superpeer_db.size() + self.old_peer_num = 0 + self.buddycast_core = None + self.all_peer_list = None + self.num_peers_ui = None + self.num_torrents_ui = None + self.cached_updates = {'peer':{},'torrent':{}} + + # Subscribe BC to updates to MyPreferences, such that we can add/remove + # them from our download history that we send to other peers. + self.launchmany.session.add_observer(self.sesscb_ntfy_myprefs,NTFY_MYPREFERENCES,[NTFY_INSERT,NTFY_DELETE]) + + def commit(self): + self.peer_db.commit() + + def register_buddycast_core(self, buddycast_core): + self.buddycast_core = buddycast_core + + def getMyName(self, name=''): + return self.config.get('nickname', name) + + def getMyIp(self, ip=''): + return self.launchmany.get_ext_ip() + + def getMyPort(self, port=0): + return self.launchmany.listen_port + + def getMyPermid(self, permid=''): + return self.launchmany.session.get_permid() + + def getPeerID(self, permid): + if isinstance(permid, int) and permid > 0: + return permid + else: + return self.peer_db.getPeerID(permid) + + def getTorrentID(self, infohash): + if isinstance(infohash, int) and infohash > 0: + return infohash + else: + return self.peer_db.getPeerID(permid) + + def getPeerPermid(self, peer_id): + return self.peer_db.getPermid(peer_id) + + def updatePort(self, port): + self.my_db.put('port', port) + + def postInit(self, delay=4, batch=50, update_interval=10, npeers=None, updatesim=True): + # build up a cache layer between app and db + if npeers is None: + npeers = self.max_num_peers + self.updateMyPreferences() + self.loadAllPeers(npeers) + if updatesim: + self.updateAllSim(delay, batch, update_interval) + + def updateMyPreferences(self, num_pref=None): + # get most recent preferences, and sort by torrent id + res = self.mypref_db.getAll('torrent_id', order_by='creation_time desc', limit=num_pref) + self.myprefs = [p[0] for p in res] + + for torrent_id in self.myprefs: + self.updateOwners(torrent_id) + + def updateOwners(self, torrent_id): + res = self.pref_db.getAll('peer_id', where='torrent_id=%d'%torrent_id) + self.owners[torrent_id] = Set([p[0] for p in res]) + + def loadAllPeers(self, num_peers=None): + """ Read peers from db and put them in self.peers. + At most num_peers (=self.max_num_peers) recently seen peers can be cached. 
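As a rough picture of the caches postInit() builds: myprefs holds the local download history as torrent ids, owners is an inverted index from each of those torrent ids to the set of peer ids that also have it, and peers maps a peer id to [similarity, last_seen, array of preferred torrent ids]. The sketch below builds the same shapes from pre-fetched rows; the row formats are assumptions for illustration, not the actual database-handler API.

from array import array

# Illustrative construction of DataHandler's in-memory caches from plain
# rows (the real code pulls these from the SQLite cache-db handlers).
# my_rows:   [torrent_id, ...]                        -> myprefs
# peer_rows: [(peer_id, similarity, last_seen), ...]  -> peers
# pref_rows: [(peer_id, torrent_id), ...]             -> peers[...] prefs, owners
def build_caches(my_rows, peer_rows, pref_rows):
    myprefs = sorted(my_rows)
    peers = dict((pid, [sim, last_seen, array('l', [])])
                 for pid, sim, last_seen in peer_rows)
    owners = dict((tid, set()) for tid in myprefs)
    for pid, tid in pref_rows:
        if pid in peers:
            peers[pid][2].append(tid)
        if tid in owners:
            owners[tid].add(pid)
    return myprefs, peers, owners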
+ + """ + peer_values = self.peer_db.getAll(['peer_id','similarity','last_seen'], order_by='last_connected desc', limit=num_peers) + self.peers = dict(zip([p[0] for p in peer_values], [[p[1],p[2],array('l', [])] for p in peer_values])) + + user_item_pairs = self.pref_db.getRecentPeersPrefs('last_connected',num_peers) + self.nprefs = len(user_item_pairs) + + for pid,tid in user_item_pairs: + self.peers[pid][PEER_PREF_POS].append(tid) + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '**************** loadAllPeers', len(self.peers) + +# for pid in self.peers: +# self.peers[pid][PEER_PREF_POS].sort() # keep in order + + def updateAllSim(self, delay=4, batch=50, update_interval=10): + self._updateAllPeerSim(delay, batch, update_interval) # 0.156 second + self._updateAllItemRel(delay, batch, update_interval) # 0.875 second + # Tuning batch (without index relevance) + + # batch = 25: 0.00 0.22 0.58 + # batch = 50: min/avg/max execution time: 0.09 0.29 0.63 second + # batch = 100: 0.16 0.47 0.95 + # update_interval=10 + # 50000 updates take: 50000 / 50 * (10+0.3) / 3600 = 3 hours + # cpu load: 0.3/10 = 3% + + # With index relevance: + # batch = 50: min/avg/max execution time: 0.08 0.62 1.39 second + # batch = 25: 0.00 0.41 1.67 + # update_interval=5, batch=25 + # 50000 updates take: 50000 / 25 * (5+0.4) / 3600 = 3 hours + # cpu load: 0.4/5 = 8% + + def cacheSimUpdates(self, update_table, updates, delay, batch, update_interval): + self.cached_updates[update_table].update(updates) + self.overlay_bridge.add_task(lambda:self.checkSimUpdates(batch, update_interval), delay, 'checkSimUpdates') + + def checkSimUpdates(self, batch, update_interval): + last_update = 0 + if self.cached_updates['peer']: + updates = [] + update_peers = self.cached_updates['peer'] + keys = update_peers.keys() + shuffle(keys) # to avoid always update the same items when cacheSimUpdates is called frequently + for key in keys[:batch]: + updates.append((update_peers.pop(key), key)) + self.overlay_bridge.add_task(lambda:self.peer_db.updatePeerSims(updates), last_update + update_interval, 'updatePeerSims') + last_update += update_interval + + if self.cached_updates['torrent']: + updates = [] + update_peers = self.cached_updates['torrent'] + keys = update_peers.keys() + shuffle(keys) + for key in keys[:batch]: + updates.append((update_peers.pop(key), key)) + self.overlay_bridge.add_task(lambda:self.torrent_db.updateTorrentRelevances(updates), last_update + update_interval, 'updateTorrentRelevances') + last_update += update_interval + + if self.cached_updates['peer'] or self.cached_updates['torrent']: + self.overlay_bridge.add_task(lambda:self.checkSimUpdates(batch, update_interval), last_update+0.001, 'checkSimUpdates') + + def _updateAllPeerSim(self, delay, batch, update_interval): + # update similarity to all peers to keep consistent + + if self.old_peer_num == len(self.peers): # if no new peers, don't update + return + starttime = time() + self.nprefs = 0 # total nprefs must be updated before compute similarity + for peer_id in self.peers: + self.nprefs += len(self.peers[peer_id][PEER_PREF_POS]) + + updates = {} + for peer_id in self.peers: + oldsim = self.peers[peer_id][PEER_SIM_POS] + if not self.peers[peer_id][PEER_PREF_POS]: + continue + self.updateSimilarity(peer_id, False) + sim = self.peers[peer_id][PEER_SIM_POS] + if abs(sim - oldsim) > oldsim*0.05: + updates[peer_id] = sim + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '****************** update peer sim', len(updates), 
len(self.peers) + if updates: + self.cacheSimUpdates('peer', updates, delay, batch, update_interval) + + def _updateAllItemRel(self, delay, batch, update_interval): + # update all item's relevance + # Relevance of I = Sum(Sim(Users who have I)) + Poplarity(I) + # warning: this function may take 5 seconds to commit to the database + if len(self.peers) == 0: + return + tids = {} + nsimpeers = 0 + for peer_id in self.peers: + if self.peers[peer_id][PEER_PREF_POS]: + sim = self.peers[peer_id][PEER_SIM_POS] + if sim > 0: + nsimpeers += 1 + prefs = self.peers[peer_id][PEER_PREF_POS] + for tid in prefs: + if tid not in tids: + tids[tid] = [0,0] + tids[tid][0] += sim + tids[tid][1] += 1 + + if len(tids) == 1: + return + + res = self.torrent_db.getTorrentRelevances(tids) + if res: + old_rels = dict(res) + else: + old_rels = {} + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '********* update all item rel', len(old_rels), len(tids) #, old_rels[:10] + + for tid in tids.keys(): + tids[tid] = tids[tid][0]/tids[tid][1] + tids[tid][1] + old_rel = old_rels.get(tid, None) + if old_rel != None and abs(old_rel - tids[tid]) <= old_rel*0.05: + tids.pop(tid) # don't update db + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '**************--- update all item rel', len(tids), len(old_rels) #, len(self.peers), nsimpeers, tids.items()[:10] # 37307 2500 + if tids: + self.cacheSimUpdates('torrent', tids, delay, batch, update_interval) + + + def sesscb_ntfy_myprefs(self,subject,changeType,objectID,*args): + """ Called by SessionCallback thread """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bc: sesscb_ntfy_myprefs:",subject,changeType,`objectID` + if subject == NTFY_MYPREFERENCES: + infohash = objectID + if changeType == NTFY_INSERT: + op_my_pref_lambda = lambda:self.addMyPref(infohash) + elif changeType == NTFY_DELETE: + op_my_pref_lambda = lambda:self.delMyPref(infohash) + # Execute on OverlayThread + self.overlay_bridge.add_task(op_my_pref_lambda, 0) + + + def addMyPref(self, infohash): + infohash_str=bin2str(infohash) + torrentdata = self.torrent_db.getOne(('secret', 'torrent_id'), infohash=infohash_str) + if not torrentdata: + return + + secret = torrentdata[0] + torrent_id = torrentdata[1] + if secret: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'bc: Omitting secret download: %s' % torrentdata.get('info', {}).get('name', 'unknown') + return # do not buddycast secret downloads + + if torrent_id not in self.myprefs: + insort(self.myprefs, torrent_id) + self.updateOwners(torrent_id) + self.old_peer_num = 0 + self.updateAllSim() # time-consuming + #self.total_pref_changed += self.update_i2i_threshold + + def delMyPref(self, infohash): + torrent_id = self.torrent_db.getTorrentID(infohash) + if torrent_id in self.myprefs: + self.myprefs.remove(torrent_id) + self.owners.pop(torrent_id) + self.old_peer_num = 0 + self.updateAllSim() + #self.total_pref_changed += self.update_i2i_threshold + + def initRemoteSearchPeers(self, num_peers=10): + peer_values = self.peer_db.getAll(['permid','oversion','num_torrents','last_seen'], order_by='last_seen desc', limit=num_peers) + for p in peer_values: + p = list(p) + p[0] = str2bin(p[0]) + self.buddycast_core.addRemoteSearchPeer(*tuple(p)) + pass + + + def updatePeerPref(self, peer_permid, cur_prefs): + peer_id = self.getPeerID(peer_permid) + cur_prefs_array = array('l', cur_prefs) + self.peers[peer_id][PEER_PREF_POS] = cur_prefs_array + + overlap = 
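_updateAllItemRel above condenses to a per-torrent score: average the similarity of the peers that have the torrent, add its popularity (the owner count), and skip the database write when the new value stays within 5% of the stored one. A minimal sketch, assuming plain dicts in place of the peer cache and the torrent db handler:

# Relevance of a torrent = mean similarity of its owners + owner count,
# written back only when it drifts more than 5% from the stored value.
# peers: {peer_id: (sim, [torrent_id, ...])}, old_rels: {torrent_id: rel}
def item_relevance_updates(peers, old_rels):
    acc = {}                      # torrent_id -> [sim_sum, owner_count]
    for sim, prefs in peers.values():
        for tid in prefs:
            entry = acc.setdefault(tid, [0.0, 0])
            entry[0] += sim
            entry[1] += 1
    updates = {}
    for tid, (sim_sum, count) in acc.items():
        rel = sim_sum / count + count
        old = old_rels.get(tid)
        if old is None or abs(old - rel) > old * 0.05:
            updates[tid] = rel
    return updates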
Set(self.owners).intersection(Set(self.peers[peer_id][PEER_PREF_POS])) + if len(overlap) > 0: + for torrent_id in overlap: + self.owners[torrent_id].add(peer_id) + + def getMyLivePreferences(self, selversion, num=0): + """ Get a number of my preferences. Get all if num==0 """ + if selversion>=OLPROTO_VER_EIGHTH: + return self.mypref_db.getRecentLivePrefListWithClicklog(num) + else: + return self.mypref_db.getRecentLivePrefList(num) + + def getPeerSim(self, peer_permid, read_db=False, raw=False): + if read_db: + sim = self.peer_db.getPeerSim(peer_permid) + else: + peer_id = self.getPeerID(peer_permid) + if peer_id is None or peer_id not in self.peers: + sim = 0 + else: + sim = self.peers[peer_id][PEER_SIM_POS] + if sim is None: + sim = 0 + if not raw: + # negative value means it is calculated from other peers, + # not itself. See addRelativeSim() + return abs(sim) + else: + return sim + + def getPeerLastSeen(self, peer_permid): + peer_id = self.getPeerID(peer_permid) + return self.getPeerIDLastSeen(peer_id) + + def getPeerIDLastSeen(self, peer_id): + if not peer_id or peer_id not in self.peers: + return 0 + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '***** getPeerLastSeen', self.peers[pefer_permid], `peer_permid` + return self.peers[peer_id][PEER_LASTSEEN_POS] + + def getPeerPrefList(self, peer_permid): + """ Get a number of peer's preference list. Get all if num==0. + If live==True, dead torrents won't include + """ + peer_id = self.getPeerID(peer_permid) + if peer_id not in self.peers: + return self.pref_db.getPrefList(peer_permid) + else: + return self.peers[peer_id][PEER_PREF_POS] + +# def addPeer(self, peer_permid, last_seen, peer_data=None, commit=True): +# """ add a peer from buddycast message to both cache and db """ +# +# if peer_permid != self.permid: +# if peer_data is not None: +# self._addPeerToDB(peer_permid, last_seen, peer_data, commit=commit) +# self._addPeerToCache(peer_permid, last_seen) + + def _addPeerToCache(self, peer_permid, last_seen): + """ add a peer to cache """ + # Secure Overlay should have added this peer to database. 
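getPeerPrefList() and _addPeerToCache() share one access pattern: consult the in-memory peers cache first and fall back to the database only for peers that were never cached, refreshing last_seen when a cached entry is touched again. A small sketch of that pattern, with db_prefs and db_peer standing in for the pref_db/peer_db accessors:

# Cache-first access with a database fallback for uncached peers, the
# pattern used by getPeerPrefList() and _addPeerToCache(). db_prefs and
# db_peer are stand-ins for the real pref_db / peer_db calls.
def cached_pref_list(peers, peer_id, db_prefs):
    entry = peers.get(peer_id)          # [sim, last_seen, prefs]
    if entry is None:
        return db_prefs(peer_id)        # uncached peer: hit the db
    return entry[2]

def touch_peer(peers, peer_id, last_seen, db_peer):
    entry = peers.get(peer_id)
    if entry is None:                   # pull sim + prefs from the db once
        sim, prefs = db_peer(peer_id)
        peers[peer_id] = [sim, last_seen, prefs]
    else:
        entry[1] = last_seen            # cached: refresh last_seen only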
+ if peer_permid == self.permid: + return + peer_id = self.getPeerID(peer_permid) + assert peer_id != None, `peer_permid` + if peer_id not in self.peers: + sim = self.peer_db.getPeerSim(peer_permid) + peerprefs = self.pref_db.getPrefList(peer_permid) # [torrent_id] + self.peers[peer_id] = [last_seen, sim, array('l', peerprefs)] # last_seen, similarity, pref + else: + self.peers[peer_id][PEER_LASTSEEN_POS] = last_seen + + def _addPeerToDB(self, peer_permid, peer_data, commit=True): + + if peer_permid == self.permid: + return + new_peer_data = {} + try: + new_peer_data['permid'] = peer_data['permid'] + new_peer_data['ip'] = hostname_or_ip2ip(peer_data['ip']) + new_peer_data['port'] = peer_data['port'] + new_peer_data['last_seen'] = peer_data['last_seen'] + if peer_data.has_key('name'): + new_peer_data['name'] = dunno2unicode(peer_data['name']) # store in db as unicode + + self.peer_db.addPeer(peer_permid, new_peer_data, update_dns=True, commit=commit) + + except KeyError: + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: _addPeerToDB has KeyError" + except socket.gaierror: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bc: _addPeerToDB cannot find host by name", peer_data['ip'] + except: + print_exc() + + def addInfohashes(self, infohash_list, commit=True): + for infohash in infohash_list: + self.torrent_db.addInfohash(infohash, commit=False) # it the infohash already exists, it will skip it + if commit: + self.torrent_db.commit() + + def addPeerPreferences(self, peer_permid, prefs, commit=True): + """ add a peer's preferences to both cache and db """ + + if peer_permid == self.permid: + return 0 + + cur_prefs = self.getPeerPrefList(peer_permid) + if not cur_prefs: + cur_prefs = [] + prefs2add = [] + for pref in prefs: + infohash = pref['infohash'] # Nicolas: new dictionary format of OL 8 preferences + torrent_id = self.torrent_db.getTorrentID(infohash) + if not torrent_id: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "buddycast: DB Warning: infohash", bin2str(infohash), "should have been inserted into db, but was not found" + continue + pref['torrent_id'] = torrent_id + if torrent_id not in cur_prefs: + prefs2add.append(pref) + cur_prefs.append(torrent_id) + + if len(prefs2add) > 0: + self.pref_db.addPreferences(peer_permid, prefs2add, is_torrent_id=True, commit=commit) + self.updatePeerPref(peer_permid, cur_prefs) + self.nprefs += len(prefs2add) + peer_id = self.getPeerID(peer_permid) + self.updateSimilarity(peer_id, commit=commit) + + def updateSimilarity(self, peer_id, update_db=True, commit=True): + """ update a peer's similarity """ + + if len(self.myprefs) == 0: + return + sim = self.LMP2PSimilarity(peer_id) + self.peers[peer_id][PEER_SIM_POS] = sim + if update_db and sim>0: + self.peer_db.updatePeerSims([(sim,peer_id)], commit=commit) + + def LMP2PSimilarity(self, peer_id): + peer_pref = self.peers[peer_id][PEER_PREF_POS] + sim = P2PSimLM(peer_id, self.myprefs, peer_pref, self.owners, self.nprefs, mu=1.0) + return sim + +# def increaseBuddyCastTimes(self, peer_permid, commit=True): +# self.peer_db.updateTimes(peer_permid, 'buddycast_times', 1, commit=False) +# self.peer_db.updatePeer(peer_permid, commit=commit, last_buddycast=now()) + + def getPeer(self, permid, keys=None): + return self.peer_db.getPeer(permid, keys) + + def addRelativeSim(self, sender_permid, peer_permid, sim, max_sim): + # Given Sim(I, A) and Sim(A, B), predict Sim(I, B) + # Sim(I, B) = Sim(I, A)*Sim(A, B)/Max(Sim(A,B)) for all 
B + old_sim = self.getPeerSim(peer_permid, raw=True) + if old_sim > 0: # its similarity has been calculated based on its preferences + return + old_sim = abs(old_sim) + sender_sim = self.getPeerSim(sender_permid) + new_sim = sender_sim*sim/max_sim + if old_sim == 0: + peer_sim = new_sim + else: + peer_sim = (new_sim + old_sim)/2 + peer_sim = -1*peer_sim + # using negative value to indicate this sim comes from others + peer_id = self.getPeerID(peer_permid) + self.peers[peer_id][PEER_SIM_POS] = peer_sim + + def get_npeers(self): + if self.num_peers_ui is None: + return len(self.peers) # changed to this according to Maarten's suggestion + else: + return self.num_peers_ui + + def get_ntorrents(self): + if self.num_torrents_ui is None: + _now = now() + if _now - self.last_check_ntorrents > 5*60: + self.ntorrents = self.torrent_db.getNumberCollectedTorrents() + self.last_check_ntorrents = _now + return self.ntorrents + else: + return self.num_torrents_ui + + def get_nmyprefs(self): + return len(self.myprefs) + +# def updatePeerLevelStats(self,permid,npeers,ntorrents,nprefs,commit=True): +# d = {'num_peers':npeers,'num_torrents':ntorrents,'num_prefs':nprefs} +# self.peer_db.updatePeer(permid, commit=commit, **d) + +# def getAllPeerList(self): +# return self.all_peer_list +# +# def removeAllPeerList(self): +# self.all_peer_list = None +# +# def setNumPeersFromUI(self, num): +# self.num_peers_ui = num +# +# def setNumTorrentsFromUI(self, num): # not thread safe +# self.num_torrents_ui = num + + def handleBCData(self, cache_db_data, cache_peer_data, sender_permid, max_tb_sim): + #self.data_handler.addPeer(peer_permid, last_seen, new_peer_data, commit=True) # new peer + #self.data_handler.increaseBuddyCastTimes(sender_permid, commit=True) + #self.data_handler.addInfohashes(infohashes, commit=True) + + #self.data_handler._addPeerToCache(peer_permid, last_seen) + #self.data_handler.addRelativeSim(sender_permid, peer_permid, sim, max_tb_sim) + + #self.data_handler.addPeerPreferences(sender_permid, prefs) + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bc: handleBCData:",`cache_db_data` + + + ADD_PEER = 1 + UPDATE_PEER = 2 + ADD_INFOHASH = 3 + + peer_data = cache_db_data['peer'] + db_writes = [] + for permid in peer_data: + new_peer = peer_data[permid] + old_peer = self.peer_db.getPeer(permid) + if not old_peer: + if permid == sender_permid: + new_peer['buddycast_times'] = 1 + db_writes.append((ADD_PEER, permid, new_peer)) + else: + #print old_peer + old_last_seen = old_peer['last_seen'] + new_last_seen = new_peer['last_seen'] + if permid == sender_permid: + if not old_peer['buddycast_times']: + new_peer['buddycast_times'] = 1 + else: + new_peer['buddycast_times'] = + 1 + if not old_last_seen or new_last_seen > old_last_seen + 4*60*60: + # don't update if it was updated in 4 hours + for k in new_peer.keys(): + if old_peer[k] == new_peer[k]: + new_peer.pop(k) + if new_peer: + db_writes.append((UPDATE_PEER, permid, new_peer)) + + for infohash in cache_db_data['infohash']: + tid = self.torrent_db.getTorrentID(infohash) + if tid is None: + db_writes.append((ADD_INFOHASH, infohash)) + + for item in db_writes: + if item[0] == ADD_PEER: + permid = item[1] + new_peer = item[2] + # Arno, 2008-09-17: Don't use IP data from BC message, network info gets precedence + updateDNS = (permid != sender_permid) + self.peer_db.addPeer(permid, new_peer, update_dns=updateDNS, commit=False) + elif item[0] == UPDATE_PEER: + permid = item[1] + new_peer = item[2] + # Arno, 2008-09-17: Don't use IP data 
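addRelativeSim above estimates similarity for peers we hold no preference data for: scale the sender's own similarity by the similarity it reports for the peer, normalise by the largest similarity in the message, average with any previous estimate, and store the result negated so getPeerSim can tell a second-hand estimate from a value computed from real preference overlap. A compact restatement, assuming a plain dict as the similarity cache:

# Sim(I, B) ~= Sim(I, A) * Sim(A, B) / max(Sim(A, *)), stored negated to
# mark it as an estimate propagated via peer A rather than a value
# computed from B's own preference list (cf. getPeerSim(raw=True)).
def add_relative_sim(peers, peer_id, sender_sim, reported_sim, max_sim):
    old = peers.get(peer_id, 0) or 0
    if old > 0:                 # already computed from real preferences
        return
    new = sender_sim * reported_sim / float(max_sim)
    old = abs(old)
    estimate = new if old == 0 else (new + old) / 2.0
    peers[peer_id] = -estimate  # negative = second-hand estimate

def peer_sim(peers, peer_id, raw=False):
    sim = peers.get(peer_id, 0) or 0
    return sim if raw else abs(sim)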
from BC message, network info gets precedence + updateDNS = (permid != sender_permid) + if not updateDNS: + if 'ip' in new_peer: + del new_peer['ip'] + if 'port' in new_peer: + del new_peer['port'] + self.peer_db.updatePeer(permid, commit=False, **new_peer) + elif item[0] == ADD_INFOHASH: + infohash = item[1] + self.torrent_db.addInfohash(infohash, commit=False) + + #self.torrent_db._db.show_sql(1) + self.torrent_db.commit() + #self.torrent_db._db.show_sql(0) + + for item in db_writes: + if item[0] == ADD_PEER or item[0] == UPDATE_PEER: + permid = item[1] + new_peer = item[2] + last_seen = new_peer['last_seen'] + self._addPeerToCache(permid, last_seen) + + for permid in peer_data: + if 'sim' in peer_data[permid]: + sim = peer_data[permid]['sim'] + self.addRelativeSim(sender_permid, permid, sim, max_tb_sim) + + #self.torrent_db._db.show_sql(1) + self.torrent_db.commit() + #self.torrent_db._db.show_sql(0) + + # Nicolas: moved this block *before* the call to addPeerPreferences because with the clicklog, + # this in fact writes to several different databases, so it's easier to tell it to commit + # right away. hope this is ok + + # Nicolas 2009-03-30: thing is that we need to create terms and their generated ids, forcing at least one commit in-between + # have to see later how this might be optimized. right now, there's three commits: + # before addPeerPreferences, after bulk_insert, and after storing clicklog data + + if cache_db_data['pref']: + self.addPeerPreferences(sender_permid, + cache_db_data['pref'], + commit=True) + + + #print hash(k), peer_data[k] + #cache_db_data['infohash'] + #cache_db_data['pref'] diff --git a/tribler-mod/Tribler/Core/BuddyCast/moderationcast.py b/tribler-mod/Tribler/Core/BuddyCast/moderationcast.py new file mode 100644 index 0000000..1cc5452 --- /dev/null +++ b/tribler-mod/Tribler/Core/BuddyCast/moderationcast.py @@ -0,0 +1,456 @@ +from time import localtime, strftime +# Written by Vincent Heinink and Rameez Rahman +# see LICENSE.txt for license information +# + +from Tribler.Core.BitTornado.BT1.MessageID import MODERATIONCAST_HAVE, MODERATIONCAST_REQUEST, MODERATIONCAST_REPLY +from Tribler.Core.BitTornado.bencode import bencode, bdecode +from Tribler.Core.BuddyCast.moderationcast_util import * +from Tribler.Core.CacheDB.CacheDBHandler import ModerationCastDBHandler +from Tribler.Core.CacheDB.sqlitecachedb import SQLiteCacheDB, bin2str, str2bin, NULL +from Tribler.Core.Overlay.permid import permid_for_user +from Tribler.Core.Statistics.Logger import OverlayLogger +from Tribler.Core.Utilities.utilities import * + +from base64 import decodestring +from binascii import hexlify +from time import time +from traceback import print_exc +from types import StringType, ListType, DictType + +DEBUG_UI = False +DEBUG = False #Default debug +debug = False #For send-errors and other low-level stuff + +AUTO_MODERATE = False #Automatically moderate content, with bogus moderations +AUTO_MODERATE_INTERVAL = 1 #Number of seconds between creation of moderations + +class ModerationCastCore: + """ ModerationCastCore is responsible for sending and receiving: + MODERATIONCAST_HAVE, MODERATIONCAST_REQUEST, and MODERATIONCAST_REPLY-messages + """ + + ################################ + def __init__(self, data_handler, secure_overlay, session, buddycast_interval_function, log = '', dnsindb = None): + """ Returns an instance of this class + """ + #Keep reference to interval-function of BuddycastFactory + self.interval = buddycast_interval_function + self.data_handler = data_handler + 
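ModerationCastCore implements a three-step pull gossip: a peer advertises (infohash, timestamp) pairs in a HAVE message, the receiver requests the infohashes it lacks or only holds an older moderation for, and the sender answers with the full moderation records. The sketch below shows that exchange over plain Python data structures; in the real code each payload is bencoded and prefixed with the corresponding MODERATIONCAST_* message id before being handed to the secure overlay.

# HAVE -> REQUEST -> REPLY over plain data structures. `local` maps
# infohash -> moderation record with a 'time_stamp' field.
def make_have(local, limit):
    # Simplified: the real createModerationCastHaveMessage mixes recent
    # and random own/forwarded moderations up to the configured caps.
    return [(h, m['time_stamp']) for h, m in list(local.items())[:limit]]

def make_request(local, have_msg):
    # Ask only for moderations we lack or that are newer than ours.
    return [h for h, ts in have_msg
            if h not in local or local[h]['time_stamp'] < ts]

def make_reply(local, request_msg):
    return [local[h] for h in request_msg if h in local]

def merge_reply(local, reply_msg):
    # The real code delegates this decision to
    # ModerationCastDBHandler.updateModeration(); a timestamp check is
    # assumed here for illustration.
    for mod in reply_msg:
        h = mod['infohash']
        if h not in local or local[h]['time_stamp'] < mod['time_stamp']:
            local[h] = mod

On the receive side each handler additionally enforces the configured size caps (max_have_length, max_request_length) and validates the bdecoded structure before acting, as the got*Message methods further down show.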
self.dnsindb = dnsindb + self.log = log + self.secure_overlay = secure_overlay + self.moderationcastdb = ModerationCastDBHandler.getInstance() + self.my_permid = self.moderationcastdb.my_permid + self.session = session + + self.max_have_length = SINGLE_HAVE_LENGTH * session.get_moderationcast_moderations_per_have() + self.max_request_length = SINGLE_REQUEST_LENGTH * session.get_moderationcast_moderations_per_have() + + #Reference to buddycast-core, set by the buddycast-core (as it is created by the + #buddycast-factory after calling this constructor). + self.buddycast_core = None + + #Debug-interface + if DEBUG_UI: + from moderationcast_test import ModerationCastTest + ModerationCastTest(self) + + #Extend logging with ModerationCAST-messages and status + if self.log: + self.overlay_log = OverlayLogger.getInstance(self.log) + ##self.dnsindb = self.data_handler.get_dns_from_peerdb # Arno, 2009-05-15: Don't see why this must be diff. Don't exist anymore anyway + + if AUTO_MODERATE: + assert AUTO_MODERATE_INTERVAL > 0 + from moderationcast_experiment import BogusAutoModerator + self.auto_moderator = BogusAutoModerator(AUTO_MODERATE_INTERVAL) + + def initialized(self): + return self.buddycast_core is not None + + ################################ + def createAndSendModerationCastHaveMessage(self, target_permid, selversion): + + moderationcast_data = self.createModerationCastHaveMessage(target_permid) + if len(moderationcast_data) == 0: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "There are no moderations.. hence we do not send" + return + moderationcast_msg = bencode(moderationcast_data) + + if self.log: + dns = self.dnsindb(target_permid) + if dns: + ip,port = dns + MSG_ID = "MODERATIONCAST_HAVE" + msg = moderationCastHaveMsgToString(moderationcast_data) + self.overlay_log('SEND_MSG', ip, port, show_permid(target_permid), selversion, MSG_ID, msg) + + data = MODERATIONCAST_HAVE+moderationcast_msg + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Sending Moderationcast Have Msg", moderationCastHaveMsgToString(moderationcast_data) + self.secure_overlay.send(target_permid, data, self.moderationcastSendCallback) + + ################################ + def createModerationCastHaveMessage(self, target_permid): + """ Create a MODERATIONCAST_HAVE message """ + + #Select latest own moderations + size = self.session.get_moderationcast_recent_own_moderations_per_have() + info = self.moderationcastdb.recentOwnModerations(size) + + #Add random own moderations + size += self.session.get_moderationcast_random_own_moderations_per_have() + random_own = self.moderationcastdb.randomOwnModerations(size) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "random own >>>>>>>>>>> ", random_own + + for infohash in random_own: + if len(info) == size: + break + + if infohash not in info: + info.append(infohash) + + + #Add latest moderations to forward + size += self.session.get_moderationcast_recent_forward_moderations_per_have() + recent_forward = self.moderationcastdb.recentModerations(size) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "recent forward >>>>>>>>>>> ", recent_forward + for infohash in recent_forward: + if len(info) == size: + break + if infohash not in info: + info.append(infohash) + + #Add random moderations to forward + size += self.session.get_moderationcast_random_forward_moderations_per_have() + random_forward = self.moderationcastdb.randomModerations(size) + #print >>sys.stderr, strftime("%d-%m-%Y 
%H:%M:%S", localtime())," ", "random forward >>>>>>>>>>> ", random_forward + for infohash in random_forward: + if len(info) == size: + break + if infohash not in info: + info.append(infohash) + + data = [] + #Gather timestamp and size + for infohash in info: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "what exactly do we send",infohash + hash = infohash[2] + time = infohash[3] + data.append((hash, time)) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "moderationcast: Prepared", len(data), "moderations" + + return data + + ################################ + def createAndSendModerationCastRequestMessage(self, target_permid, have_message, selversion): + # for older versions of Tribler (non-ModerationCast): do nothing + #if selversion < MIN_VERSION: + #return + + # create a new MODERATIONCAST_REQUEST message + moderationcast_data = self.createModerationCastRequestMessage(target_permid, have_message) + + try: + moderationcast_msg = bencode(moderationcast_data) + except: + if DEBUG: + + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "error moderationcast_data:", moderationcast_data + return + + #Log SEND_MSG of uncompressed message + if self.log: + dns = self.dnsindb(target_permid) + if dns: + ip,port = dns + MSG_ID = "MODERATIONCAST_REQUEST" + msg = moderationCastRequestMsgToString(moderationcast_data) + self.overlay_log('SEND_MSG', ip, port, show_permid(target_permid), selversion, MSG_ID, msg) + + #if REQUEST_COMPRESSION: + #Compress this message + #moderationcast_msg = compress(moderationcast_msg) + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Sending Moderationcast Request Msg", moderationCastRequestMsgToString(moderationcast_data) + # send the message + data = MODERATIONCAST_REQUEST+moderationcast_msg + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","the moderation cast request is", data + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","the moderation cast request decoded is", bdecode(data) + #self.uploadLimiter.use(len(data)) #Log upload-bandwidth usage + #return + self.secure_overlay.send(target_permid, data, self.moderationcastSendCallback) + + ################################ + def createModerationCastRequestMessage(self, target_permid, have_message): + """ Create a MODERATIONCAST_REQUEST message """ + + #Select request set, such that it will not exceed download-bandwidth-limit and + #only select moderations for which we have the torrent and not have a newer moderation + #limit_bytes = self.downloadLimiter.getAvailableSize() + + requests = [] + requests_size = 0 + for (infohash, timestamp) in have_message: + if self.moderationcastdb.hasModeration(infohash): + moderation = self.moderationcastdb.getModeration(infohash) + if moderation[3] < timestamp: + requests.append(infohash) + else: + requests.append(infohash) + + + + return requests + + ################################ + def createAndSendModerationCastReplyMessage(self, target_permid, request_message, selversion): + # for older versions of Tribler (non-ModerationCast): do nothing + #if selversion < MIN_VERSION: + #return + + + # create a new MODERATIONCAST_REQUEST message + moderationcast_data = self.createModerationCastReplyMessage(target_permid, request_message) + + try: + moderationcast_msg = bencode(moderationcast_data) + except: + if DEBUG: + + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "error moderationcast_data:", 
moderationcast_data + return + + #Log SEND_MSG of uncompressed message + if self.log: + dns = self.dnsindb(target_permid) + if dns: + ip,port = dns + MSG_ID = "MODERATIONCAST_REPLY" + msg = moderationCastReplyMsgToString(moderationcast_data) + self.overlay_log('SEND_MSG', ip, port, show_permid(target_permid), selversion, MSG_ID, msg) + + #if REPLY_COMPRESSION: + #Compress this message + #moderationcast_msg = compress(moderationcast_msg) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Sending Moderationcast Reply Msg", moderationCastReplyMsgToString(moderationcast_data) + # send the message + data = MODERATIONCAST_REPLY+moderationcast_msg + self.secure_overlay.send(target_permid, data, self.moderationcastSendCallback) + + ################################ + def createModerationCastReplyMessage(self, target_permid, request_message): + """ Create a MODERATIONCAST_REPLY message """ + + #Select reply set, such that it will not exceed upload-bandwidth-limit: + #limit_bytes = self.uploadLimiter.getAvailableSize() + reply = [] + reply_size = 0 + + for infohash in request_message: + mod = self.moderationcastdb.getModeration(infohash) + moderation = {} + moderation['mod_id'] = mod[0] + moderation['mod_name'] = mod[1] + moderation['infohash'] = mod[2] + moderation['time_stamp'] = mod[3] + moderation['signature'] = mod[7] + + reply.append(moderation) + + return reply + + ################################ + def moderationcastSendCallback(self, exc, target_permid, other=0): + if exc is None: + if DEBUG: + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","moderationcast: *** msg was sent successfully to peer", permid_for_user(target_permid) + else: + if DEBUG: + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "moderationcast: *** warning - error in sending msg to", permid_for_user(target_permid), exc + + ################################ + def gotModerationCastHaveMessage(self, recv_msg, sender_permid, selversion): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'moderationcast: Received a HAVE msg from ', permid_for_user(sender_permid) + + if not sender_permid or sender_permid == self.my_permid: + return False + + if self.max_have_length > 0 and len(recv_msg) > self.max_have_length: + return False + + #check if this moderator is a fraud + mod = self.moderationcastdb.getModerator(permid_for_user(sender_permid)) + if mod is not None and len(mod)>0: + if mod[1]==-1: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Sorry this moderator is a fraud one:", permid_for_user(sender_permid) + return False + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "This moderator is not a fraud one:", permid_for_user(sender_permid) + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Never seen this moderator :", permid_for_user(sender_permid) + + moderationcast_data = {} + + try: + moderationcast_data = bdecode(recv_msg) + except: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "moderationcast: warning, invalid bencoded data" + return False + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "received this thing from the test", moderationcast_data + # check message-structure + if not validModerationCastHaveMsg(moderationcast_data): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "moderationcast: invalid MODERATIONCAST_HAVE-message" + return False + + if DEBUG: + 
print "Received MODERATIONCAST_HAVE", moderationCastHaveMsgToString(moderationcast_data) + + #Log RECV_MSG of uncompressed message + if self.log: + dns = self.dnsindb(sender_permid) + if dns: + ip,port = dns + MSG_ID = "MODERATIONCAST_HAVE" + msg = moderationCastHaveMsgToString(moderationcast_data) + self.overlay_log('RECV_MSG', ip, port, show_permid(sender_permid), selversion, MSG_ID, msg) + + #Reply have-message, with request message + self.createAndSendModerationCastRequestMessage(sender_permid, moderationcast_data, selversion) + + return True + + ################################ + def gotModerationCastRequestMessage(self, recv_msg, sender_permid, selversion): + """ Received a MODERATIONCAST_REQUEST message and handle it. Reply if needed """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'moderationcast: Received a REQUEST msg from ', permid_for_user(sender_permid) + + #Log download-bandwidth-usage + #self.downloadLimiter.use(len(recv_msg)) + + #if REQUEST_COMPRESSION: + #Decompress this message, before handling further + #recv_msg = decompress(recv_msg) + + if not sender_permid or sender_permid == self.my_permid: + return False + + if self.max_request_length > 0 and len(recv_msg) > self.max_request_length: + return False + + moderationcast_data = {} + + try: + moderationcast_data = bdecode(recv_msg) + except: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "moderationcast: warning, invalid bencoded data" + return False + + # check message-structure + if not validModerationCastRequestMsg(moderationcast_data): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "moderationcast: invalid MODERATIONCAST_REQUEST-message" + return False + + if DEBUG: + print "Received MODERATIONCAST_REQUEST", moderationCastRequestMsgToString(moderationcast_data) + + #Log RECV_MSG of uncompressed message + if self.log: + dns = self.dnsindb(sender_permid) + if dns: + ip,port = dns + MSG_ID = "MODERATIONCAST_REQUEST" + msg = moderationCastRequestMsgToString(moderationcast_data) + self.overlay_log('RECV_MSG', ip, port, show_permid(sender_permid), selversion, MSG_ID, msg) + + self.createAndSendModerationCastReplyMessage(sender_permid, moderationcast_data, selversion) + + return True + + ################################ + def gotModerationCastReplyMessage(self, recv_msg, sender_permid, selversion): + """ Received a MODERATIONCAST_REPLY message and handle it.""" + + #Log download-bandwidth-usage + #self.downloadLimiter.use(len(recv_msg)) + if not sender_permid or sender_permid == self.my_permid: + return False + + if MAX_REPLY_LENGTH > 0 and len(recv_msg) > MAX_REPLY_LENGTH: + return False + + moderationcast_data = {} + + try: + moderationcast_data = bdecode(recv_msg) + except: + return False + + # check message-structure + if not validModerationCastReplyMsg(moderationcast_data): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Received Invalid Moderationcast Reply Message" + return False + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Received MODERATIONCAST_REPLY", moderationCastReplyMsgToString(moderationcast_data) + + #Log RECV_MSG of uncompressed message + if self.log: + dns = self.dnsindb(sender_permid) + if dns: + ip,port = dns + MSG_ID = "MODERATIONCAST_REPLY" + msg = moderationCastReplyMsgToString(moderationcast_data) + self.overlay_log('RECV_MSG', ip, port, show_permid(sender_permid), selversion, MSG_ID, msg) + + #Handle moderationcast-have-message: + 
self.handleModerationCastReplyMsg(sender_permid, moderationcast_data) + + return True + + ################################ + def handleModerationCastReplyMsg(self, sender_permid, data): + + if DEBUG: + print "Processing MODERATIONCAST_REPLY msg from: ", permid_for_user(sender_permid) + + for moderation in data: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","intention>>>>", moderation + self.moderationcastdb.updateModeration(moderation) + + if DEBUG: + print "Processing MODERATIONCAST_REPLY msg from: ", permid_for_user(sender_permid), "DONE" + + + ################################ + + def showAllModerations(self): + """ Currently this function is only for testing, to show all moderations """ + if DEBUG: + records = self.moderationcastdb.getAll() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Existing moderations..." + for record in records: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", " modid:",record[0],"; modname:", record[1], "; infohash:",record[2],"; signature:", record[7] + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "End of moderations..." + + records = self.moderationcastdb.getAllModerators() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Existing moderators..." + for record in records: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", " modid:",record[0],"; status:", record[1], "; timestamp:",record[2] + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "End of moderators..." + diff --git a/tribler-mod/Tribler/Core/BuddyCast/moderationcast.py.bak b/tribler-mod/Tribler/Core/BuddyCast/moderationcast.py.bak new file mode 100644 index 0000000..307bb9c --- /dev/null +++ b/tribler-mod/Tribler/Core/BuddyCast/moderationcast.py.bak @@ -0,0 +1,455 @@ +# Written by Vincent Heinink and Rameez Rahman +# see LICENSE.txt for license information +# + +from Tribler.Core.BitTornado.BT1.MessageID import MODERATIONCAST_HAVE, MODERATIONCAST_REQUEST, MODERATIONCAST_REPLY +from Tribler.Core.BitTornado.bencode import bencode, bdecode +from Tribler.Core.BuddyCast.moderationcast_util import * +from Tribler.Core.CacheDB.CacheDBHandler import ModerationCastDBHandler +from Tribler.Core.CacheDB.sqlitecachedb import SQLiteCacheDB, bin2str, str2bin, NULL +from Tribler.Core.Overlay.permid import permid_for_user +from Tribler.Core.Statistics.Logger import OverlayLogger +from Tribler.Core.Utilities.utilities import * + +from base64 import decodestring +from binascii import hexlify +from time import time +from traceback import print_exc +from types import StringType, ListType, DictType + +DEBUG_UI = False +DEBUG = False #Default debug +debug = False #For send-errors and other low-level stuff + +AUTO_MODERATE = False #Automatically moderate content, with bogus moderations +AUTO_MODERATE_INTERVAL = 1 #Number of seconds between creation of moderations + +class ModerationCastCore: + """ ModerationCastCore is responsible for sending and receiving: + MODERATIONCAST_HAVE, MODERATIONCAST_REQUEST, and MODERATIONCAST_REPLY-messages + """ + + ################################ + def __init__(self, data_handler, secure_overlay, session, buddycast_interval_function, log = '', dnsindb = None): + """ Returns an instance of this class + """ + #Keep reference to interval-function of BuddycastFactory + self.interval = buddycast_interval_function + self.data_handler = data_handler + self.dnsindb = dnsindb + self.log = log + self.secure_overlay = secure_overlay + 
self.moderationcastdb = ModerationCastDBHandler.getInstance() + self.my_permid = self.moderationcastdb.my_permid + self.session = session + + self.max_have_length = SINGLE_HAVE_LENGTH * session.get_moderationcast_moderations_per_have() + self.max_request_length = SINGLE_REQUEST_LENGTH * session.get_moderationcast_moderations_per_have() + + #Reference to buddycast-core, set by the buddycast-core (as it is created by the + #buddycast-factory after calling this constructor). + self.buddycast_core = None + + #Debug-interface + if DEBUG_UI: + from moderationcast_test import ModerationCastTest + ModerationCastTest(self) + + #Extend logging with ModerationCAST-messages and status + if self.log: + self.overlay_log = OverlayLogger.getInstance(self.log) + ##self.dnsindb = self.data_handler.get_dns_from_peerdb # Arno, 2009-05-15: Don't see why this must be diff. Don't exist anymore anyway + + if AUTO_MODERATE: + assert AUTO_MODERATE_INTERVAL > 0 + from moderationcast_experiment import BogusAutoModerator + self.auto_moderator = BogusAutoModerator(AUTO_MODERATE_INTERVAL) + + def initialized(self): + return self.buddycast_core is not None + + ################################ + def createAndSendModerationCastHaveMessage(self, target_permid, selversion): + + moderationcast_data = self.createModerationCastHaveMessage(target_permid) + if len(moderationcast_data) == 0: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "There are no moderations.. hence we do not send" + return + moderationcast_msg = bencode(moderationcast_data) + + if self.log: + dns = self.dnsindb(target_permid) + if dns: + ip,port = dns + MSG_ID = "MODERATIONCAST_HAVE" + msg = moderationCastHaveMsgToString(moderationcast_data) + self.overlay_log('SEND_MSG', ip, port, show_permid(target_permid), selversion, MSG_ID, msg) + + data = MODERATIONCAST_HAVE+moderationcast_msg + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Sending Moderationcast Have Msg", moderationCastHaveMsgToString(moderationcast_data) + self.secure_overlay.send(target_permid, data, self.moderationcastSendCallback) + + ################################ + def createModerationCastHaveMessage(self, target_permid): + """ Create a MODERATIONCAST_HAVE message """ + + #Select latest own moderations + size = self.session.get_moderationcast_recent_own_moderations_per_have() + info = self.moderationcastdb.recentOwnModerations(size) + + #Add random own moderations + size += self.session.get_moderationcast_random_own_moderations_per_have() + random_own = self.moderationcastdb.randomOwnModerations(size) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "random own >>>>>>>>>>> ", random_own + + for infohash in random_own: + if len(info) == size: + break + + if infohash not in info: + info.append(infohash) + + + #Add latest moderations to forward + size += self.session.get_moderationcast_recent_forward_moderations_per_have() + recent_forward = self.moderationcastdb.recentModerations(size) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "recent forward >>>>>>>>>>> ", recent_forward + for infohash in recent_forward: + if len(info) == size: + break + if infohash not in info: + info.append(infohash) + + #Add random moderations to forward + size += self.session.get_moderationcast_random_forward_moderations_per_have() + random_forward = self.moderationcastdb.randomModerations(size) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "random forward >>>>>>>>>>> ", random_forward + for 
infohash in random_forward: + if len(info) == size: + break + if infohash not in info: + info.append(infohash) + + data = [] + #Gather timestamp and size + for infohash in info: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "what exactly do we send",infohash + hash = infohash[2] + time = infohash[3] + data.append((hash, time)) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "moderationcast: Prepared", len(data), "moderations" + + return data + + ################################ + def createAndSendModerationCastRequestMessage(self, target_permid, have_message, selversion): + # for older versions of Tribler (non-ModerationCast): do nothing + #if selversion < MIN_VERSION: + #return + + # create a new MODERATIONCAST_REQUEST message + moderationcast_data = self.createModerationCastRequestMessage(target_permid, have_message) + + try: + moderationcast_msg = bencode(moderationcast_data) + except: + if DEBUG: + + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "error moderationcast_data:", moderationcast_data + return + + #Log SEND_MSG of uncompressed message + if self.log: + dns = self.dnsindb(target_permid) + if dns: + ip,port = dns + MSG_ID = "MODERATIONCAST_REQUEST" + msg = moderationCastRequestMsgToString(moderationcast_data) + self.overlay_log('SEND_MSG', ip, port, show_permid(target_permid), selversion, MSG_ID, msg) + + #if REQUEST_COMPRESSION: + #Compress this message + #moderationcast_msg = compress(moderationcast_msg) + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Sending Moderationcast Request Msg", moderationCastRequestMsgToString(moderationcast_data) + # send the message + data = MODERATIONCAST_REQUEST+moderationcast_msg + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","the moderation cast request is", data + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","the moderation cast request decoded is", bdecode(data) + #self.uploadLimiter.use(len(data)) #Log upload-bandwidth usage + #return + self.secure_overlay.send(target_permid, data, self.moderationcastSendCallback) + + ################################ + def createModerationCastRequestMessage(self, target_permid, have_message): + """ Create a MODERATIONCAST_REQUEST message """ + + #Select request set, such that it will not exceed download-bandwidth-limit and + #only select moderations for which we have the torrent and not have a newer moderation + #limit_bytes = self.downloadLimiter.getAvailableSize() + + requests = [] + requests_size = 0 + for (infohash, timestamp) in have_message: + if self.moderationcastdb.hasModeration(infohash): + moderation = self.moderationcastdb.getModeration(infohash) + if moderation[3] < timestamp: + requests.append(infohash) + else: + requests.append(infohash) + + + + return requests + + ################################ + def createAndSendModerationCastReplyMessage(self, target_permid, request_message, selversion): + # for older versions of Tribler (non-ModerationCast): do nothing + #if selversion < MIN_VERSION: + #return + + + # create a new MODERATIONCAST_REQUEST message + moderationcast_data = self.createModerationCastReplyMessage(target_permid, request_message) + + try: + moderationcast_msg = bencode(moderationcast_data) + except: + if DEBUG: + + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "error moderationcast_data:", moderationcast_data + return + + #Log SEND_MSG of uncompressed message + if self.log: + dns 
= self.dnsindb(target_permid) + if dns: + ip,port = dns + MSG_ID = "MODERATIONCAST_REPLY" + msg = moderationCastReplyMsgToString(moderationcast_data) + self.overlay_log('SEND_MSG', ip, port, show_permid(target_permid), selversion, MSG_ID, msg) + + #if REPLY_COMPRESSION: + #Compress this message + #moderationcast_msg = compress(moderationcast_msg) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Sending Moderationcast Reply Msg", moderationCastReplyMsgToString(moderationcast_data) + # send the message + data = MODERATIONCAST_REPLY+moderationcast_msg + self.secure_overlay.send(target_permid, data, self.moderationcastSendCallback) + + ################################ + def createModerationCastReplyMessage(self, target_permid, request_message): + """ Create a MODERATIONCAST_REPLY message """ + + #Select reply set, such that it will not exceed upload-bandwidth-limit: + #limit_bytes = self.uploadLimiter.getAvailableSize() + reply = [] + reply_size = 0 + + for infohash in request_message: + mod = self.moderationcastdb.getModeration(infohash) + moderation = {} + moderation['mod_id'] = mod[0] + moderation['mod_name'] = mod[1] + moderation['infohash'] = mod[2] + moderation['time_stamp'] = mod[3] + moderation['signature'] = mod[7] + + reply.append(moderation) + + return reply + + ################################ + def moderationcastSendCallback(self, exc, target_permid, other=0): + if exc is None: + if DEBUG: + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","moderationcast: *** msg was sent successfully to peer", permid_for_user(target_permid) + else: + if DEBUG: + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "moderationcast: *** warning - error in sending msg to", permid_for_user(target_permid), exc + + ################################ + def gotModerationCastHaveMessage(self, recv_msg, sender_permid, selversion): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'moderationcast: Received a HAVE msg from ', permid_for_user(sender_permid) + + if not sender_permid or sender_permid == self.my_permid: + return False + + if self.max_have_length > 0 and len(recv_msg) > self.max_have_length: + return False + + #check if this moderator is a fraud + mod = self.moderationcastdb.getModerator(permid_for_user(sender_permid)) + if mod is not None and len(mod)>0: + if mod[1]==-1: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Sorry this moderator is a fraud one:", permid_for_user(sender_permid) + return False + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "This moderator is not a fraud one:", permid_for_user(sender_permid) + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Never seen this moderator :", permid_for_user(sender_permid) + + moderationcast_data = {} + + try: + moderationcast_data = bdecode(recv_msg) + except: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "moderationcast: warning, invalid bencoded data" + return False + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "received this thing from the test", moderationcast_data + # check message-structure + if not validModerationCastHaveMsg(moderationcast_data): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "moderationcast: invalid MODERATIONCAST_HAVE-message" + return False + + if DEBUG: + print "Received MODERATIONCAST_HAVE", moderationCastHaveMsgToString(moderationcast_data) + + 
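# Editor's note (illustrative comment, not part of the original patch):
# at this point moderationcast_data has passed validModerationCastHaveMsg(),
# i.e. it is a bdecoded list of [infohash, time_stamp] pairs such as
#   [['<20-byte infohash string>', 1259340000], ...]
# each pair advertising one moderation the sender is willing to supply on request.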
#Log RECV_MSG of uncompressed message + if self.log: + dns = self.dnsindb(sender_permid) + if dns: + ip,port = dns + MSG_ID = "MODERATIONCAST_HAVE" + msg = moderationCastHaveMsgToString(moderationcast_data) + self.overlay_log('RECV_MSG', ip, port, show_permid(sender_permid), selversion, MSG_ID, msg) + + #Reply have-message, with request message + self.createAndSendModerationCastRequestMessage(sender_permid, moderationcast_data, selversion) + + return True + + ################################ + def gotModerationCastRequestMessage(self, recv_msg, sender_permid, selversion): + """ Received a MODERATIONCAST_REQUEST message and handle it. Reply if needed """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'moderationcast: Received a REQUEST msg from ', permid_for_user(sender_permid) + + #Log download-bandwidth-usage + #self.downloadLimiter.use(len(recv_msg)) + + #if REQUEST_COMPRESSION: + #Decompress this message, before handling further + #recv_msg = decompress(recv_msg) + + if not sender_permid or sender_permid == self.my_permid: + return False + + if self.max_request_length > 0 and len(recv_msg) > self.max_request_length: + return False + + moderationcast_data = {} + + try: + moderationcast_data = bdecode(recv_msg) + except: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "moderationcast: warning, invalid bencoded data" + return False + + # check message-structure + if not validModerationCastRequestMsg(moderationcast_data): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "moderationcast: invalid MODERATIONCAST_REQUEST-message" + return False + + if DEBUG: + print "Received MODERATIONCAST_REQUEST", moderationCastRequestMsgToString(moderationcast_data) + + #Log RECV_MSG of uncompressed message + if self.log: + dns = self.dnsindb(sender_permid) + if dns: + ip,port = dns + MSG_ID = "MODERATIONCAST_REQUEST" + msg = moderationCastRequestMsgToString(moderationcast_data) + self.overlay_log('RECV_MSG', ip, port, show_permid(sender_permid), selversion, MSG_ID, msg) + + self.createAndSendModerationCastReplyMessage(sender_permid, moderationcast_data, selversion) + + return True + + ################################ + def gotModerationCastReplyMessage(self, recv_msg, sender_permid, selversion): + """ Received a MODERATIONCAST_REPLY message and handle it.""" + + #Log download-bandwidth-usage + #self.downloadLimiter.use(len(recv_msg)) + if not sender_permid or sender_permid == self.my_permid: + return False + + if MAX_REPLY_LENGTH > 0 and len(recv_msg) > MAX_REPLY_LENGTH: + return False + + moderationcast_data = {} + + try: + moderationcast_data = bdecode(recv_msg) + except: + return False + + # check message-structure + if not validModerationCastReplyMsg(moderationcast_data): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Received Invalid Moderationcast Reply Message" + return False + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Received MODERATIONCAST_REPLY", moderationCastReplyMsgToString(moderationcast_data) + + #Log RECV_MSG of uncompressed message + if self.log: + dns = self.dnsindb(sender_permid) + if dns: + ip,port = dns + MSG_ID = "MODERATIONCAST_REPLY" + msg = moderationCastReplyMsgToString(moderationcast_data) + self.overlay_log('RECV_MSG', ip, port, show_permid(sender_permid), selversion, MSG_ID, msg) + + #Handle moderationcast-have-message: + self.handleModerationCastReplyMsg(sender_permid, moderationcast_data) + + return True + + 
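Editor's note: the handlers above implement a pull-based exchange. A peer advertises (infohash, time_stamp) pairs in a MODERATIONCAST_HAVE; the receiver answers with a MODERATIONCAST_REQUEST listing the infohashes it lacks or only knows with an older timestamp; the original sender then returns the full moderation records in a MODERATIONCAST_REPLY, which the receiver stores via updateModeration(). The sketch below is an editorial illustration only and is not part of the patch: the build_* helper names and the toy stores are hypothetical, and the real code additionally bencodes each payload, prefixes a message-ID byte, and sends it over the secure overlay.

def build_have(store):
    # HAVE payload: [(infohash, time_stamp)] for the moderations we can offer
    return [(m['infohash'], m['time_stamp']) for m in store.values()]

def build_request(have_msg, store):
    # REQUEST payload: infohashes we lack, or only hold with an older timestamp
    wanted = []
    for infohash, time_stamp in have_msg:
        local = store.get(infohash)
        if local is None or local['time_stamp'] < time_stamp:
            wanted.append(infohash)
    return wanted

def build_reply(request_msg, store):
    # REPLY payload: the full moderation records that were requested
    return [store[infohash] for infohash in request_msg if infohash in store]

# Toy round trip: A advertises, B requests what it is missing, A replies, B stores.
a_store = {'hashA': {'mod_id': 'permidA', 'mod_name': 'alice', 'infohash': 'hashA',
                     'time_stamp': 1259340000, 'signature': 'sigA'}}
b_store = {}
for moderation in build_reply(build_request(build_have(a_store), b_store), a_store):
    b_store[moderation['infohash']] = moderation   # analogous to handleModerationCastReplyMsg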
################################ + def handleModerationCastReplyMsg(self, sender_permid, data): + + if DEBUG: + print "Processing MODERATIONCAST_REPLY msg from: ", permid_for_user(sender_permid) + + for moderation in data: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","intention>>>>", moderation + self.moderationcastdb.updateModeration(moderation) + + if DEBUG: + print "Processing MODERATIONCAST_REPLY msg from: ", permid_for_user(sender_permid), "DONE" + + + ################################ + + def showAllModerations(self): + """ Currently this function is only for testing, to show all moderations """ + if DEBUG: + records = self.moderationcastdb.getAll() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Existing moderations..." + for record in records: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", " modid:",record[0],"; modname:", record[1], "; infohash:",record[2],"; signature:", record[7] + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "End of moderations..." + + records = self.moderationcastdb.getAllModerators() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Existing moderators..." + for record in records: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", " modid:",record[0],"; status:", record[1], "; timestamp:",record[2] + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "End of moderators..." + diff --git a/tribler-mod/Tribler/Core/BuddyCast/moderationcast_util.py b/tribler-mod/Tribler/Core/BuddyCast/moderationcast_util.py new file mode 100644 index 0000000..01a4f6a --- /dev/null +++ b/tribler-mod/Tribler/Core/BuddyCast/moderationcast_util.py @@ -0,0 +1,388 @@ +from time import localtime, strftime +# Written by Vincent Heinink and Rameez Rahman +# see LICENSE.txt for license information +# +#Utilities for moderationcast (including databases) +# +import sys + +from Tribler.Core.CacheDB.sqlitecachedb import bin2str, str2bin + +DEBUG = False + +# NO_RECENT_OWN_MODERATIONS_PER_HAVE = 13 +# NO_RANDOM_OWN_MODERATIONS_PER_HAVE = 12 +# NO_RECENT_FORWARD_MODERATIONS_PER_HAVE = 13 +# NO_RANDOM_FORWARD_MODERATIONS_PER_HAVE = 12 +# NO_MODERATIONS_PER_HAVE = NO_RECENT_OWN_MODERATIONS_PER_HAVE + NO_RANDOM_OWN_MODERATIONS_PER_HAVE +\ +# NO_RECENT_FORWARD_MODERATIONS_PER_HAVE + NO_RANDOM_FORWARD_MODERATIONS_PER_HAVE +# UPLOAD_BANDWIDTH_LIMIT = 5*1024 #5KByte/s +# DOWNLOAD_BANDWIDTH_LIMIT = 20*1024 #20KByte/s + +# MAX_HAVE_LENGTH = NO_MODERATIONS_PER_HAVE * 40 #40 bytes per (infohash, timestamp, size-combination)? +# MAX_REQUEST_LENGTH = NO_MODERATIONS_PER_HAVE * 25 #25 bytes per infohash? + +SINGLE_HAVE_LENGTH = 40 #40 bytes per (infohash, timestamp, size-combination)? +SINGLE_REQUEST_LENGTH = 25 #25 bytes per infohash? 
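Editor's note: these per-item size estimates become hard caps in ModerationCastCore.__init__, which multiplies them by the session's moderations-per-have setting; incoming HAVE and REQUEST payloads larger than the resulting cap are dropped. The short sketch below is an editorial illustration, not part of moderationcast_util.py: it reuses the two constants defined just above, and the value 50 for the per-have setting is an assumed example (the real value comes from session.get_moderationcast_moderations_per_have()).

moderations_per_have = 50                                           # assumed example value
max_have_length = SINGLE_HAVE_LENGTH * moderations_per_have         # 40 * 50 = 2000 bytes
max_request_length = SINGLE_REQUEST_LENGTH * moderations_per_have   # 25 * 50 = 1250 bytes

def accept_incoming(recv_msg, max_length):
    # mirrors the length guard used in gotModerationCastHaveMessage/RequestMessage
    return not (max_length > 0 and len(recv_msg) > max_length)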
+MAX_REPLY_LENGTH = 2 * 1024 * 1024 #2 MByte + +HAVE_COMPRESSION = True +REQUEST_COMPRESSION = True +REPLY_COMPRESSION = True + +TIMESTAMP_IN_FUTURE = 5 * 60 # 5 minutes is okay +MAX_THUMBNAIL_SIZE = 20 * 1024 # 20 Kilobyte +MAX_SUBTITLE_SIZE = 100 * 1024 # 100 Kilobyte +MAX_DESCRIPTION_SIZE = 2 * 1024 # 2 Kilobyte +MAX_TAGS = 50 # 50 tags max +MAX_TAG_SIZE = 30 # 30 characters max per tag + +BLOCK_HAVE_TIME = 30 # Do not reply a have message with a have message, to peers that have received one in the last 30 seconds + +LANGUAGES = { #The language-codes and their representations for languages that we allow (ISO-639-3) + 'ron':'Romanian', + 'jpn':'Japanese', + 'swe':'Swedish', + 'por':'Portuguese', + 'ita':'Italian', + 'ara':'Arabic', + 'pol':'Polish', + 'nld':'Dutch', + 'ind':'Indonesian', + 'spa':'Spanish', + 'fra':'French', + 'est':'Estonian', + 'ell':'Modern Greek (1453-)', + 'eng':'English', + 'hrv':'Croatian', + 'tur':'Turkish', + 'heb':'Hebrew', + 'kor':'Korean', + 'fin':'Finnish', + 'hun':'Hungarian', + 'fas':'Persian', + 'dan':'Danish', + 'ces':'Czech', + 'bul':'Bulgarian', + 'rus':'Russian', + 'nor':'Norwegian', + 'vie':'Vietnamese', + 'deu':'German', + 'srp':'Serbian', + 'slk':'Slovak', + 'zho':'Chinese' +} + +#For debugging messages +import sys + +#For validity-checks +from types import StringType, ListType, DictType +from time import time +from Tribler.Core.BitTornado.bencode import bencode +from Tribler.Core.Overlay.permid import verify_data +from os.path import exists, isfile + +#*****************Validity-checks***************** +def validInfohash(infohash): + """ Returns True iff infohash is a valid infohash """ + r = type(infohash) == str + if not r: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Invalid infohash: type(infohash) ==", str(type(infohash))+\ + ", len(infohash) ==", str(len(infohash)) + return r + +def validPermid(permid): + """ Returns True iff permid is a valid Tribler Perm-ID """ + r = type(permid) == str and len(permid) <= 120 + if not r: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Invalid permid: type(permid) ==", str(type(permid))+\ + ", len(permid) ==", str(len(permid)) + return r + +def validSignature(moderation): + """ Returns True iff the (signature, moderator) in moderation is correct for this moderation """ + + #return True + #UNFREEZE LATER + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Checking signature of moderation:", repr(moderation) + blob = str2bin(moderation['signature']) + permid = str2bin(moderation['mod_id']) + #Plaintext excludes signature: + del moderation['signature'] + plaintext = bencode(moderation) + moderation['signature'] = bin2str(blob) + signature = verify_data(plaintext,permid, blob) + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Checking signature of moderation after verify_data:", repr(moderation) + + r = verify_data(plaintext, permid, blob) + if not r: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Invalid signature" + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Proper signature:", moderation['signature'] + return r + +def now(): + """ Returns current-system-time in UTC, seconds since the epoch (type==int) """ + return int(time()) + +def validTimestamp(timestamp): + """ Returns True iff timestamp is a valid timestamp """ + r = timestamp is not None and type(timestamp) == int and timestamp > 0 and timestamp <= now() + TIMESTAMP_IN_FUTURE + if not 
r: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Invalid timestamp" + return r + +def validSize(size): + """ Returns True iff size is a valid size """ + r = size is not None and (type(size) == int or type(size) == double) and size > 0 + if not r: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Invalid size" + return r + +def validThumbnail(thumbnail): + """ Returns True iff thumbnail is a valid thumbnail """ + r = type(thumbnail) == str + if not r: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Invalid thumbnail: type(thumbnail) ==", str(type(thumbnail)) + return False + + r = len(thumbnail) <= MAX_THUMBNAIL_SIZE + if not r: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Invalid thumbnail: len(thumbnail) ==", str(len(thumbnail)) + return False + + return True + +def validUIThumbnail(thumbnail): + """ Returns True iff thumbnail is a valid thumbnail """ + r = type(thumbnail) == str and exists(thumbnail) and isfile(thumbnail) + if not r: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Invalid UIthumbnail" + return r + +def validDBThumbnail(thumbnail): + """ Returns True """ + return True + +def validDescription(description): + """ Returns True iff description is a valid description """ + r = (type(description) == str or type(description) == unicode) and len(description) <= MAX_DESCRIPTION_SIZE + if not r: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Invalid DBDescription" + return r + +def validSubtitles(subtitles): + """ Returns True iff subtitles is a valid collection of subtitles """ + if type(subtitles) != dict: #Dictionary + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Invalid subtitles: type(subtitles) ==", str(type(subtitles)) + return False + + for (language, data) in subtitles.iteritems(): #Valid language and data + if not validLanguage(language): + return False + if type(data) != str or len(data) > MAX_SUBTITLE_SIZE: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Invalid subtitle:", language, "has invalid data" + return False + + return True #Ok + +def validUISubtitles(subtitles): + """ Returns True iff subtitles is a valid collection of subtitles """ + if type(subtitles) != dict: #Dictionary + return False + + for (language, file) in subtitles.iteritems(): #Valid language and file + if not validLanguage(language): + return False + if type(file) != str or not exists(file) or not isfile(file): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Invalid UISubtitle:", language, "has invalid file" + return False + + return True + +def validDBSubtitles(subtitles): + """ Returns True """ + return True + +def validTags(tags): + """ Returns True iff tags is a valid collection of tags """ + if type(tags) != tuple and type(tags) != list: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Invalid tags: non-list/tuple" + return False + + if len(tags) > MAX_TAGS: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Invalid tags: too many tags:", str(len(tags)) + return False + + for tag in tags: + if (type(tag) != str and type(tag) != unicode) or len(tag) > MAX_TAG_SIZE: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Invalid tags: too long tag:", tag + return False + + return True + +def validLanguage(language): + """ Returns True iff language is a valid language """ + r = (type(language) == 
str or type(language) == unicode) and language in LANGUAGES.keys() + if not r: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Invalid language" + return r + +def validUILanguage(language): + """ Returns True iff language is a valid language """ + r = (type(language) == str or type(language) == unicode) and language in LANGUAGES.values() + if not r: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Invalid UIlanguage" + return r + +def validDBModeration(moderation): + + required = {'infohash':validInfohash, 'mod_id':validPermid, 'time_stamp':validTimestamp, 'signature':validSignature, 'size':validSize} + + #Check for DictType + if type(moderation) != DictType: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "moderation is non-DictType, but of type:", str(type(moderation)) + return False + + #Check required-keys and their values + for key, check_function in required.iteritems(): + if not moderation.has_key(key): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "moderation does not have", key+"-key" + return False + if not check_function(moderation[key]): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "moderation has invalid required", key+"-value" + return False + + + return True + +def validUIModeration(moderation): + + required = {'infohash':validInfohash, 'mod_id':validPermid, 'time_stamp':validTimestamp, 'signature':lambda x:True} + + #Check for DictType + if type(moderation) != DictType: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "moderation is non-DictType, but of type:", str(type(moderation)) + return False + + #Check required-keys and their values + for key, check_function in required.iteritems(): + if not moderation.has_key(key): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "moderation does not have", key+"-key" + return False + if not check_function(moderation[key]): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "moderation has invalid required", key+"-value" + return False + + return True + +def validModeration(moderation): + + required = {'infohash':validInfohash, 'mod_id':validPermid, 'time_stamp':validTimestamp, 'signature':lambda x:validSignature(moderation)} + + #Check for DictType + if type(moderation) != DictType: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "moderation is non-DictType, but of type:", str(type(moderation)) + return False + + #Check required-keys and their values + for key, check_function in required.iteritems(): + if not moderation.has_key(key): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "moderation does not have", key+"-key" + return False + if not check_function(moderation[key]): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "moderation has invalid required", key+"-value" + return False + + return True + +def validModerationCastHaveMsg(data): + """ MODERATIONCAST_HAVE-message should be a of type: [(infohash, time_stamp)] """ + + if data is None or not type(data) == ListType: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "validModerationCastMsg: non-ListType" + return False + + for item in data: + if not type(item) == ListType or len(item) != 2: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "validModerationCastMsg: item non-3-list:" + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "type(item):", str(type(item)) + print 
>>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "len(item) != 3:", str(len(item) != 3) + return False + + (infohash, timestamp) = item + if not validInfohash(infohash) or not validTimestamp(timestamp): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "validModerationCastMsg: item invalid:" + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "validInfohash(infohash):", str(validInfohash(infohash)) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "validTimestamp(timestamp):", str(validTimestamp(timestamp)) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "validSize(size):", str(validSize(size)) + return False + + return True + +def validModerationCastRequestMsg(data): + """ Returns True iff MODERATIONCAST_REQUEST-message is valid, shoud be of type: [infohash] """ + if data is None or not type(data) == ListType: + return False + + for item in data: + if not validInfohash(item): + return False + + return True + +def validModerationCastReplyMsg(data): + """ Returns True iff MODERATIONCAST_REPLY-message is valid, should be a of type: [moderation] """ + if data is None or not type(data) == ListType: + return False + + for item in data: + if not validModeration(item): + return False + + return True + +def validVoteCastMsg(data): + """ Returns True if VoteCastMsg is valid, ie, be of type [(mod_id,vote) """ + if data is None or not type(data) == ListType: + return False + + for record in data: + if not type(record[0]) == StringType: + return False + if not type(record[1]) == int: + return False + + + return True + + +#************************************************* + +def moderationCastHaveMsgToString(data): + """ Pre: data is a valid MODERATIONCAST_HAVE-message + Post: returns a string-representation of the MODERATIONCAST_HAVE-message + """ + return repr(data) + +def moderationCastRequestMsgToString(data): + """ Pre: data is a valid MODERATIONCAST_REQUEST-message + Post: returns a string-representation of the MODERATIONCAST_REQUEST-message + """ + return repr(data) + +def moderationCastReplyMsgToString(data): + """ Pre: data is a valid MODERATIONCAST_REPLY-message + Post: returns a string-representation of the MODERATIONCAST_REPLY-message + """ + return repr(data) + +def voteCastMsgToString(data): + return repr(data) diff --git a/tribler-mod/Tribler/Core/BuddyCast/moderationcast_util.py.bak b/tribler-mod/Tribler/Core/BuddyCast/moderationcast_util.py.bak new file mode 100644 index 0000000..ad2bf2b --- /dev/null +++ b/tribler-mod/Tribler/Core/BuddyCast/moderationcast_util.py.bak @@ -0,0 +1,387 @@ +# Written by Vincent Heinink and Rameez Rahman +# see LICENSE.txt for license information +# +#Utilities for moderationcast (including databases) +# +import sys + +from Tribler.Core.CacheDB.sqlitecachedb import bin2str, str2bin + +DEBUG = False + +# NO_RECENT_OWN_MODERATIONS_PER_HAVE = 13 +# NO_RANDOM_OWN_MODERATIONS_PER_HAVE = 12 +# NO_RECENT_FORWARD_MODERATIONS_PER_HAVE = 13 +# NO_RANDOM_FORWARD_MODERATIONS_PER_HAVE = 12 +# NO_MODERATIONS_PER_HAVE = NO_RECENT_OWN_MODERATIONS_PER_HAVE + NO_RANDOM_OWN_MODERATIONS_PER_HAVE +\ +# NO_RECENT_FORWARD_MODERATIONS_PER_HAVE + NO_RANDOM_FORWARD_MODERATIONS_PER_HAVE +# UPLOAD_BANDWIDTH_LIMIT = 5*1024 #5KByte/s +# DOWNLOAD_BANDWIDTH_LIMIT = 20*1024 #20KByte/s + +# MAX_HAVE_LENGTH = NO_MODERATIONS_PER_HAVE * 40 #40 bytes per (infohash, timestamp, size-combination)? +# MAX_REQUEST_LENGTH = NO_MODERATIONS_PER_HAVE * 25 #25 bytes per infohash? 
+ +SINGLE_HAVE_LENGTH = 40 #40 bytes per (infohash, timestamp, size-combination)? +SINGLE_REQUEST_LENGTH = 25 #25 bytes per infohash? +MAX_REPLY_LENGTH = 2 * 1024 * 1024 #2 MByte + +HAVE_COMPRESSION = True +REQUEST_COMPRESSION = True +REPLY_COMPRESSION = True + +TIMESTAMP_IN_FUTURE = 5 * 60 # 5 minutes is okay +MAX_THUMBNAIL_SIZE = 20 * 1024 # 20 Kilobyte +MAX_SUBTITLE_SIZE = 100 * 1024 # 100 Kilobyte +MAX_DESCRIPTION_SIZE = 2 * 1024 # 2 Kilobyte +MAX_TAGS = 50 # 50 tags max +MAX_TAG_SIZE = 30 # 30 characters max per tag + +BLOCK_HAVE_TIME = 30 # Do not reply a have message with a have message, to peers that have received one in the last 30 seconds + +LANGUAGES = { #The language-codes and their representations for languages that we allow (ISO-639-3) + 'ron':'Romanian', + 'jpn':'Japanese', + 'swe':'Swedish', + 'por':'Portuguese', + 'ita':'Italian', + 'ara':'Arabic', + 'pol':'Polish', + 'nld':'Dutch', + 'ind':'Indonesian', + 'spa':'Spanish', + 'fra':'French', + 'est':'Estonian', + 'ell':'Modern Greek (1453-)', + 'eng':'English', + 'hrv':'Croatian', + 'tur':'Turkish', + 'heb':'Hebrew', + 'kor':'Korean', + 'fin':'Finnish', + 'hun':'Hungarian', + 'fas':'Persian', + 'dan':'Danish', + 'ces':'Czech', + 'bul':'Bulgarian', + 'rus':'Russian', + 'nor':'Norwegian', + 'vie':'Vietnamese', + 'deu':'German', + 'srp':'Serbian', + 'slk':'Slovak', + 'zho':'Chinese' +} + +#For debugging messages +import sys + +#For validity-checks +from types import StringType, ListType, DictType +from time import time +from Tribler.Core.BitTornado.bencode import bencode +from Tribler.Core.Overlay.permid import verify_data +from os.path import exists, isfile + +#*****************Validity-checks***************** +def validInfohash(infohash): + """ Returns True iff infohash is a valid infohash """ + r = type(infohash) == str + if not r: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Invalid infohash: type(infohash) ==", str(type(infohash))+\ + ", len(infohash) ==", str(len(infohash)) + return r + +def validPermid(permid): + """ Returns True iff permid is a valid Tribler Perm-ID """ + r = type(permid) == str and len(permid) <= 120 + if not r: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Invalid permid: type(permid) ==", str(type(permid))+\ + ", len(permid) ==", str(len(permid)) + return r + +def validSignature(moderation): + """ Returns True iff the (signature, moderator) in moderation is correct for this moderation """ + + #return True + #UNFREEZE LATER + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Checking signature of moderation:", repr(moderation) + blob = str2bin(moderation['signature']) + permid = str2bin(moderation['mod_id']) + #Plaintext excludes signature: + del moderation['signature'] + plaintext = bencode(moderation) + moderation['signature'] = bin2str(blob) + signature = verify_data(plaintext,permid, blob) + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Checking signature of moderation after verify_data:", repr(moderation) + + r = verify_data(plaintext, permid, blob) + if not r: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Invalid signature" + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Proper signature:", moderation['signature'] + return r + +def now(): + """ Returns current-system-time in UTC, seconds since the epoch (type==int) """ + return int(time()) + +def validTimestamp(timestamp): + """ Returns True iff timestamp is a valid 
timestamp """ + r = timestamp is not None and type(timestamp) == int and timestamp > 0 and timestamp <= now() + TIMESTAMP_IN_FUTURE + if not r: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Invalid timestamp" + return r + +def validSize(size): + """ Returns True iff size is a valid size """ + r = size is not None and (type(size) == int or type(size) == double) and size > 0 + if not r: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Invalid size" + return r + +def validThumbnail(thumbnail): + """ Returns True iff thumbnail is a valid thumbnail """ + r = type(thumbnail) == str + if not r: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Invalid thumbnail: type(thumbnail) ==", str(type(thumbnail)) + return False + + r = len(thumbnail) <= MAX_THUMBNAIL_SIZE + if not r: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Invalid thumbnail: len(thumbnail) ==", str(len(thumbnail)) + return False + + return True + +def validUIThumbnail(thumbnail): + """ Returns True iff thumbnail is a valid thumbnail """ + r = type(thumbnail) == str and exists(thumbnail) and isfile(thumbnail) + if not r: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Invalid UIthumbnail" + return r + +def validDBThumbnail(thumbnail): + """ Returns True """ + return True + +def validDescription(description): + """ Returns True iff description is a valid description """ + r = (type(description) == str or type(description) == unicode) and len(description) <= MAX_DESCRIPTION_SIZE + if not r: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Invalid DBDescription" + return r + +def validSubtitles(subtitles): + """ Returns True iff subtitles is a valid collection of subtitles """ + if type(subtitles) != dict: #Dictionary + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Invalid subtitles: type(subtitles) ==", str(type(subtitles)) + return False + + for (language, data) in subtitles.iteritems(): #Valid language and data + if not validLanguage(language): + return False + if type(data) != str or len(data) > MAX_SUBTITLE_SIZE: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Invalid subtitle:", language, "has invalid data" + return False + + return True #Ok + +def validUISubtitles(subtitles): + """ Returns True iff subtitles is a valid collection of subtitles """ + if type(subtitles) != dict: #Dictionary + return False + + for (language, file) in subtitles.iteritems(): #Valid language and file + if not validLanguage(language): + return False + if type(file) != str or not exists(file) or not isfile(file): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Invalid UISubtitle:", language, "has invalid file" + return False + + return True + +def validDBSubtitles(subtitles): + """ Returns True """ + return True + +def validTags(tags): + """ Returns True iff tags is a valid collection of tags """ + if type(tags) != tuple and type(tags) != list: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Invalid tags: non-list/tuple" + return False + + if len(tags) > MAX_TAGS: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Invalid tags: too many tags:", str(len(tags)) + return False + + for tag in tags: + if (type(tag) != str and type(tag) != unicode) or len(tag) > MAX_TAG_SIZE: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Invalid tags: too long tag:", tag + 
return False + + return True + +def validLanguage(language): + """ Returns True iff language is a valid language """ + r = (type(language) == str or type(language) == unicode) and language in LANGUAGES.keys() + if not r: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Invalid language" + return r + +def validUILanguage(language): + """ Returns True iff language is a valid language """ + r = (type(language) == str or type(language) == unicode) and language in LANGUAGES.values() + if not r: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Invalid UIlanguage" + return r + +def validDBModeration(moderation): + + required = {'infohash':validInfohash, 'mod_id':validPermid, 'time_stamp':validTimestamp, 'signature':validSignature, 'size':validSize} + + #Check for DictType + if type(moderation) != DictType: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "moderation is non-DictType, but of type:", str(type(moderation)) + return False + + #Check required-keys and their values + for key, check_function in required.iteritems(): + if not moderation.has_key(key): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "moderation does not have", key+"-key" + return False + if not check_function(moderation[key]): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "moderation has invalid required", key+"-value" + return False + + + return True + +def validUIModeration(moderation): + + required = {'infohash':validInfohash, 'mod_id':validPermid, 'time_stamp':validTimestamp, 'signature':lambda x:True} + + #Check for DictType + if type(moderation) != DictType: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "moderation is non-DictType, but of type:", str(type(moderation)) + return False + + #Check required-keys and their values + for key, check_function in required.iteritems(): + if not moderation.has_key(key): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "moderation does not have", key+"-key" + return False + if not check_function(moderation[key]): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "moderation has invalid required", key+"-value" + return False + + return True + +def validModeration(moderation): + + required = {'infohash':validInfohash, 'mod_id':validPermid, 'time_stamp':validTimestamp, 'signature':lambda x:validSignature(moderation)} + + #Check for DictType + if type(moderation) != DictType: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "moderation is non-DictType, but of type:", str(type(moderation)) + return False + + #Check required-keys and their values + for key, check_function in required.iteritems(): + if not moderation.has_key(key): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "moderation does not have", key+"-key" + return False + if not check_function(moderation[key]): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "moderation has invalid required", key+"-value" + return False + + return True + +def validModerationCastHaveMsg(data): + """ MODERATIONCAST_HAVE-message should be a of type: [(infohash, time_stamp)] """ + + if data is None or not type(data) == ListType: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "validModerationCastMsg: non-ListType" + return False + + for item in data: + if not type(item) == ListType or len(item) != 2: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 
"validModerationCastMsg: item non-3-list:" + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "type(item):", str(type(item)) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "len(item) != 3:", str(len(item) != 3) + return False + + (infohash, timestamp) = item + if not validInfohash(infohash) or not validTimestamp(timestamp): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "validModerationCastMsg: item invalid:" + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "validInfohash(infohash):", str(validInfohash(infohash)) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "validTimestamp(timestamp):", str(validTimestamp(timestamp)) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "validSize(size):", str(validSize(size)) + return False + + return True + +def validModerationCastRequestMsg(data): + """ Returns True iff MODERATIONCAST_REQUEST-message is valid, shoud be of type: [infohash] """ + if data is None or not type(data) == ListType: + return False + + for item in data: + if not validInfohash(item): + return False + + return True + +def validModerationCastReplyMsg(data): + """ Returns True iff MODERATIONCAST_REPLY-message is valid, should be a of type: [moderation] """ + if data is None or not type(data) == ListType: + return False + + for item in data: + if not validModeration(item): + return False + + return True + +def validVoteCastMsg(data): + """ Returns True if VoteCastMsg is valid, ie, be of type [(mod_id,vote) """ + if data is None or not type(data) == ListType: + return False + + for record in data: + if not type(record[0]) == StringType: + return False + if not type(record[1]) == int: + return False + + + return True + + +#************************************************* + +def moderationCastHaveMsgToString(data): + """ Pre: data is a valid MODERATIONCAST_HAVE-message + Post: returns a string-representation of the MODERATIONCAST_HAVE-message + """ + return repr(data) + +def moderationCastRequestMsgToString(data): + """ Pre: data is a valid MODERATIONCAST_REQUEST-message + Post: returns a string-representation of the MODERATIONCAST_REQUEST-message + """ + return repr(data) + +def moderationCastReplyMsgToString(data): + """ Pre: data is a valid MODERATIONCAST_REPLY-message + Post: returns a string-representation of the MODERATIONCAST_REPLY-message + """ + return repr(data) + +def voteCastMsgToString(data): + return repr(data) diff --git a/tribler-mod/Tribler/Core/BuddyCast/similarity.py b/tribler-mod/Tribler/Core/BuddyCast/similarity.py new file mode 100644 index 0000000..da960c4 --- /dev/null +++ b/tribler-mod/Tribler/Core/BuddyCast/similarity.py @@ -0,0 +1,101 @@ +from time import localtime, strftime +# Written by Jun Wang, Jie Yang +# see LICENSE.txt for license information + +__fool_epydoc = 481 +""" +Formulas: + P(I|U) = sum{U'<-I} P(U'|U) # U' has I in his profile + P(U'|U) = Sum{I}Pbs(U'|I)Pml(I|U) # P2PSim + Pbs(U|I) = (c(U,I) + mu*Pml(U))/(Sum{U}c(U,I) + mu) # mu=1 by tuning on tribler dataset + Pml(I|U) = c(U,I)/Sum{I}c(U,I) + Pml(U) = Sum{I}c(U,I) / Sum{U,I}c(U,I) + +Data Structur: + preferences - U:{I|c(U,I)>0}, # c(U,I) # Sum{I}c(U,I) = len(preferences[U]) + owners - I:{U|c(U,I)>0} # I:I:Sum{U}c(U,I) = len(owners[I]) + userSim - U':P(U'|U) + itemSim - I:P(I|U) + total - Sum{U,I}c(U,I) # Pml(U) = len(preferences[U])/total + +Test: + Using hash(permid) as user id, hash(infohash) as torrent id + Incremental change == overall change +""" + +from sets 
import Set + +def P2PSim(pref1, pref2): + """ Calculate simple similarity between peers """ + + cooccurrence = len(Set(pref1) & Set(pref2)) + if cooccurrence == 0: + return 0 + normValue = (len(pref1)*len(pref2))**0.5 + _sim = cooccurrence/normValue + sim = int(_sim*1000) # use integer for bencode + return sim + +def getCooccurrence(pref1, pref2): # pref1 and pref2 are sorted + i = 0 + j = 0 + co = 0 + size1 = len(pref1) + size2 = len(pref2) + if size1 == 0 or size2 == 0: + return 0 + while 1: + if (i>= size1) or (j>=size2): break + Curr_ID1 = pref1[i] + Curr_ID2 = pref2[j] + if Curr_ID1 < Curr_ID2 : + i=i+1 + elif Curr_ID1 > Curr_ID2 : + j=j+1 + else: + co +=1 + i+=1 + j+=1 + return co + +def P2PSimSorted(pref1, pref2): + """ Calculate similarity between peers """ + + cooccurrence = getCooccurrence(pref1, pref2) + if cooccurrence == 0: + return 0 + normValue = (len(pref1)*len(pref2))**0.5 + _sim = cooccurrence/normValue + sim = int(_sim*1000) # use integer for bencode + return sim + + +def P2PSimLM(peer_permid, my_pref, peer_pref, owners, total_prefs, mu=1.0): + """ + Calculate similarity between two peers using Bayesian Smooth. + P(U|U') = Sum{I}Pbs(U|I)Pml(I|U') + Pbs(U|I) = (c(U,I) + mu*Pml(U))/(Sum{U}c(U,I) + mu) + Pml(U) = Sum{I}c(U,I) / Sum{U,I}c(U,I) + Pml(I|U') = c(U',I)/Sum{I}c(U',I) + """ + + npeerprefs = len(peer_pref) + if npeerprefs == 0 or total_prefs == 0: + return 0 + + nmyprefs = len(my_pref) + if nmyprefs == 0: + return 0 + + PmlU = float(npeerprefs) / total_prefs + PmlIU = 1.0 / nmyprefs + peer_sim = 0.0 + for item in owners: + nowners = len(owners[item]) + 1 # add myself + cUI = item in peer_pref + PbsUI = float(cUI + mu*PmlU)/(nowners + mu) + peer_sim += PbsUI*PmlIU + return peer_sim * 100000 + + + \ No newline at end of file diff --git a/tribler-mod/Tribler/Core/BuddyCast/similarity.py.bak b/tribler-mod/Tribler/Core/BuddyCast/similarity.py.bak new file mode 100644 index 0000000..6fc7ac2 --- /dev/null +++ b/tribler-mod/Tribler/Core/BuddyCast/similarity.py.bak @@ -0,0 +1,100 @@ +# Written by Jun Wang, Jie Yang +# see LICENSE.txt for license information + +__fool_epydoc = 481 +""" +Formulas: + P(I|U) = sum{U'<-I} P(U'|U) # U' has I in his profile + P(U'|U) = Sum{I}Pbs(U'|I)Pml(I|U) # P2PSim + Pbs(U|I) = (c(U,I) + mu*Pml(U))/(Sum{U}c(U,I) + mu) # mu=1 by tuning on tribler dataset + Pml(I|U) = c(U,I)/Sum{I}c(U,I) + Pml(U) = Sum{I}c(U,I) / Sum{U,I}c(U,I) + +Data Structur: + preferences - U:{I|c(U,I)>0}, # c(U,I) # Sum{I}c(U,I) = len(preferences[U]) + owners - I:{U|c(U,I)>0} # I:I:Sum{U}c(U,I) = len(owners[I]) + userSim - U':P(U'|U) + itemSim - I:P(I|U) + total - Sum{U,I}c(U,I) # Pml(U) = len(preferences[U])/total + +Test: + Using hash(permid) as user id, hash(infohash) as torrent id + Incremental change == overall change +""" + +from sets import Set + +def P2PSim(pref1, pref2): + """ Calculate simple similarity between peers """ + + cooccurrence = len(Set(pref1) & Set(pref2)) + if cooccurrence == 0: + return 0 + normValue = (len(pref1)*len(pref2))**0.5 + _sim = cooccurrence/normValue + sim = int(_sim*1000) # use integer for bencode + return sim + +def getCooccurrence(pref1, pref2): # pref1 and pref2 are sorted + i = 0 + j = 0 + co = 0 + size1 = len(pref1) + size2 = len(pref2) + if size1 == 0 or size2 == 0: + return 0 + while 1: + if (i>= size1) or (j>=size2): break + Curr_ID1 = pref1[i] + Curr_ID2 = pref2[j] + if Curr_ID1 < Curr_ID2 : + i=i+1 + elif Curr_ID1 > Curr_ID2 : + j=j+1 + else: + co +=1 + i+=1 + j+=1 + return co + +def P2PSimSorted(pref1, pref2): + """ Calculate 
similarity between peers """ + + cooccurrence = getCooccurrence(pref1, pref2) + if cooccurrence == 0: + return 0 + normValue = (len(pref1)*len(pref2))**0.5 + _sim = cooccurrence/normValue + sim = int(_sim*1000) # use integer for bencode + return sim + + +def P2PSimLM(peer_permid, my_pref, peer_pref, owners, total_prefs, mu=1.0): + """ + Calculate similarity between two peers using Bayesian Smooth. + P(U|U') = Sum{I}Pbs(U|I)Pml(I|U') + Pbs(U|I) = (c(U,I) + mu*Pml(U))/(Sum{U}c(U,I) + mu) + Pml(U) = Sum{I}c(U,I) / Sum{U,I}c(U,I) + Pml(I|U') = c(U',I)/Sum{I}c(U',I) + """ + + npeerprefs = len(peer_pref) + if npeerprefs == 0 or total_prefs == 0: + return 0 + + nmyprefs = len(my_pref) + if nmyprefs == 0: + return 0 + + PmlU = float(npeerprefs) / total_prefs + PmlIU = 1.0 / nmyprefs + peer_sim = 0.0 + for item in owners: + nowners = len(owners[item]) + 1 # add myself + cUI = item in peer_pref + PbsUI = float(cUI + mu*PmlU)/(nowners + mu) + peer_sim += PbsUI*PmlIU + return peer_sim * 100000 + + + \ No newline at end of file diff --git a/tribler-mod/Tribler/Core/BuddyCast/votecast.py b/tribler-mod/Tribler/Core/BuddyCast/votecast.py new file mode 100644 index 0000000..a7f5420 --- /dev/null +++ b/tribler-mod/Tribler/Core/BuddyCast/votecast.py @@ -0,0 +1,204 @@ +from time import localtime, strftime +# Written by Rameez Rahman +# see LICENSE.txt for license information +# + +import sys +from time import time + +from Tribler.Core.BitTornado.bencode import bencode, bdecode +from Tribler.Core.Statistics.Logger import OverlayLogger +from Tribler.Core.BitTornado.BT1.MessageID import VOTECAST +from Tribler.Core.CacheDB.CacheDBHandler import VoteCastDBHandler +from Tribler.Core.Utilities.utilities import * +from Tribler.Core.Overlay.permid import permid_for_user +from Tribler.Core.CacheDB.sqlitecachedb import bin2str, str2bin +from Tribler.Core.BuddyCast.moderationcast_util import * + +DEBUG_UI = False +DEBUG = False #Default debug +debug = False #For send-errors and other low-level stuff + +AUTO_MODERATE = False #Automatically moderate content, with bogus moderations +AUTO_MODERATE_INTERVAL = 1 #Number of seconds between creation of moderations + +NO_RANDOM_VOTES = 12 +NO_RECENT_VOTES = 13 + + +class VoteCastCore: + """ VoteCastCore is responsible for sending and receiving VOTECAST-messages """ + + ################################ + def __init__(self, data_handler, secure_overlay, session, buddycast_interval_function, log = '', dnsindb = None): + """ Returns an instance of this class + """ + #Keep reference to interval-function of BuddycastFactory + self.interval = buddycast_interval_function + self.data_handler = data_handler + self.dnsindb = dnsindb + self.log = log + self.secure_overlay = secure_overlay + self.votecastdb = VoteCastDBHandler.getInstance() + self.my_permid = self.votecastdb.my_permid + self.max_have_length = SINGLE_HAVE_LENGTH * session.get_moderationcast_moderations_per_have() + + self.network_delay = 30 + #Reference to buddycast-core, set by the buddycast-core (as it is created by the + #buddycast-factory after calling this constructor). + self.buddycast_core = None + + #Debug-interface + if DEBUG_UI: + from moderationcast_test import ModerationCastTest + ModerationCastTest(self) + + #Extend logging with ModerationCAST-messages and status + if self.log: + self.overlay_log = OverlayLogger.getInstance(self.log) + ##self.dnsindb = self.data_handler.get_dns_from_peerdb # Arno, 2009-05-15: Don't see why this must be diff. 
Don't exist anymore anyway self.dnsindb = self.data_handler.get_dns_from_peerdb + + if AUTO_MODERATE: + assert AUTO_MODERATE_INTERVAL > 0 + from moderationcast_experiment import BogusAutoModerator + self.auto_moderator = BogusAutoModerator(AUTO_MODERATE_INTERVAL) + + def initialized(self): + return self.buddycast_core is not None + + ################################ + def createAndSendVoteCastMessage(self, target_permid, selversion): + votecast_data = self.createVoteCastMessage(target_permid) + if len(votecast_data) == 0: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "No votes there.. hence we do not send" + return + + votecast_msg = bencode(votecast_data) + + if self.log: + dns = self.dnsindb(target_permid) + if dns: + ip,port = dns + MSG_ID = "VOTECAST" + msg = voteCastReplyMsgToString(votecast_data) + self.overlay_log('SEND_MSG', ip, port, show_permid(target_permid), selversion, MSG_ID, msg) + + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Sending votecastmsg",voteCastMsgToString(votecast_data) + data = VOTECAST+votecast_msg + self.secure_overlay.send(target_permid, data, self.voteCastSendCallback) + + + ################################ + def createVoteCastMessage(self, target_permid): + """ Create a VOTECAST message """ + + #Select latest own moderations + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Creating votecastmsg..." + records = self.votecastdb.recentVotes(NO_RECENT_VOTES) + + #Add random own moderations + size = NO_RANDOM_VOTES+NO_RECENT_VOTES + random_own = self.votecastdb.randomVotes(size) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "random own >>>>>>>>>>> ", random_own + + for vote in random_own: + if len(records) == size: + break + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "votes information", vote + if vote not in records: + records.append(vote) + data = [] + for record in records: + mod_id = record[0] + vote = record[1] + data.append((mod_id, vote)) + return data + + + ################################ + def voteCastSendCallback(self, exc, target_permid, other=0): + if DEBUG: + if exc is None: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","votecast: *** msg was sent successfully to peer", permid_for_user(target_permid) + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "votecast: *** warning - error in sending msg to", permid_for_user(target_permid), exc + + ################################ + def gotVoteCastMessage(self, recv_msg, sender_permid, selversion): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'votecast: Received a msg from ', permid_for_user(sender_permid) + + if not sender_permid or sender_permid == self.my_permid: + if DEBUG: + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "votecast: error - got votecastMsg from a None peer", \ + permid_for_user(sender_permid), recv_msg + return False + + if self.max_have_length > 0 and len(recv_msg) > self.max_have_length: + if DEBUG: + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "votecast: warning - got large voteCastHaveMsg", len(t) + return False + + votecast_data = {} + + try: + votecast_data = bdecode(recv_msg) + except: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "votecast: warning, invalid bencoded data" + return False + + # check message-structure + if not validVoteCastMsg(votecast_data): + print 
>>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "votecast: invalid votecast_message" + return False + + st = time() + self.handleVoteCastMsg(sender_permid, votecast_data) + et = time() + diff = et - st + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","votecast: HANDLE took %.4f" % diff + + #Log RECV_MSG of uncompressed message + if self.log: + dns = self.dnsindb(sender_permid) + if dns: + ip,port = dns + MSG_ID = "VOTECAST" + msg = voteCastMsgToString(votecast_data) + self.overlay_log('RECV_MSG', ip, port, show_permid(sender_permid), selversion, MSG_ID, msg) + + return True + + ################################ + ################################ + def handleVoteCastMsg(self, sender_permid, data): + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Processing VOTECAST msg from: ", permid_for_user(sender_permid), "; data: ", repr(data) + + for value in data: + vote = {} + vote['mod_id'] = value[0] + vote['voter_id'] = self.votecastdb.getPeerID(sender_permid) + vote['vote'] = value[1] + self.votecastdb.addVote(vote) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Processing VOTECAST msg from: ", permid_for_user(sender_permid), "DONE; data:" + + def showAllVotes(self): + """ Currently this function is only for testing, to show all votes """ + if DEBUG: + records = self.votecastdb.getAll() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Existing votes..." + for record in records: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", " mod_id:",record[0],"; voter_id:", record[1], "; votes:",record[2],"; timestamp:", record[3] + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "End of votes..." + + + + + ################################ diff --git a/tribler-mod/Tribler/Core/BuddyCast/votecast.py.bak b/tribler-mod/Tribler/Core/BuddyCast/votecast.py.bak new file mode 100644 index 0000000..ed74579 --- /dev/null +++ b/tribler-mod/Tribler/Core/BuddyCast/votecast.py.bak @@ -0,0 +1,203 @@ +# Written by Rameez Rahman +# see LICENSE.txt for license information +# + +import sys +from time import time + +from Tribler.Core.BitTornado.bencode import bencode, bdecode +from Tribler.Core.Statistics.Logger import OverlayLogger +from Tribler.Core.BitTornado.BT1.MessageID import VOTECAST +from Tribler.Core.CacheDB.CacheDBHandler import VoteCastDBHandler +from Tribler.Core.Utilities.utilities import * +from Tribler.Core.Overlay.permid import permid_for_user +from Tribler.Core.CacheDB.sqlitecachedb import bin2str, str2bin +from Tribler.Core.BuddyCast.moderationcast_util import * + +DEBUG_UI = False +DEBUG = False #Default debug +debug = False #For send-errors and other low-level stuff + +AUTO_MODERATE = False #Automatically moderate content, with bogus moderations +AUTO_MODERATE_INTERVAL = 1 #Number of seconds between creation of moderations + +NO_RANDOM_VOTES = 12 +NO_RECENT_VOTES = 13 + + +class VoteCastCore: + """ VoteCastCore is responsible for sending and receiving VOTECAST-messages """ + + ################################ + def __init__(self, data_handler, secure_overlay, session, buddycast_interval_function, log = '', dnsindb = None): + """ Returns an instance of this class + """ + #Keep reference to interval-function of BuddycastFactory + self.interval = buddycast_interval_function + self.data_handler = data_handler + self.dnsindb = dnsindb + self.log = log + self.secure_overlay = secure_overlay + self.votecastdb = VoteCastDBHandler.getInstance() + 
self.my_permid = self.votecastdb.my_permid + self.max_have_length = SINGLE_HAVE_LENGTH * session.get_moderationcast_moderations_per_have() + + self.network_delay = 30 + #Reference to buddycast-core, set by the buddycast-core (as it is created by the + #buddycast-factory after calling this constructor). + self.buddycast_core = None + + #Debug-interface + if DEBUG_UI: + from moderationcast_test import ModerationCastTest + ModerationCastTest(self) + + #Extend logging with ModerationCAST-messages and status + if self.log: + self.overlay_log = OverlayLogger.getInstance(self.log) + ##self.dnsindb = self.data_handler.get_dns_from_peerdb # Arno, 2009-05-15: Don't see why this must be diff. Don't exist anymore anyway self.dnsindb = self.data_handler.get_dns_from_peerdb + + if AUTO_MODERATE: + assert AUTO_MODERATE_INTERVAL > 0 + from moderationcast_experiment import BogusAutoModerator + self.auto_moderator = BogusAutoModerator(AUTO_MODERATE_INTERVAL) + + def initialized(self): + return self.buddycast_core is not None + + ################################ + def createAndSendVoteCastMessage(self, target_permid, selversion): + votecast_data = self.createVoteCastMessage(target_permid) + if len(votecast_data) == 0: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "No votes there.. hence we do not send" + return + + votecast_msg = bencode(votecast_data) + + if self.log: + dns = self.dnsindb(target_permid) + if dns: + ip,port = dns + MSG_ID = "VOTECAST" + msg = voteCastReplyMsgToString(votecast_data) + self.overlay_log('SEND_MSG', ip, port, show_permid(target_permid), selversion, MSG_ID, msg) + + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Sending votecastmsg",voteCastMsgToString(votecast_data) + data = VOTECAST+votecast_msg + self.secure_overlay.send(target_permid, data, self.voteCastSendCallback) + + + ################################ + def createVoteCastMessage(self, target_permid): + """ Create a VOTECAST message """ + + #Select latest own moderations + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Creating votecastmsg..." 
+ records = self.votecastdb.recentVotes(NO_RECENT_VOTES) + + #Add random own moderations + size = NO_RANDOM_VOTES+NO_RECENT_VOTES + random_own = self.votecastdb.randomVotes(size) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "random own >>>>>>>>>>> ", random_own + + for vote in random_own: + if len(records) == size: + break + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "votes information", vote + if vote not in records: + records.append(vote) + data = [] + for record in records: + mod_id = record[0] + vote = record[1] + data.append((mod_id, vote)) + return data + + + ################################ + def voteCastSendCallback(self, exc, target_permid, other=0): + if DEBUG: + if exc is None: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","votecast: *** msg was sent successfully to peer", permid_for_user(target_permid) + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "votecast: *** warning - error in sending msg to", permid_for_user(target_permid), exc + + ################################ + def gotVoteCastMessage(self, recv_msg, sender_permid, selversion): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'votecast: Received a msg from ', permid_for_user(sender_permid) + + if not sender_permid or sender_permid == self.my_permid: + if DEBUG: + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "votecast: error - got votecastMsg from a None peer", \ + permid_for_user(sender_permid), recv_msg + return False + + if self.max_have_length > 0 and len(recv_msg) > self.max_have_length: + if DEBUG: + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "votecast: warning - got large voteCastHaveMsg", len(t) + return False + + votecast_data = {} + + try: + votecast_data = bdecode(recv_msg) + except: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "votecast: warning, invalid bencoded data" + return False + + # check message-structure + if not validVoteCastMsg(votecast_data): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "votecast: invalid votecast_message" + return False + + st = time() + self.handleVoteCastMsg(sender_permid, votecast_data) + et = time() + diff = et - st + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","votecast: HANDLE took %.4f" % diff + + #Log RECV_MSG of uncompressed message + if self.log: + dns = self.dnsindb(sender_permid) + if dns: + ip,port = dns + MSG_ID = "VOTECAST" + msg = voteCastMsgToString(votecast_data) + self.overlay_log('RECV_MSG', ip, port, show_permid(sender_permid), selversion, MSG_ID, msg) + + return True + + ################################ + ################################ + def handleVoteCastMsg(self, sender_permid, data): + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Processing VOTECAST msg from: ", permid_for_user(sender_permid), "; data: ", repr(data) + + for value in data: + vote = {} + vote['mod_id'] = value[0] + vote['voter_id'] = self.votecastdb.getPeerID(sender_permid) + vote['vote'] = value[1] + self.votecastdb.addVote(vote) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Processing VOTECAST msg from: ", permid_for_user(sender_permid), "DONE; data:" + + def showAllVotes(self): + """ Currently this function is only for testing, to show all votes """ + if DEBUG: + records = self.votecastdb.getAll() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", 
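# Editorial sketch (not part of the original patch) of the selection policy in
# createVoteCastMessage above: start from the most recent own votes and pad with
# random ones, skipping duplicates, until NO_RECENT_VOTES + NO_RANDOM_VOTES records
# are collected. The two arguments stand in for votecastdb.recentVotes() and
# votecastdb.randomVotes().
def select_votes(recent_votes, random_votes, no_recent=13, no_random=12):
    target = no_recent + no_random
    records = list(recent_votes[:no_recent])
    for vote in random_votes:
        if len(records) >= target:
            break
        if vote not in records:
            records.append(vote)
    # reduce each record to the (mod_id, vote) pair that goes on the wire
    return [(record[0], record[1]) for record in records]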
localtime())," ", "Existing votes..." + for record in records: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", " mod_id:",record[0],"; voter_id:", record[1], "; votes:",record[2],"; timestamp:", record[3] + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "End of votes..." + + + + + ################################ diff --git a/tribler-mod/Tribler/Core/CacheDB/BsdCacheDBHandler.py b/tribler-mod/Tribler/Core/CacheDB/BsdCacheDBHandler.py new file mode 100644 index 0000000..6c7a681 --- /dev/null +++ b/tribler-mod/Tribler/Core/CacheDB/BsdCacheDBHandler.py @@ -0,0 +1,1257 @@ +from time import localtime, strftime +# Written by Jie Yang +# see LICENSE.txt for license information + +from cachedb import * +from copy import deepcopy +from sets import Set +from traceback import print_exc +from threading import currentThread +from time import time +import base64, socket + +from Tribler.Core.BitTornado.bencode import bencode, bdecode +from sets import Set + +from Tribler.Core.Utilities.utilities import show_permid_shorter, validIP, validPort, validPermid, validName +from Tribler.Core.CacheDB.Notifier import Notifier +from Tribler.Core.simpledefs import * + +DEBUG = False + +class BasicDBHandler: + __single = None + + def __init__(self): + self.dbs = [] # don't include read only database + self._single = self + + def getInstance(*args, **kw): + if BasicDBHandler.__single is None: + BasicDBHandler(*args, **kw) + return BasicDBHandler.__single + getInstance = staticmethod(getInstance) + + def __del__(self): + try: + self.sync() + except: + # Arno: on windows it may happen that tribler_done() is called + # before these __del__ statements. tribler_done() closes the + # databases, so this indirect call to db._sync() will throw + # an exception saying the database has already been closed. + pass + #print_exc() + + def close(self): + for db in self.dbs: + db.close() + + def size(self): + return self.dbs[0]._size() + + def iteritems(self): + return self.dbs[0]._iteritems() + + def sync(self): + for db in self.dbs: + db._sync() + + def clear(self): + for db in self.dbs: + db._clear() + + def printList(self): + records = self.dbs[0]._items() + for key, value in records: + print key, value + + + + +class SuperPeerDBHandler(BasicDBHandler): + """ + Jelle: now superpeers are read from file and then kept in memory only. + Necessary to pickle? 
+ """ + def __init__(self, config, db_dir=''): + BasicDBHandler.__init__(self) + self.peer_db = PeerDB.getInstance(db_dir=db_dir) + self.dbs = [self.peer_db] + self.notifier = Notifier.getInstance() + filename = os.path.join(config['install_dir'], config['superpeer_file']) + self.superpeer_list = self.readSuperPeerList(filename) + #print 'sp list: %s' % self.superpeer_list + self.updatePeerDB() + + __single = None + def getInstance(*args, **kw): + if SuperPeerDBHandler.__single is None: + SuperPeerDBHandler.__single = SuperPeerDBHandler(*args, **kw) + return SuperPeerDBHandler.__single + getInstance = staticmethod(getInstance) + + + def clear(self): # clean database + self.superpeer_list = {} + + + def getSuperPeers(self): + # return only permids + return [a['permid'] for a in self.superpeer_list] + + def size(self): + return len(self.getSuperPeers()) + + def printList(self): + print self.getSuperPeers() + + def readSuperPeerList(self, filename=''): + """ read (name, permid, superpeer_ip, superpeer_port) lines from a text file """ + + try: + filepath = os.path.abspath(filename) + file = open(filepath, "r") + except IOError: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "superpeer: cannot open superpeer file", filepath + return [] + + superpeers = file.readlines() + file.close() + superpeers_info = [] + for superpeer in superpeers: + if superpeer.strip().startswith("#"): # skip commended lines + continue + superpeer_line = superpeer.split(',') + superpeer_info = [a.strip() for a in superpeer_line] + try: + superpeer_info[2] = base64.decodestring(superpeer_info[2]+'\n' ) + except: + print_exc() + continue + if self.validSuperPeerList(superpeer_info): + try: + ip = socket.gethostbyname(superpeer_info[0]) + superpeer = {'ip':ip, 'port':superpeer_info[1], + 'permid':superpeer_info[2]} + if len(superpeer_info) > 3: + superpeer['name'] = superpeer_info[3] + superpeers_info.append(superpeer) + except: + print_exc() + pass + + return superpeers_info + + def validSuperPeerList(self, superpeer_info): + try: + if len(superpeer_info) < 3: + raise RuntimeError, "one line in superpeers.txt contains at least 3 elements" + #validIP(superpeer_info[0]) + validPort(int(superpeer_info[1])) + validPermid(superpeer_info[2]) + except Exception: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","superpeer: Parse error reading",superpeer_info + print_exc(file=sys.stderr) + return False + else: + return True + + def updatePeerDB(self): + print 'superpeers: updating db' + for superpeer in self.superpeer_list: + superpeer = deepcopy(superpeer) + if not isinstance(superpeer, dict) or 'permid' not in superpeer: + continue + permid = superpeer.pop('permid') + self.peer_db.updateItem(permid, superpeer) + self.notifier.notify(NTFY_PEERS, NTFY_UPDATE, permid) + +class FriendDBHandler(BasicDBHandler): + + def __init__(self, db_dir=''): + BasicDBHandler.__init__(self) + #self.my_db = MyDB.getInstance(db_dir=db_dir) + self.peer_db = PeerDB.getInstance(db_dir=db_dir) + self.dbs = [self.peer_db] + + __single = None + def getInstance(*args, **kw): + if FriendDBHandler.__single is None: + FriendDBHandler.__single = FriendDBHandler(*args, **kw) + return FriendDBHandler.__single + getInstance = staticmethod(getInstance) + + def size(self): + return len(self.my_db.getFriends()) + + def printList(self): + print self.my_db.getFriends() + + def addFriend(self, permid): + self.my_db.addFriend(permid) + #self.my_db._sync() + + def addExternalFriend(self, friend): + if not 
isinstance(friend, dict) or 'permid' not in friend: + return + permid = friend.pop('permid') + if permid not in self.getFriends(): + self.peer_db.updateItem(permid, friend) + #self.peer_db._sync() + #self.my_db.addFriend(permid) # Fixme + #self.my_db._sync() + else: + self.peer_db.updateItem(permid, friend, update_time=False) + #self.peer_db._sync() + + def getFriendList(self): + """returns a list of permids""" + return [] # FIXME, no friendsdb yet + + def getFriends(self): + """returns a list of peer infos including permid""" + return [] # Fixme + + def isFriend(self, permid): + return False # Fixme + + def deleteFriend(self,permid): + pass + + def updateFriendIcon(self, permid, icon_path): + self.peer_db.updatePeer(permid, 'icon', icon_path) + +class PeerDBHandler(BasicDBHandler): + + def __init__(self, config, db_dir=''): + BasicDBHandler.__init__(self) + self.notifier = Notifier.getInstance() + self.peer_db = PeerDB.getInstance(db_dir=db_dir) + self.pref_db = PreferenceDB.getInstance(db_dir=db_dir) + self.friends_db_handler = FriendDBHandler.getInstance() + self.pref_db_handler = PreferenceDBHandler(db_dir=db_dir) + self.ip_db = IP2PermIDDB.getInstance(db_dir=db_dir) + #self.mm = Mugshot Manager.getInstance() + #self.mm.register(config) + self.dbs = [self.peer_db, self.ip_db] + + __single = None + def getInstance(*args, **kw): + if PeerDBHandler.__single is None: + PeerDBHandler.__single = PeerDBHandler(*args, **kw) + return PeerDBHandler.__single + getInstance = staticmethod(getInstance) + + def __len__(self): + return self.peer_db._size() + + def getPeer(self, permid, default=False): + return self.peer_db.getItem(permid, default) + + def getPeerSim(self, permid): + x = self.peer_db.getItem(permid) + if not x: + return 0 + return x.get('similarity', 0) + + def getPeerList(self): # get the list of all peers' permid + return self.peer_db._keys() + + def getTasteBuddyList(self): + return self.pref_db._keys() + + def getRandomPeerList(self): # Expensive + # TODO: improve performance + return list(Set(self.peer_db._keys()) - Set(self.pref_db._keys())) + + def getPeers(self, peer_list, keys): # get a list of dictionaries given peer list + peers = [] + if 'permid' in keys: + permid = True + keys.remove('permid') + else: + permid = False + + count = 0 + for peer in peer_list: + p = self.peer_db.getItem(peer) + if not p: + break # database is closed + if permid: + d = {'permid':peer} + else: + d = {} + for key in keys: + d[key] = p[key] + peers.append(d) + + count += 1 + if count % 1000 == 0: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","peerdb: Read items",count,currentThread().getName() + + return peers + + def getPeersValue(self, peer_list, keys=None): # get a list of values given peer list + if not keys: + keys = self.peer_db.default_item.keys() + values = [] + for peer in peer_list: + p = self.peer_db.getItem(peer, default=True) + if len(keys) == 1: + values.append(p[keys[0]]) + else: + d = [] + for key in keys: + d.append(p[key]) + values.append(d) + + return values + + def addPeer(self, permid, value, update_dns=True, updateFlag = True): + + if value.has_key('last_seen'): # get the latest last_seen + old_last_seen = 0 + old_data = self.getPeer(permid) + if old_data: + old_last_seen = old_data.get('last_seen', 0) + last_seen = value['last_seen'] + now = int(time()) + value['last_seen'] = min(now, max(last_seen, old_last_seen)) + + self.peer_db.updateItem(permid, value, update_dns) + + if value.has_key('ip') and update_dns: + self.updatePeerIP(permid, value['ip']) 
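# Editorial sketch (not part of the original patch) of the last_seen rule in
# addPeer above: keep the newer of the stored and incoming timestamps, but clamp it
# to the local clock so a peer reporting a future time cannot push last_seen ahead
# of 'now'.
from time import time

def merged_last_seen(incoming, stored, now=None):
    if now is None:
        now = int(time())
    return min(now, max(incoming, stored))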
+ + if updateFlag: + self.notifier.notify(NTFY_PEERS, NTFY_INSERT, permid) + + def hasPeer(self, permid): + return self.peer_db.hasItem(permid) + + def findPeers(self, key, value): + # Warning: if key is not 'permid', then it is a very EXPENSIVE operation. + res = [] + if key == 'permid': + peer = self.getPeer(value) + if peer: + peer.update({'permid':value}) + res.append(peer) + else: + for permid, peer in self.peer_db._items(): + try: + if peer[key] == value: + peer.update({'permid':permid}) + res.append(peer) + except KeyError: + pass + return res + + def updatePeer(self, permid, key, value, updateFlag = True): + self.peer_db.updateItem(permid, {key:value}) + if key == 'ip': + self.updatePeerIP(permid, value) + if updateFlag: + self.notifier.notify(NTFY_PEERS, NTFY_UPDATE, permid, key) + + def updatePeerIcon(self, permid, icontype, icondata, updateFlag = True): + self.mm.save_data(permid, icontype, icondata) + if updateFlag: + self.notifier.notify(NTFY_PEERS, NTFY_UPDATE, permid, 'icon') + + + def getPeerIcon(self, permid, name = ''): + return self.mm.load_data(permid, name) + + def updatePeerIP(self, permid, ip): + peer_data = self.peer_db._get(permid, {}) + old_ip = peer_data.get('ip', None) + if not old_ip: # not exist in peer_db, don't touch it either + return + + if old_ip != ip: # changed ip + old_permid = self.ip_db.getPermIDByIP(old_ip) + if old_permid == permid: # ip_db is consistent with peer_db + self.ip_db.deleteItem(old_ip) # delete the old map + permid2 = self.ip_db.getPermIDByIP(ip) + if permid2 != permid: + self.ip_db.addIP(ip,permid) + + + def deletePeer(self, permid, updateFlag = True): + if self.friends_db_handler.isFriend(permid): + return False + self.peer_db._delete(permid) + self.pref_db_handler.deletePeer(permid) + self.ip_db.deletePermID(permid) + + if updateFlag: + self.notifier.notify(NTFY_PEERS, NTFY_DELETE, permid) + + return True + + def updateTimes(self, permid, key, change, updateFlag = True): + item = self.peer_db.getItem(permid) + if not item: + return + if not item.has_key(key): + value = 0 + else: + value = item[key] + value += change + self.peer_db.updateItem(permid, {key:value}) + + if updateFlag: + self.notifier.notify(NTFY_PEERS, NTFY_UPDATE, permid, key) + + def getPermIDByIP(self,ip): + return self.ip_db.getPermIDByIP(ip) + +class PreferenceDBHandler(BasicDBHandler): + + def __init__(self, db_dir=''): + BasicDBHandler.__init__(self) + self.owner_db = OwnerDB.getInstance(db_dir=db_dir) + self.pref_db = PreferenceDB.getInstance(db_dir=db_dir) + self.dbs = [self.pref_db, self.owner_db] + + __single = None + def getInstance(*args, **kw): + if PreferenceDBHandler.__single is None: + PreferenceDBHandler.__single = PreferenceDBHandler(*args, **kw) + return PreferenceDBHandler.__single + getInstance = staticmethod(getInstance) + + def getPreferences(self, permid): + return self.pref_db.getItem(permid) + + def getPrefList(self, permid): + return self.pref_db._get(permid,{}).keys() + + def addPreference(self, permid, infohash, data={}): + self.pref_db.addPreference(permid, infohash, data) + self.owner_db.addOwner(infohash, permid) + + def deletePeer(self, permid): # delete a peer from pref_db + prefs = self.pref_db.getItem(permid) + for infohash in prefs: + self.owner_db.deleteOwner(infohash, permid) + self.pref_db.deleteItem(permid) + + def deletePreference(self, permid, infohash): + self.pref_db.deletePreference(permid, infohash) + self.owner_db.deleteOwner(infohash, permid) + + def hasPreference(self, permid): + return self.pref_db._has_key(permid) + + 
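# Editorial sketch (not part of the original patch) of the two-way index kept by
# PreferenceDBHandler: pref_db maps permid -> preferred infohashes and owner_db
# maps infohash -> owning permids, and every add/delete touches both so the two
# views stay consistent. Plain dicts of sets stand in for the Bsd-backed databases.
def add_preference(pref_db, owner_db, permid, infohash):
    pref_db.setdefault(permid, set()).add(infohash)
    owner_db.setdefault(infohash, set()).add(permid)

def delete_peer(pref_db, owner_db, permid):
    # mirrors PreferenceDBHandler.deletePeer: drop the peer from both views
    for infohash in pref_db.get(permid, set()):
        owner_db.get(infohash, set()).discard(permid)
    pref_db.pop(permid, None)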
def getNumPrefs(self, permid): + if not self.pref_db._has_key(permid): + return 0 + x = self.pref_db.getItem(permid) + return len(x) + +class TorrentDBHandler(BasicDBHandler): + + def __init__(self, db_dir=''): + BasicDBHandler.__init__(self) + self.notifier = Notifier.getInstance() + self.torrent_db = TorrentDB.getInstance(db_dir=db_dir) + self.mypref_db = MyPreferenceDB.getInstance(db_dir=db_dir) + self.owner_db = OwnerDB.getInstance(db_dir=db_dir) + self.dbs = [self.torrent_db] + + __single = None + def getInstance(*args, **kw): + if TorrentDBHandler.__single is None: + TorrentDBHandler.__single = TorrentDBHandler(*args, **kw) + return TorrentDBHandler.__single + getInstance = staticmethod(getInstance) + + def addTorrent(self, infohash, torrent={}, new_metadata=False, updateFlag = True): + # add a new torrent or update an old torrent's info + if not torrent and self.hasTorrent(infohash): # no need to add + return False + self.torrent_db.updateItem(infohash, torrent) + + if updateFlag: + self.notifier.notify(NTFY_TORRENTS, NTFY_INSERT, infohash) + + try: + # Arno: PARANOID SYNC + self.sync() + except: + print_exc() + return True + + def updateTorrent(self, infohash, **kw): # watch the schema of database + updateFlag = kw.get('updateFlag', True) + if kw.has_key('updateFlag'): + del kw['updateFlag'] + self.torrent_db.updateItem(infohash, kw) + + if updateFlag: + self.notifier.notify(NTFY_TORRENTS, NTFY_UPDATE, infohash, kw.keys()) + + + def deleteTorrent(self, infohash, delete_file=False, updateFlag = True): + if self.mypref_db.hasPreference(infohash): # don't remove torrents in my pref + return False + + if delete_file: +# data = self.torrent_db._get(infohash) +# if data and data['torrent_name']: +# live = data.get('status', 'unknown') +# if live != 'dead' and live != 'unknown': + deleted = self.eraseTorrentFile(infohash) +# if deleted: +# # may remove dead torrents, so this number is not consistent +# self.torrent_db.num_metadatalive -= 1 + else: + deleted = True + + if deleted: + self.torrent_db._delete(infohash) + if updateFlag: + self.notifier.notify(NTFY_TORRENTS, NTFY_DELETE, infohash) + + return deleted + + def eraseTorrentFile(self, infohash): + data = self.torrent_db._get(infohash) + if not data or not data['torrent_name'] or not data['info']: + return False + src = os.path.join(data['torrent_dir'], data['torrent_name']) + if not os.path.exists(src): # already removed + return True + + try: + os.remove(src) + except Exception, msg: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "cachedbhandler: failed to erase torrent", src, Exception, msg + return False + + return True + + def getTorrent(self, infohash, num_owners=False,savemem=False): + torrent = self.torrent_db.getItem(infohash,savemem=savemem) + if torrent and num_owners: + torrent['num_owners'] = self.owner_db.getNumOwners(infohash) + return torrent + + def getTorrents(self, torrent_list, keys=None): # get a list of dictionaries given torrent list + if not keys: + keys = self.torrent_db.default_item.keys() + keys += ['infohash'] + torrents = [] + if 'infohash' in keys: + infohash = True + keys.remove('infohash') + else: + infohash = False + if 'num_owners' in keys: + num_owners = True + else: + num_owners = False + for torrent in torrent_list: + p = self.torrent_db.getItem(torrent, default=True) + if num_owners: + p['num_owners'] = self.owner_db.getNumOwners(torrent) + if infohash: + p['infohash'] = torrent + torrents.append(p) + return torrents + + def getAllTorrents(self): + return 
self.torrent_db._keys() + + + def getRecommendedTorrents(self, light=True, all=False, myprefs=False, countcallback=None): + """ get torrents on disk but not in my pref + BE AWARE: the returned object of this call may consume lots of memory. + You should delete the object when possible + """ + + #print '>>>>>>'*5, "getRecommendedTorrents", currentThread().getName() + #print_stack() + #loaded by DataLoadingThread + + start_time = time() + mypref_set = Set(self.mypref_db._keys()) + + if myprefs: + all_list = mypref_set + else: + if all: + all_list = self.torrent_db._keys() + else: + all_list = Set(self.torrent_db._keys()) - mypref_set + + + # Arno: save memory by reusing dict keys + key_infohash = 'infohash' + key_myDownloadHistory = 'myDownloadHistory' + key_download_started = 'download_started' + key_num_owners = 'key_num_owners' + + torrents = [] + count = 0 +# num_live_torrents = 0 + setOfInfohashes = Set() + for torrent in all_list: + if torrent in setOfInfohashes: # do not add 2 torrents with same infohash + continue + p = self.torrent_db.getItem(torrent,savemem=True) + if not p: + break #database not available any more + if not type(p) == dict or not p.get('torrent_name', None) or not p.get('info', None): + deleted = self.deleteTorrent(torrent) # remove infohashes without torrent + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "TorrentDBHandler: deleted empty torrent", deleted, p.get('torrent_name', None), p.get('info', None) + +# if torrent not in mypref_set: +# live = p.get('status', 'unknown') +# if live != 'dead' and live != 'unknown': +# num_live_torrents += 1 + + if torrent in mypref_set: + p[key_myDownloadHistory] = True + mypref_obj = self.mypref_db.getItem(torrent) + if mypref_obj: + p[key_download_started] = mypref_obj['created_time'] + + p[key_infohash] = torrent + setOfInfohashes.add(torrent) + if not light: # set light as ture to be faster + p[key_num_owners] = self.owner_db.getNumOwners(torrent) + + torrents.append(p) + + count += 1 + if count % 1000 == 0: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","torrentdb: Read items",count,currentThread().getName() + if countcallback is not None: + countcallback(count) + + del all_list + del setOfInfohashes + +# from traceback import print_stack +# print_stack() +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '[StartUpDebug]----------- from getRecommendedTorrents ----------', time()-start_time, currentThread().getName(), '\n\n' + +# self.torrent_db.num_metadatalive = num_live_torrents + #print 'Returning %d torrents' % len(torrents) + + return torrents + + + + def getCollectedTorrentHashes(self): + """ get infohashes of torrents on disk, used by torrent checking, + and metadata handler + """ + all_list = Set(self.torrent_db._keys()) + all_list -= Set(self.mypref_db._keys()) + + return all_list + + def hasTorrent(self, infohash): + return self.torrent_db._has_key(infohash) + + def getLiveTorrents(self, peerlist): + ret = [] + for infohash in peerlist: + data = self.torrent_db._get(infohash) + if isinstance(data, dict): + live = data.get('status', 'unknown') + if live != 'dead': + ret.append(infohash) + return ret + + def getOthersTorrentList(self, num=-1, sorted=True): # get the list of torrents which are not in my preference + all_list = list(Set(self.torrent_db._keys()) - Set(self.mypref_db._keys())) + if num < 0: + return all_list + if not sorted: #TODO: seperate sort function from getOthersTorrentList + return all_list + values = [] + for torrent in all_list: + 
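# Editorial sketch (not part of the original patch) of the per-torrent loop in
# getRecommendedTorrents above: skip duplicate infohashes, drop records that miss
# their name or metainfo, and flag entries that are already in my download history.
# 'get_item' stands in for torrent_db.getItem.
def recommended_torrents(all_infohashes, mypref_set, get_item):
    seen, torrents = set(), []
    for infohash in all_infohashes:
        if infohash in seen:
            continue
        record = get_item(infohash)
        if not isinstance(record, dict) or not record.get('torrent_name') or not record.get('info'):
            continue  # the real code also deletes such incomplete records
        if infohash in mypref_set:
            record['myDownloadHistory'] = True
        record['infohash'] = infohash
        seen.add(infohash)
        torrents.append(record)
    return torrents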
t = self.torrent_db.getItem(torrent, default=True) + values.append(t['relevance']) + nlist = len(all_list) + aux = [(values[i], i) for i in xrange(nlist)] + aux.sort() + aux.reverse() + return [all_list[i] for k, i in aux[:num]] + + def getTorrentsValue(self, torrent_list, keys=None): # get a list of values given peer list + if not keys: + keys = self.torrent_db.default_item.keys() + if not isinstance(keys, list): + keys = [str(keys)] + values = [] + for torrent in torrent_list: + t = self.torrent_db.getItem(torrent, default=True) + if len(keys) == 1: + values.append(t[keys[0]]) + else: + d = [] + for key in keys: + d.append(t[key]) + values.append(d) + + return values + + def getNoMetaTorrents(self): # get the list of torrents which only have an infohash without the metadata + def hasNoTorrentFile(key): + data = self.torrent_db._get(key) + if not data: # if no record, ignore + return False + if not data['torrent_name'] or not data['info']: # if no info, selected + return True + return False # if has info but no file, it means the torrent file has been removed. ignore + + all_keys = self.torrent_db._keys() + no_metadata_list = filter(hasNoTorrentFile, all_keys) + return no_metadata_list + + def hasMetaData(self, infohash): + value = self.torrent_db._get(infohash) + if not value: + return False + name = value.get('torrent_name', None) + if not name: + return False + return True + + def getOwners(self, infohash): + return self.owner_db.getItem(infohash) + + def updateTorrentRelevance(self, infohash, relevance, updateFlag = True): + self.torrent_db.updateItem(infohash, {'relevance':relevance}) + if updateFlag: + self.notifier.notify(NTFY_TORRENTS, NTFY_UPDATE, infohash, 'relevance') + +#=============================================================================== +# def getNumMetadataAndLive(self): # TODO +# return self.torrent_db.num_metadatalive +#=============================================================================== + +class MyPreferenceDBHandler(BasicDBHandler): + + def __init__(self, db_dir=''): + BasicDBHandler.__init__(self) + self.notifier = Notifier.getInstance() + self.mypref_db = MyPreferenceDB.getInstance(db_dir=db_dir) + self.torrent_db = TorrentDB.getInstance(db_dir=db_dir) + self.dbs = [self.mypref_db, self.torrent_db] + + __single = None + def getInstance(*args, **kw): + if MyPreferenceDBHandler.__single is None: + MyPreferenceDBHandler.__single = MyPreferenceDBHandler(*args, **kw) + return MyPreferenceDBHandler.__single + getInstance = staticmethod(getInstance) + + def getPreferences(self, key=None): + all_items = self.mypref_db._items() + if key is None: + ret = [] + for item in all_items: + item[1].update({'infohash':item[0]}) + ret.append(item[1]) + return ret + else: + return [all_items[i][1][key] for i in xrange(len(all_items))] + + def getPrefList(self): + return self.mypref_db._keys() + + def getCreationTime(self, infohash): + "Return creation time. 
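# Editorial sketch (not part of the original patch) of the decorate-sort-undecorate
# idiom that getOthersTorrentList above uses to return the 'num' most relevant
# torrents: pair each relevance with its index, sort descending, and map the
# surviving indices back to infohashes.
def top_by_relevance(infohashes, relevances, num):
    aux = sorted(zip(relevances, range(len(infohashes))), reverse=True)
    return [infohashes[i] for _, i in aux[:num]]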
Used for sorting in library view" + item = self.mypref_db.getItem(infohash, default=False) + if item: + return item.get('created_time') + else: + return None + + def getPrefs(self, pref_list, keys): # get a list of dictionaries given peer list + peers = [] + for torrent in pref_list: + d = self.mypref_db.getItem(torrent, default=True) + t = self.torrent_db.getItem(torrent, default=True) + try: + d.update(t) + except: + continue + if 'infohash' in keys: + d.update({'infohash':torrent}) + for key in d.keys(): + if key not in keys: + d.pop(key) + peers.append(d) + + return peers + + def removeFakeAndDeadTorrents(self, items): + def fakeFilter(item): + infohash = item[0] # infohash + valid = self.mypref_db.getRank(infohash) >= 0 + torrentdata = self.torrent_db.getItem(infohash, default=True) # defaulttorrent has status 'unknown' + alive = torrentdata.get('status', 'unknown') != 'dead' + secret = torrentdata.get('secret', False) # exclude secret downloads. + return alive and valid and not secret + return filter(fakeFilter, items) + + + def getRecentPrefList(self, num=0): # num = 0: all files + all_items = self.mypref_db._items() + valid_items = self.removeFakeAndDeadTorrents(all_items) + prefs = [(item[1]['last_seen'], item[0]) for item in valid_items] + prefs.sort() + prefs.reverse() + if num > 0: + return [item[1] for item in prefs[:num]] + else: + return [item[1] for item in prefs] + + def hasPreference(self, infohash): + return self.mypref_db._has_key(infohash) + + def addPreference(self, infohash, data={}): + if not data and self.hasPreference(infohash): + return False + self.mypref_db.updateItem(infohash, data) + return True + + def deletePreference(self, infohash): + self.mypref_db.deleteItem(infohash) + + def updateRank(self, infohash, rank): + self.mypref_db.updateItem(infohash, {'rank':rank}) + self.sync() + + +class OwnerDBHandler(BasicDBHandler): + + def __init__(self, db_dir=''): + BasicDBHandler.__init__(self) + self.owner_db = OwnerDB.getInstance(db_dir=db_dir) + self.pref_db = PreferenceDB.getInstance(db_dir=db_dir) + self.mypref_db = MyPreferenceDB.getInstance(db_dir=db_dir) + self.torrent_db = TorrentDB.getInstance(db_dir=db_dir) + self.dbs = [self.owner_db] + self.sim_cache = {} # used to cache the getSimItems + + __single = None + def getInstance(*args, **kw): + if OwnerDBHandler.__single is None: + OwnerDBHandler.__single = OwnerDBHandler(*args, **kw) + return OwnerDBHandler.__single + getInstance = staticmethod(getInstance) + + def getTorrents(self): + return self.owner_db._keys() + + def getSimItems(self, torrent_hash, num=15): + """ Get a list of similar torrents given a torrent hash. The torrents + must exist and be not dead. 
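# Editorial sketch (not part of the original patch) of how getRecentPrefList
# combines the fake/dead filter with a newest-first sort: an item survives only if
# its rank is non-negative, its torrent is not 'dead' and it is not a secret
# download, and the survivors are ordered by last_seen, newest first.
def is_listable(rank, status, secret):
    return rank >= 0 and status != 'dead' and not secret

def recent_pref_list(items, num=0):
    # items: (infohash, record) pairs that already passed is_listable
    prefs = sorted(((record['last_seen'], infohash) for infohash, record in items),
                   reverse=True)
    infohashes = [infohash for _, infohash in prefs]
    return infohashes[:num] if num > 0 else infohashes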
+ Input + torrent_hash: the infohash of a torrent + num: the number of similar torrents to get + output: + returns a list of infohashes, sorted by similarity, + """ + + start = time() + mypref_list = self.mypref_db._keys() + if torrent_hash in self.sim_cache: + mypref_set = Set(mypref_list) + oldrec = self.sim_cache[torrent_hash] + for item in oldrec[:]: # remove common torrents + if item in mypref_set: + oldrec.remove(item) + return oldrec + + owners = self.owner_db._get(torrent_hash, {}) + nowners = len(owners) + if not owners or nowners < 1: + return [] + co_torrents = {} # torrents have co + for owner in owners: + prefs = self.pref_db.getItem(owner) + for torrent in prefs: + if torrent not in co_torrents: + co_torrents[torrent] = 1 + else: + co_torrents[torrent] += 1 + if torrent_hash in co_torrents: + co_torrents.pop(torrent_hash) + for infohash in mypref_list: + if infohash in co_torrents: + co_torrents.pop(infohash) + + sim_items = [] + + for torrent in co_torrents: + co = co_torrents[torrent] +# if co <= 1: +# continue + + # check if the torrent is collected and live + has_key = self.torrent_db._has_key(torrent) + if has_key == False: + continue + elif has_key == None: + break + value = self.torrent_db._get(torrent) + if not value: # sth. is wrong + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "cachedbhandler: getSimItems meets error in getting data" + break + info = value.get('info', {}) + name = info.get('name', None) + if not name: + continue + live = value.get('status', 'unknown') + if live == 'dead': + continue + + nowners2 = self.owner_db.getNumOwners(torrent) + if nowners2 == 0: # sth. is wrong + continue + sim = co/(nowners*nowners2)**0.5 + sim_items.append((sim, torrent)) + + sim_items.sort() + sim_items.reverse() + sim_torrents = [torrent for sim, torrent in sim_items[:num]] + + self.sim_cache[torrent_hash] = sim_torrents + return sim_torrents + + + +class BarterCastDBHandler(BasicDBHandler): + + def __init__(self, session, db_dir=''): + BasicDBHandler.__init__(self) + self.bartercast_db = BarterCastDB.getInstance(db_dir=db_dir) + self.peer_db = PeerDB.getInstance(db_dir=db_dir) + self.dbs = [self.bartercast_db] + self.my_permid = session.get_permid() + + __single = None + def getInstance(*args, **kw): + if BarterCastDBHandler.__single is None: + BarterCastDBHandler.__single = BarterCastDBHandler(*args, **kw) + return BarterCastDBHandler.__single + getInstance = staticmethod(getInstance) + + def __len__(self): + return self.bartercast_db._size() + + def getName(self, permid): + + if permid == 'testpermid_1': + return "Test_1" + elif permid == 'testpermid_2': + return "Test_2" + elif permid == 'non-tribler': + return "Non-tribler" + + peer = self.peer_db.getItem(permid, False) + if peer == None: + return 'peer %s' % show_permid_shorter(permid) + else: + name = peer.get('name', '') + if name == '': + name = 'peer %s' % show_permid_shorter(permid) + return name + + def getItem(self, (permid_1, permid_2), default=False): + + # in the database, permid-tuple is always sorted + # to ensure unique entries for each permid combination + if permid_1 > permid_2: + reverse = True + permid_from = permid_2 + permid_to = permid_1 + else: + reverse = False + permid_from = permid_1 + permid_to = permid_2 + + item = self.bartercast_db.getItem((permid_from, permid_to), default) + + # if peer in peerdb but not in bartercastdb: add peer + if item == None: # and peerdb_peer != None: + + if DEBUG: + print 'Item (%s, %s) added to BarterCastDB' % (self.getName(permid_from), 
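# Editorial sketch (not part of the original patch) of the score computed by
# getSimItems above: for every torrent that co-occurs in the preference lists of
# this torrent's owners, similarity is the co-occurrence count divided by the
# geometric mean of the two owner counts (a cosine-style measure), and the best
# 'num' candidates are returned.
def sim_scores(n_owners_query, co_counts, n_owners_of, num=15):
    # co_counts: candidate infohash -> owners shared with the query torrent
    # n_owners_of: candidate infohash -> total number of owners
    scored = []
    for infohash, co in co_counts.items():
        n_other = n_owners_of.get(infohash, 0)
        if n_other == 0:
            continue
        scored.append((co / (n_owners_query * n_other) ** 0.5, infohash))
    scored.sort(reverse=True)
    return [infohash for _, infohash in scored[:num]]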
self.getName(permid_to)) + + self.addItem((permid_from, permid_to), self.bartercast_db.default_item) + + # get item again now it exists + item = self.bartercast_db.getItem((permid_from, permid_to), default) + + # if reverse: exchange up and down so that the caller doesnt have to worry + # about the order of permids in the tuple + if reverse: + down = item['downloaded'] + up = item['uploaded'] + item['downloaded'] = up + item['uploaded'] = down + + return item + + + def getItemList(self): # get the list of all peers' permid + keys = map(lambda key: bdecode(key), self.bartercast_db._keys()) + return keys + + # Return (sorted) list of the top N peers with the highest (combined) values for the given keys + def getTopNPeers(self, n, local_only = False): + n = max(1, n) + itemlist = self.getItemList() + + if local_only: + # get only items of my local dealings + itemlist = filter(lambda (permid_from, permid_to): permid_to == self.my_permid or permid_from == self.my_permid, itemlist) + + total_up = {} + total_down = {} + + + for (permid_1, permid_2) in itemlist: + + item = self.getItem((permid_1, permid_2)) + + up = item['uploaded'] *1024 # make into bytes + down = item['downloaded'] *1024 + + if DEBUG: + print "BarterCast DB entry: (%s, %s) up = %d down = %d" % (self.getName(permid_1), self.getName(permid_2), up, down) + + # process permid_1 + total_up[permid_1] = total_up.get(permid_1, 0) + up + total_down[permid_1] = total_down.get(permid_1, 0) + down + + # process permid_2 + total_up[permid_2] = total_up.get(permid_2, 0) + down + total_down[permid_2] = total_down.get(permid_2, 0) + up + + + # create top N peers + top = [] + min = 0 + + for peer in total_up.keys(): + + up = total_up[peer] + down = total_down[peer] + + if DEBUG: + print "BarterCast: total of %s: up = %d down = %d" % (self.getName(peer), up, down) + + # we know rank on total upload? 
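# Editorial sketch (not part of the original patch) of the canonical key used
# throughout BarterCastDBHandler: a permid pair is always stored sorted, and when
# the caller named the permids in the reverse of the stored order the
# 'uploaded'/'downloaded' fields are swapped so they stay relative to the first
# permid the caller gave.
def canonical_pair(permid_1, permid_2):
    reverse = permid_1 > permid_2
    key = (permid_2, permid_1) if reverse else (permid_1, permid_2)
    return key, reverse

def orient(item, reverse):
    if reverse:
        item['uploaded'], item['downloaded'] = item['downloaded'], item['uploaded']
    return item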
+ value = up + + # check if peer belongs to current top N + if peer != 'non-tribler' and peer != self.my_permid and (len(top) < n or value > min): + + top.append((peer, up, down)) + + # sort based on value + top.sort(cmp = lambda (p1, u1, d1), (p2, u2, d2): cmp(u2, u1)) + + # if list contains more than N elements: remove the last (=lowest value) + if len(top) > n: + del top[-1] + + # determine new minimum of values + min = top[-1][1] + + + + result = {} + + result['top'] = top + + # My total up and download, including interaction with non-tribler peers + result['total_up'] = total_up.get(self.my_permid, 0) + result['total_down'] = total_down.get(self.my_permid, 0) + + # My up and download with tribler peers only + result['tribler_up'] = result['total_up'] - total_down.get('non-tribler', 0) + result['tribler_down'] = result['total_down'] - total_up.get('non-tribler', 0) + + return result + + def addItem(self, (permid_1, permid_2), item): + +# if value.has_key('last_seen'): # get the latest last_seen +# old_last_seen = 0 +# old_data = self.getPeer(permid) +# if old_data: +# old_last_seen = old_data.get('last_seen', 0) +# last_seen = value['last_seen'] +# value['last_seen'] = max(last_seen, old_last_seen) + + # in the database, permid-tuple is always sorted + # to ensure unique entries for each permid combination + if permid_1 > permid_2: + reverse = True + permid_from = permid_2 + permid_to = permid_1 + else: + reverse = False + permid_from = permid_1 + permid_to = permid_2 + + # if reverse: exchange up and down + if reverse: + down = item['downloaded'] + up = item['uploaded'] + item['downloaded'] = up + item['uploaded'] = down + + self.bartercast_db.updateItem((permid_from, permid_to), item) + + + def hasItem(self, (permid_1, permid_2)): + + # in the database, permid-tuple is always sorted + # to ensure unique entries for each permid combination + if permid_1 > permid_2: + reverse = True + permid_from = permid_2 + permid_to = permid_1 + else: + reverse = False + permid_from = permid_1 + permid_to = permid_2 + + item = self.bartercast_db.hasItem((permid_from, permid_to)) + + if reverse: + down = item['downloaded'] + up = item['uploaded'] + item['downloaded'] = up + item['uploaded'] = down + + return item + + + def updateItem(self, (permid_1, permid_2), key, value): + + # in the database, permid-tuple is always sorted + # to ensure unique entries for each permid combination + if permid_1 > permid_2: + permid_from = permid_2 + permid_to = permid_1 + if key == 'uploaded': + key = 'downloaded' + elif key == 'downloaded': + key = 'uploaded' + else: + permid_from = permid_1 + permid_to = permid_2 + + if DEBUG: + print "BarterCast: update (%s, %s) [%s] += %s" % (self.getName(permid_from), self.getName(permid_to), key, str(value)) + + self.bartercast_db.updateItem((permid_from, permid_to), {key:value}) + + + def incrementItem(self, (permid_1, permid_2), key, value): + + # in the database, permid-tuple is always sorted + # to ensure unique entries for each permid combination + if permid_1 > permid_2: + permid_from = permid_2 + permid_to = permid_1 + if key == 'uploaded': + key = 'downloaded' + elif key == 'downloaded': + key = 'uploaded' + else: + permid_from = permid_1 + permid_to = permid_2 + + if DEBUG: + print "BarterCast: increment (%s, %s) [%s] += %s" % (self.getName(permid_from), self.getName(permid_to), key, str(value)) + + item = self.getItem((permid_from, permid_to)) + + if key in item.keys(): + old_value = item[key] + new_value = old_value + value + 
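# Editorial sketch (not part of the original patch) of getTopNPeers above:
# accumulate per-peer byte totals from the pairwise BarterCast records, rank the
# peers by total upload, and derive the Tribler-only totals by subtracting the
# 'non-tribler' pseudo peer from my own totals.
def top_n_peers(records, my_permid, n):
    # records: list of ((permid_a, permid_b), {'uploaded': KB, 'downloaded': KB})
    total_up, total_down = {}, {}
    for (peer_a, peer_b), item in records:
        up, down = item['uploaded'] * 1024, item['downloaded'] * 1024
        total_up[peer_a] = total_up.get(peer_a, 0) + up
        total_down[peer_a] = total_down.get(peer_a, 0) + down
        total_up[peer_b] = total_up.get(peer_b, 0) + down   # b uploads what a downloads
        total_down[peer_b] = total_down.get(peer_b, 0) + up
    peers = [p for p in total_up if p not in (my_permid, 'non-tribler')]
    ranked = sorted(((total_up[p], total_down[p], p) for p in peers), reverse=True)
    top = [(p, up, down) for up, down, p in ranked[:max(1, n)]]
    return {'top': top,
            'total_up': total_up.get(my_permid, 0),
            'total_down': total_down.get(my_permid, 0),
            'tribler_up': total_up.get(my_permid, 0) - total_down.get('non-tribler', 0),
            'tribler_down': total_down.get(my_permid, 0) - total_up.get('non-tribler', 0)}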
self.bartercast_db.updateItem((permid_from, permid_to), {key:new_value}) + return new_value + + return None + + + # TODO: include this functionality in PeerDB? + def deleteItem(self, (permid_1, permid_2)): + + # in the database, permid-tuple is always sorted + # to ensure unique entries for each permid combination + if permid_1 > permid_2: + permid_from = permid_2 + permid_to = permid_1 + else: + permid_from = permid_1 + permid_to = permid_2 + + self.bartercast_db._delete((permid_from, permid_to)) + + return True + + + + + +def test_myprefDB(): + myprefdb = MyPreferenceDBHandler.getInstance() + print myprefdb.getRecentPrefList() + +def test_all(): + test_myprefDB() + +def test_getSimItems(db_dir): + owner_db = OwnerDBHandler(db_dir) + torrent_db = TorrentDBHandler(db_dir) + torrents = owner_db.getTorrents() + for torrent in torrents: + value = torrent_db.getTorrent(torrent) + if not value: + continue + info = value.get('info', {}) + name = info.get('name', None) + if not name: + continue + live = value.get('status', 'unknown') + if live == 'dead': + continue + start = time() + simtorrents = owner_db.getSimItems(torrent) + if len(simtorrents) > 0: + try: + print "------", name, "------" + except: + print "------", `name`, "------" + for infohash, torrent_name, sim in simtorrents: + print " ", + try: + print torrent_name, sim, time()-start + except: + print `torrent_name` + +if __name__ == '__main__': + db_dir = sys.argv[1] + test_getSimItems(db_dir) + + diff --git a/tribler-mod/Tribler/Core/CacheDB/BsdCacheDBHandler.py.bak b/tribler-mod/Tribler/Core/CacheDB/BsdCacheDBHandler.py.bak new file mode 100644 index 0000000..c014cba --- /dev/null +++ b/tribler-mod/Tribler/Core/CacheDB/BsdCacheDBHandler.py.bak @@ -0,0 +1,1256 @@ +# Written by Jie Yang +# see LICENSE.txt for license information + +from cachedb import * +from copy import deepcopy +from sets import Set +from traceback import print_exc +from threading import currentThread +from time import time +import base64, socket + +from Tribler.Core.BitTornado.bencode import bencode, bdecode +from sets import Set + +from Tribler.Core.Utilities.utilities import show_permid_shorter, validIP, validPort, validPermid, validName +from Tribler.Core.CacheDB.Notifier import Notifier +from Tribler.Core.simpledefs import * + +DEBUG = False + +class BasicDBHandler: + __single = None + + def __init__(self): + self.dbs = [] # don't include read only database + self._single = self + + def getInstance(*args, **kw): + if BasicDBHandler.__single is None: + BasicDBHandler(*args, **kw) + return BasicDBHandler.__single + getInstance = staticmethod(getInstance) + + def __del__(self): + try: + self.sync() + except: + # Arno: on windows it may happen that tribler_done() is called + # before these __del__ statements. tribler_done() closes the + # databases, so this indirect call to db._sync() will throw + # an exception saying the database has already been closed. + pass + #print_exc() + + def close(self): + for db in self.dbs: + db.close() + + def size(self): + return self.dbs[0]._size() + + def iteritems(self): + return self.dbs[0]._iteritems() + + def sync(self): + for db in self.dbs: + db._sync() + + def clear(self): + for db in self.dbs: + db._clear() + + def printList(self): + records = self.dbs[0]._items() + for key, value in records: + print key, value + + + + +class SuperPeerDBHandler(BasicDBHandler): + """ + Jelle: now superpeers are read from file and then kept in memory only. + Necessary to pickle? 
+ """ + def __init__(self, config, db_dir=''): + BasicDBHandler.__init__(self) + self.peer_db = PeerDB.getInstance(db_dir=db_dir) + self.dbs = [self.peer_db] + self.notifier = Notifier.getInstance() + filename = os.path.join(config['install_dir'], config['superpeer_file']) + self.superpeer_list = self.readSuperPeerList(filename) + #print 'sp list: %s' % self.superpeer_list + self.updatePeerDB() + + __single = None + def getInstance(*args, **kw): + if SuperPeerDBHandler.__single is None: + SuperPeerDBHandler.__single = SuperPeerDBHandler(*args, **kw) + return SuperPeerDBHandler.__single + getInstance = staticmethod(getInstance) + + + def clear(self): # clean database + self.superpeer_list = {} + + + def getSuperPeers(self): + # return only permids + return [a['permid'] for a in self.superpeer_list] + + def size(self): + return len(self.getSuperPeers()) + + def printList(self): + print self.getSuperPeers() + + def readSuperPeerList(self, filename=''): + """ read (name, permid, superpeer_ip, superpeer_port) lines from a text file """ + + try: + filepath = os.path.abspath(filename) + file = open(filepath, "r") + except IOError: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "superpeer: cannot open superpeer file", filepath + return [] + + superpeers = file.readlines() + file.close() + superpeers_info = [] + for superpeer in superpeers: + if superpeer.strip().startswith("#"): # skip commended lines + continue + superpeer_line = superpeer.split(',') + superpeer_info = [a.strip() for a in superpeer_line] + try: + superpeer_info[2] = base64.decodestring(superpeer_info[2]+'\n' ) + except: + print_exc() + continue + if self.validSuperPeerList(superpeer_info): + try: + ip = socket.gethostbyname(superpeer_info[0]) + superpeer = {'ip':ip, 'port':superpeer_info[1], + 'permid':superpeer_info[2]} + if len(superpeer_info) > 3: + superpeer['name'] = superpeer_info[3] + superpeers_info.append(superpeer) + except: + print_exc() + pass + + return superpeers_info + + def validSuperPeerList(self, superpeer_info): + try: + if len(superpeer_info) < 3: + raise RuntimeError, "one line in superpeers.txt contains at least 3 elements" + #validIP(superpeer_info[0]) + validPort(int(superpeer_info[1])) + validPermid(superpeer_info[2]) + except Exception: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","superpeer: Parse error reading",superpeer_info + print_exc(file=sys.stderr) + return False + else: + return True + + def updatePeerDB(self): + print 'superpeers: updating db' + for superpeer in self.superpeer_list: + superpeer = deepcopy(superpeer) + if not isinstance(superpeer, dict) or 'permid' not in superpeer: + continue + permid = superpeer.pop('permid') + self.peer_db.updateItem(permid, superpeer) + self.notifier.notify(NTFY_PEERS, NTFY_UPDATE, permid) + +class FriendDBHandler(BasicDBHandler): + + def __init__(self, db_dir=''): + BasicDBHandler.__init__(self) + #self.my_db = MyDB.getInstance(db_dir=db_dir) + self.peer_db = PeerDB.getInstance(db_dir=db_dir) + self.dbs = [self.peer_db] + + __single = None + def getInstance(*args, **kw): + if FriendDBHandler.__single is None: + FriendDBHandler.__single = FriendDBHandler(*args, **kw) + return FriendDBHandler.__single + getInstance = staticmethod(getInstance) + + def size(self): + return len(self.my_db.getFriends()) + + def printList(self): + print self.my_db.getFriends() + + def addFriend(self, permid): + self.my_db.addFriend(permid) + #self.my_db._sync() + + def addExternalFriend(self, friend): + if not 
isinstance(friend, dict) or 'permid' not in friend: + return + permid = friend.pop('permid') + if permid not in self.getFriends(): + self.peer_db.updateItem(permid, friend) + #self.peer_db._sync() + #self.my_db.addFriend(permid) # Fixme + #self.my_db._sync() + else: + self.peer_db.updateItem(permid, friend, update_time=False) + #self.peer_db._sync() + + def getFriendList(self): + """returns a list of permids""" + return [] # FIXME, no friendsdb yet + + def getFriends(self): + """returns a list of peer infos including permid""" + return [] # Fixme + + def isFriend(self, permid): + return False # Fixme + + def deleteFriend(self,permid): + pass + + def updateFriendIcon(self, permid, icon_path): + self.peer_db.updatePeer(permid, 'icon', icon_path) + +class PeerDBHandler(BasicDBHandler): + + def __init__(self, config, db_dir=''): + BasicDBHandler.__init__(self) + self.notifier = Notifier.getInstance() + self.peer_db = PeerDB.getInstance(db_dir=db_dir) + self.pref_db = PreferenceDB.getInstance(db_dir=db_dir) + self.friends_db_handler = FriendDBHandler.getInstance() + self.pref_db_handler = PreferenceDBHandler(db_dir=db_dir) + self.ip_db = IP2PermIDDB.getInstance(db_dir=db_dir) + #self.mm = Mugshot Manager.getInstance() + #self.mm.register(config) + self.dbs = [self.peer_db, self.ip_db] + + __single = None + def getInstance(*args, **kw): + if PeerDBHandler.__single is None: + PeerDBHandler.__single = PeerDBHandler(*args, **kw) + return PeerDBHandler.__single + getInstance = staticmethod(getInstance) + + def __len__(self): + return self.peer_db._size() + + def getPeer(self, permid, default=False): + return self.peer_db.getItem(permid, default) + + def getPeerSim(self, permid): + x = self.peer_db.getItem(permid) + if not x: + return 0 + return x.get('similarity', 0) + + def getPeerList(self): # get the list of all peers' permid + return self.peer_db._keys() + + def getTasteBuddyList(self): + return self.pref_db._keys() + + def getRandomPeerList(self): # Expensive + # TODO: improve performance + return list(Set(self.peer_db._keys()) - Set(self.pref_db._keys())) + + def getPeers(self, peer_list, keys): # get a list of dictionaries given peer list + peers = [] + if 'permid' in keys: + permid = True + keys.remove('permid') + else: + permid = False + + count = 0 + for peer in peer_list: + p = self.peer_db.getItem(peer) + if not p: + break # database is closed + if permid: + d = {'permid':peer} + else: + d = {} + for key in keys: + d[key] = p[key] + peers.append(d) + + count += 1 + if count % 1000 == 0: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","peerdb: Read items",count,currentThread().getName() + + return peers + + def getPeersValue(self, peer_list, keys=None): # get a list of values given peer list + if not keys: + keys = self.peer_db.default_item.keys() + values = [] + for peer in peer_list: + p = self.peer_db.getItem(peer, default=True) + if len(keys) == 1: + values.append(p[keys[0]]) + else: + d = [] + for key in keys: + d.append(p[key]) + values.append(d) + + return values + + def addPeer(self, permid, value, update_dns=True, updateFlag = True): + + if value.has_key('last_seen'): # get the latest last_seen + old_last_seen = 0 + old_data = self.getPeer(permid) + if old_data: + old_last_seen = old_data.get('last_seen', 0) + last_seen = value['last_seen'] + now = int(time()) + value['last_seen'] = min(now, max(last_seen, old_last_seen)) + + self.peer_db.updateItem(permid, value, update_dns) + + if value.has_key('ip') and update_dns: + self.updatePeerIP(permid, value['ip']) 
+ + if updateFlag: + self.notifier.notify(NTFY_PEERS, NTFY_INSERT, permid) + + def hasPeer(self, permid): + return self.peer_db.hasItem(permid) + + def findPeers(self, key, value): + # Warning: if key is not 'permid', then it is a very EXPENSIVE operation. + res = [] + if key == 'permid': + peer = self.getPeer(value) + if peer: + peer.update({'permid':value}) + res.append(peer) + else: + for permid, peer in self.peer_db._items(): + try: + if peer[key] == value: + peer.update({'permid':permid}) + res.append(peer) + except KeyError: + pass + return res + + def updatePeer(self, permid, key, value, updateFlag = True): + self.peer_db.updateItem(permid, {key:value}) + if key == 'ip': + self.updatePeerIP(permid, value) + if updateFlag: + self.notifier.notify(NTFY_PEERS, NTFY_UPDATE, permid, key) + + def updatePeerIcon(self, permid, icontype, icondata, updateFlag = True): + self.mm.save_data(permid, icontype, icondata) + if updateFlag: + self.notifier.notify(NTFY_PEERS, NTFY_UPDATE, permid, 'icon') + + + def getPeerIcon(self, permid, name = ''): + return self.mm.load_data(permid, name) + + def updatePeerIP(self, permid, ip): + peer_data = self.peer_db._get(permid, {}) + old_ip = peer_data.get('ip', None) + if not old_ip: # not exist in peer_db, don't touch it either + return + + if old_ip != ip: # changed ip + old_permid = self.ip_db.getPermIDByIP(old_ip) + if old_permid == permid: # ip_db is consistent with peer_db + self.ip_db.deleteItem(old_ip) # delete the old map + permid2 = self.ip_db.getPermIDByIP(ip) + if permid2 != permid: + self.ip_db.addIP(ip,permid) + + + def deletePeer(self, permid, updateFlag = True): + if self.friends_db_handler.isFriend(permid): + return False + self.peer_db._delete(permid) + self.pref_db_handler.deletePeer(permid) + self.ip_db.deletePermID(permid) + + if updateFlag: + self.notifier.notify(NTFY_PEERS, NTFY_DELETE, permid) + + return True + + def updateTimes(self, permid, key, change, updateFlag = True): + item = self.peer_db.getItem(permid) + if not item: + return + if not item.has_key(key): + value = 0 + else: + value = item[key] + value += change + self.peer_db.updateItem(permid, {key:value}) + + if updateFlag: + self.notifier.notify(NTFY_PEERS, NTFY_UPDATE, permid, key) + + def getPermIDByIP(self,ip): + return self.ip_db.getPermIDByIP(ip) + +class PreferenceDBHandler(BasicDBHandler): + + def __init__(self, db_dir=''): + BasicDBHandler.__init__(self) + self.owner_db = OwnerDB.getInstance(db_dir=db_dir) + self.pref_db = PreferenceDB.getInstance(db_dir=db_dir) + self.dbs = [self.pref_db, self.owner_db] + + __single = None + def getInstance(*args, **kw): + if PreferenceDBHandler.__single is None: + PreferenceDBHandler.__single = PreferenceDBHandler(*args, **kw) + return PreferenceDBHandler.__single + getInstance = staticmethod(getInstance) + + def getPreferences(self, permid): + return self.pref_db.getItem(permid) + + def getPrefList(self, permid): + return self.pref_db._get(permid,{}).keys() + + def addPreference(self, permid, infohash, data={}): + self.pref_db.addPreference(permid, infohash, data) + self.owner_db.addOwner(infohash, permid) + + def deletePeer(self, permid): # delete a peer from pref_db + prefs = self.pref_db.getItem(permid) + for infohash in prefs: + self.owner_db.deleteOwner(infohash, permid) + self.pref_db.deleteItem(permid) + + def deletePreference(self, permid, infohash): + self.pref_db.deletePreference(permid, infohash) + self.owner_db.deleteOwner(infohash, permid) + + def hasPreference(self, permid): + return self.pref_db._has_key(permid) + + 
def getNumPrefs(self, permid): + if not self.pref_db._has_key(permid): + return 0 + x = self.pref_db.getItem(permid) + return len(x) + +class TorrentDBHandler(BasicDBHandler): + + def __init__(self, db_dir=''): + BasicDBHandler.__init__(self) + self.notifier = Notifier.getInstance() + self.torrent_db = TorrentDB.getInstance(db_dir=db_dir) + self.mypref_db = MyPreferenceDB.getInstance(db_dir=db_dir) + self.owner_db = OwnerDB.getInstance(db_dir=db_dir) + self.dbs = [self.torrent_db] + + __single = None + def getInstance(*args, **kw): + if TorrentDBHandler.__single is None: + TorrentDBHandler.__single = TorrentDBHandler(*args, **kw) + return TorrentDBHandler.__single + getInstance = staticmethod(getInstance) + + def addTorrent(self, infohash, torrent={}, new_metadata=False, updateFlag = True): + # add a new torrent or update an old torrent's info + if not torrent and self.hasTorrent(infohash): # no need to add + return False + self.torrent_db.updateItem(infohash, torrent) + + if updateFlag: + self.notifier.notify(NTFY_TORRENTS, NTFY_INSERT, infohash) + + try: + # Arno: PARANOID SYNC + self.sync() + except: + print_exc() + return True + + def updateTorrent(self, infohash, **kw): # watch the schema of database + updateFlag = kw.get('updateFlag', True) + if kw.has_key('updateFlag'): + del kw['updateFlag'] + self.torrent_db.updateItem(infohash, kw) + + if updateFlag: + self.notifier.notify(NTFY_TORRENTS, NTFY_UPDATE, infohash, kw.keys()) + + + def deleteTorrent(self, infohash, delete_file=False, updateFlag = True): + if self.mypref_db.hasPreference(infohash): # don't remove torrents in my pref + return False + + if delete_file: +# data = self.torrent_db._get(infohash) +# if data and data['torrent_name']: +# live = data.get('status', 'unknown') +# if live != 'dead' and live != 'unknown': + deleted = self.eraseTorrentFile(infohash) +# if deleted: +# # may remove dead torrents, so this number is not consistent +# self.torrent_db.num_metadatalive -= 1 + else: + deleted = True + + if deleted: + self.torrent_db._delete(infohash) + if updateFlag: + self.notifier.notify(NTFY_TORRENTS, NTFY_DELETE, infohash) + + return deleted + + def eraseTorrentFile(self, infohash): + data = self.torrent_db._get(infohash) + if not data or not data['torrent_name'] or not data['info']: + return False + src = os.path.join(data['torrent_dir'], data['torrent_name']) + if not os.path.exists(src): # already removed + return True + + try: + os.remove(src) + except Exception, msg: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "cachedbhandler: failed to erase torrent", src, Exception, msg + return False + + return True + + def getTorrent(self, infohash, num_owners=False,savemem=False): + torrent = self.torrent_db.getItem(infohash,savemem=savemem) + if torrent and num_owners: + torrent['num_owners'] = self.owner_db.getNumOwners(infohash) + return torrent + + def getTorrents(self, torrent_list, keys=None): # get a list of dictionaries given torrent list + if not keys: + keys = self.torrent_db.default_item.keys() + keys += ['infohash'] + torrents = [] + if 'infohash' in keys: + infohash = True + keys.remove('infohash') + else: + infohash = False + if 'num_owners' in keys: + num_owners = True + else: + num_owners = False + for torrent in torrent_list: + p = self.torrent_db.getItem(torrent, default=True) + if num_owners: + p['num_owners'] = self.owner_db.getNumOwners(torrent) + if infohash: + p['infohash'] = torrent + torrents.append(p) + return torrents + + def getAllTorrents(self): + return 
self.torrent_db._keys() + + + def getRecommendedTorrents(self, light=True, all=False, myprefs=False, countcallback=None): + """ get torrents on disk but not in my pref + BE AWARE: the returned object of this call may consume lots of memory. + You should delete the object when possible + """ + + #print '>>>>>>'*5, "getRecommendedTorrents", currentThread().getName() + #print_stack() + #loaded by DataLoadingThread + + start_time = time() + mypref_set = Set(self.mypref_db._keys()) + + if myprefs: + all_list = mypref_set + else: + if all: + all_list = self.torrent_db._keys() + else: + all_list = Set(self.torrent_db._keys()) - mypref_set + + + # Arno: save memory by reusing dict keys + key_infohash = 'infohash' + key_myDownloadHistory = 'myDownloadHistory' + key_download_started = 'download_started' + key_num_owners = 'key_num_owners' + + torrents = [] + count = 0 +# num_live_torrents = 0 + setOfInfohashes = Set() + for torrent in all_list: + if torrent in setOfInfohashes: # do not add 2 torrents with same infohash + continue + p = self.torrent_db.getItem(torrent,savemem=True) + if not p: + break #database not available any more + if not type(p) == dict or not p.get('torrent_name', None) or not p.get('info', None): + deleted = self.deleteTorrent(torrent) # remove infohashes without torrent + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "TorrentDBHandler: deleted empty torrent", deleted, p.get('torrent_name', None), p.get('info', None) + +# if torrent not in mypref_set: +# live = p.get('status', 'unknown') +# if live != 'dead' and live != 'unknown': +# num_live_torrents += 1 + + if torrent in mypref_set: + p[key_myDownloadHistory] = True + mypref_obj = self.mypref_db.getItem(torrent) + if mypref_obj: + p[key_download_started] = mypref_obj['created_time'] + + p[key_infohash] = torrent + setOfInfohashes.add(torrent) + if not light: # set light as ture to be faster + p[key_num_owners] = self.owner_db.getNumOwners(torrent) + + torrents.append(p) + + count += 1 + if count % 1000 == 0: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","torrentdb: Read items",count,currentThread().getName() + if countcallback is not None: + countcallback(count) + + del all_list + del setOfInfohashes + +# from traceback import print_stack +# print_stack() +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '[StartUpDebug]----------- from getRecommendedTorrents ----------', time()-start_time, currentThread().getName(), '\n\n' + +# self.torrent_db.num_metadatalive = num_live_torrents + #print 'Returning %d torrents' % len(torrents) + + return torrents + + + + def getCollectedTorrentHashes(self): + """ get infohashes of torrents on disk, used by torrent checking, + and metadata handler + """ + all_list = Set(self.torrent_db._keys()) + all_list -= Set(self.mypref_db._keys()) + + return all_list + + def hasTorrent(self, infohash): + return self.torrent_db._has_key(infohash) + + def getLiveTorrents(self, peerlist): + ret = [] + for infohash in peerlist: + data = self.torrent_db._get(infohash) + if isinstance(data, dict): + live = data.get('status', 'unknown') + if live != 'dead': + ret.append(infohash) + return ret + + def getOthersTorrentList(self, num=-1, sorted=True): # get the list of torrents which are not in my preference + all_list = list(Set(self.torrent_db._keys()) - Set(self.mypref_db._keys())) + if num < 0: + return all_list + if not sorted: #TODO: seperate sort function from getOthersTorrentList + return all_list + values = [] + for torrent in all_list: + 
t = self.torrent_db.getItem(torrent, default=True) + values.append(t['relevance']) + nlist = len(all_list) + aux = [(values[i], i) for i in xrange(nlist)] + aux.sort() + aux.reverse() + return [all_list[i] for k, i in aux[:num]] + + def getTorrentsValue(self, torrent_list, keys=None): # get a list of values given peer list + if not keys: + keys = self.torrent_db.default_item.keys() + if not isinstance(keys, list): + keys = [str(keys)] + values = [] + for torrent in torrent_list: + t = self.torrent_db.getItem(torrent, default=True) + if len(keys) == 1: + values.append(t[keys[0]]) + else: + d = [] + for key in keys: + d.append(t[key]) + values.append(d) + + return values + + def getNoMetaTorrents(self): # get the list of torrents which only have an infohash without the metadata + def hasNoTorrentFile(key): + data = self.torrent_db._get(key) + if not data: # if no record, ignore + return False + if not data['torrent_name'] or not data['info']: # if no info, selected + return True + return False # if has info but no file, it means the torrent file has been removed. ignore + + all_keys = self.torrent_db._keys() + no_metadata_list = filter(hasNoTorrentFile, all_keys) + return no_metadata_list + + def hasMetaData(self, infohash): + value = self.torrent_db._get(infohash) + if not value: + return False + name = value.get('torrent_name', None) + if not name: + return False + return True + + def getOwners(self, infohash): + return self.owner_db.getItem(infohash) + + def updateTorrentRelevance(self, infohash, relevance, updateFlag = True): + self.torrent_db.updateItem(infohash, {'relevance':relevance}) + if updateFlag: + self.notifier.notify(NTFY_TORRENTS, NTFY_UPDATE, infohash, 'relevance') + +#=============================================================================== +# def getNumMetadataAndLive(self): # TODO +# return self.torrent_db.num_metadatalive +#=============================================================================== + +class MyPreferenceDBHandler(BasicDBHandler): + + def __init__(self, db_dir=''): + BasicDBHandler.__init__(self) + self.notifier = Notifier.getInstance() + self.mypref_db = MyPreferenceDB.getInstance(db_dir=db_dir) + self.torrent_db = TorrentDB.getInstance(db_dir=db_dir) + self.dbs = [self.mypref_db, self.torrent_db] + + __single = None + def getInstance(*args, **kw): + if MyPreferenceDBHandler.__single is None: + MyPreferenceDBHandler.__single = MyPreferenceDBHandler(*args, **kw) + return MyPreferenceDBHandler.__single + getInstance = staticmethod(getInstance) + + def getPreferences(self, key=None): + all_items = self.mypref_db._items() + if key is None: + ret = [] + for item in all_items: + item[1].update({'infohash':item[0]}) + ret.append(item[1]) + return ret + else: + return [all_items[i][1][key] for i in xrange(len(all_items))] + + def getPrefList(self): + return self.mypref_db._keys() + + def getCreationTime(self, infohash): + "Return creation time. 
Used for sorting in library view" + item = self.mypref_db.getItem(infohash, default=False) + if item: + return item.get('created_time') + else: + return None + + def getPrefs(self, pref_list, keys): # get a list of dictionaries given peer list + peers = [] + for torrent in pref_list: + d = self.mypref_db.getItem(torrent, default=True) + t = self.torrent_db.getItem(torrent, default=True) + try: + d.update(t) + except: + continue + if 'infohash' in keys: + d.update({'infohash':torrent}) + for key in d.keys(): + if key not in keys: + d.pop(key) + peers.append(d) + + return peers + + def removeFakeAndDeadTorrents(self, items): + def fakeFilter(item): + infohash = item[0] # infohash + valid = self.mypref_db.getRank(infohash) >= 0 + torrentdata = self.torrent_db.getItem(infohash, default=True) # defaulttorrent has status 'unknown' + alive = torrentdata.get('status', 'unknown') != 'dead' + secret = torrentdata.get('secret', False) # exclude secret downloads. + return alive and valid and not secret + return filter(fakeFilter, items) + + + def getRecentPrefList(self, num=0): # num = 0: all files + all_items = self.mypref_db._items() + valid_items = self.removeFakeAndDeadTorrents(all_items) + prefs = [(item[1]['last_seen'], item[0]) for item in valid_items] + prefs.sort() + prefs.reverse() + if num > 0: + return [item[1] for item in prefs[:num]] + else: + return [item[1] for item in prefs] + + def hasPreference(self, infohash): + return self.mypref_db._has_key(infohash) + + def addPreference(self, infohash, data={}): + if not data and self.hasPreference(infohash): + return False + self.mypref_db.updateItem(infohash, data) + return True + + def deletePreference(self, infohash): + self.mypref_db.deleteItem(infohash) + + def updateRank(self, infohash, rank): + self.mypref_db.updateItem(infohash, {'rank':rank}) + self.sync() + + +class OwnerDBHandler(BasicDBHandler): + + def __init__(self, db_dir=''): + BasicDBHandler.__init__(self) + self.owner_db = OwnerDB.getInstance(db_dir=db_dir) + self.pref_db = PreferenceDB.getInstance(db_dir=db_dir) + self.mypref_db = MyPreferenceDB.getInstance(db_dir=db_dir) + self.torrent_db = TorrentDB.getInstance(db_dir=db_dir) + self.dbs = [self.owner_db] + self.sim_cache = {} # used to cache the getSimItems + + __single = None + def getInstance(*args, **kw): + if OwnerDBHandler.__single is None: + OwnerDBHandler.__single = OwnerDBHandler(*args, **kw) + return OwnerDBHandler.__single + getInstance = staticmethod(getInstance) + + def getTorrents(self): + return self.owner_db._keys() + + def getSimItems(self, torrent_hash, num=15): + """ Get a list of similar torrents given a torrent hash. The torrents + must exist and be not dead. 
+ Input + torrent_hash: the infohash of a torrent + num: the number of similar torrents to get + output: + returns a list of infohashes, sorted by similarity, + """ + + start = time() + mypref_list = self.mypref_db._keys() + if torrent_hash in self.sim_cache: + mypref_set = Set(mypref_list) + oldrec = self.sim_cache[torrent_hash] + for item in oldrec[:]: # remove common torrents + if item in mypref_set: + oldrec.remove(item) + return oldrec + + owners = self.owner_db._get(torrent_hash, {}) + nowners = len(owners) + if not owners or nowners < 1: + return [] + co_torrents = {} # torrents have co + for owner in owners: + prefs = self.pref_db.getItem(owner) + for torrent in prefs: + if torrent not in co_torrents: + co_torrents[torrent] = 1 + else: + co_torrents[torrent] += 1 + if torrent_hash in co_torrents: + co_torrents.pop(torrent_hash) + for infohash in mypref_list: + if infohash in co_torrents: + co_torrents.pop(infohash) + + sim_items = [] + + for torrent in co_torrents: + co = co_torrents[torrent] +# if co <= 1: +# continue + + # check if the torrent is collected and live + has_key = self.torrent_db._has_key(torrent) + if has_key == False: + continue + elif has_key == None: + break + value = self.torrent_db._get(torrent) + if not value: # sth. is wrong + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "cachedbhandler: getSimItems meets error in getting data" + break + info = value.get('info', {}) + name = info.get('name', None) + if not name: + continue + live = value.get('status', 'unknown') + if live == 'dead': + continue + + nowners2 = self.owner_db.getNumOwners(torrent) + if nowners2 == 0: # sth. is wrong + continue + sim = co/(nowners*nowners2)**0.5 + sim_items.append((sim, torrent)) + + sim_items.sort() + sim_items.reverse() + sim_torrents = [torrent for sim, torrent in sim_items[:num]] + + self.sim_cache[torrent_hash] = sim_torrents + return sim_torrents + + + +class BarterCastDBHandler(BasicDBHandler): + + def __init__(self, session, db_dir=''): + BasicDBHandler.__init__(self) + self.bartercast_db = BarterCastDB.getInstance(db_dir=db_dir) + self.peer_db = PeerDB.getInstance(db_dir=db_dir) + self.dbs = [self.bartercast_db] + self.my_permid = session.get_permid() + + __single = None + def getInstance(*args, **kw): + if BarterCastDBHandler.__single is None: + BarterCastDBHandler.__single = BarterCastDBHandler(*args, **kw) + return BarterCastDBHandler.__single + getInstance = staticmethod(getInstance) + + def __len__(self): + return self.bartercast_db._size() + + def getName(self, permid): + + if permid == 'testpermid_1': + return "Test_1" + elif permid == 'testpermid_2': + return "Test_2" + elif permid == 'non-tribler': + return "Non-tribler" + + peer = self.peer_db.getItem(permid, False) + if peer == None: + return 'peer %s' % show_permid_shorter(permid) + else: + name = peer.get('name', '') + if name == '': + name = 'peer %s' % show_permid_shorter(permid) + return name + + def getItem(self, (permid_1, permid_2), default=False): + + # in the database, permid-tuple is always sorted + # to ensure unique entries for each permid combination + if permid_1 > permid_2: + reverse = True + permid_from = permid_2 + permid_to = permid_1 + else: + reverse = False + permid_from = permid_1 + permid_to = permid_2 + + item = self.bartercast_db.getItem((permid_from, permid_to), default) + + # if peer in peerdb but not in bartercastdb: add peer + if item == None: # and peerdb_peer != None: + + if DEBUG: + print 'Item (%s, %s) added to BarterCastDB' % (self.getName(permid_from), 
self.getName(permid_to)) + + self.addItem((permid_from, permid_to), self.bartercast_db.default_item) + + # get item again now it exists + item = self.bartercast_db.getItem((permid_from, permid_to), default) + + # if reverse: exchange up and down so that the caller doesnt have to worry + # about the order of permids in the tuple + if reverse: + down = item['downloaded'] + up = item['uploaded'] + item['downloaded'] = up + item['uploaded'] = down + + return item + + + def getItemList(self): # get the list of all peers' permid + keys = map(lambda key: bdecode(key), self.bartercast_db._keys()) + return keys + + # Return (sorted) list of the top N peers with the highest (combined) values for the given keys + def getTopNPeers(self, n, local_only = False): + n = max(1, n) + itemlist = self.getItemList() + + if local_only: + # get only items of my local dealings + itemlist = filter(lambda (permid_from, permid_to): permid_to == self.my_permid or permid_from == self.my_permid, itemlist) + + total_up = {} + total_down = {} + + + for (permid_1, permid_2) in itemlist: + + item = self.getItem((permid_1, permid_2)) + + up = item['uploaded'] *1024 # make into bytes + down = item['downloaded'] *1024 + + if DEBUG: + print "BarterCast DB entry: (%s, %s) up = %d down = %d" % (self.getName(permid_1), self.getName(permid_2), up, down) + + # process permid_1 + total_up[permid_1] = total_up.get(permid_1, 0) + up + total_down[permid_1] = total_down.get(permid_1, 0) + down + + # process permid_2 + total_up[permid_2] = total_up.get(permid_2, 0) + down + total_down[permid_2] = total_down.get(permid_2, 0) + up + + + # create top N peers + top = [] + min = 0 + + for peer in total_up.keys(): + + up = total_up[peer] + down = total_down[peer] + + if DEBUG: + print "BarterCast: total of %s: up = %d down = %d" % (self.getName(peer), up, down) + + # we know rank on total upload? 
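The selection that follows keeps only the N peers with the highest upload total, skipping our own permid and the 'non-tribler' placeholder. A hedged standalone sketch of that step (illustrative helper name; the loop below instead tracks a running minimum so it never sorts the full list):

def top_n_by_upload(total_up, total_down, n, exclude=()):
    # keep the n peers with the highest aggregated upload; 'exclude' would
    # hold self.my_permid and 'non-tribler' in the handler above
    rows = [(permid, up, total_down.get(permid, 0))
            for permid, up in total_up.items()
            if permid not in exclude]
    rows.sort(key=lambda row: row[1], reverse=True)
    return rows[:max(1, n)]

print(top_n_by_upload({'a': 300, 'b': 100, 'c': 200}, {'a': 10, 'c': 30}, 2))
# [('a', 300, 10), ('c', 200, 30)]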
+ value = up + + # check if peer belongs to current top N + if peer != 'non-tribler' and peer != self.my_permid and (len(top) < n or value > min): + + top.append((peer, up, down)) + + # sort based on value + top.sort(cmp = lambda (p1, u1, d1), (p2, u2, d2): cmp(u2, u1)) + + # if list contains more than N elements: remove the last (=lowest value) + if len(top) > n: + del top[-1] + + # determine new minimum of values + min = top[-1][1] + + + + result = {} + + result['top'] = top + + # My total up and download, including interaction with non-tribler peers + result['total_up'] = total_up.get(self.my_permid, 0) + result['total_down'] = total_down.get(self.my_permid, 0) + + # My up and download with tribler peers only + result['tribler_up'] = result['total_up'] - total_down.get('non-tribler', 0) + result['tribler_down'] = result['total_down'] - total_up.get('non-tribler', 0) + + return result + + def addItem(self, (permid_1, permid_2), item): + +# if value.has_key('last_seen'): # get the latest last_seen +# old_last_seen = 0 +# old_data = self.getPeer(permid) +# if old_data: +# old_last_seen = old_data.get('last_seen', 0) +# last_seen = value['last_seen'] +# value['last_seen'] = max(last_seen, old_last_seen) + + # in the database, permid-tuple is always sorted + # to ensure unique entries for each permid combination + if permid_1 > permid_2: + reverse = True + permid_from = permid_2 + permid_to = permid_1 + else: + reverse = False + permid_from = permid_1 + permid_to = permid_2 + + # if reverse: exchange up and down + if reverse: + down = item['downloaded'] + up = item['uploaded'] + item['downloaded'] = up + item['uploaded'] = down + + self.bartercast_db.updateItem((permid_from, permid_to), item) + + + def hasItem(self, (permid_1, permid_2)): + + # in the database, permid-tuple is always sorted + # to ensure unique entries for each permid combination + if permid_1 > permid_2: + reverse = True + permid_from = permid_2 + permid_to = permid_1 + else: + reverse = False + permid_from = permid_1 + permid_to = permid_2 + + item = self.bartercast_db.hasItem((permid_from, permid_to)) + + if reverse: + down = item['downloaded'] + up = item['uploaded'] + item['downloaded'] = up + item['uploaded'] = down + + return item + + + def updateItem(self, (permid_1, permid_2), key, value): + + # in the database, permid-tuple is always sorted + # to ensure unique entries for each permid combination + if permid_1 > permid_2: + permid_from = permid_2 + permid_to = permid_1 + if key == 'uploaded': + key = 'downloaded' + elif key == 'downloaded': + key = 'uploaded' + else: + permid_from = permid_1 + permid_to = permid_2 + + if DEBUG: + print "BarterCast: update (%s, %s) [%s] += %s" % (self.getName(permid_from), self.getName(permid_to), key, str(value)) + + self.bartercast_db.updateItem((permid_from, permid_to), {key:value}) + + + def incrementItem(self, (permid_1, permid_2), key, value): + + # in the database, permid-tuple is always sorted + # to ensure unique entries for each permid combination + if permid_1 > permid_2: + permid_from = permid_2 + permid_to = permid_1 + if key == 'uploaded': + key = 'downloaded' + elif key == 'downloaded': + key = 'uploaded' + else: + permid_from = permid_1 + permid_to = permid_2 + + if DEBUG: + print "BarterCast: increment (%s, %s) [%s] += %s" % (self.getName(permid_from), self.getName(permid_to), key, str(value)) + + item = self.getItem((permid_from, permid_to)) + + if key in item.keys(): + old_value = item[key] + new_value = old_value + value + 
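getItem, addItem, hasItem, updateItem and incrementItem all repeat the same permid-pair canonicalisation. A hedged sketch of that convention as a standalone helper (hypothetical name, not part of the class):

def canonical_edge(permid_1, permid_2, key=None):
    # the database stores each peer pair sorted so every combination maps to
    # a single record; when the pair is swapped, the 'uploaded'/'downloaded'
    # roles swap with it
    if permid_1 > permid_2:
        if key == 'uploaded':
            key = 'downloaded'
        elif key == 'downloaded':
            key = 'uploaded'
        return (permid_2, permid_1), key
    return (permid_1, permid_2), key

print(canonical_edge('permid_B', 'permid_A', 'uploaded'))
# (('permid_A', 'permid_B'), 'downloaded')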
self.bartercast_db.updateItem((permid_from, permid_to), {key:new_value}) + return new_value + + return None + + + # TODO: include this functionality in PeerDB? + def deleteItem(self, (permid_1, permid_2)): + + # in the database, permid-tuple is always sorted + # to ensure unique entries for each permid combination + if permid_1 > permid_2: + permid_from = permid_2 + permid_to = permid_1 + else: + permid_from = permid_1 + permid_to = permid_2 + + self.bartercast_db._delete((permid_from, permid_to)) + + return True + + + + + +def test_myprefDB(): + myprefdb = MyPreferenceDBHandler.getInstance() + print myprefdb.getRecentPrefList() + +def test_all(): + test_myprefDB() + +def test_getSimItems(db_dir): + owner_db = OwnerDBHandler(db_dir) + torrent_db = TorrentDBHandler(db_dir) + torrents = owner_db.getTorrents() + for torrent in torrents: + value = torrent_db.getTorrent(torrent) + if not value: + continue + info = value.get('info', {}) + name = info.get('name', None) + if not name: + continue + live = value.get('status', 'unknown') + if live == 'dead': + continue + start = time() + simtorrents = owner_db.getSimItems(torrent) + if len(simtorrents) > 0: + try: + print "------", name, "------" + except: + print "------", `name`, "------" + for infohash, torrent_name, sim in simtorrents: + print " ", + try: + print torrent_name, sim, time()-start + except: + print `torrent_name` + +if __name__ == '__main__': + db_dir = sys.argv[1] + test_getSimItems(db_dir) + + diff --git a/tribler-mod/Tribler/Core/CacheDB/CacheDBHandler.py b/tribler-mod/Tribler/Core/CacheDB/CacheDBHandler.py new file mode 100644 index 0000000..54c2c84 --- /dev/null +++ b/tribler-mod/Tribler/Core/CacheDB/CacheDBHandler.py @@ -0,0 +1,5 @@ +from time import localtime, strftime +# Written by Jie Yang +# see LICENSE.txt for license information + +from SqliteCacheDBHandler import * diff --git a/tribler-mod/Tribler/Core/CacheDB/CacheDBHandler.py.bak b/tribler-mod/Tribler/Core/CacheDB/CacheDBHandler.py.bak new file mode 100644 index 0000000..038ed32 --- /dev/null +++ b/tribler-mod/Tribler/Core/CacheDB/CacheDBHandler.py.bak @@ -0,0 +1,4 @@ +# Written by Jie Yang +# see LICENSE.txt for license information + +from SqliteCacheDBHandler import * diff --git a/tribler-mod/Tribler/Core/CacheDB/EditDist.py b/tribler-mod/Tribler/Core/CacheDB/EditDist.py new file mode 100644 index 0000000..4e3e8a4 --- /dev/null +++ b/tribler-mod/Tribler/Core/CacheDB/EditDist.py @@ -0,0 +1,55 @@ +from time import localtime, strftime +# Written by Maarten Clemens, Jelle Roozenburg +# see LICENSE.txt for license information + +#http://en.wikipedia.org/wiki/Damerau-Levenshtein_distance + +def editDist(str1,str2, maxlength=14): + # If fast is set: only calculate titles with same #fast initial chars + if not str1 or not str2: # protect against empty strings + return 1.0 + + str1 = str1[:maxlength].lower() + str2 = str2[:maxlength].lower() + + lenStr1 = len(str1) + lenStr2 = len(str2) + + d = [range(lenStr2+1)] + row = [] + + for i in range(lenStr1): + row.append(i+1) + for j in range(lenStr2): + penalty = 1./max(i+1,j+1) + ##penalty = 1 + if str1[i] == str2[j]: + cost = 0 + else: + cost = penalty + deletion = d[i][j+1] + penalty + insertion = row[j] + penalty + substitution = d[i][j] + cost + row.append(min(deletion,insertion,substitution)) + (deletion,insertion,substitution) + if i>0 and j>0 and str1[i] == str2[j-1] and str1[i-1] == str2[j]: + row[j+1] = min(row[j+1], d[i-1][j-1]+cost) # transposition + d.append(row) + row = [] + + ##maxi = max(lenStr1,lenStr2) # for 
penalty = 1 + maxi = sum([1./j for j in range(max(lenStr1,lenStr2)+1)[1:]]) + return 1.*d[lenStr1][lenStr2]/ maxi + + +if __name__ == '__main__': + import sys + str1 = sys.argv[1] + str2 = sys.argv[2] + print editDist(str1, str2) + + +## d,e = EditDist('mamamstein','levenstein') +## print e +## for i in d: +## print i diff --git a/tribler-mod/Tribler/Core/CacheDB/EditDist.py.bak b/tribler-mod/Tribler/Core/CacheDB/EditDist.py.bak new file mode 100644 index 0000000..1244ece --- /dev/null +++ b/tribler-mod/Tribler/Core/CacheDB/EditDist.py.bak @@ -0,0 +1,54 @@ +# Written by Maarten Clemens, Jelle Roozenburg +# see LICENSE.txt for license information + +#http://en.wikipedia.org/wiki/Damerau-Levenshtein_distance + +def editDist(str1,str2, maxlength=14): + # If fast is set: only calculate titles with same #fast initial chars + if not str1 or not str2: # protect against empty strings + return 1.0 + + str1 = str1[:maxlength].lower() + str2 = str2[:maxlength].lower() + + lenStr1 = len(str1) + lenStr2 = len(str2) + + d = [range(lenStr2+1)] + row = [] + + for i in range(lenStr1): + row.append(i+1) + for j in range(lenStr2): + penalty = 1./max(i+1,j+1) + ##penalty = 1 + if str1[i] == str2[j]: + cost = 0 + else: + cost = penalty + deletion = d[i][j+1] + penalty + insertion = row[j] + penalty + substitution = d[i][j] + cost + row.append(min(deletion,insertion,substitution)) + (deletion,insertion,substitution) + if i>0 and j>0 and str1[i] == str2[j-1] and str1[i-1] == str2[j]: + row[j+1] = min(row[j+1], d[i-1][j-1]+cost) # transposition + d.append(row) + row = [] + + ##maxi = max(lenStr1,lenStr2) # for penalty = 1 + maxi = sum([1./j for j in range(max(lenStr1,lenStr2)+1)[1:]]) + return 1.*d[lenStr1][lenStr2]/ maxi + + +if __name__ == '__main__': + import sys + str1 = sys.argv[1] + str2 = sys.argv[2] + print editDist(str1, str2) + + +## d,e = EditDist('mamamstein','levenstein') +## print e +## for i in d: +## print i diff --git a/tribler-mod/Tribler/Core/CacheDB/Notifier.py b/tribler-mod/Tribler/Core/CacheDB/Notifier.py new file mode 100644 index 0000000..8698cb5 --- /dev/null +++ b/tribler-mod/Tribler/Core/CacheDB/Notifier.py @@ -0,0 +1,83 @@ +from time import localtime, strftime +# Written by Jelle Roozenburg +# see LICENSE.txt for license information + +import threading + +from Tribler.Core.simpledefs import * + +class Notifier: + + SUBJECTS = [NTFY_PEERS, NTFY_TORRENTS, NTFY_PREFERENCES, NTFY_MYPREFERENCES, NTFY_ACTIVITIES, NTFY_REACHABLE] + + #. . . 
+ # todo: add all datahandler types+other observables + __single = None + + def __init__(self, pool = None): + if Notifier.__single: + raise RuntimeError, "Notifier is singleton" + self.pool = pool + self.observers = [] + self.observerLock = threading.Lock() + Notifier.__single = self + + def getInstance(*args, **kw): + if Notifier.__single is None: + Notifier(*args, **kw) + return Notifier.__single + getInstance = staticmethod(getInstance) + + def add_observer(self, func, subject, changeTypes = [NTFY_UPDATE, NTFY_INSERT, NTFY_DELETE], id = None): + """ + Add observer function which will be called upon certain event + Example: + addObserver(NTFY_PEERS, [NTFY_INSERT,NTFY_DELETE]) -> get callbacks + when peers are added or deleted + addObserver(NTFY_PEERS, [NTFY_SEARCH_RESULT], 'a_search_id') -> get + callbacks when peer-searchresults of of search + with id=='a_search_id' come in + """ + assert type(changeTypes) == list + assert subject in self.SUBJECTS + + obs = (func, subject, changeTypes, id) + self.observerLock.acquire() + self.observers.append(obs) + self.observerLock.release() + + def remove_observer(self, func): + """ Remove all observers with function func + """ + + self.observerLock.acquire() + i=0 + while i < len(self.observers): + ofunc = self.observers[i][0] + if ofunc == func: + del self.observers[i] + else: + i+=1 + self.observerLock.release() + + def notify(self, subject, changeType, obj_id, *args): + """ + Notify all interested observers about an event with threads from the pool + """ + tasks = [] + assert subject in self.SUBJECTS + + self.observerLock.acquire() + for ofunc, osubject, ochangeTypes, oid in self.observers: + if (subject == osubject and + changeType in ochangeTypes and + (oid is None or oid == obj_id)): + tasks.append(ofunc) + self.observerLock.release() + args = [subject, changeType, obj_id] + list(args) + for task in tasks: + if self.pool: + self.pool.queueTask(task, args) + else: + task(*args) # call observer function in this thread + diff --git a/tribler-mod/Tribler/Core/CacheDB/Notifier.py.bak b/tribler-mod/Tribler/Core/CacheDB/Notifier.py.bak new file mode 100644 index 0000000..fe3d606 --- /dev/null +++ b/tribler-mod/Tribler/Core/CacheDB/Notifier.py.bak @@ -0,0 +1,82 @@ +# Written by Jelle Roozenburg +# see LICENSE.txt for license information + +import threading + +from Tribler.Core.simpledefs import * + +class Notifier: + + SUBJECTS = [NTFY_PEERS, NTFY_TORRENTS, NTFY_PREFERENCES, NTFY_MYPREFERENCES, NTFY_ACTIVITIES, NTFY_REACHABLE] + + #. . . 
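The Notifier above is a plain observer registry: notify() matches subject, change type and optional id, then either queues the callback on the thread pool or calls it inline. A minimal self-contained sketch of that dispatch, using plain strings instead of the NTFY_* constants:

observers = []  # (func, subject, changeTypes, id), as in Notifier.observers

def add_observer(func, subject, changeTypes, obj_id=None):
    observers.append((func, subject, changeTypes, obj_id))

def notify(subject, changeType, obj_id, *args):
    # same filtering as Notifier.notify(), but without the pool or the lock
    for func, osubject, ochangeTypes, oid in observers:
        if (subject == osubject and changeType in ochangeTypes
                and (oid is None or oid == obj_id)):
            func(subject, changeType, obj_id, *args)

def on_peer_change(subject, changeType, obj_id, *args):
    print('%s %s %r' % (subject, changeType, obj_id))

add_observer(on_peer_change, 'peers', ['insert', 'delete'])
notify('peers', 'insert', 'some-permid')   # dispatched to on_peer_change
notify('peers', 'update', 'some-permid')   # filtered out, no callback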
+ # todo: add all datahandler types+other observables + __single = None + + def __init__(self, pool = None): + if Notifier.__single: + raise RuntimeError, "Notifier is singleton" + self.pool = pool + self.observers = [] + self.observerLock = threading.Lock() + Notifier.__single = self + + def getInstance(*args, **kw): + if Notifier.__single is None: + Notifier(*args, **kw) + return Notifier.__single + getInstance = staticmethod(getInstance) + + def add_observer(self, func, subject, changeTypes = [NTFY_UPDATE, NTFY_INSERT, NTFY_DELETE], id = None): + """ + Add observer function which will be called upon certain event + Example: + addObserver(NTFY_PEERS, [NTFY_INSERT,NTFY_DELETE]) -> get callbacks + when peers are added or deleted + addObserver(NTFY_PEERS, [NTFY_SEARCH_RESULT], 'a_search_id') -> get + callbacks when peer-searchresults of of search + with id=='a_search_id' come in + """ + assert type(changeTypes) == list + assert subject in self.SUBJECTS + + obs = (func, subject, changeTypes, id) + self.observerLock.acquire() + self.observers.append(obs) + self.observerLock.release() + + def remove_observer(self, func): + """ Remove all observers with function func + """ + + self.observerLock.acquire() + i=0 + while i < len(self.observers): + ofunc = self.observers[i][0] + if ofunc == func: + del self.observers[i] + else: + i+=1 + self.observerLock.release() + + def notify(self, subject, changeType, obj_id, *args): + """ + Notify all interested observers about an event with threads from the pool + """ + tasks = [] + assert subject in self.SUBJECTS + + self.observerLock.acquire() + for ofunc, osubject, ochangeTypes, oid in self.observers: + if (subject == osubject and + changeType in ochangeTypes and + (oid is None or oid == obj_id)): + tasks.append(ofunc) + self.observerLock.release() + args = [subject, changeType, obj_id] + list(args) + for task in tasks: + if self.pool: + self.pool.queueTask(task, args) + else: + task(*args) # call observer function in this thread + diff --git a/tribler-mod/Tribler/Core/CacheDB/SqliteCacheDBHandler.py b/tribler-mod/Tribler/Core/CacheDB/SqliteCacheDBHandler.py new file mode 100644 index 0000000..06158e5 --- /dev/null +++ b/tribler-mod/Tribler/Core/CacheDB/SqliteCacheDBHandler.py @@ -0,0 +1,3444 @@ +from time import localtime, strftime +# Written by Jie Yang +# see LICENSE.txt for license information +# Note for Developers: Please write a unittest in Tribler/Test/test_sqlitecachedbhandler.py +# for any function you add to database. 
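Every handler in this module uses the same double-checked-locking singleton getInstance(). A self-contained sketch of that idiom with a hypothetical ExampleDBHandler (the real handlers additionally wire up the shared SQLiteCacheDB instance):

import threading

class ExampleDBHandler:
    __single = None               # the one shared instance
    lock = threading.Lock()

    def getInstance(*args, **kw):
        # cheap unlocked check first, then re-check under the lock so that
        # concurrent callers cannot create two instances
        if ExampleDBHandler.__single is None:
            ExampleDBHandler.lock.acquire()
            try:
                if ExampleDBHandler.__single is None:
                    ExampleDBHandler(*args, **kw)
            finally:
                ExampleDBHandler.lock.release()
        return ExampleDBHandler.__single
    getInstance = staticmethod(getInstance)

    def __init__(self):
        if ExampleDBHandler.__single is not None:
            raise RuntimeError("ExampleDBHandler is singleton")
        ExampleDBHandler.__single = self

assert ExampleDBHandler.getInstance() is ExampleDBHandler.getInstance()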
+# Please reuse the functions in sqlitecachedb as much as possible + +from Tribler.Core.CacheDB.sqlitecachedb import SQLiteCacheDB, bin2str, str2bin, NULL +from unicode import name2unicode,dunno2unicode +from copy import deepcopy,copy +from sets import Set +from traceback import print_exc +from time import time +from sha import sha +import sys +import os +import socket +import threading +import base64 +from random import randint, sample +from sets import Set +import math + + +from maxflow import Network +from math import atan, pi + + +from Tribler.Core.BitTornado.bencode import bencode, bdecode +from Notifier import Notifier +from Tribler.Core.simpledefs import * +from Tribler.Core.BuddyCast.moderationcast_util import * +from Tribler.Core.Overlay.permid import sign_data, verify_data +from Tribler.Category.Category import Category + +# maxflow constants +MAXFLOW_DISTANCE = 2 +ALPHA = float(1)/30000 + +DEBUG = False +SHOW_ERROR = False + +MAX_KEYWORDS_STORED = 5 +MAX_KEYWORD_LENGTH = 50 + +def show_permid_shorter(permid): + if not permid: + return 'None' + s = base64.encodestring(permid).replace("\n","") + return s[-5:] + +class BasicDBHandler: + def __init__(self,db, table_name): ## self, table_name + self._db = db ## SQLiteCacheDB.getInstance() + self.table_name = table_name + self.notifier = Notifier.getInstance() + + def __del__(self): + try: + self.sync() + except: + if SHOW_ERROR: + print_exc() + + def close(self): + try: + self._db.close() + except: + if SHOW_ERROR: + print_exc() + + def size(self): + return self._db.size(self.table_name) + + def sync(self): + self._db.commit() + + def commit(self): + self._db.commit() + + def getOne(self, value_name, where=None, conj='and', **kw): + return self._db.getOne(self.table_name, value_name, where=where, conj=conj, **kw) + + def getAll(self, value_name, where=None, group_by=None, having=None, order_by=None, limit=None, offset=None, conj='and', **kw): + return self._db.getAll(self.table_name, value_name, where=where, group_by=group_by, having=having, order_by=order_by, limit=limit, offset=offset, conj=conj, **kw) + + +class MyDBHandler(BasicDBHandler): + + __single = None # used for multithreaded singletons pattern + lock = threading.Lock() + + def getInstance(*args, **kw): + # Singleton pattern with double-checking + if MyDBHandler.__single is None: + MyDBHandler.lock.acquire() + try: + if MyDBHandler.__single is None: + MyDBHandler(*args, **kw) + finally: + MyDBHandler.lock.release() + return MyDBHandler.__single + + getInstance = staticmethod(getInstance) + + def __init__(self): + if MyDBHandler.__single is not None: + raise RuntimeError, "MyDBHandler is singleton" + MyDBHandler.__single = self + db = SQLiteCacheDB.getInstance() + BasicDBHandler.__init__(self,db,'MyInfo') ## self,db,'MyInfo' + # keys: version, torrent_dir + + def get(self, key, default_value=None): + value = self.getOne('value', entry=key) + if value is not NULL: + return value + else: + if default_value is not None: + return default_value + else: + raise KeyError, key + + def put(self, key, value, commit=True): + if self.getOne('value', entry=key) is NULL: + self._db.insert(self.table_name, commit=commit, entry=key, value=value) + else: + where = "entry=" + repr(key) + self._db.update(self.table_name, where, commit=commit, value=value) + +class FriendDBHandler(BasicDBHandler): + + __single = None # used for multithreaded singletons pattern + lock = threading.Lock() + + def getInstance(*args, **kw): + # Singleton pattern with double-checking + if FriendDBHandler.__single 
is None: + FriendDBHandler.lock.acquire() + try: + if FriendDBHandler.__single is None: + FriendDBHandler(*args, **kw) + finally: + FriendDBHandler.lock.release() + return FriendDBHandler.__single + + getInstance = staticmethod(getInstance) + + def __init__(self): + if FriendDBHandler.__single is not None: + raise RuntimeError, "FriendDBHandler is singleton" + FriendDBHandler.__single = self + db = SQLiteCacheDB.getInstance() + BasicDBHandler.__init__(self,db, 'Peer') ## self,db,'Peer' + + def setFriendState(self, permid, state=1, commit=True): + self._db.update(self.table_name, 'permid='+repr(bin2str(permid)), commit=commit, friend=state) + self.notifier.notify(NTFY_PEERS, NTFY_UPDATE, permid, 'friend', state) + + def getFriends(self,state=1): + where = 'friend=%d ' % state + res = self._db.getAll('Friend', 'permid',where=where) + return [str2bin(p[0]) for p in res] + #raise Exception('Use PeerDBHandler getGUIPeers(category = "friend")!') + + def getFriendState(self, permid): + res = self.getOne('friend', permid=bin2str(permid)) + return res + + def deleteFriend(self,permid): + self.setFriendState(permid,0) + + def searchNames(self,kws): + return doPeerSearchNames(self,'Friend',kws) + + def getRanks(self): + # TODO + return [] + + def size(self): + return self._db.size('Friend') + + def addExternalFriend(self, peer): + peerdb = PeerDBHandler.getInstance() + peerdb.addPeer(peer['permid'], peer) + self.setFriendState(peer['permid']) + +NETW_MIME_TYPE = 'image/jpeg' + +class PeerDBHandler(BasicDBHandler): + + __single = None # used for multithreaded singletons pattern + lock = threading.Lock() + + gui_value_name = ('permid', 'name', 'ip', 'port', 'similarity', 'friend', + 'num_peers', 'num_torrents', 'num_prefs', + 'connected_times', 'buddycast_times', 'last_connected') + + def getInstance(*args, **kw): + # Singleton pattern with double-checking + if PeerDBHandler.__single is None: + PeerDBHandler.lock.acquire() + try: + if PeerDBHandler.__single is None: + PeerDBHandler(*args, **kw) + finally: + PeerDBHandler.lock.release() + return PeerDBHandler.__single + + getInstance = staticmethod(getInstance) + + def __init__(self): + if PeerDBHandler.__single is not None: + raise RuntimeError, "PeerDBHandler is singleton" + PeerDBHandler.__single = self + db = SQLiteCacheDB.getInstance() + BasicDBHandler.__init__(self, db,'Peer') ## self, db ,'Peer' + self.pref_db = PreferenceDBHandler.getInstance() + self.online_peers = set() + + + def __len__(self): + return self.size() + + def getPeerID(self, permid): + return self._db.getPeerID(permid) + + def getPeer(self, permid, keys=None): + if keys is not None: + res = self.getOne(keys, permid=bin2str(permid)) + return res + else: + # return a dictionary + # make it compatible for calls to old bsddb interface + value_name = ('permid', 'name', 'ip', 'port', 'similarity', 'friend', + 'num_peers', 'num_torrents', 'num_prefs', 'num_queries', + 'connected_times', 'buddycast_times', 'last_connected', 'last_seen', 'last_buddycast') + + item = self.getOne(value_name, permid=bin2str(permid)) + if not item: + return None + peer = dict(zip(value_name, item)) + peer['permid'] = str2bin(peer['permid']) + return peer + + def getPeerSim(self, permid): + permid_str = bin2str(permid) + sim = self.getOne('similarity', permid=permid_str) + if sim is None: + sim = 0 + return sim + + def getPeerList(self, peerids=None): # get the list of all peers' permid + if peerids is None: + permid_strs = self.getAll('permid') + return [str2bin(permid_str[0]) for permid_str in permid_strs] + 
else: + if not peerids: + return [] + s = str(peerids).replace('[','(').replace(']',')') +# if len(peerids) == 1: +# s = '(' + str(peerids[0]) + ')' # tuple([1]) = (1,), syntax error for sql +# else: +# s = str(tuple(peerids)) + sql = 'select permid from Peer where peer_id in ' + s + permid_strs = self._db.fetchall(sql) + return [str2bin(permid_str[0]) for permid_str in permid_strs] + + + def getPeers(self, peer_list, keys): # get a list of dictionaries given peer list + # BUG: keys must contain 2 entries, otherwise the records in all are single values?? + value_names = ",".join(keys) + sql = 'select %s from Peer where permid=?;'%value_names + all = [] + for permid in peer_list: + permid_str = bin2str(permid) + p = self._db.fetchone(sql, (permid_str,)) + all.append(p) + + peers = [] + for i in range(len(all)): + p = all[i] + peer = dict(zip(keys,p)) + peer['permid'] = peer_list[i] + peers.append(peer) + + return peers + + def addPeer(self, permid, value, update_dns=True, update_connected=False, commit=True): + # add or update a peer + # ARNO: AAARGGH a method that silently changes the passed value param!!! + # Jie: deepcopy(value)? + + _permid = _last_seen = _ip = _port = None + if 'permid' in value: + _permid = value.pop('permid') + + if not update_dns: + if value.has_key('ip'): + _ip = value.pop('ip') + if value.has_key('port'): + _port = value.pop('port') + + if update_connected: + old_connected = self.getOne('connected_times', permid=bin2str(permid)) + if not old_connected: + value['connected_times'] = 1 + else: + value['connected_times'] = old_connected + 1 + + peer_existed = self._db.insertPeer(permid, commit=commit, **value) + + if _permid is not None: + value['permid'] = permid + if _last_seen is not None: + value['last_seen'] = _last_seen + if _ip is not None: + value['ip'] = _ip + if _port is not None: + value['port'] = _port + + if peer_existed: + self.notifier.notify(NTFY_PEERS, NTFY_UPDATE, permid) + # Jie: only notify the GUI when a peer was connected + if 'connected_times' in value: + self.notifier.notify(NTFY_PEERS, NTFY_INSERT, permid) + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","sqldbhand: addPeer",`permid`,self._db.getPeerID(permid),`value` + #print_stack() + + + def hasPeer(self, permid): + return self._db.hasPeer(permid) + + def findPeers(self, key, value): + # only used by Connecter + if key == 'permid': + value = bin2str(value) + res = self.getAll('permid', **{key:value}) + if not res: + return [] + ret = [] + for p in res: + ret.append({'permid':str2bin(p[0])}) + return ret + + def updatePeer(self, permid, commit=True, **argv): + self._db.update(self.table_name, 'permid='+repr(bin2str(permid)), commit=commit, **argv) + self.notifier.notify(NTFY_PEERS, NTFY_UPDATE, permid) + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","sqldbhand: updatePeer",`permid`,argv + #print_stack() + + def deletePeer(self, permid=None, peer_id=None, force=False, commit=True): + # don't delete friend of superpeers, except that force is True + # to do: add transaction + #self._db._begin() # begin a transaction + if peer_id is None: + peer_id = self._db.getPeerID(permid) + if peer_id is None: + return + deleted = self._db.deletePeer(permid=permid, peer_id=peer_id, force=force, commit=commit) + if deleted: + self.pref_db._deletePeer(peer_id=peer_id, commit=commit) + self.notifier.notify(NTFY_PEERS, NTFY_DELETE, permid) + + def updateTimes(self, permid, key, change=1, commit=True): + permid_str = bin2str(permid) + sql = "SELECT peer_id,%s FROM 
Peer WHERE permid==?"%key + find = self._db.fetchone(sql, (permid_str,)) + if find: + peer_id,value = find + if value is None: + value = 1 + else: + value += change + sql_update_peer = "UPDATE Peer SET %s=? WHERE peer_id=?"%key + self._db.execute_write(sql_update_peer, (value, peer_id), commit=commit) + self.notifier.notify(NTFY_PEERS, NTFY_UPDATE, permid) + + def updatePeerSims(self, sim_list, commit=True): + sql_update_sims = 'UPDATE Peer SET similarity=? WHERE peer_id=?' + s = time() + self._db.executemany(sql_update_sims, sim_list, commit=commit) + + def getPermIDByIP(self,ip): + permid = self.getOne('permid', ip=ip) + if permid is not None: + return str2bin(permid) + else: + return None + + def getPermid(self, peer_id): + permid = self.getOne('permid', peer_id=peer_id) + if permid is not None: + return str2bin(permid) + else: + return None + + def getNumberPeers(self, category_name = 'all'): + # 28/07/08 boudewijn: counting the union from two seperate + # select statements is faster than using a single select + # statement with an OR in the WHERE clause. Note that UNION + # returns a distinct list of peer_id's. + if category_name == 'friend': + sql = 'SELECT COUNT(peer_id) FROM Peer WHERE last_connected > 0 AND friend = 1' + else: + sql = 'SELECT COUNT(peer_id) FROM (SELECT peer_id FROM Peer WHERE last_connected > 0 UNION SELECT peer_id FROM Peer WHERE friend = 1)' + res = self._db.fetchone(sql) + if not res: + res = 0 + return res + + def getGUIPeers(self, category_name = 'all', range = None, sort = None, reverse = False, get_online=False, get_ranks=True): + # + # ARNO: WHY DIFF WITH NORMAL getPeers?????? + # load peers for GUI + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'getGUIPeers(%s, %s, %s, %s)' % (category_name, range, sort, reverse) + """ + db keys: peer_id, permid, name, ip, port, thumbnail, oversion, + similarity, friend, superpeer, last_seen, last_connected, + last_buddycast, connected_times, buddycast_times, num_peers, + num_torrents, num_prefs, num_queries, + + @in: get_online: boolean: if true, give peers a key 'online' if there is a connection now + """ + value_name = PeerDBHandler.gui_value_name + + where = '(last_connected>0 or friend=1 or friend=2 or friend=3) ' + if category_name in ('friend', 'friends'): + # Show mutual, I invited and he invited + where += 'and (friend=1 or friend=2 or friend=3) ' + if range: + offset= range[0] + limit = range[1] - range[0] + else: + limit = offset = None + if sort: + # Arno, 2008-10-6: buggy: not reverse??? + desc = (reverse) and 'desc' or '' + if sort in ('name'): + order_by = ' lower(%s) %s' % (sort, desc) + else: + order_by = ' %s %s' % (sort, desc) + else: + order_by = None + + # Must come before query + if get_ranks: + ranks = self.getRanks() + # Arno, 2008-10-23: Someone disabled ranking of people, why? 
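The UNION in getNumberPeers returns a distinct set of peer_id's, so a peer that is both connected and a friend is counted once (summing two separate COUNTs would count it twice). A hedged, self-contained illustration against an in-memory table with the same columns:

import sqlite3

con = sqlite3.connect(':memory:')
con.execute('CREATE TABLE Peer (peer_id INTEGER PRIMARY KEY,'
            ' last_connected INTEGER, friend INTEGER)')
con.executemany('INSERT INTO Peer VALUES (?,?,?)',
                [(1, 10, 0),    # connected, not a friend
                 (2, 0, 1),     # friend, never connected
                 (3, 5, 1)])    # connected friend: must not be counted twice
sql = ('SELECT COUNT(peer_id) FROM ('
       'SELECT peer_id FROM Peer WHERE last_connected > 0 '
       'UNION SELECT peer_id FROM Peer WHERE friend = 1)')
print(con.execute(sql).fetchone()[0])   # 3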
+ + res_list = self.getAll(value_name, where, offset= offset, limit=limit, order_by=order_by) + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","getGUIPeers: where",where,"offset",offset,"limit",limit,"order",order_by + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","getGUIPeers: returned len",len(res_list) + + peer_list = [] + for item in res_list: + peer = dict(zip(value_name, item)) + peer['name'] = dunno2unicode(peer['name']) + peer['simRank'] = ranksfind(ranks,peer['permid']) + peer['permid'] = str2bin(peer['permid']) + peer_list.append(peer) + + if get_online: + self.checkOnline(peer_list) + + # peer_list consumes about 1.5M for 1400 peers, and this function costs about 0.015 second + + return peer_list + + + def getRanks(self): + value_name = 'permid' + order_by = 'similarity desc' + rankList_size = 20 + where = '(last_connected>0 or friend=1) ' + res_list = self._db.getAll('Peer', value_name, where=where, limit=rankList_size, order_by=order_by) + return [a[0] for a in res_list] + + def checkOnline(self, peerlist): + # Add 'online' key in peers when their permid + # Called by any thread, accesses single online_peers-dict + # Peers will never be sorted by 'online' because it is not in the db. + # Do not sort here, because then it would be sorted with a partial select (1 page in the grid) + self.lock.acquire() + for peer in peerlist: + peer['online'] = (peer['permid'] in self.online_peers) + self.lock.release() + + + + def setOnline(self,subject,changeType,permid,*args): + """Called by callback threads + with NTFY_CONNECTION, args[0] is boolean: connection opened/closed + """ + self.lock.acquire() + if args[0]: # connection made + self.online_peers.add(permid) + else: # connection closed + self.online_peers.remove(permid) + self.lock.release() + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", (('#'*50)+'\n')*5+'%d peers online' % len(self.online_peers) + + def registerConnectionUpdater(self, session): + session.add_observer(self.setOnline, NTFY_PEERS, [NTFY_CONNECTION], None) + + def updatePeerIcon(self, permid, icontype, icondata, updateFlag = True): + # save thumb in db + self.updatePeer(permid, thumbnail=bin2str(icondata)) + #if self.mm is not None: + # self.mm.save_data(permid, icontype, icondata) + + + def getPeerIcon(self, permid): + item = self.getOne('thumbnail', permid=bin2str(permid)) + if item: + return NETW_MIME_TYPE, str2bin(item) + else: + return None, None + #if self.mm is not None: + # return self.mm.load_data(permid) + #3else: + # return None + + + def searchNames(self,kws): + return doPeerSearchNames(self,'Peer',kws) + + + +class SuperPeerDBHandler(BasicDBHandler): + + __single = None # used for multithreaded singletons pattern + lock = threading.Lock() + + def getInstance(*args, **kw): + # Singleton pattern with double-checking + if SuperPeerDBHandler.__single is None: + SuperPeerDBHandler.lock.acquire() + try: + if SuperPeerDBHandler.__single is None: + SuperPeerDBHandler(*args, **kw) + finally: + SuperPeerDBHandler.lock.release() + return SuperPeerDBHandler.__single + + getInstance = staticmethod(getInstance) + + def __init__(self): + if SuperPeerDBHandler.__single is not None: + raise RuntimeError, "SuperPeerDBHandler is singleton" + SuperPeerDBHandler.__single = self + db = SQLiteCacheDB.getInstance() + BasicDBHandler.__init__(self, db, 'SuperPeer') + self.peer_db_handler = PeerDBHandler.getInstance() + + def loadSuperPeers(self, config, refresh=False): + filename = 
os.path.join(config['install_dir'], config['superpeer_file']) + superpeer_list = self.readSuperPeerList(filename) + self.insertSuperPeers(superpeer_list, refresh) + + def readSuperPeerList(self, filename=u''): + """ read (superpeer_ip, superpeer_port, permid [, name]) lines from a text file """ + + try: + filepath = os.path.abspath(filename) + file = open(filepath, "r") + except IOError: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "superpeer: cannot open superpeer file", filepath + return [] + + superpeers = file.readlines() + file.close() + superpeers_info = [] + for superpeer in superpeers: + if superpeer.strip().startswith("#"): # skip commended lines + continue + superpeer_line = superpeer.split(',') + superpeer_info = [a.strip() for a in superpeer_line] + try: + superpeer_info[2] = base64.decodestring(superpeer_info[2]+'\n' ) + except: + print_exc() + continue + try: + ip = socket.gethostbyname(superpeer_info[0]) + superpeer = {'ip':ip, 'port':superpeer_info[1], + 'permid':superpeer_info[2], 'superpeer':1} + if len(superpeer_info) > 3: + superpeer['name'] = superpeer_info[3] + superpeers_info.append(superpeer) + except: + print_exc() + + return superpeers_info + + def insertSuperPeers(self, superpeer_list, refresh=False): + for superpeer in superpeer_list: + superpeer = deepcopy(superpeer) + if not isinstance(superpeer, dict) or 'permid' not in superpeer: + continue + permid = superpeer.pop('permid') + self.peer_db_handler.addPeer(permid, superpeer, commit=False) + self.peer_db_handler.commit() + + def getSuperPeers(self): + # return list with permids of superpeers + res_list = self._db.getAll(self.table_name, 'permid') + return [str2bin(a[0]) for a in res_list] + + def addExternalSuperPeer(self, peer): + _peer = deepcopy(peer) + permid = _peer.pop('permid') + _peer['superpeer'] = 1 + self._db.insertPeer(permid, **_peer) + + +class CrawlerDBHandler: + """ + The CrawlerDBHandler is not an actual handle to a + database. Instead it uses a local file (usually crawler.txt) to + identify crawler processes. 
+ """ + + __single = None # used for multithreaded singletons pattern + lock = threading.Lock() + + def getInstance(*args, **kw): + # Singleton pattern with double-checking + if CrawlerDBHandler.__single is None: + CrawlerDBHandler.lock.acquire() + try: + if CrawlerDBHandler.__single is None: + CrawlerDBHandler(*args, **kw) + finally: + CrawlerDBHandler.lock.release() + return CrawlerDBHandler.__single + + getInstance = staticmethod(getInstance) + + def __init__(self): + if CrawlerDBHandler.__single is not None: + raise RuntimeError, "CrawlerDBHandler is singleton" + CrawlerDBHandler.__single = self + self._crawler_list = [] + + def loadCrawlers(self, config, refresh=False): + filename = os.path.join(config['crawler_file']) + self._crawler_list = self.readCrawlerList(filename) + + def readCrawlerList(self, filename=''): + """ + read (permid [, name]) lines from a text file + returns a list containing permids + """ + + try: + filepath = os.path.abspath(filename) + file = open(filepath, "r") + except IOError: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: cannot open crawler file", filepath + return [] + + crawlers = file.readlines() + file.close() + crawlers_info = [] + for crawler in crawlers: + if crawler.strip().startswith("#"): # skip commended lines + continue + crawler_info = [a.strip() for a in crawler.split(",")] + try: + crawler_info[0] = base64.decodestring(crawler_info[0]+'\n') + except: + print_exc() + continue + crawlers_info.append(str2bin(crawler)) + + return crawlers_info + + def temporarilyAddCrawler(self, permid): + """ + Because of security reasons we will not allow crawlers to be + added to the crawler.txt list. This temporarilyAddCrawler + method can be used to add one for the running session. Usefull + for debugging and testing. + """ + if not permid in self._crawler_list: + self._crawler_list.append(permid) + + def getCrawlers(self): + """ + returns a list with permids of crawlers + """ + return self._crawler_list + + + +class PreferenceDBHandler(BasicDBHandler): + + __single = None # used for multithreaded singletons pattern + lock = threading.Lock() + + def getInstance(*args, **kw): + # Singleton pattern with double-checking + if PreferenceDBHandler.__single is None: + PreferenceDBHandler.lock.acquire() + try: + if PreferenceDBHandler.__single is None: + PreferenceDBHandler(*args, **kw) + finally: + PreferenceDBHandler.lock.release() + return PreferenceDBHandler.__single + + getInstance = staticmethod(getInstance) + + def __init__(self): + if PreferenceDBHandler.__single is not None: + raise RuntimeError, "PreferenceDBHandler is singleton" + PreferenceDBHandler.__single = self + db = SQLiteCacheDB.getInstance() + BasicDBHandler.__init__(self,db, 'Preference') ## self,db,'Preference' + + def _getTorrentOwnersID(self, torrent_id): + sql_get_torrent_owners_id = u"SELECT peer_id FROM Preference WHERE torrent_id==?" + res = self._db.fetchall(sql_get_torrent_owners_id, (torrent_id,)) + return [t[0] for t in res] + + def getPrefList(self, permid, return_infohash=False): + # get a peer's preference list of infohash or torrent_id according to return_infohash + peer_id = self._db.getPeerID(permid) + if peer_id is None: + return [] + + if not return_infohash: + sql_get_peer_prefs_id = u"SELECT torrent_id FROM Preference WHERE peer_id==?" 
+ res = self._db.fetchall(sql_get_peer_prefs_id, (peer_id,)) + return [t[0] for t in res] + else: + sql_get_infohash = u"SELECT infohash FROM Torrent WHERE torrent_id IN (SELECT torrent_id FROM Preference WHERE peer_id==?)" + res = self._db.fetchall(sql_get_infohash, (peer_id,)) + return [str2bin(t[0]) for t in res] + + def _deletePeer(self, permid=None, peer_id=None, commit=True): # delete a peer from pref_db + # should only be called by PeerDBHandler + if peer_id is None: + peer_id = self._db.getPeerID(permid) + if peer_id is None: + return + + self._db.delete(self.table_name, commit=commit, peer_id=peer_id) + + def addPreference(self, permid, infohash, data={}, commit=True): + # This function should be replaced by addPeerPreferences + # peer_permid and prefs are binaries, the peer must have been inserted in Peer table + # Nicolas: did not change this function as it seems addPreference*s* is getting called + peer_id = self._db.getPeerID(permid) + if peer_id is None: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'PreferenceDBHandler: add preference of a peer which is not existed in Peer table', `permid` + return + + sql_insert_peer_torrent = u"INSERT INTO Preference (peer_id, torrent_id) VALUES (?,?)" + torrent_id = self._db.getTorrentID(infohash) + if not torrent_id: + self._db.insertInfohash(infohash) + torrent_id = self._db.getTorrentID(infohash) + try: + self._db.execute_write(sql_insert_peer_torrent, (peer_id, torrent_id), commit=commit) + except Exception, msg: # duplicated + print_exc() + + + + def addPreferences(self, peer_permid, prefs, is_torrent_id=False, commit=True): + # peer_permid and prefs are binaries, the peer must have been inserted in Peer table + # + # boudewijn: for buddycast version >= OLPROTO_VER_EIGTH the + # prefs list may contain both strings (indicating an infohash) + # or dictionaries (indicating an infohash with metadata) + + peer_id = self._db.getPeerID(peer_permid) + if peer_id is None: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'PreferenceDBHandler: add preference of a peer which is not existed in Peer table', `peer_permid` + return + + prefs = [type(pref) is str and {"infohash":pref} or pref + for pref + in prefs] + + if is_torrent_id: + torrent_id_prefs = [(peer_id, + pref['torrent_id'], + pref.get('position', -1), + pref.get('reranking_strategy', -1)) + for pref in prefs] + else: + # Nicolas: do not know why this would be called, but let's handle it smoothly + torrent_id_prefs = [] + for pref in prefs: + if type(pref)==dict: + infohash = pref["infohash"] + else: + infohash = pref # Nicolas: from wherever this might come, we even handle old list of infohashes style + torrent_id = self._db.getTorrentID(infohash) + if not torrent_id: + self._db.insertInfohash(infohash) + torrent_id = self._db.getTorrentID(infohash) + torrent_id_prefs.append((peer_id, torrent_id, -1, -1)) + + sql_insert_peer_torrent = u"INSERT INTO Preference (peer_id, torrent_id, click_position, reranking_strategy) VALUES (?,?,?,?)" + if len(prefs) > 0: + try: + self._db.executemany(sql_insert_peer_torrent, torrent_id_prefs, commit=commit) + except Exception, msg: # duplicated + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'dbhandler: addPreferences:', Exception, msg + + # now, store search terms + + # Nicolas: if maximum number of search terms is exceeded, abort storing them. 
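The clicklog code below caps the number of stored keywords and strips every non-alphanumeric character before the terms are handed to TermDBHandler. A simplified, hedged sketch of that clean-up for a single list of terms (the real code works per preference and aborts storing entirely when the cap is exceeded; where exactly the length cap applies is an assumption here):

def clean_search_terms(terms, max_terms=5, max_len=50):
    # max_terms/max_len mirror MAX_KEYWORDS_STORED and MAX_KEYWORD_LENGTH
    if len(terms) > max_terms:
        return []                      # too many terms: store nothing
    cleaned = []
    for term in terms:
        cleanterm = ''.join(c for c in term[:max_len] if c.isalnum())
        if cleanterm:
            cleaned.append(cleanterm)
    return cleaned

print(clean_search_terms(['harry potter!', ' 2008 ', '***']))
# ['harrypotter', '2008']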
+ # Although this may seem a bit strict, this means that something different than a genuine Tribler client + # is on the other side, so we might rather err on the side of caution here and simply let clicklog go. + nums_of_search_terms = [len(pref.get('search_terms',[])) for pref in prefs] + if max(nums_of_search_terms)>MAX_KEYWORDS_STORED: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "peer %d exceeds max number %d of keywords per torrent, aborting storing keywords" % \ + (peer_id, MAX_KEYWORDS_STORED) + return + + all_terms_unclean = Set([]) + for pref in prefs: + newterms = Set(pref.get('search_terms',[])) + all_terms_unclean = all_terms_unclean.union(newterms) + + all_terms = [] + for term in all_terms_unclean: + cleanterm = '' + for i in range(0,len(term)): + c = term[i] + if c.isalnum(): + cleanterm += c + if len(cleanterm)>0: + all_terms.append(cleanterm) + + + # maybe we haven't received a single key word, no need to loop again over prefs then + if len(all_terms)==0: + return + + termdb = TermDBHandler.getInstance() + searchdb = SearchDBHandler.getInstance() + + # insert all unknown terms NOW so we can rebuild the index at once + termdb.bulkInsertTerms(all_terms) + + # get local term ids for terms. + foreign2local = dict([(str(foreign_term), termdb.getTermID(foreign_term)) + for foreign_term + in all_terms]) + + # process torrent data + for pref in prefs: + torrent_id = pref.get('torrent_id', None) + search_terms = pref.get('search_terms', []) + + if search_terms==[]: + continue + if not torrent_id: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "torrent_id not set, retrieving manually!" + torrent_id = TorrentDBHandler.getInstance().getTorrentID(infohash) + + term_ids = [foreign2local[str(foreign)] for foreign in search_terms] + searchdb.storeKeywordsByID(peer_id, torrent_id, term_ids, commit=False) + if commit: + searchdb.commit() + + def getAllEntries(self): + """use with caution,- for testing purposes""" + return self.getAll("rowid, peer_id, torrent_id, click_position,reranking_strategy", order_by="peer_id, torrent_id") + + + def getRecentPeersPrefs(self, key, num=None): + # get the recently seen peers' preference. used by buddycast + sql = "select peer_id,torrent_id from Preference where peer_id in (select peer_id from Peer order by %s desc)"%key + if num is not None: + sql = sql[:-1] + " limit %d)"%num + res = self._db.fetchall(sql) + return res + + def getPositionScore(self, torrent_id, keywords): + """returns a tuple (num, positionScore) stating how many times the torrent id was found in preferences, + and the average position score, where each click at position i receives 1-(1/i) points""" + + if not keywords: + return (0,0) + + term_db = TermDBHandler.getInstance() + term_ids = [term_db.getTermID(keyword) for keyword in keywords] + s_term_ids = str(term_ids).replace("[","(").replace("]",")").replace("L","") + + # we're not really interested in the peer_id here, + # just make sure we don't count twice if we hit more than one keyword in a search + # ... 
one might treat keywords a bit more strictly here anyway (AND instead of OR) + sql = """ +SELECT DISTINCT Preference.peer_id, Preference.click_position +FROM Preference +INNER JOIN ClicklogSearch +ON + Preference.torrent_id = ClicklogSearch.torrent_id + AND + Preference.peer_id = ClicklogSearch.peer_id +WHERE + ClicklogSearch.term_id IN %s + AND + ClicklogSearch.torrent_id = %s""" % (s_term_ids, torrent_id) + res = self._db.fetchall(sql) + scores = [1.0-1.0/float(click_position+1) + for (peer_id, click_position) + in res + if click_position>-1] + if len(scores)==0: + return (0,0) + score = float(sum(scores))/len(scores) + return (len(scores), score) + + +class TorrentDBHandler(BasicDBHandler): + + __single = None # used for multithreaded singletons pattern + lock = threading.Lock() + + def getInstance(*args, **kw): + # Singleton pattern with double-checking + if TorrentDBHandler.__single is None: + TorrentDBHandler.lock.acquire() + try: + if TorrentDBHandler.__single is None: + TorrentDBHandler(*args, **kw) + finally: + TorrentDBHandler.lock.release() + return TorrentDBHandler.__single + + getInstance = staticmethod(getInstance) + + def __init__(self): + if TorrentDBHandler.__single is not None: + raise RuntimeError, "TorrentDBHandler is singleton" + TorrentDBHandler.__single = self + db = SQLiteCacheDB.getInstance() + BasicDBHandler.__init__(self,db, 'Torrent') ## self,db,torrent + + self.mypref_db = MyPreferenceDBHandler.getInstance() + + self.status_table = {'good':1, 'unknown':0, 'dead':2} + self.status_table.update(self._db.getTorrentStatusTable()) + self.id2status = dict([(x,y) for (y,x) in self.status_table.items()]) + self.torrent_dir = None + # 0 - unknown + # 1 - good + # 2 - dead + + self.category_table = {'Video':1, + 'VideoClips':2, + 'Audio':3, + 'Compressed':4, + 'Document':5, + 'Picture':6, + 'xxx':7, + 'other':8,} + self.category_table.update(self._db.getTorrentCategoryTable()) + self.category_table['unknown'] = 0 + self.id2category = dict([(x,y) for (y,x) in self.category_table.items()]) + # 1 - Video + # 2 - VideoClips + # 3 - Audio + # 4 - Compressed + # 5 - Document + # 6 - Picture + # 7 - xxx + # 8 - other + + self.src_table = self._db.getTorrentSourceTable() + self.id2src = dict([(x,y) for (y,x) in self.src_table.items()]) + # 0 - '' # local added + # 1 - BC + # 2,3,4... 
- URL of RSS feed + self.keys = ['torrent_id', 'name', 'torrent_file_name', + 'length', 'creation_date', 'num_files', 'thumbnail', + 'insert_time', 'secret', 'relevance', + 'source_id', 'category_id', 'status_id', + 'num_seeders', 'num_leechers', 'comment'] + self.existed_torrents = Set() + + + self.value_name = ['C.torrent_id', 'category_id', 'status_id', 'name', 'creation_date', 'num_files', + 'num_leechers', 'num_seeders', 'length', + 'secret', 'insert_time', 'source_id', 'torrent_file_name', + 'relevance', 'infohash', 'tracker', 'last_check'] + + def register(self, category, torrent_dir): + self.category = category + self.torrent_dir = torrent_dir + + def getTorrentID(self, infohash): + return self._db.getTorrentID(infohash) + + def getInfohash(self, torrent_id): + return self._db.getInfohash(torrent_id) + + def hasTorrent(self, infohash): + if infohash in self.existed_torrents: #to do: not thread safe + return True + infohash_str = bin2str(infohash) + existed = self._db.getOne('CollectedTorrent', 'torrent_id', infohash=infohash_str) + if existed is None: + return False + else: + self.existed_torrents.add(infohash) + return True + + def addExternalTorrent(self, filename, source='BC', extra_info={}, metadata=None): + infohash, torrent = self._readTorrentData(filename, source, extra_info, metadata) + if infohash is None: + return torrent + if not self.hasTorrent(infohash): + self._addTorrentToDB(infohash, torrent, commit=True) + self.notifier.notify(NTFY_TORRENTS, NTFY_INSERT, infohash) + + return torrent + + def _readTorrentData(self, filename, source='BC', extra_info={}, metadata=None): + # prepare data to insert into database + try: + if metadata is None: + f = open(filename, 'rb') + metadata = f.read() + f.close() + + metainfo = bdecode(metadata) + except Exception,msg: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", Exception,msg,`metadata` + return None,None + + namekey = name2unicode(metainfo) # convert info['name'] to type(unicode) + info = metainfo['info'] + infohash = sha(bencode(info)).digest() + + torrent = {'infohash': infohash} + torrent['torrent_file_name'] = os.path.split(filename)[1] + torrent['name'] = info.get(namekey, '') + + length = 0 + nf = 0 + if info.has_key('length'): + length = info.get('length', 0) + nf = 1 + elif info.has_key('files'): + for li in info['files']: + nf += 1 + if li.has_key('length'): + length += li['length'] + torrent['length'] = length + torrent['num_files'] = nf + torrent['announce'] = metainfo.get('announce', '') + torrent['announce-list'] = metainfo.get('announce-list', '') + torrent['creation_date'] = metainfo.get('creation date', 0) + + torrent['comment'] = metainfo.get('comment', None) + + torrent["ignore_number"] = 0 + torrent["retry_number"] = 0 + torrent["num_seeders"] = extra_info.get('seeder', -1) + torrent["num_leechers"] = extra_info.get('leecher', -1) + other_last_check = extra_info.get('last_check_time', -1) + if other_last_check >= 0: + torrent["last_check_time"] = int(time()) - other_last_check + else: + torrent["last_check_time"] = 0 + torrent["status"] = self._getStatusID(extra_info.get('status', "unknown")) + + torrent["source"] = self._getSourceID(source) + torrent["insert_time"] = long(time()) + + torrent['category'] = self._getCategoryID(self.category.calculateCategory(metainfo, torrent['name'])) + torrent['secret'] = 0 # to do: check if torrent is secret + torrent['relevance'] = 0.0 + thumbnail = 0 + if 'azureus_properties' in metainfo and 'Content' in metainfo['azureus_properties']: + if 
metainfo['azureus_properties']['Content'].get('Thumbnail',''): + thumbnail = 1 + torrent['thumbnail'] = thumbnail + + #if (torrent['category'] != []): + # print '### one torrent added from MetadataHandler: ' + str(torrent['category']) + ' ' + torrent['torrent_name'] + '###' + return infohash, torrent + + def addInfohash(self, infohash, commit=True): + if self._db.getTorrentID(infohash) is None: + self._db.insert('Torrent', commit=commit, infohash=bin2str(infohash)) + + def _getStatusID(self, status): + return self.status_table.get(status.lower(), 0) + + def _getCategoryID(self, category_list): + if len(category_list) > 0: + category = category_list[0].lower() + cat_int = self.category_table[category] + else: + cat_int = 0 + return cat_int + + def _getSourceID(self, src): + if src in self.src_table: + src_int = self.src_table[src] + else: + src_int = self._insertNewSrc(src) # add a new src, e.g., a RSS feed + self.src_table[src] = src_int + self.id2src[src_int] = src + return src_int + + def _addTorrentToDB(self, infohash, data, commit=True): + torrent_id = self._db.getTorrentID(infohash) + if torrent_id is None: # not in db + infohash_str = bin2str(infohash) + self._db.insert('Torrent', + commit=True, # must commit to get the torrent id + infohash = infohash_str, + name = dunno2unicode(data['name']), + torrent_file_name = data['torrent_file_name'], + length = data['length'], + creation_date = data['creation_date'], + num_files = data['num_files'], + thumbnail = data['thumbnail'], + insert_time = data['insert_time'], + secret = data['secret'], + relevance = data['relevance'], + source_id = data['source'], + category_id = data['category'], + status_id = data['status'], + num_seeders = data['num_seeders'], + num_leechers = data['num_leechers'], + comment = dunno2unicode(data['comment'])) + torrent_id = self._db.getTorrentID(infohash) + else: # infohash in db + where = 'torrent_id = %d'%torrent_id + self._db.update('Torrent', where = where, + commit=False, + name = dunno2unicode(data['name']), + torrent_file_name = data['torrent_file_name'], + length = data['length'], + creation_date = data['creation_date'], + num_files = data['num_files'], + thumbnail = data['thumbnail'], + insert_time = data['insert_time'], + secret = data['secret'], + relevance = data['relevance'], + source_id = data['source'], + category_id = data['category'], + status_id = data['status'], + num_seeders = data['num_seeders'], + num_leechers = data['num_leechers'], + comment = dunno2unicode(data['comment'])) + + self._addTorrentTracker(torrent_id, data, commit=False) + if commit: + self.commit() + self._db.show_execute = False + return torrent_id + + def _insertNewSrc(self, src, commit=True): + desc = '' + if src.startswith('http') and src.endswith('xml'): + desc = 'RSS' + self._db.insert('TorrentSource', commit=commit, name=src, description=desc) + src_id = self._db.getOne('TorrentSource', 'source_id', name=src) + return src_id + + def _addTorrentTracker(self, torrent_id, data, add_all=False, commit=True): + # Set add_all to True if you want to put all multi-trackers into db. + # In the current version (4.2) only the main tracker is used. 
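+        # Illustrative sketch: with add_all=True and hypothetical values
+        # announce = 'http://t1/ann' and
+        # announce-list = [['http://t1/ann'], ['http://t2/ann', 'http://t3/ann']],
+        # the rows written below would be roughly:
+        #
+        #   (torrent_id, 'http://t1/ann', 1, ignore_number, retry_number, last_check_time)
+        #   (torrent_id, 'http://t2/ann', 2, 0, 0, 0)
+        #   (torrent_id, 'http://t3/ann', 3, 0, 0, 0)
+        #
+        # i.e. the main announce always gets tier 1, duplicates of it are skipped
+        # via the 'trackers' dict, and every remaining tracker gets the next tier number.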
+ exist = self._db.getOne('TorrentTracker', 'tracker', torrent_id=torrent_id) + if exist: + return + + announce = data['announce'] + ignore_number = data['ignore_number'] + retry_number = data['retry_number'] + last_check_time = data['last_check_time'] + + announce_list = data['announce-list'] + + sql_insert_torrent_tracker = """ + INSERT INTO TorrentTracker + (torrent_id, tracker, announce_tier, + ignored_times, retried_times, last_check) + VALUES (?,?,?, ?,?,?) + """ + + values = [(torrent_id, announce, 1, ignore_number, retry_number, last_check_time)] + # each torrent only has one announce with tier number 1 + tier_num = 2 + trackers = {announce:None} + if add_all: + for tier in announce_list: + for tracker in tier: + if tracker in trackers: + continue + value = (torrent_id, tracker, tier_num, 0, 0, 0) + values.append(value) + trackers[tracker] = None + tier_num += 1 + + self._db.executemany(sql_insert_torrent_tracker, values, commit=commit) + + def updateTorrent(self, infohash, commit=True, **kw): # watch the schema of database + if 'category' in kw: + cat_id = self._getCategoryID(kw.pop('category')) + kw['category_id'] = cat_id + if 'status' in kw: + status_id = self._getStatusID(kw.pop('status')) + kw['status_id'] = status_id + if 'progress' in kw: + self.mypref_db.updateProgress(infohash, kw.pop('progress'), commit=False)# commit at end of function + if 'seeder' in kw: + kw['num_seeders'] = kw.pop('seeder') + if 'leecher' in kw: + kw['num_leechers'] = kw.pop('leecher') + if 'last_check_time' in kw or 'ignore_number' in kw or 'retry_number' in kw \ + or 'retried_times' in kw or 'ignored_times' in kw: + self.updateTracker(infohash, kw, commit=False) + + for key in kw.keys(): + if key not in self.keys: + kw.pop(key) + + if len(kw) > 0: + infohash_str = bin2str(infohash) + where = "infohash='%s'"%infohash_str + self._db.update(self.table_name, where, commit=False, **kw) + + if commit: + self.commit() + # to.do: update the torrent panel's number of seeders/leechers + self.notifier.notify(NTFY_TORRENTS, NTFY_UPDATE, infohash) + + def updateTracker(self, infohash, kw, tier=1, tracker=None, commit=True): + torrent_id = self._db.getTorrentID(infohash) + if torrent_id is None: + return + update = {} + assert type(kw) == dict and kw, 'updateTracker error: kw should be filled dict, but is: %s' % kw + if 'last_check_time' in kw: + update['last_check'] = kw.pop('last_check_time') + if 'ignore_number' in kw: + update['ignored_times'] = kw.pop('ignore_number') + if 'ignored_times' in kw: + update['ignored_times'] = kw.pop('ignored_times') + if 'retry_number' in kw: + update['retried_times'] = kw.pop('retry_number') + if 'retried_times' in kw: + update['retried_times'] = kw.pop('retried_times') + + if tracker is None: + where = 'torrent_id=%d AND announce_tier=%d'%(torrent_id, tier) + else: + where = 'torrent_id=%d AND tracker=%s'%(torrent_id, repr(tracker)) + self._db.update('TorrentTracker', where, commit=commit, **update) + + def deleteTorrent(self, infohash, delete_file=False, commit = True): + if not self.hasTorrent(infohash): + return False + + if self.mypref_db.hasMyPreference(infohash): # don't remove torrents in my pref + return False + + if delete_file: + deleted = self.eraseTorrentFile(infohash) + else: + deleted = True + + if deleted: + self._deleteTorrent(infohash, commit=commit) + + self.notifier.notify(NTFY_TORRENTS, NTFY_DELETE, infohash) + return deleted + + def _deleteTorrent(self, infohash, keep_infohash=True, commit=True): + torrent_id = self._db.getTorrentID(infohash) + if 
torrent_id is not None: + if keep_infohash: + self._db.update(self.table_name, where="torrent_id=%d"%torrent_id, commit=commit, torrent_file_name=None) + else: + self._db.delete(self.table_name, commit=commit, torrent_id=torrent_id) + if infohash in self.existed_torrents: + self.existed_torrents.remove(infohash) + self._db.delete('TorrentTracker', commit=commit, torrent_id=torrent_id) + #print '******* delete torrent', torrent_id, `infohash`, self.hasTorrent(infohash) + + def eraseTorrentFile(self, infohash): + torrent_id = self._db.getTorrentID(infohash) + if torrent_id is not None: + torrent_dir = self.getTorrentDir() + torrent_name = self.getOne('torrent_file_name', torrent_id=torrent_id) + src = os.path.join(torrent_dir, torrent_name) + if not os.path.exists(src): # already removed + return True + + try: + os.remove(src) + except Exception, msg: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "cachedbhandler: failed to erase torrent", src, Exception, msg + return False + + return True + + def getTracker(self, infohash, tier=0): + torrent_id = self._db.getTorrentID(infohash) + if torrent_id is not None: + sql = "SELECT tracker, announce_tier FROM TorrentTracker WHERE torrent_id==%d"%torrent_id + if tier > 0: + sql += " AND announce_tier<=%d"%tier + return self._db.fetchall(sql) + + def getTorrentDir(self): + return self.torrent_dir + + + def getTorrent(self, infohash, keys=None, include_mypref=True): + # to do: replace keys like source -> source_id and status-> status_id ?? + + if keys is None: + keys = deepcopy(self.value_name) + #('torrent_id', 'category_id', 'status_id', 'name', 'creation_date', 'num_files', + # 'num_leechers', 'num_seeders', 'length', + # 'secret', 'insert_time', 'source_id', 'torrent_file_name', + # 'relevance', 'infohash', 'torrent_id') + else: + keys = list(keys) + where = 'C.torrent_id = T.torrent_id and announce_tier=1 ' + + res = self._db.getOne('CollectedTorrent C, TorrentTracker T', keys, where=where, infohash=bin2str(infohash)) + if not res: + return None + torrent = dict(zip(keys, res)) + if 'source_id' in torrent: + torrent['source'] = self.id2src[torrent['source_id']] + del torrent['source_id'] + if 'category_id' in torrent: + torrent['category'] = [self.id2category[torrent['category_id']]] + del torrent['category_id'] + if 'status_id' in torrent: + torrent['status'] = self.id2status[torrent['status_id']] + del torrent['status_id'] + torrent['infohash'] = infohash + if 'last_check' in torrent: + torrent['last_check_time'] = torrent['last_check'] + del torrent['last_check'] + + if include_mypref: + tid = torrent['C.torrent_id'] + stats = self.mypref_db.getMyPrefStats(tid) + del torrent['C.torrent_id'] + if stats: + torrent['myDownloadHistory'] = True + torrent['creation_time'] = stats[tid][0] + torrent['progress'] = stats[tid][1] + torrent['destination_path'] = stats[tid][2] + + + return torrent + + def getNumberTorrents(self, category_name = 'all', library = False): + table = 'CollectedTorrent' + value = 'count(torrent_id)' + where = '1 ' + + if category_name != 'all': + where += ' and category_id= %d' % self.category_table.get(category_name.lower(), -1) # unkown category_name returns no torrents + if library: + where += ' and torrent_id in (select torrent_id from MyPreference where destination_path != "")' + else: + where += ' and status_id=%d ' % self.status_table['good'] + # add familyfilter + where += self.category.get_family_filter_sql(self._getCategoryID) + + number = self._db.getOne(table, value, where) + if not number: + 
number = 0 + return number + + def getTorrents(self, category_name = 'all', range = None, library = False, sort = None, reverse = False): + """ + get Torrents of some category and with alive status (opt. not in family filter) + + @return Returns a list of dicts with keys: + torrent_id, infohash, name, category, status, creation_date, num_files, num_leechers, num_seeders, + length, secret, insert_time, source, torrent_filename, relevance, simRank, tracker, last_check + (if in library: myDownloadHistory, download_started, progress, dest_dir) + + """ + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'TorrentDBHandler: getTorrents(%s, %s, %s, %s, %s)' % (category_name, range, library, sort, reverse) + s = time() + + value_name = deepcopy(self.value_name) + + where = 'T.torrent_id = C.torrent_id and announce_tier=1 ' + + if category_name != 'all': + where += ' and category_id= %d' % self.category_table.get(category_name.lower(), -1) # unkown category_name returns no torrents + if library: + if sort in value_name: + where += ' and C.torrent_id in (select torrent_id from MyPreference where destination_path != "")' + else: + value_name[0] = 'C.torrent_id' + where += ' and C.torrent_id = M.torrent_id and announce_tier=1' + else: + where += ' and status_id=%d ' % self.status_table['good'] # if not library, show only good files + # add familyfilter + where += self.category.get_family_filter_sql(self._getCategoryID) + if range: + offset= range[0] + limit = range[1] - range[0] + else: + limit = offset = None + if sort: + # Arno, 2008-10-6: buggy: not reverse??? + desc = (reverse) and 'desc' or '' + if sort in ('name'): + order_by = ' lower(%s) %s' % (sort, desc) + else: + order_by = ' %s %s' % (sort, desc) + else: + order_by = None + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentDBHandler: GET TORRENTS val",value_name,"where",where,"limit",limit,"offset",offset,"order",order_by + #print_stack + + # Must come before query + ranks = self.getRanks() + + #self._db.show_execute = True + if library and sort not in value_name: + res_list = self._db.getAll('CollectedTorrent C, MyPreference M, TorrentTracker T', value_name, where, limit=limit, offset=offset, order_by=order_by) + else: + res_list = self._db.getAll('CollectedTorrent C, TorrentTracker T', value_name, where, limit=limit, offset=offset, order_by=order_by) + #self._db.show_execute = False + + mypref_stats = self.mypref_db.getMyPrefStats() + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentDBHandler: getTorrents: getAll returned ###################",len(res_list) + + torrent_list = self.valuelist2torrentlist(value_name,res_list,ranks,mypref_stats) + del res_list + del mypref_stats + return torrent_list + + def valuelist2torrentlist(self,value_name,res_list,ranks,mypref_stats): + + torrent_list = [] + for item in res_list: + value_name[0] = 'torrent_id' + torrent = dict(zip(value_name, item)) + + try: + torrent['source'] = self.id2src[torrent['source_id']] + except: + print_exc() + # Arno: RSS subscription and id2src issue + torrent['source'] = 'http://some/RSS/feed' + + torrent['category'] = [self.id2category[torrent['category_id']]] + torrent['status'] = self.id2status[torrent['status_id']] + torrent['simRank'] = ranksfind(ranks,torrent['infohash']) + torrent['infohash'] = str2bin(torrent['infohash']) + #torrent['num_swarm'] = torrent['num_seeders'] + torrent['num_leechers'] + torrent['last_check_time'] = torrent['last_check'] + del torrent['last_check'] + del 
torrent['source_id'] + del torrent['category_id'] + del torrent['status_id'] + torrent_id = torrent['torrent_id'] + if mypref_stats is not None and torrent_id in mypref_stats: + # add extra info for torrent in mypref + torrent['myDownloadHistory'] = True + data = mypref_stats[torrent_id] #(create_time,progress,destdir) + torrent['download_started'] = data[0] + torrent['progress'] = data[1] + torrent['destdir'] = data[2] + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentDBHandler: GET TORRENTS",`torrent` + + torrent_list.append(torrent) + return torrent_list + + def getRanks(self,): + value_name = 'infohash' + order_by = 'relevance desc' + rankList_size = 20 + where = 'status_id=%d ' % self.status_table['good'] + res_list = self._db.getAll('Torrent', value_name, where = where, limit=rankList_size, order_by=order_by) + return [a[0] for a in res_list] + + def getNumberCollectedTorrents(self): + #return self._db.size('CollectedTorrent') + return self._db.getOne('CollectedTorrent', 'count(torrent_id)') + + def freeSpace(self, torrents2del): +# if torrents2del > 100: # only delete so many torrents each time +# torrents2del = 100 + sql = """ + select torrent_file_name, torrent_id, infohash, relevance, + min(relevance,2500) + min(500,num_leechers) + 4*min(500,num_seeders) - (max(0,min(500,(%d-creation_date)/86400)) ) as weight + from CollectedTorrent + where torrent_id not in (select torrent_id from MyPreference) + order by weight + limit %d + """ % (int(time()), torrents2del) + res_list = self._db.fetchall(sql) + if len(res_list) == 0: + return False + + # delete torrents from db + sql_del_torrent = "delete from Torrent where torrent_id=?" + sql_del_tracker = "delete from TorrentTracker where torrent_id=?" + sql_del_pref = "delete from Preference where torrent_id=?" + tids = [(torrent_id,) for torrent_file_name, torrent_id, infohash, relevance, weight in res_list] + + self._db.executemany(sql_del_torrent, tids, commit=False) + self._db.executemany(sql_del_tracker, tids, commit=False) + self._db.executemany(sql_del_pref, tids, commit=False) + + self._db.commit() + + # but keep the infohash in db to maintain consistence with preference db + #torrent_id_infohashes = [(torrent_id,infohash_str,relevance) for torrent_file_name, torrent_id, infohash_str, relevance, weight in res_list] + #sql_insert = "insert into Torrent (torrent_id, infohash, relevance) values (?,?,?)" + #self._db.executemany(sql_insert, torrent_id_infohashes, commit=True) + + torrent_dir = self.getTorrentDir() + deleted = 0 # deleted any file? 
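+        # Rough sketch of the ranking used by the SQL above, in plain Python
+        # (function name and sample numbers are hypothetical):
+        #
+        #   def collect_weight(relevance, num_leechers, num_seeders, creation_date, now):
+        #       age_days = max(0, min(500, (now - creation_date) / 86400))
+        #       return min(relevance, 2500) + min(500, num_leechers) + 4 * min(500, num_seeders) - age_days
+        #
+        # e.g. relevance=100, 10 leechers, 20 seeders, 2 days old gives
+        # 100 + 10 + 80 - 2 = 188; candidates with the lowest weight are
+        # removed first (ORDER BY weight LIMIT torrents2del).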
+ for torrent_file_name, torrent_id, infohash, relevance, weight in res_list: + torrent_path = os.path.join(torrent_dir, torrent_file_name) + try: + os.remove(torrent_path) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Erase torrent:", os.path.basename(torrent_path) + deleted += 1 + except Exception, msg: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Error in erase torrent", Exception, msg + pass + + self.notifier.notify(NTFY_TORRENTS, NTFY_DELETE, str2bin(infohash)) # refresh gui + + return deleted + + def hasMetaData(self, infohash): + return self.hasTorrent(infohash) + + def getTorrentRelevances(self, tids): + sql = 'SELECT torrent_id, relevance from Torrent WHERE torrent_id in ' + str(tuple(tids)) + return self._db.fetchall(sql) + + def updateTorrentRelevance(self, infohash, relevance): + self.updateTorrent(infohash, relevance=relevance) + + def updateTorrentRelevances(self, tid_rel_pairs, commit=True): + if len(tid_rel_pairs) > 0: + sql_update_sims = 'UPDATE Torrent SET relevance=? WHERE torrent_id=?' + self._db.executemany(sql_update_sims, tid_rel_pairs, commit=commit) + + def searchNames(self,kws): + """ Get all torrents (good and bad) that have the specified keywords in + their name. Return a list of dictionaries. Each dict is in the + NEWDBSTANDARD format. + @param kws A list of keyword strings + @return A list of dictionaries. + """ + + mypref_stats = self.mypref_db.getMyPrefStats() + + where = 'C.torrent_id = T.torrent_id and announce_tier=1' + for i in range(len(kws)): + kw = kws[i] + # Strip special chars. Note that s.translate() does special stuff for Unicode, which we don't want + cleankw = '' + for i in range(0,len(kw)): + c = kw[i] + if c.isalnum(): + cleankw += c + + where += ' and name like "%'+cleankw+'%"' + + value_name = copy(self.value_name) + if 'torrent_id' in value_name: + index = value_name.index('torrent_id') + value_name.remove('torrent_id') + value_name.insert(index, 'C.torrent_id') + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","torrent_db: searchNames: where",where + res_list = self._db.getAll('CollectedTorrent C, TorrentTracker T', value_name, where) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","torrent_db: searchNames: res",`res_list` + + torrent_list = self.valuelist2torrentlist(value_name,res_list,None,mypref_stats) + del res_list + del mypref_stats + + return torrent_list + + + def selectTorrentToCollect(self, permid, candidate_list=None): + """ select a torrent to collect from a given candidate list + If candidate_list is not present or None, all torrents of + this peer will be used for sampling. + Return: the infohashed of selected torrent + """ + + if candidate_list is None: + sql = """ + select infohash + from Torrent,Peer,Preference + where Peer.permid==? 
+ and Peer.peer_id==Preference.peer_id + and Torrent.torrent_id==Preference.torrent_id + and torrent_file_name is NULL + order by relevance desc + """ + permid_str = bin2str(permid) + res = self._db.fetchone(sql, (permid_str,)) + else: + cand_str = [bin2str(infohash) for infohash in candidate_list] + s = repr(cand_str).replace('[','(').replace(']',')') + sql = 'select infohash from Torrent where torrent_file_name is NULL and infohash in ' + s + sql += ' order by relevance desc' + res = self._db.fetchone(sql) + if res is None: + return None + return str2bin(res) + + def selectTorrentToCheck(self, policy='random', infohash=None, return_value=None): # for tracker checking + """ select a torrent to update tracker info (number of seeders and leechers) + based on the torrent checking policy. + RETURN: a dictionary containing all useful info. + + Policy 1: Random [policy='random'] + Randomly select a torrent to collect (last_check < 5 min ago) + + Policy 2: Oldest (unknown) first [policy='oldest'] + Select the non-dead torrent which was not been checked for the longest time (last_check < 5 min ago) + + Policy 3: Popular first [policy='popular'] + Select the non-dead most popular (3*num_seeders+num_leechers) one which has not been checked in last N seconds + (The default N = 4 hours, so at most 4h/torrentchecking_interval popular peers) + """ + + #import threading + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "****** selectTorrentToCheck", threading.currentThread().getName() + + if infohash is None: + # create a view? + sql = """select T.torrent_id, ignored_times, retried_times, torrent_file_name, infohash, status_id, num_seeders, num_leechers, last_check + from CollectedTorrent T, TorrentTracker TT + where TT.torrent_id=T.torrent_id and announce_tier=1 """ + if policy.lower() == 'random': + ntorrents = self.getNumberCollectedTorrents() + if ntorrents == 0: + rand_pos = 0 + else: + rand_pos = randint(0, ntorrents-1) + last_check_threshold = int(time()) - 300 + sql += """and last_check < %d + limit 1 offset %d """%(last_check_threshold, rand_pos) + elif policy.lower() == 'oldest': + last_check_threshold = int(time()) - 300 + sql += """ and last_check < %d and status_id <> 2 + order by last_check + limit 1 """%last_check_threshold + elif policy.lower() == 'popular': + last_check_threshold = int(time()) - 4*60*60 + sql += """ and last_check < %d and status_id <> 2 + order by 3*num_seeders+num_leechers desc + limit 1 """%last_check_threshold + res = self._db.fetchone(sql) + else: + sql = """select T.torrent_id, ignored_times, retried_times, torrent_file_name, infohash, status_id, num_seeders, num_leechers, last_check + from CollectedTorrent T, TorrentTracker TT + where TT.torrent_id=T.torrent_id and announce_tier=1 + and infohash=? + """ + infohash_str = bin2str(infohash) + res = self._db.fetchone(sql, (infohash_str,)) + + if res: + torrent_file_name = res[3] + torrent_dir = self.getTorrentDir() + torrent_path = os.path.join(torrent_dir, torrent_file_name) + if res is not None: + res = {'torrent_id':res[0], + 'ignored_times':res[1], + 'retried_times':res[2], + 'torrent_path':torrent_path, + 'infohash':str2bin(res[4]) + } + return_value['torrent'] = res + return_value['event'].set() + + + def getTorrentsFromSource(self,source): + """ Get all torrents from the specified Subscription source. + Return a list of dictionaries. Each dict is in the NEWDBSTANDARD format. 
+ """ + id = self._getSourceID(source) + + where = 'C.source_id = %d and C.torrent_id = T.torrent_id and announce_tier=1' % (id) + # add familyfilter + where += self.category.get_family_filter_sql(self._getCategoryID) + + value_name = deepcopy(self.value_name) + + res_list = self._db.getAll('Torrent C, TorrentTracker T', value_name, where) + + torrent_list = self.valuelist2torrentlist(value_name,res_list,None,None) + del res_list + + return torrent_list + + + def setSecret(self,infohash,secret): + kw = {'secret': secret} + self.updateTorrent(infohash, updateFlag=True, **kw) + + +class MyPreferenceDBHandler(BasicDBHandler): + + __single = None # used for multithreaded singletons pattern + lock = threading.Lock() + + def getInstance(*args, **kw): + # Singleton pattern with double-checking + if MyPreferenceDBHandler.__single is None: + MyPreferenceDBHandler.lock.acquire() + try: + if MyPreferenceDBHandler.__single is None: + MyPreferenceDBHandler(*args, **kw) + finally: + MyPreferenceDBHandler.lock.release() + return MyPreferenceDBHandler.__single + + getInstance = staticmethod(getInstance) + + def __init__(self): + if MyPreferenceDBHandler.__single is not None: + raise RuntimeError, "MyPreferenceDBHandler is singleton" + MyPreferenceDBHandler.__single = self + db = SQLiteCacheDB.getInstance() + BasicDBHandler.__init__(self,db, 'MyPreference') ## self,db,'MyPreference' + + self.status_table = {'good':1, 'unknown':0, 'dead':2} + self.status_table.update(self._db.getTorrentStatusTable()) + self.status_good = self.status_table['good'] + self.recent_preflist = None + self.recent_preflist_with_clicklog = None + self.rlock = threading.RLock() + + + def loadData(self): + self.rlock.acquire() + try: + self.recent_preflist = self._getRecentLivePrefList() + self.recent_preflist_with_clicklog = self._getRecentLivePrefListWithClicklog() + finally: + self.rlock.release() + + def getMyPrefList(self, order_by=None): + res = self.getAll('torrent_id', order_by=order_by) + return [p[0] for p in res] + + def getMyPrefListInfohash(self): + sql = 'select infohash from Torrent where torrent_id in (select torrent_id from MyPreference)' + res = self._db.fetchall(sql) + return [str2bin(p[0]) for p in res] + + def getMyPrefStats(self, torrent_id=None): + # get the full {torrent_id:(create_time,progress,destdir)} + value_name = ('torrent_id','creation_time','progress','destination_path') + if torrent_id is not None: + where = 'torrent_id=%s' % torrent_id + else: + where = None + res = self.getAll(value_name, where) + mypref_stats = {} + for pref in res: + torrent_id,creation_time,progress,destination_path = pref + mypref_stats[torrent_id] = (creation_time,progress,destination_path) + return mypref_stats + + def getCreationTime(self, infohash): + torrent_id = self._db.getTorrentID(infohash) + if torrent_id is not None: + ct = self.getOne('creation_time', torrent_id=torrent_id) + return ct + else: + return None + + def getRecentLivePrefListWithClicklog(self, num=0): + """returns OL 8 style preference list: a list of lists, with each of the inner lists + containing infohash, search terms, click position, and reranking strategy""" + + if self.recent_preflist_with_clicklog is None: + self.rlock.acquire() + try: + if self.recent_preflist_with_clicklog is None: + self.recent_preflist_with_clicklog = self._getRecentLivePrefListWithClicklog() + finally: + self.rlock.release() + if num > 0: + return self.recent_preflist_with_clicklog[:num] + else: + return self.recent_preflist_with_clicklog + + + def getRecentLivePrefList(self, 
num=0): + if self.recent_preflist is None: + self.rlock.acquire() + try: + if self.recent_preflist is None: + self.recent_preflist = self._getRecentLivePrefList() + finally: + self.rlock.release() + if num > 0: + return self.recent_preflist[:num] + else: + return self.recent_preflist + + + + def addClicklogToMyPreference(self, infohash, clicklog_data, commit=True): + torrent_id = self._db.getTorrentID(infohash) + clicklog_already_stored = False # equivalent to hasMyPreference TODO + if torrent_id is None or clicklog_already_stored: + return False + + d = {} + # copy those elements of the clicklog data which are used in the update command + for clicklog_key in ["click_position", "reranking_strategy"]: + if clicklog_key in clicklog_data: + d[clicklog_key] = clicklog_data[clicklog_key] + + if d=={}: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "no updatable information given to addClicklogToMyPreference" + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "addClicklogToMyPreference: updatable clicklog data: %s" % d + self._db.update(self.table_name, 'torrent_id=%d' % torrent_id, commit=commit, **d) + + # have keywords stored by SearchDBHandler + if 'keywords' in clicklog_data: + if not clicklog_data['keywords']==[]: + searchdb = SearchDBHandler.getInstance() + searchdb.storeKeywords(peer_id=0, + torrent_id=torrent_id, + terms=clicklog_data['keywords'], + commit=commit) + + + + + + + + + def _getRecentLivePrefListWithClicklog(self, num=0): + """returns a list containing a list for each torrent: [infohash, [seach terms], click position, reranking strategy]""" + + sql = """ + select infohash, click_position, reranking_strategy, m.torrent_id from MyPreference m, Torrent t + where m.torrent_id == t.torrent_id + and status_id == %d + order by creation_time desc + """ % self.status_good + + recent_preflist_with_clicklog = self._db.fetchall(sql) + if recent_preflist_with_clicklog is None: + recent_preflist_with_clicklog = [] + else: + recent_preflist_with_clicklog = [[str2bin(t[0]), + t[3], # insert search terms in next step, only for those actually required, store torrent id for now + t[1], # click position + t[2]] # reranking strategy + for t in recent_preflist_with_clicklog] + + if num != 0: + recent_preflist_with_clicklog = recent_preflist_with_clicklog[:num] + + # now that we only have those torrents left in which we are actually interested, + # replace torrent id by user's search terms for torrent id + termdb = TermDBHandler.getInstance() + searchdb = SearchDBHandler.getInstance() + for pref in recent_preflist_with_clicklog: + torrent_id = pref[1] + search_terms = searchdb.getMyTorrentSearchTerms(torrent_id) + pref[1] = [termdb.getTerm(search_term) for search_term in search_terms] + + return recent_preflist_with_clicklog + + + def _getRecentLivePrefList(self, num=0): # num = 0: all files + # get recent and live torrents + sql = """ + select infohash from MyPreference m, Torrent t + where m.torrent_id == t.torrent_id + and status_id == %d + order by creation_time desc + """ % self.status_good + + recent_preflist = self._db.fetchall(sql) + if recent_preflist is None: + recent_preflist = [] + else: + recent_preflist = [str2bin(t[0]) for t in recent_preflist] + + if num != 0: + return recent_preflist[:num] + else: + return recent_preflist + + def hasMyPreference(self, infohash): + torrent_id = self._db.getTorrentID(infohash) + if torrent_id is None: + return False + res = self.getOne('torrent_id', torrent_id=torrent_id) + if 
res is not None: + return True + else: + return False + + def addMyPreference(self, infohash, data, commit=True): + # keys in data: destination_path, progress, creation_time, torrent_id + torrent_id = self._db.getTorrentID(infohash) + if torrent_id is None or self.hasMyPreference(infohash): + # Arno, 2009-03-09: Torrent already exists in myrefs. + # Hack for hiding from lib while keeping in myprefs. + # see standardOverview.removeTorrentFromLibrary() + # + self.updateDestDir(infohash,data.get('destination_path'),commit=commit) + return False + d = {} + d['destination_path'] = data.get('destination_path') + d['progress'] = data.get('progress', 0) + d['creation_time'] = data.get('creation_time', int(time())) + d['torrent_id'] = torrent_id + self._db.insert(self.table_name, commit=commit, **d) + self.notifier.notify(NTFY_MYPREFERENCES, NTFY_INSERT, infohash) + self.rlock.acquire() + try: + if self.recent_preflist is None: + self.recent_preflist = self._getRecentLivePrefList() + else: + self.recent_preflist.insert(0, infohash) + finally: + self.rlock.release() + return True + + def deletePreference(self, infohash, commit=True): + # Arno: when deleting a preference, you may also need to do + # some stuff in BuddyCast: see delMyPref() + torrent_id = self._db.getTorrentID(infohash) + if torrent_id is None: + return + self._db.delete(self.table_name, commit=commit, **{'torrent_id':torrent_id}) + self.notifier.notify(NTFY_MYPREFERENCES, NTFY_DELETE, infohash) + self.rlock.acquire() + try: + if self.recent_preflist is not None and infohash in self.recent_preflist: + self.recent_preflist.remove(infohash) + finally: + self.rlock.release() + + + def updateProgress(self, infohash, progress, commit=True): + torrent_id = self._db.getTorrentID(infohash) + if torrent_id is None: + return + self._db.update(self.table_name, 'torrent_id=%d'%torrent_id, commit=commit, progress=progress) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '********* update progress', `infohash`, progress, commit + + def getAllEntries(self): + """use with caution,- for testing purposes""" + return self.getAll("torrent_id, click_position, reranking_strategy", order_by="torrent_id") + + def updateDestDir(self, infohash, destdir, commit=True): + torrent_id = self._db.getTorrentID(infohash) + if torrent_id is None: + return + self._db.update(self.table_name, 'torrent_id=%d'%torrent_id, commit=commit, destination_path=destdir) + + +# def getAllTorrentCoccurrence(self): +# # should be placed in PreferenceDBHandler, but put here to be convenient for TorrentCollecting +# sql = """select torrent_id, count(torrent_id) as coocurrency from Preference where peer_id in +# (select peer_id from Preference where torrent_id in +# (select torrent_id from MyPreference)) and torrent_id not in +# (select torrent_id from MyPreference) +# group by torrent_id +# """ +# coccurrence = dict(self._db.fetchall(sql)) +# return coccurrence + + +class BarterCastDBHandler(BasicDBHandler): + + __single = None # used for multithreaded singletons pattern + lock = threading.Lock() + + def getInstance(*args, **kw): + + if BarterCastDBHandler.__single is None: + BarterCastDBHandler.lock.acquire() + try: + if BarterCastDBHandler.__single is None: + BarterCastDBHandler(*args, **kw) + finally: + BarterCastDBHandler.lock.release() + return BarterCastDBHandler.__single + + getInstance = staticmethod(getInstance) + + def __init__(self): + BarterCastDBHandler.__single = self + db = SQLiteCacheDB.getInstance() + BasicDBHandler.__init__(self, db,'BarterCast') 
## self,db,'BarterCast' + self.peer_db = PeerDBHandler.getInstance() + + # create the maxflow network + self.network = Network({}) + self.update_network() + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bartercastdb: MyPermid is ", self.my_permid + + + ##def registerSession(self, session): + ## self.session = session + + # Retrieve MyPermid + ## self.my_permid = session.get_permid() + + + def registerSession(self, session): + self.session = session + + # Retrieve MyPermid + self.my_permid = session.get_permid() + + if self.my_permid is None: + raise ValueError('Cannot get permid from Session') + + # Keep administration of total upload and download + # (to include in BarterCast message) + self.my_peerid = self.getPeerID(self.my_permid) + + if self.my_peerid != None: + where = "peer_id_from=%s" % (self.my_peerid) + item = self.getOne(('sum(uploaded)', 'sum(downloaded)'), where=where) + else: + item = None + + if item != None and len(item) == 2 and item[0] != None and item[1] != None: + self.total_up = int(item[0]) + self.total_down = int(item[1]) + else: + self.total_up = 0 + self.total_down = 0 + +# if DEBUG: +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "My reputation: ", self.getMyReputation() + + + def getTotals(self): + return (self.total_up, self.total_down) + + def getName(self, permid): + + if permid == 'non-tribler': + return "non-tribler" + elif permid == self.my_permid: + return "local_tribler" + + name = self.peer_db.getPeer(permid, 'name') + + if name == None or name == '': + return 'peer %s' % show_permid_shorter(permid) + else: + return name + + def getNameByID(self, peer_id): + permid = self.getPermid(peer_id) + return self.getName(permid) + + + def getPermid(self, peer_id): + + # by convention '-1' is the id of non-tribler peers + if peer_id == -1: + return 'non-tribler' + else: + return self.peer_db.getPermid(peer_id) + + + def getPeerID(self, permid): + + # by convention '-1' is the id of non-tribler peers + if permid == "non-tribler": + return -1 + else: + return self.peer_db.getPeerID(permid) + + def getItem(self, (permid_from, permid_to), default=False): + + # ARNODB: now converting back to dbid! just did reverse in getItemList + peer_id1 = self.getPeerID(permid_from) + peer_id2 = self.getPeerID(permid_to) + + if peer_id1 is None: + self._db.insertPeer(permid_from) # ARNODB: database write + peer_id1 = self.getPeerID(permid_from) # ARNODB: database write + + if peer_id2 is None: + self._db.insertPeer(permid_to) + peer_id2 = self.getPeerID(permid_to) + + return self.getItemByIDs((peer_id1,peer_id2),default=default) + + + def getItemByIDs(self, (peer_id_from, peer_id_to), default=False): + if peer_id_from is not None and peer_id_to is not None: + + where = "peer_id_from=%s and peer_id_to=%s" % (peer_id_from, peer_id_to) + item = self.getOne(('downloaded', 'uploaded', 'last_seen'), where=where) + + if item is None: + return None + + if len(item) != 3: + return None + + itemdict = {} + itemdict['downloaded'] = item[0] + itemdict['uploaded'] = item[1] + itemdict['last_seen'] = item[2] + itemdict['peer_id_from'] = peer_id_from + itemdict['peer_id_to'] = peer_id_to + + return itemdict + + else: + return None + + + def getItemList(self): # get the list of all peers' permid + + keys = self.getAll(('peer_id_from','peer_id_to')) + # ARNODB: this dbid -> permid translation is more efficiently done + # on the final top-N list. 
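+        # Sketch of the deferred translation suggested above (sample ids are
+        # hypothetical): instead of mapping every stored pair, only the final
+        # top-N tuples would be converted, e.g.
+        #
+        #   top_ids = [(42, 1024, 512), (7, 900, 300)]   # (peer_id, up, down)
+        #   top_permids = [(self.getPermid(pid), up, down) for (pid, up, down) in top_ids]
+        #
+        # getTopNPeers() below builds 'permidtop' in essentially this way.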
+ keys = map(lambda (id_from, id_to): (self.getPermid(id_from), self.getPermid(id_to)), keys) + return keys + + + def addItem(self, (permid_from, permid_to), item, commit=True): + +# if value.has_key('last_seen'): # get the latest last_seen +# old_last_seen = 0 +# old_data = self.getPeer(permid) +# if old_data: +# old_last_seen = old_data.get('last_seen', 0) +# last_seen = value['last_seen'] +# value['last_seen'] = max(last_seen, old_last_seen) + + # get peer ids + peer_id1 = self.getPeerID(permid_from) + peer_id2 = self.getPeerID(permid_to) + + # check if they already exist in database; if not: add + if peer_id1 is None: + self._db.insertPeer(permid_from) + peer_id1 = self.getPeerID(permid_from) + if peer_id2 is None: + self._db.insertPeer(permid_to) + peer_id2 = self.getPeerID(permid_to) + + item['peer_id_from'] = peer_id1 + item['peer_id_to'] = peer_id2 + + self._db.insert(self.table_name, commit=commit, **item) + + def updateItem(self, (permid_from, permid_to), key, value, commit=True): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bartercastdb: update (%s, %s) [%s] += %s" % (self.getName(permid_from), self.getName(permid_to), key, str(value)) + + itemdict = self.getItem((permid_from, permid_to)) + + # if item doesn't exist: add it + if itemdict == None: + self.addItem((permid_from, permid_to), {'uploaded':0, 'downloaded': 0, 'last_seen': int(time())}, commit=True) + itemdict = self.getItem((permid_from, permid_to)) + + # get peer ids + peer_id1 = itemdict['peer_id_from'] + peer_id2 = itemdict['peer_id_to'] + + if key in itemdict.keys(): + + where = "peer_id_from=%s and peer_id_to=%s" % (peer_id1, peer_id2) + item = {key: value} + self._db.update(self.table_name, where = where, commit=commit, **item) + + def incrementItem(self, (permid_from, permid_to), key, value, commit=True): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bartercastdb: increment (%s, %s) [%s] += %s" % (self.getName(permid_from), self.getName(permid_to), key, str(value)) + + # adjust total_up and total_down + if permid_from == self.my_permid: + if key == 'uploaded': + self.total_up += int(value) + if key == 'downloaded': + self.total_down += int(value) + + itemdict = self.getItem((permid_from, permid_to)) + + # if item doesn't exist: add it + if itemdict == None: + self.addItem((permid_from, permid_to), {'uploaded':0, 'downloaded': 0, 'last_seen': int(time())}, commit=True) + itemdict = self.getItem((permid_from, permid_to)) + + # get peer ids + peer_id1 = itemdict['peer_id_from'] + peer_id2 = itemdict['peer_id_to'] + + if key in itemdict.keys(): + old_value = itemdict[key] + new_value = old_value + value + + where = "peer_id_from=%s and peer_id_to=%s" % (peer_id1, peer_id2) + + item = {key: new_value} + self._db.update(self.table_name, where = where, commit=commit, **item) + return new_value + + return None + + def addPeersBatch(self,permids): + """ Add unknown permids as batch -> single transaction """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bartercastdb: addPeersBatch: n=",len(permids) + + for permid in permids: + peer_id = self.getPeerID(permid) + # check if they already exist in database; if not: add + if peer_id is None: + self._db.insertPeer(permid,commit=False) + self._db.commit() + + def updateULDL(self, (permid_from, permid_to), ul, dl, commit=True): + """ Add ul/dl record to database as a single write """ + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 
"bartercastdb: updateULDL (%s, %s) ['ul'] += %s ['dl'] += %s" % (self.getName(permid_from), self.getName(permid_to), str(ul), str(dl)) + + itemdict = self.getItem((permid_from, permid_to)) + + # if item doesn't exist: add it + if itemdict == None: + itemdict = {'uploaded':ul, 'downloaded': dl, 'last_seen': int(time())} + self.addItem((permid_from, permid_to), itemdict, commit=commit) + return + + # get peer ids + peer_id1 = itemdict['peer_id_from'] + peer_id2 = itemdict['peer_id_to'] + + if 'uploaded' in itemdict.keys() and 'downloaded' in itemdict.keys(): + where = "peer_id_from=%s and peer_id_to=%s" % (peer_id1, peer_id2) + item = {'uploaded': ul, 'downloaded':dl} + self._db.update(self.table_name, where = where, commit=commit, **item) + + def getPeerIDPairs(self): + keys = self.getAll(('peer_id_from','peer_id_to')) + return keys + + def getTopNPeers(self, n, local_only = False): + """ + Return (sorted) list of the top N peers with the highest (combined) + values for the given keys. This version uses batched reads and peer_ids + in calculation + @return a dict containing a 'top' key with a list of (permid,up,down) + tuples, a 'total_up', 'total_down', 'tribler_up', 'tribler_down' field. + Sizes are in kilobytes. + """ + + # TODO: this won't scale to many interactions, as the size of the DB + # is NxN + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bartercastdb: getTopNPeers: local = ", local_only + #print_stack() + + n = max(1, n) + my_peer_id = self.getPeerID(self.my_permid) + total_up = {} + total_down = {} + # Arno, 2008-10-30: I speculate this is to count transfers only once, + # i.e. the DB stored (a,b) and (b,a) and we want to count just one. + + processed = Set() + + + value_name = '*' + increment = 500 + + nrecs = self.size() + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","NEXTtopN: size is",nrecs + + for offset in range(0,nrecs,increment): + if offset+increment > nrecs: + limit = nrecs-offset + else: + limit = increment + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","NEXTtopN: get",offset,limit + + reslist = self.getAll(value_name, offset=offset, limit=limit) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","NEXTtopN: res len is",len(reslist),`reslist` + for res in reslist: + (peer_id_from,peer_id_to,downloaded,uploaded,last_seen,value) = res + + if local_only: + if not (peer_id_to == my_peer_id or peer_id_from == my_peer_id): + # get only items of my local dealings + continue + + if (not (peer_id_to, peer_id_from) in processed) and (not peer_id_to == peer_id_from): + #if (not peer_id_to == peer_id_from): + + up = uploaded *1024 # make into bytes + down = downloaded *1024 + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bartercastdb: getTopNPeers: DB entry: (%s, %s) up = %d down = %d" % (self.getNameByID(peer_id_from), self.getNameByID(peer_id_to), up, down) + + processed.add((peer_id_from, peer_id_to)) + + # fix for multiple my_permids + if peer_id_from == -1: # 'non-tribler': + peer_id_to = my_peer_id + if peer_id_to == -1: # 'non-tribler': + peer_id_from = my_peer_id + + # process peer_id_from + total_up[peer_id_from] = total_up.get(peer_id_from, 0) + up + total_down[peer_id_from] = total_down.get(peer_id_from, 0) + down + + # process peer_id_to + total_up[peer_id_to] = total_up.get(peer_id_to, 0) + down + total_down[peer_id_to] = total_down.get(peer_id_to, 0) + up + + + # create top N peers + top = [] + min = 0 + + for peer_id in 
total_up.keys(): + + up = total_up[peer_id] + down = total_down[peer_id] + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bartercastdb: getTopNPeers: total of %s: up = %d down = %d" % (self.getName(peer_id), up, down) + + # we know rank on total upload? + value = up + + # check if peer belongs to current top N + if peer_id != -1 and peer_id != my_peer_id and (len(top) < n or value > min): + + top.append((peer_id, up, down)) + + # sort based on value + top.sort(cmp = lambda (p1, u1, d1), (p2, u2, d2): cmp(u2, u1)) + + # if list contains more than N elements: remove the last (=lowest value) + if len(top) > n: + del top[-1] + + # determine new minimum of values + min = top[-1][1] + + # Now convert to permid + permidtop = [] + for peer_id,up,down in top: + permid = self.getPermid(peer_id) + permidtop.append((permid,up,down)) + + result = {} + + result['top'] = permidtop + + # My total up and download, including interaction with non-tribler peers + result['total_up'] = total_up.get(my_peer_id, 0) + result['total_down'] = total_down.get(my_peer_id, 0) + + # My up and download with tribler peers only + result['tribler_up'] = result['total_up'] - total_down.get(-1, 0) # -1 = 'non-tribler' + result['tribler_down'] = result['total_down'] - total_up.get(-1, 0) # -1 = 'non-tribler' + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", result + + return result + + + ################################ + def update_network(self): + + + keys = self.getPeerIDPairs() #getItemList() + + + ################################ + def getMyReputation(self, alpha = ALPHA): + + rep = atan((self.total_up - self.total_down) * alpha)/(0.5 * pi) + return rep + + + + + + + +class ModerationCastDBHandler(BasicDBHandler): + + __single = None # used for multithreaded singletons pattern + lock = threading.Lock() + + def getInstance(*args, **kw): + + if ModerationCastDBHandler.__single is None: + ModerationCastDBHandler.lock.acquire() + try: + if ModerationCastDBHandler.__single is None: + ModerationCastDBHandler(*args, **kw) + finally: + ModerationCastDBHandler.lock.release() + return ModerationCastDBHandler.__single + + getInstance = staticmethod(getInstance) + + def __init__(self): + ModerationCastDBHandler.__single = self + try: + db = SQLiteCacheDB.getInstance() + BasicDBHandler.__init__(self,db,'ModerationCast') + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "modcast: DB made" + except: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "modcast: couldn't create DB table" + self.peer_db = PeerDBHandler.getInstance() + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "MODERATIONCAST: MyPermid is ", self.my_permid + + def registerSession(self, session): + self.session = session + self.my_permid = session.get_permid() + + def __len__(self): + return sum([db._size() for db in self.dbs]) + + def getAll(self): + sql = 'select * from ModerationCast' + records = self._db.fetchall(sql) + return records + + def getAllModerations(self, permid): + sql = 'select * from ModerationCast where mod_id==?' + records = self._db.fetchall(sql, (permid,)) + return records + + def getModeration(self, infohash): + #assert validInfohash(infohash) + sql = 'select * from ModerationCast where infohash==?' #and time_stamp in (select max(time_stamp) latest FROM ModerationCast where infohash==? 
group by infohash)' + item = self._db.fetchone(sql,(infohash,)) + return item + + + def hasModeration(self, infohash): + """ Returns True iff there is a moderation for infohash infohash """ + sql = 'select mod_id from ModerationCast where infohash==?' + item = self._db.fetchone(sql,(infohash,)) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","MCDB: hasModeration: infohash:",infohash," ; item:",item + if item is None: + return False + else: + return True + + def hasModerator(self, permid): + """ Returns True iff there is a moderator for PermID permid in the moderatorDB """ + sql = "Select mod_id from Moderators where mod_id==?" + args = permid + + item = self._db.fetchone(sql,(permid,)) + if item is None: + return False + else: + return True + + def getModerator(self, permid): + sql = 'select * from Moderators where mod_id==?'# + str(permid) + item = self._db.fetchone(sql,(permid,)) + return item + + def getModeratorPermids(self): + sql = 'select mod_id from Moderators' + item = self._db.fetchall(sql) + return item + + def getAllModerators(self): + sql = 'select * from Moderators' + item = self._db.fetchall(sql) + return item + + + def getVotedModerators(self): + sql = 'select * from Moderators where status != 0' + item = self._db.fetchall(sql) + return item + + + def getForwardModeratorPermids(self): + sql = 'select mod_id from Moderators where status==1' + permid_strs = self._db.fetchall(sql) + return permid_strs + + def getBlockedModeratorPermids(self): + sql = 'select mod_id from Moderators where status==-1' + item = self._db.fetchall(sql) + return item + #CALL VOTECAST TABLES and return the value + #return [permid for permid in self.moderator_db.getKeys() if permid['blocked']] + + def getTopModeratorPermids(self, top=10): + withmod = [permid for permid in self.moderator_db.getKeys() if permid.has_key('moderations') and permid['moderations'] != []] + + def topSort(moda, modb): + return len(moda['moderations'])-len(modb['moderations']) + + return withmod.sort(topSort)[0:top] + + def updateModeration(self, moderation): + assert type(moderation) == dict + assert moderation.has_key('time_stamp') and validTimestamp(moderation['time_stamp']) + assert moderation.has_key('mod_id') and validPermid(moderation['mod_id']) + self.validSignature(moderation) + infohash = moderation['infohash'] + moderator = moderation['mod_id'] + if self.hasModerator(moderator) and moderator in self.getBlockedModeratorPermids(): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Got moderation from blocked moderator", show_permid_short(moderator)+", hence we drop this moderation!" 
+            return
+
+        if not self.hasModeration(infohash) or self.getModeration(infohash)[3] < moderation['time_stamp']:
+            self.addModeration(moderation)
+
+    def addOwnModeration(self, mod, clone=False):
+        assert type(mod) == dict
+        assert mod.has_key('infohash')
+        assert validInfohash(mod['infohash'])
+
+        moderation = mod
+        moderation['mod_name'] = self.session.get_nickname()
+        #Add current time as a timestamp
+        moderation['time_stamp'] = now()
+        moderation['mod_id'] = bin2str(self.my_permid)
+        #Add permid and signature:
+        self._sign(moderation)
+
+        self.addModeration(moderation, clone=False)
+
+    def addModeration(self, moderation, clone=True):
+        if self.hasModeration(moderation['infohash']):
+            if self.getModeration(moderation['infohash'])[3] < moderation['time_stamp']:
+                self.deleteModeration(moderation['infohash'])
+            else:
+                return
+
+        self._db.insert(self.table_name, **moderation)
+        print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Moderation inserted:", repr(moderation)
+
+        if self.getModeratorPermids() is None or not self.hasModerator(moderation['mod_id']):
+            new = {}
+            new['mod_id'] = moderation['mod_id']
+            #change it later RAMEEZ
+            new['status'] = 0
+            new['time_stamp'] = now()
+            self._db.insert('Moderators', **new)
+            print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "New Moderator inserted:", repr(new)
+
+    def deleteModeration(self, infohash):
+        sql = 'Delete From ModerationCast where infohash==?'
+        self._db.execute_write(sql,(infohash,))
+
+    def deleteModerations(self, permid):
+        sql = 'Delete From ModerationCast where mod_id==?'
+        self._db.execute_write(sql,(permid,))
+
+    def deleteModerator(self, permid):
+        """ Deletes moderator with permid permid from database """
+        sql = 'Delete From Moderators where mod_id==?'
+        self._db.execute_write(sql,(permid,))
+
+        self.deleteModerations(permid)
+
+    def blockModerator(self, permid, blocked=True):
+        """ Blocks/unblocks moderator with permid permid """
+        if blocked:
+
+            self.deleteModerations(permid)
+            sql = 'Update Moderators set status = -1, time_stamp=' + str(now()) + ' where mod_id==?'
+            self._db.execute_write(sql,(permid,))
+        else:
+            self.forwardModerator(permid)
+
+    ################################
+    def maxflow(self, peerid, max_distance = MAXFLOW_DISTANCE):
+
+        self.update_network()
+        upflow = self.network.maxflow(peerid, self.my_peerid, max_distance)
+        downflow = self.network.maxflow(self.my_peerid, peerid, max_distance)
+
+        return (upflow, downflow)
+
+    ################################
+    def getReputationByID(self, peerid, max_distance = MAXFLOW_DISTANCE, alpha = ALPHA):
+
+        (upflow, downflow) = self.maxflow(peerid, max_distance)
+        rep = atan((upflow - downflow) * alpha)/(0.5 * pi)
+        return rep
+
+
+    ################################
+    def getReputation(self, permid, max_distance = MAXFLOW_DISTANCE, alpha = ALPHA):
+
+        peerid = self.getPeerID(permid)
+        return self.getReputationByID(peerid, max_distance, alpha)
+
+
+    ################################
+    def getMyReputation(self, alpha = ALPHA):
+
+        rep = atan((self.total_up - self.total_down) * alpha)/(0.5 * pi)
+        return rep
+
+    def forwardModerator(self, permid, forward=True):
+        if DEBUG:
+            print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Before updating Moderator's status..", repr(self.getModerator(permid))
+        sql = 'Update Moderators set status = 1, time_stamp=' + str(now()) + ' where mod_id==?'
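+        # A parameterized form of the statement above would be, assuming
+        # execute_write forwards the argument tuple to the underlying cursor
+        # (as it does for the single-parameter calls elsewhere in this class):
+        #
+        #   sql = 'Update Moderators set status = 1, time_stamp = ? where mod_id==?'
+        #   self._db.execute_write(sql, (now(), permid))
+        #
+        # which avoids concatenating str(now()) into the SQL text.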
+ self._db.execute_write(sql,(permid,)) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Updated Moderator's status..", repr(self.getModerator(permid)) + + def getName(self, permid): + + name = self.peer_db.getPeer(permid, 'name') + + if name == None or name == '': + return 'peer %s' % show_permid_shorter(permid) + else: + return name + + def getPermid(self, peer_id): + + # by convention '-1' is the id of non-tribler peers + if peer_id == -1: + return 'non-tribler' + else: + return self.peer_db.getPermid(peer_id) + + + def getPeerID(self, permid): + # by convention '-1' is the id of non-tribler peers + if permid == "non-tribler": + return -1 + else: + return self.peer_db.getPeerID(permid) + + + def hasPeer(self, permid): + return self.peer_db.hasPeer(permid) + + + def recentOwnModerations(self, nr=13): + """ Returns the most recent nr moderations (if existing) that you have created """ + + + #List of our moderations + if not self.hasModerator(bin2str(self.my_permid)): + return [] + + forwardable = self.getAllModerations(bin2str(self.my_permid)) + + #Sort the infohashes in this list based on timestamp + forwardable.sort(self._compareFunction) + + #Return most recent, forwardable, moderations (max nr) + return forwardable[0:nr] + + def randomOwnModerations(self, nr=12): + """ Returns nr random moderations (if existing) that you have created """ + + #List of our moderations + if not self.hasModerator(bin2str(self.my_permid)): + return [] + + forwardable = self.getAllModerations(bin2str(self.my_permid)) + + if len(forwardable) > nr: + #Return random subset of size nr + return sample(forwardable, nr) + else: + #Return complete set + return forwardable + + def recentModerations(self, nr=13): + """ Returns the most recent nr moderations (if existing), for moderators that you selected to forward for """ + forwardable = [] + + #Create a list of infohashes that we are willing to forward + keys = self.getModeratorPermids() + for key in keys: + moderator = self.getModerator(key[0]) + if moderator[1] == 1: + forwardable.extend(self.getAllModerations(key[0])) + + + #Sort the infohashes in this list based on timestamp + forwardable.sort(self._compareFunction) + + #Return most recent, forwardable, moderations (max nr) + return forwardable[0:nr] + + + + + def randomModerations(self, nr=12): + """ Returns nr random moderations (if existing), for moderators that you selected to forward for """ + forwardable = [] + + #Create a list of infohashes that we are willing to forward + keys = self.getModeratorPermids() + for key in keys: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "what is the average now baby?????????", key[0] + moderator = self.getModerator(key[0]) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "what is the average now my sooooonnnn", moderator[1] + if moderator[1] == 1: + forwardable.extend(self.getAllModerations(key[0])) + + if len(forwardable) > nr: + #Return random subset of size nr + return sample(forwardable, nr) + else: + #Return complete set + return forwardable + + + def getModerationInfohashes(self): + return self.moderation_db.getKeys() + + + def _compareFunction(self,moderationx,moderationy): + if moderationx[3] > moderationy[3]: + return 1 + if moderationx[3] == moderationy[3]: + return 0 + return -1 + + '''def _compareFunction(self,infohashx,infohashy): + """ Compare function to sort an infohash-list based on the moderation-timestamps """ + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", 
localtime())," ", "what's it all about ?????????????", infohashx[0], infohashy[0] + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "i am a great great man ;-)", infohashx,"?????????????",infohashy + tx = self.getModeration(infohashx[3]) + ty = self.getModeration(infohashy[3]) + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "i am a great great man ;-)", tx,"?????????????",ty + + if tx > ty: + return 1 + if tx == ty: + return 0 + return -1''' + + + def _sign(self, moderation): + assert moderation is not None + assert type(moderation) == dict + assert not moderation.has_key('signature') #This would corrupt the signature + moderation['mod_id'] = bin2str(self.my_permid) + bencoding = bencode(moderation) + moderation['signature'] = bin2str(sign_data(bencoding, self.session.keypair)) + + def validSignature(self,moderation): + blob = str2bin(moderation['signature']) + permid = str2bin(moderation['mod_id']) + #Plaintext excludes signature: + del moderation['signature'] + plaintext = bencode(moderation) + moderation['signature'] = bin2str(blob) + + r = verify_data(plaintext, permid, blob) + if not r: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","modcastdb: Invalid signature >>>>>>" + return r + + +#end moderation +class VoteCastDBHandler(BasicDBHandler): + + __single = None # used for multithreaded singletons pattern + lock = threading.Lock() + + def getInstance(*args, **kw): + + if VoteCastDBHandler.__single is None: + VoteCastDBHandler.lock.acquire() + try: + if VoteCastDBHandler.__single is None: + VoteCastDBHandler(*args, **kw) + finally: + VoteCastDBHandler.lock.release() + return VoteCastDBHandler.__single + + getInstance = staticmethod(getInstance) + + def __init__(self): + VoteCastDBHandler.__single = self + try: + db = SQLiteCacheDB.getInstance() + BasicDBHandler.__init__(self,db,'VoteCast') + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "votecast: DB made" + except: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "votecast: couldn't make the table" + + self.peer_db = PeerDBHandler.getInstance() + self.moderationcast_db = ModerationCastDBHandler.getInstance() + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "votecast: My permid is",`self.my_permid` + + def registerSession(self, session): + self.session = session + self.my_permid = session.get_permid() + + def __len__(self): + return sum([db._size() for db in self.dbs]) + + def getAllVotes(self, permid): + sql = 'select * from VoteCast where mod_id==?' 
+ + records = self._db.fetchall(sql, (permid,)) + return records + + def getAll(self): + sql = 'select * from VoteCast' + + records = self._db.fetchall(sql) + return records + + + def getAverageVotes(self): + moderators = self.moderationcast_db.getModeratorPermids() + if len(moderators) == 0: + return 0 + + total_votes = 0.0 + + for mod in moderators: + votes = self.getAllVotes(mod[0]) + total_votes += len(votes) + + + avg = total_votes/len(moderators) + return avg + + + def getAverageRank(self): + moderators = self.moderationcast_db.getModeratorPermids() + if len(moderators) == 0: + return 0 + avg = 0.0 + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "number of moderatosr has increased ", len(moderators) + for mod in moderators: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "moderators ####: ", mod + votes = self.getPosNegVotes(mod) + pos = votes[0] + neg = votes[1] + if pos + neg == 0: + rank = 0 + else: + rank = pos/(pos+neg) + avg +=rank + + value = avg/len(moderators) + return value + + def getPosNegVotes(self, permid): + sql = 'select * from VoteCast where mod_id==?' + + records = self._db.fetchall(sql, (permid[0],)) + pos_votes = 0 + neg_votes = 0 + + if records is None: + return(pos_votes,neg_votes) + + for vote in records: + + if vote[2] == "1": + pos_votes +=1 + else: + neg_votes +=1 + return (pos_votes, neg_votes) + + + def getAllVotesByVoter(self, permid): + #assert validInfohash(infohash) + sql = 'select * from VoteCast where voter_id==?' #and time_stamp in (select max(time_stamp) latest FROM ModerationCast where infohash==? group by infohash)' + item = self._db.fetchone(sql,(self.getPeerID(permid),)) + return item + + + def hasVote(self, permid, voter_peerid): + """ Returns True iff there is a moderation for infohash infohash """ + sql = 'select mod_id, voter_id from VoteCast where mod_id==? and voter_id==?' + item = self._db.fetchone(sql,(permid,voter_peerid,)) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","well well well",infohash," sdd",item + if item is None: + return False + else: + return True + + def getBallotBox(self): + sql = 'select * from VoteCast' + items = self._db.fetchall(sql) + return items + + + def getVote(self,permid,peerid): + sql = 'select * from VoteCast where mod_id==? and voter_id==?' + item = self._db.fetchone(sql,(permid,peerid,)) + return item + + def addVote(self, vote, clone=True): + vote['time_stamp'] = now() + if self.hasVote(vote['mod_id'],vote['voter_id']): + self.deleteVote(vote['mod_id'],vote['voter_id']) + self._db.insert(self.table_name, **vote) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Vote added:",repr(vote) + + def deleteVotes(self, permid): + sql = 'Delete From VoteCast where mod_id==?' + self._db.execute_write(sql,(permid,)) + + def deleteVote(self, permid, voter_id): + sql = 'Delete From VoteCast where mod_id==? and voter_id==?' 
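+ # Removes the single (mod_id, voter_id) vote; addVote() above uses this to
+ # drop any earlier vote from the same voter before inserting the new one,
+ # so only the most recent vote per voter is kept.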
+ self._db.execute_write(sql,(permid,voter_id,)) + + def getPermid(self, peer_id): + + # by convention '-1' is the id of non-tribler peers + if peer_id == -1: + return 'non-tribler' + else: + return self.peer_db.getPermid(peer_id) + + + def getPeerID(self, permid): + # by convention '-1' is the id of non-tribler peers + if permid == "non-tribler": + return -1 + else: + return self.peer_db.getPeerID(permid) + + + def hasPeer(self, permid): + return self.peer_db.hasPeer(permid) + + def recentVotes(self, nr=25): + """ Returns the most recent nr moderations (if existing), for moderators that you selected to forward for """ + forwardable = [] + + #Create a list of infohashes that we are willing to forward + keys = self.moderationcast_db.getVotedModerators() + + for key in keys: + forwardable.append(key) + + forwardable.sort(self._compareFunction) + return forwardable[0:nr] + + def randomVotes(self, nr=25): + """ Returns nr random moderations (if existing), for moderators that you selected to forward for """ + forwardable = [] + + #Create a list of infohashes that we are willing to forward + keys = self.moderationcast_db.getVotedModerators() + + for key in keys: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "votes i don't know ", key + forwardable.append(key) + + if len(forwardable) > nr: + #Return random subset of size nr + return sample(forwardable, nr) + else: + #Return complete set + return forwardable + + def _compareFunction(self,moderatorx, moderatory): + """ Compare function to sort an infohash-list based on the moderation-timestamps """ + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "what are you comparing", moderatorx,"sdfafdsfds", moderatory + + if moderatorx[2] > moderatory[2]: + return 1 + + if moderatorx[2] == moderatory[2]: + return 0 + return -1 + +#end votes + + + + +class GUIDBHandler: + """ All the functions of this class are only (or mostly) used by GUI. + It is not associated with any db table, but will use any of them + """ + + __single = None # used for multithreaded singletons pattern + lock = threading.Lock() + + def getInstance(*args, **kw): + # Singleton pattern with double-checking + if GUIDBHandler.__single is None: + GUIDBHandler.lock.acquire() + try: + if GUIDBHandler.__single is None: + GUIDBHandler(*args, **kw) + finally: + GUIDBHandler.lock.release() + return GUIDBHandler.__single + + getInstance = staticmethod(getInstance) + + def __init__(self): + if GUIDBHandler.__single is not None: + raise RuntimeError, "GUIDBHandler is singleton" + self._db = SQLiteCacheDB.getInstance() + self.notifier = Notifier.getInstance() + GUIDBHandler.__single = self + + def getCommonFiles(self, permid): + peer_id = self._db.getPeerID(permid) + if peer_id is None: + return [] + + sql_get_common_files = """select name from CollectedTorrent where torrent_id in ( + select torrent_id from Preference where peer_id=? + and torrent_id in (select torrent_id from MyPreference) + ) and status_id <> 2 + """ + self.get_family_filter_sql() + res = self._db.fetchall(sql_get_common_files, (peer_id,)) + return [t[0] for t in res] + + def getOtherFiles(self, permid): + peer_id = self._db.getPeerID(permid) + if peer_id is None: + return [] + + sql_get_other_files = """select infohash,name from CollectedTorrent where torrent_id in ( + select torrent_id from Preference where peer_id=? 
+ and torrent_id not in (select torrent_id from MyPreference) + ) and status_id <> 2 + """ + self.get_family_filter_sql() + res = self._db.fetchall(sql_get_other_files, (peer_id,)) + return [(str2bin(t[0]),t[1]) for t in res] + + def getSimItems(self, infohash, limit): + # recommendation based on collaborative filtering + torrent_id = self._db.getTorrentID(infohash) + if torrent_id is None: + return [] + + sql_get_sim_files = """ + select infohash, name, status_id, count(P2.torrent_id) c + from Preference as P1, Preference as P2, CollectedTorrent as T + where P1.peer_id=P2.peer_id and T.torrent_id=P2.torrent_id + and P2.torrent_id <> P1.torrent_id + and P1.torrent_id=? + and P2.torrent_id not in (select torrent_id from MyPreference) + %s + group by P2.torrent_id + order by c desc + limit ? + """ % self.get_family_filter_sql('T') + + res = self._db.fetchall(sql_get_sim_files, (torrent_id,limit)) + return [(str2bin(t[0]),t[1], t[2], t[3]) for t in res] + + def getSimilarTitles(self, name, limit, infohash, prefix_len=5): + # recommendation based on similar titles + name = name.replace("'","`") + sql_get_sim_files = """ + select infohash, name, status_id from Torrent + where name like '%s%%' + and infohash <> '%s' + and torrent_id not in (select torrent_id from MyPreference) + %s + order by name + limit ? + """ % (name[:prefix_len], bin2str(infohash), self.get_family_filter_sql()) + + res = self._db.fetchall(sql_get_sim_files, (limit,)) + return [(str2bin(t[0]),t[1], t[2]) for t in res] + + def _how_many_prefix(self): + """ test how long the prefix is enough to find similar titles """ + # Jie: I found 5 is the best value. + + sql = "select name from Torrent where name is not NULL order by name" + names = self._db.fetchall(sql) + + for top in range(3, 10): + sta = {} + for line in names: + prefix = line[0][:top] + if prefix not in sta: + sta[prefix] = 1 + else: + sta[prefix] += 1 + + res = [(v,k) for k,v in sta.items()] + res.sort() + res.reverse() + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '------------', top, '-------------' + for k in res[:10]: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", k + + def get_family_filter_sql(self, table_name=''): + torrent_db_handler = TorrentDBHandler.getInstance() + return torrent_db_handler.category.get_family_filter_sql(torrent_db_handler._getCategoryID, table_name=table_name) + + + +class TermDBHandler(BasicDBHandler): + + __single = None # used for multithreaded singletons pattern + lock = threading.Lock() + + def getInstance(*args, **kw): + # Singleton pattern with double-checking + if TermDBHandler.__single is None: + TermDBHandler.lock.acquire() + try: + if TermDBHandler.__single is None: + TermDBHandler(*args, **kw) + finally: + TermDBHandler.lock.release() + return TermDBHandler.__single + getInstance = staticmethod(getInstance) + + def __init__(self): + if TermDBHandler.__single is not None: + raise RuntimeError, "TermDBHandler is singleton" + TermDBHandler.__single = self + db = SQLiteCacheDB.getInstance() + BasicDBHandler.__init__(self,db, 'ClicklogTerm') + + + def getNumTerms(self): + """returns number of terms stored""" + return self.getOne("count(*)") + + + + def bulkInsertTerms(self, terms, commit=True): + for term in terms: + term_id = self.getTermIDNoInsert(term) + if not term_id: + self.insertTerm(term, commit=False) # this HAS to commit, otherwise last_insert_row_id() won't work. 
+ # if you want to avoid committing too often, use bulkInsertTerm + if commit: + self.commit() + + def getTermIDNoInsert(self, term): + return self.getOne('term_id', term=term[:MAX_KEYWORD_LENGTH].lower()) + + def getTermID(self, term): + """returns the ID of term in table ClicklogTerm; creates a new entry if necessary""" + term_id = self.getTermIDNoInsert(term) + if term_id: + return term_id + else: + self.insertTerm(term, commit=True) # this HAS to commit, otherwise last_insert_row_id() won't work. + return self.getOne("last_insert_rowid()") + + def insertTerm(self, term, commit=True): + """creates a new entry for term in table Term""" + self._db.insert(self.table_name, commit=commit, term=term[:MAX_KEYWORD_LENGTH]) + + def getTerm(self, term_id): + """returns the term for a given term_id""" + return self.getOne("term", term_id=term_id) + # if term_id==-1: + # return "" + # term = self.getOne('term', term_id=term_id) + # try: + # return str2bin(term) + # except: + # return term + + def getTermsStartingWith(self, beginning, num=10): + """returns num most frequently encountered terms starting with beginning""" + + # request twice the amount of hits because we need to apply + # the familiy filter... + terms = self.getAll('term', + term=("like", u"%s%%" % beginning), + order_by="times_seen DESC", + limit=num * 2) + + if terms: + # terms is a list containing lists. We only want the first + # item of the inner lists. + terms = [term for (term,) in terms] + + catobj = Category.getInstance() + if catobj.family_filter_enabled(): + return filter(lambda term: not catobj.xxx_filter.foundXXXTerm(term), terms)[:num] + else: + return terms[:num] + + else: + return [] + + def getAllEntries(self): + """use with caution,- for testing purposes""" + return self.getAll("term_id, term", order_by="term_id") + + +class SearchDBHandler(BasicDBHandler): + + __single = None # used for multithreaded singletons pattern + lock = threading.Lock() + + def getInstance(*args, **kw): + # Singleton pattern with double-checking + if SearchDBHandler.__single is None: + SearchDBHandler.lock.acquire() + try: + if SearchDBHandler.__single is None: + SearchDBHandler(*args, **kw) + finally: + SearchDBHandler.lock.release() + return SearchDBHandler.__single + getInstance = staticmethod(getInstance) + + def __init__(self): + if SearchDBHandler.__single is not None: + raise RuntimeError, "SearchDBHandler is singleton" + SearchDBHandler.__single = self + db = SQLiteCacheDB.getInstance() + BasicDBHandler.__init__(self,db, 'ClicklogSearch') ## self,db,'Search' + + + ### write methods + + def storeKeywordsByID(self, peer_id, torrent_id, term_ids, commit=True): + sql_insert_search = u"INSERT INTO ClicklogSearch (peer_id, torrent_id, term_id, term_order) values (?, ?, ?, ?)" + + if len(term_ids)>MAX_KEYWORDS_STORED: + term_ids= term_ids[0:MAX_KEYWORDS_STORED] + + # TODO before we insert, we should delete all potentially existing entries + # with these exact values + # otherwise, some strange attacks might become possible + # and again we cannot assume that user/torrent/term only occurs once + + # create insert data + values = [(peer_id, torrent_id, term_id, term_order) + for (term_id, term_order) + in zip(term_ids, range(len(term_ids)))] + self._db.executemany(sql_insert_search, values, commit=commit) + + # update term popularity + sql_update_term_popularity= u"UPDATE ClicklogTerm SET times_seen = times_seen+1 WHERE term_id=?" 
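+ # The times_seen counter bumped here is what getTermsStartingWith() in
+ # TermDBHandler orders by (times_seen DESC), so storing a search also
+ # promotes the term in future auto-completion suggestions.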
+ self._db.executemany(sql_update_term_popularity, [[term_id] for term_id in term_ids], commit=commit) + + def storeKeywords(self, peer_id, torrent_id, terms, commit=True): + """creates a single entry in Search with peer_id and torrent_id for every term in terms""" + terms = [term.strip() for term in terms if len(term.strip())>0] + term_db = TermDBHandler.getInstance() + term_ids = [term_db.getTermID(term) for term in terms] + self.storeKeywordsByID(peer_id, torrent_id, term_ids, commit) + + def getAllEntries(self): + """use with caution,- for testing purposes""" + return self.getAll("rowid, peer_id, torrent_id, term_id, term_order ", order_by="rowid") + + def getAllOwnEntries(self): + """use with caution,- for testing purposes""" + return self.getAll("rowid, peer_id, torrent_id, term_id, term_order ", where="peer_id=0", order_by="rowid") + + + + ### read methods + + def getNumTermsPerTorrent(self, torrent_id): + """returns the number of terms associated with a given torrent""" + return self.getOne("COUNT (DISTINCT term_id)", torrent_id=torrent_id) + + def getNumTorrentsPerTerm(self, term_id): + """returns the number of torrents stored with a given term.""" + return self.getOne("COUNT (DISTINCT torrent_id)", term_id=term_id) + + def getNumTorrentTermCooccurrences(self, term_id, torrent_id): + """returns the number of times a torrent has been associated with a term""" + return self.getOne("COUNT (*)", term_id=term_id, torrent_id=torrent_id) + + def getRelativeTermFrequency(self, term_id, torrent_id): + """returns the relative importance of a term for a torrent + This is basically tf/idf + term frequency tf = # keyword used per torrent/# keywords used with torrent at all + inverse document frequency = # of torrents associated with term at all + + normalization in tf ensures that a torrent cannot get most important for all keywords just + by, e.g., poisoning the db with a lot of keywords for this torrent + idf normalization ensures that returned values are meaningful across several keywords + """ + + terms_per_torrent = self.getNumTermsPerTorrent(torrent_id) + if terms_per_torrent==0: + return 0 + + torrents_per_term = self.getNumTorrentsPerTerm(term_id) + if torrents_per_term == 0: + return 0 + + coocc = self.getNumTorrentTermCooccurrences(term_id, torrent_id) + + tf = coocc/float(terms_per_torrent) + idf = 1.0/math.log(torrents_per_term+1) + + return tf*idf + + + def getTorrentSearchTerms(self, torrent_id, peer_id): + return self.getAll("term_id", "torrent_id=%d AND peer_id=%s" % (torrent_id, peer_id), order_by="term_order") + + def getMyTorrentSearchTerms(self, torrent_id): + return [x[0] for x in self.getTorrentSearchTerms(torrent_id, peer_id=0)] + + + ### currently unused + + def numSearchesWithTerm(self, term_id): + """returns the number of searches stored with a given term. + I feel like I might miss something, but this should simply be the number of rows containing + the term""" + return self.getOne("COUNT (*)", term_id=term_id) + + def getNumTorrentPeers(self, torrent_id): + """returns the number of users for a given torrent. 
if this should be used + extensively, an index on torrent_id might be in order""" + return self.getOne("COUNT (DISTINCT peer_id)", torrent_id=torrent_id) + + def removeKeywords(self, peer_id, torrent_id, commit=True): + """removes records of keywords used by peer_id to find torrent_id""" + # TODO + # would need to be called by deletePreference + pass + + + + +def doPeerSearchNames(self,dbname,kws): + """ Get all peers that have the specified keywords in their name. + Return a list of dictionaries. Each dict is in the NEWDBSTANDARD format. + """ + if dbname == 'Peer': + where = '(Peer.last_connected>0 or Peer.friend=1) and ' + elif dbname == 'Friend': + where = '' + else: + raise Exception('unknown dbname: %s' % dbname) + + # Must come before query + ranks = self.getRanks() + + for i in range(len(kws)): + kw = kws[i] + where += ' name like "%'+kw+'%"' + if (i+1) != len(kws): + where += ' and' + + # See getGUIPeers() + value_name = PeerDBHandler.gui_value_name + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","peer_db: searchNames: sql",where + res_list = self._db.getAll(dbname, value_name, where) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","peer_db: searchNames: res",res_list + + peer_list = [] + for item in res_list: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","peer_db: searchNames: Got Record",`item` + peer = dict(zip(value_name, item)) + peer['name'] = dunno2unicode(peer['name']) + peer['simRank'] = ranksfind(ranks,peer['permid']) + peer['permid'] = str2bin(peer['permid']) + peer_list.append(peer) + return peer_list + +def ranksfind(ranks,key): + if ranks is None: + return -1 + try: + return ranks.index(key)+1 + except: + return -1 + + + + diff --git a/tribler-mod/Tribler/Core/CacheDB/SqliteCacheDBHandler.py.bak b/tribler-mod/Tribler/Core/CacheDB/SqliteCacheDBHandler.py.bak new file mode 100644 index 0000000..473118a --- /dev/null +++ b/tribler-mod/Tribler/Core/CacheDB/SqliteCacheDBHandler.py.bak @@ -0,0 +1,3443 @@ +# Written by Jie Yang +# see LICENSE.txt for license information +# Note for Developers: Please write a unittest in Tribler/Test/test_sqlitecachedbhandler.py +# for any function you add to database. 
+# Please reuse the functions in sqlitecachedb as much as possible + +from Tribler.Core.CacheDB.sqlitecachedb import SQLiteCacheDB, bin2str, str2bin, NULL +from unicode import name2unicode,dunno2unicode +from copy import deepcopy,copy +from sets import Set +from traceback import print_exc +from time import time +from sha import sha +import sys +import os +import socket +import threading +import base64 +from random import randint, sample +from sets import Set +import math + + +from maxflow import Network +from math import atan, pi + + +from Tribler.Core.BitTornado.bencode import bencode, bdecode +from Notifier import Notifier +from Tribler.Core.simpledefs import * +from Tribler.Core.BuddyCast.moderationcast_util import * +from Tribler.Core.Overlay.permid import sign_data, verify_data +from Tribler.Category.Category import Category + +# maxflow constants +MAXFLOW_DISTANCE = 2 +ALPHA = float(1)/30000 + +DEBUG = False +SHOW_ERROR = False + +MAX_KEYWORDS_STORED = 5 +MAX_KEYWORD_LENGTH = 50 + +def show_permid_shorter(permid): + if not permid: + return 'None' + s = base64.encodestring(permid).replace("\n","") + return s[-5:] + +class BasicDBHandler: + def __init__(self,db, table_name): ## self, table_name + self._db = db ## SQLiteCacheDB.getInstance() + self.table_name = table_name + self.notifier = Notifier.getInstance() + + def __del__(self): + try: + self.sync() + except: + if SHOW_ERROR: + print_exc() + + def close(self): + try: + self._db.close() + except: + if SHOW_ERROR: + print_exc() + + def size(self): + return self._db.size(self.table_name) + + def sync(self): + self._db.commit() + + def commit(self): + self._db.commit() + + def getOne(self, value_name, where=None, conj='and', **kw): + return self._db.getOne(self.table_name, value_name, where=where, conj=conj, **kw) + + def getAll(self, value_name, where=None, group_by=None, having=None, order_by=None, limit=None, offset=None, conj='and', **kw): + return self._db.getAll(self.table_name, value_name, where=where, group_by=group_by, having=having, order_by=order_by, limit=limit, offset=offset, conj=conj, **kw) + + +class MyDBHandler(BasicDBHandler): + + __single = None # used for multithreaded singletons pattern + lock = threading.Lock() + + def getInstance(*args, **kw): + # Singleton pattern with double-checking + if MyDBHandler.__single is None: + MyDBHandler.lock.acquire() + try: + if MyDBHandler.__single is None: + MyDBHandler(*args, **kw) + finally: + MyDBHandler.lock.release() + return MyDBHandler.__single + + getInstance = staticmethod(getInstance) + + def __init__(self): + if MyDBHandler.__single is not None: + raise RuntimeError, "MyDBHandler is singleton" + MyDBHandler.__single = self + db = SQLiteCacheDB.getInstance() + BasicDBHandler.__init__(self,db,'MyInfo') ## self,db,'MyInfo' + # keys: version, torrent_dir + + def get(self, key, default_value=None): + value = self.getOne('value', entry=key) + if value is not NULL: + return value + else: + if default_value is not None: + return default_value + else: + raise KeyError, key + + def put(self, key, value, commit=True): + if self.getOne('value', entry=key) is NULL: + self._db.insert(self.table_name, commit=commit, entry=key, value=value) + else: + where = "entry=" + repr(key) + self._db.update(self.table_name, where, commit=commit, value=value) + +class FriendDBHandler(BasicDBHandler): + + __single = None # used for multithreaded singletons pattern + lock = threading.Lock() + + def getInstance(*args, **kw): + # Singleton pattern with double-checking + if FriendDBHandler.__single 
is None: + FriendDBHandler.lock.acquire() + try: + if FriendDBHandler.__single is None: + FriendDBHandler(*args, **kw) + finally: + FriendDBHandler.lock.release() + return FriendDBHandler.__single + + getInstance = staticmethod(getInstance) + + def __init__(self): + if FriendDBHandler.__single is not None: + raise RuntimeError, "FriendDBHandler is singleton" + FriendDBHandler.__single = self + db = SQLiteCacheDB.getInstance() + BasicDBHandler.__init__(self,db, 'Peer') ## self,db,'Peer' + + def setFriendState(self, permid, state=1, commit=True): + self._db.update(self.table_name, 'permid='+repr(bin2str(permid)), commit=commit, friend=state) + self.notifier.notify(NTFY_PEERS, NTFY_UPDATE, permid, 'friend', state) + + def getFriends(self,state=1): + where = 'friend=%d ' % state + res = self._db.getAll('Friend', 'permid',where=where) + return [str2bin(p[0]) for p in res] + #raise Exception('Use PeerDBHandler getGUIPeers(category = "friend")!') + + def getFriendState(self, permid): + res = self.getOne('friend', permid=bin2str(permid)) + return res + + def deleteFriend(self,permid): + self.setFriendState(permid,0) + + def searchNames(self,kws): + return doPeerSearchNames(self,'Friend',kws) + + def getRanks(self): + # TODO + return [] + + def size(self): + return self._db.size('Friend') + + def addExternalFriend(self, peer): + peerdb = PeerDBHandler.getInstance() + peerdb.addPeer(peer['permid'], peer) + self.setFriendState(peer['permid']) + +NETW_MIME_TYPE = 'image/jpeg' + +class PeerDBHandler(BasicDBHandler): + + __single = None # used for multithreaded singletons pattern + lock = threading.Lock() + + gui_value_name = ('permid', 'name', 'ip', 'port', 'similarity', 'friend', + 'num_peers', 'num_torrents', 'num_prefs', + 'connected_times', 'buddycast_times', 'last_connected') + + def getInstance(*args, **kw): + # Singleton pattern with double-checking + if PeerDBHandler.__single is None: + PeerDBHandler.lock.acquire() + try: + if PeerDBHandler.__single is None: + PeerDBHandler(*args, **kw) + finally: + PeerDBHandler.lock.release() + return PeerDBHandler.__single + + getInstance = staticmethod(getInstance) + + def __init__(self): + if PeerDBHandler.__single is not None: + raise RuntimeError, "PeerDBHandler is singleton" + PeerDBHandler.__single = self + db = SQLiteCacheDB.getInstance() + BasicDBHandler.__init__(self, db,'Peer') ## self, db ,'Peer' + self.pref_db = PreferenceDBHandler.getInstance() + self.online_peers = set() + + + def __len__(self): + return self.size() + + def getPeerID(self, permid): + return self._db.getPeerID(permid) + + def getPeer(self, permid, keys=None): + if keys is not None: + res = self.getOne(keys, permid=bin2str(permid)) + return res + else: + # return a dictionary + # make it compatible for calls to old bsddb interface + value_name = ('permid', 'name', 'ip', 'port', 'similarity', 'friend', + 'num_peers', 'num_torrents', 'num_prefs', 'num_queries', + 'connected_times', 'buddycast_times', 'last_connected', 'last_seen', 'last_buddycast') + + item = self.getOne(value_name, permid=bin2str(permid)) + if not item: + return None + peer = dict(zip(value_name, item)) + peer['permid'] = str2bin(peer['permid']) + return peer + + def getPeerSim(self, permid): + permid_str = bin2str(permid) + sim = self.getOne('similarity', permid=permid_str) + if sim is None: + sim = 0 + return sim + + def getPeerList(self, peerids=None): # get the list of all peers' permid + if peerids is None: + permid_strs = self.getAll('permid') + return [str2bin(permid_str[0]) for permid_str in permid_strs] + 
else: + if not peerids: + return [] + s = str(peerids).replace('[','(').replace(']',')') +# if len(peerids) == 1: +# s = '(' + str(peerids[0]) + ')' # tuple([1]) = (1,), syntax error for sql +# else: +# s = str(tuple(peerids)) + sql = 'select permid from Peer where peer_id in ' + s + permid_strs = self._db.fetchall(sql) + return [str2bin(permid_str[0]) for permid_str in permid_strs] + + + def getPeers(self, peer_list, keys): # get a list of dictionaries given peer list + # BUG: keys must contain 2 entries, otherwise the records in all are single values?? + value_names = ",".join(keys) + sql = 'select %s from Peer where permid=?;'%value_names + all = [] + for permid in peer_list: + permid_str = bin2str(permid) + p = self._db.fetchone(sql, (permid_str,)) + all.append(p) + + peers = [] + for i in range(len(all)): + p = all[i] + peer = dict(zip(keys,p)) + peer['permid'] = peer_list[i] + peers.append(peer) + + return peers + + def addPeer(self, permid, value, update_dns=True, update_connected=False, commit=True): + # add or update a peer + # ARNO: AAARGGH a method that silently changes the passed value param!!! + # Jie: deepcopy(value)? + + _permid = _last_seen = _ip = _port = None + if 'permid' in value: + _permid = value.pop('permid') + + if not update_dns: + if value.has_key('ip'): + _ip = value.pop('ip') + if value.has_key('port'): + _port = value.pop('port') + + if update_connected: + old_connected = self.getOne('connected_times', permid=bin2str(permid)) + if not old_connected: + value['connected_times'] = 1 + else: + value['connected_times'] = old_connected + 1 + + peer_existed = self._db.insertPeer(permid, commit=commit, **value) + + if _permid is not None: + value['permid'] = permid + if _last_seen is not None: + value['last_seen'] = _last_seen + if _ip is not None: + value['ip'] = _ip + if _port is not None: + value['port'] = _port + + if peer_existed: + self.notifier.notify(NTFY_PEERS, NTFY_UPDATE, permid) + # Jie: only notify the GUI when a peer was connected + if 'connected_times' in value: + self.notifier.notify(NTFY_PEERS, NTFY_INSERT, permid) + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","sqldbhand: addPeer",`permid`,self._db.getPeerID(permid),`value` + #print_stack() + + + def hasPeer(self, permid): + return self._db.hasPeer(permid) + + def findPeers(self, key, value): + # only used by Connecter + if key == 'permid': + value = bin2str(value) + res = self.getAll('permid', **{key:value}) + if not res: + return [] + ret = [] + for p in res: + ret.append({'permid':str2bin(p[0])}) + return ret + + def updatePeer(self, permid, commit=True, **argv): + self._db.update(self.table_name, 'permid='+repr(bin2str(permid)), commit=commit, **argv) + self.notifier.notify(NTFY_PEERS, NTFY_UPDATE, permid) + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","sqldbhand: updatePeer",`permid`,argv + #print_stack() + + def deletePeer(self, permid=None, peer_id=None, force=False, commit=True): + # don't delete friend of superpeers, except that force is True + # to do: add transaction + #self._db._begin() # begin a transaction + if peer_id is None: + peer_id = self._db.getPeerID(permid) + if peer_id is None: + return + deleted = self._db.deletePeer(permid=permid, peer_id=peer_id, force=force, commit=commit) + if deleted: + self.pref_db._deletePeer(peer_id=peer_id, commit=commit) + self.notifier.notify(NTFY_PEERS, NTFY_DELETE, permid) + + def updateTimes(self, permid, key, change=1, commit=True): + permid_str = bin2str(permid) + sql = "SELECT peer_id,%s FROM 
Peer WHERE permid==?"%key + find = self._db.fetchone(sql, (permid_str,)) + if find: + peer_id,value = find + if value is None: + value = 1 + else: + value += change + sql_update_peer = "UPDATE Peer SET %s=? WHERE peer_id=?"%key + self._db.execute_write(sql_update_peer, (value, peer_id), commit=commit) + self.notifier.notify(NTFY_PEERS, NTFY_UPDATE, permid) + + def updatePeerSims(self, sim_list, commit=True): + sql_update_sims = 'UPDATE Peer SET similarity=? WHERE peer_id=?' + s = time() + self._db.executemany(sql_update_sims, sim_list, commit=commit) + + def getPermIDByIP(self,ip): + permid = self.getOne('permid', ip=ip) + if permid is not None: + return str2bin(permid) + else: + return None + + def getPermid(self, peer_id): + permid = self.getOne('permid', peer_id=peer_id) + if permid is not None: + return str2bin(permid) + else: + return None + + def getNumberPeers(self, category_name = 'all'): + # 28/07/08 boudewijn: counting the union from two seperate + # select statements is faster than using a single select + # statement with an OR in the WHERE clause. Note that UNION + # returns a distinct list of peer_id's. + if category_name == 'friend': + sql = 'SELECT COUNT(peer_id) FROM Peer WHERE last_connected > 0 AND friend = 1' + else: + sql = 'SELECT COUNT(peer_id) FROM (SELECT peer_id FROM Peer WHERE last_connected > 0 UNION SELECT peer_id FROM Peer WHERE friend = 1)' + res = self._db.fetchone(sql) + if not res: + res = 0 + return res + + def getGUIPeers(self, category_name = 'all', range = None, sort = None, reverse = False, get_online=False, get_ranks=True): + # + # ARNO: WHY DIFF WITH NORMAL getPeers?????? + # load peers for GUI + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'getGUIPeers(%s, %s, %s, %s)' % (category_name, range, sort, reverse) + """ + db keys: peer_id, permid, name, ip, port, thumbnail, oversion, + similarity, friend, superpeer, last_seen, last_connected, + last_buddycast, connected_times, buddycast_times, num_peers, + num_torrents, num_prefs, num_queries, + + @in: get_online: boolean: if true, give peers a key 'online' if there is a connection now + """ + value_name = PeerDBHandler.gui_value_name + + where = '(last_connected>0 or friend=1 or friend=2 or friend=3) ' + if category_name in ('friend', 'friends'): + # Show mutual, I invited and he invited + where += 'and (friend=1 or friend=2 or friend=3) ' + if range: + offset= range[0] + limit = range[1] - range[0] + else: + limit = offset = None + if sort: + # Arno, 2008-10-6: buggy: not reverse??? + desc = (reverse) and 'desc' or '' + if sort in ('name'): + order_by = ' lower(%s) %s' % (sort, desc) + else: + order_by = ' %s %s' % (sort, desc) + else: + order_by = None + + # Must come before query + if get_ranks: + ranks = self.getRanks() + # Arno, 2008-10-23: Someone disabled ranking of people, why? 
+ + res_list = self.getAll(value_name, where, offset= offset, limit=limit, order_by=order_by) + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","getGUIPeers: where",where,"offset",offset,"limit",limit,"order",order_by + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","getGUIPeers: returned len",len(res_list) + + peer_list = [] + for item in res_list: + peer = dict(zip(value_name, item)) + peer['name'] = dunno2unicode(peer['name']) + peer['simRank'] = ranksfind(ranks,peer['permid']) + peer['permid'] = str2bin(peer['permid']) + peer_list.append(peer) + + if get_online: + self.checkOnline(peer_list) + + # peer_list consumes about 1.5M for 1400 peers, and this function costs about 0.015 second + + return peer_list + + + def getRanks(self): + value_name = 'permid' + order_by = 'similarity desc' + rankList_size = 20 + where = '(last_connected>0 or friend=1) ' + res_list = self._db.getAll('Peer', value_name, where=where, limit=rankList_size, order_by=order_by) + return [a[0] for a in res_list] + + def checkOnline(self, peerlist): + # Add 'online' key in peers when their permid + # Called by any thread, accesses single online_peers-dict + # Peers will never be sorted by 'online' because it is not in the db. + # Do not sort here, because then it would be sorted with a partial select (1 page in the grid) + self.lock.acquire() + for peer in peerlist: + peer['online'] = (peer['permid'] in self.online_peers) + self.lock.release() + + + + def setOnline(self,subject,changeType,permid,*args): + """Called by callback threads + with NTFY_CONNECTION, args[0] is boolean: connection opened/closed + """ + self.lock.acquire() + if args[0]: # connection made + self.online_peers.add(permid) + else: # connection closed + self.online_peers.remove(permid) + self.lock.release() + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", (('#'*50)+'\n')*5+'%d peers online' % len(self.online_peers) + + def registerConnectionUpdater(self, session): + session.add_observer(self.setOnline, NTFY_PEERS, [NTFY_CONNECTION], None) + + def updatePeerIcon(self, permid, icontype, icondata, updateFlag = True): + # save thumb in db + self.updatePeer(permid, thumbnail=bin2str(icondata)) + #if self.mm is not None: + # self.mm.save_data(permid, icontype, icondata) + + + def getPeerIcon(self, permid): + item = self.getOne('thumbnail', permid=bin2str(permid)) + if item: + return NETW_MIME_TYPE, str2bin(item) + else: + return None, None + #if self.mm is not None: + # return self.mm.load_data(permid) + #3else: + # return None + + + def searchNames(self,kws): + return doPeerSearchNames(self,'Peer',kws) + + + +class SuperPeerDBHandler(BasicDBHandler): + + __single = None # used for multithreaded singletons pattern + lock = threading.Lock() + + def getInstance(*args, **kw): + # Singleton pattern with double-checking + if SuperPeerDBHandler.__single is None: + SuperPeerDBHandler.lock.acquire() + try: + if SuperPeerDBHandler.__single is None: + SuperPeerDBHandler(*args, **kw) + finally: + SuperPeerDBHandler.lock.release() + return SuperPeerDBHandler.__single + + getInstance = staticmethod(getInstance) + + def __init__(self): + if SuperPeerDBHandler.__single is not None: + raise RuntimeError, "SuperPeerDBHandler is singleton" + SuperPeerDBHandler.__single = self + db = SQLiteCacheDB.getInstance() + BasicDBHandler.__init__(self, db, 'SuperPeer') + self.peer_db_handler = PeerDBHandler.getInstance() + + def loadSuperPeers(self, config, refresh=False): + filename = 
os.path.join(config['install_dir'], config['superpeer_file']) + superpeer_list = self.readSuperPeerList(filename) + self.insertSuperPeers(superpeer_list, refresh) + + def readSuperPeerList(self, filename=u''): + """ read (superpeer_ip, superpeer_port, permid [, name]) lines from a text file """ + + try: + filepath = os.path.abspath(filename) + file = open(filepath, "r") + except IOError: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "superpeer: cannot open superpeer file", filepath + return [] + + superpeers = file.readlines() + file.close() + superpeers_info = [] + for superpeer in superpeers: + if superpeer.strip().startswith("#"): # skip commended lines + continue + superpeer_line = superpeer.split(',') + superpeer_info = [a.strip() for a in superpeer_line] + try: + superpeer_info[2] = base64.decodestring(superpeer_info[2]+'\n' ) + except: + print_exc() + continue + try: + ip = socket.gethostbyname(superpeer_info[0]) + superpeer = {'ip':ip, 'port':superpeer_info[1], + 'permid':superpeer_info[2], 'superpeer':1} + if len(superpeer_info) > 3: + superpeer['name'] = superpeer_info[3] + superpeers_info.append(superpeer) + except: + print_exc() + + return superpeers_info + + def insertSuperPeers(self, superpeer_list, refresh=False): + for superpeer in superpeer_list: + superpeer = deepcopy(superpeer) + if not isinstance(superpeer, dict) or 'permid' not in superpeer: + continue + permid = superpeer.pop('permid') + self.peer_db_handler.addPeer(permid, superpeer, commit=False) + self.peer_db_handler.commit() + + def getSuperPeers(self): + # return list with permids of superpeers + res_list = self._db.getAll(self.table_name, 'permid') + return [str2bin(a[0]) for a in res_list] + + def addExternalSuperPeer(self, peer): + _peer = deepcopy(peer) + permid = _peer.pop('permid') + _peer['superpeer'] = 1 + self._db.insertPeer(permid, **_peer) + + +class CrawlerDBHandler: + """ + The CrawlerDBHandler is not an actual handle to a + database. Instead it uses a local file (usually crawler.txt) to + identify crawler processes. 
+ """ + + __single = None # used for multithreaded singletons pattern + lock = threading.Lock() + + def getInstance(*args, **kw): + # Singleton pattern with double-checking + if CrawlerDBHandler.__single is None: + CrawlerDBHandler.lock.acquire() + try: + if CrawlerDBHandler.__single is None: + CrawlerDBHandler(*args, **kw) + finally: + CrawlerDBHandler.lock.release() + return CrawlerDBHandler.__single + + getInstance = staticmethod(getInstance) + + def __init__(self): + if CrawlerDBHandler.__single is not None: + raise RuntimeError, "CrawlerDBHandler is singleton" + CrawlerDBHandler.__single = self + self._crawler_list = [] + + def loadCrawlers(self, config, refresh=False): + filename = os.path.join(config['crawler_file']) + self._crawler_list = self.readCrawlerList(filename) + + def readCrawlerList(self, filename=''): + """ + read (permid [, name]) lines from a text file + returns a list containing permids + """ + + try: + filepath = os.path.abspath(filename) + file = open(filepath, "r") + except IOError: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: cannot open crawler file", filepath + return [] + + crawlers = file.readlines() + file.close() + crawlers_info = [] + for crawler in crawlers: + if crawler.strip().startswith("#"): # skip commended lines + continue + crawler_info = [a.strip() for a in crawler.split(",")] + try: + crawler_info[0] = base64.decodestring(crawler_info[0]+'\n') + except: + print_exc() + continue + crawlers_info.append(str2bin(crawler)) + + return crawlers_info + + def temporarilyAddCrawler(self, permid): + """ + Because of security reasons we will not allow crawlers to be + added to the crawler.txt list. This temporarilyAddCrawler + method can be used to add one for the running session. Usefull + for debugging and testing. + """ + if not permid in self._crawler_list: + self._crawler_list.append(permid) + + def getCrawlers(self): + """ + returns a list with permids of crawlers + """ + return self._crawler_list + + + +class PreferenceDBHandler(BasicDBHandler): + + __single = None # used for multithreaded singletons pattern + lock = threading.Lock() + + def getInstance(*args, **kw): + # Singleton pattern with double-checking + if PreferenceDBHandler.__single is None: + PreferenceDBHandler.lock.acquire() + try: + if PreferenceDBHandler.__single is None: + PreferenceDBHandler(*args, **kw) + finally: + PreferenceDBHandler.lock.release() + return PreferenceDBHandler.__single + + getInstance = staticmethod(getInstance) + + def __init__(self): + if PreferenceDBHandler.__single is not None: + raise RuntimeError, "PreferenceDBHandler is singleton" + PreferenceDBHandler.__single = self + db = SQLiteCacheDB.getInstance() + BasicDBHandler.__init__(self,db, 'Preference') ## self,db,'Preference' + + def _getTorrentOwnersID(self, torrent_id): + sql_get_torrent_owners_id = u"SELECT peer_id FROM Preference WHERE torrent_id==?" + res = self._db.fetchall(sql_get_torrent_owners_id, (torrent_id,)) + return [t[0] for t in res] + + def getPrefList(self, permid, return_infohash=False): + # get a peer's preference list of infohash or torrent_id according to return_infohash + peer_id = self._db.getPeerID(permid) + if peer_id is None: + return [] + + if not return_infohash: + sql_get_peer_prefs_id = u"SELECT torrent_id FROM Preference WHERE peer_id==?" 
+ res = self._db.fetchall(sql_get_peer_prefs_id, (peer_id,)) + return [t[0] for t in res] + else: + sql_get_infohash = u"SELECT infohash FROM Torrent WHERE torrent_id IN (SELECT torrent_id FROM Preference WHERE peer_id==?)" + res = self._db.fetchall(sql_get_infohash, (peer_id,)) + return [str2bin(t[0]) for t in res] + + def _deletePeer(self, permid=None, peer_id=None, commit=True): # delete a peer from pref_db + # should only be called by PeerDBHandler + if peer_id is None: + peer_id = self._db.getPeerID(permid) + if peer_id is None: + return + + self._db.delete(self.table_name, commit=commit, peer_id=peer_id) + + def addPreference(self, permid, infohash, data={}, commit=True): + # This function should be replaced by addPeerPreferences + # peer_permid and prefs are binaries, the peer must have been inserted in Peer table + # Nicolas: did not change this function as it seems addPreference*s* is getting called + peer_id = self._db.getPeerID(permid) + if peer_id is None: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'PreferenceDBHandler: add preference of a peer which is not existed in Peer table', `permid` + return + + sql_insert_peer_torrent = u"INSERT INTO Preference (peer_id, torrent_id) VALUES (?,?)" + torrent_id = self._db.getTorrentID(infohash) + if not torrent_id: + self._db.insertInfohash(infohash) + torrent_id = self._db.getTorrentID(infohash) + try: + self._db.execute_write(sql_insert_peer_torrent, (peer_id, torrent_id), commit=commit) + except Exception, msg: # duplicated + print_exc() + + + + def addPreferences(self, peer_permid, prefs, is_torrent_id=False, commit=True): + # peer_permid and prefs are binaries, the peer must have been inserted in Peer table + # + # boudewijn: for buddycast version >= OLPROTO_VER_EIGTH the + # prefs list may contain both strings (indicating an infohash) + # or dictionaries (indicating an infohash with metadata) + + peer_id = self._db.getPeerID(peer_permid) + if peer_id is None: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'PreferenceDBHandler: add preference of a peer which is not existed in Peer table', `peer_permid` + return + + prefs = [type(pref) is str and {"infohash":pref} or pref + for pref + in prefs] + + if is_torrent_id: + torrent_id_prefs = [(peer_id, + pref['torrent_id'], + pref.get('position', -1), + pref.get('reranking_strategy', -1)) + for pref in prefs] + else: + # Nicolas: do not know why this would be called, but let's handle it smoothly + torrent_id_prefs = [] + for pref in prefs: + if type(pref)==dict: + infohash = pref["infohash"] + else: + infohash = pref # Nicolas: from wherever this might come, we even handle old list of infohashes style + torrent_id = self._db.getTorrentID(infohash) + if not torrent_id: + self._db.insertInfohash(infohash) + torrent_id = self._db.getTorrentID(infohash) + torrent_id_prefs.append((peer_id, torrent_id, -1, -1)) + + sql_insert_peer_torrent = u"INSERT INTO Preference (peer_id, torrent_id, click_position, reranking_strategy) VALUES (?,?,?,?)" + if len(prefs) > 0: + try: + self._db.executemany(sql_insert_peer_torrent, torrent_id_prefs, commit=commit) + except Exception, msg: # duplicated + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'dbhandler: addPreferences:', Exception, msg + + # now, store search terms + + # Nicolas: if maximum number of search terms is exceeded, abort storing them. 
+ # Although this may seem a bit strict, this means that something different than a genuine Tribler client + # is on the other side, so we might rather err on the side of caution here and simply let clicklog go. + nums_of_search_terms = [len(pref.get('search_terms',[])) for pref in prefs] + if max(nums_of_search_terms)>MAX_KEYWORDS_STORED: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "peer %d exceeds max number %d of keywords per torrent, aborting storing keywords" % \ + (peer_id, MAX_KEYWORDS_STORED) + return + + all_terms_unclean = Set([]) + for pref in prefs: + newterms = Set(pref.get('search_terms',[])) + all_terms_unclean = all_terms_unclean.union(newterms) + + all_terms = [] + for term in all_terms_unclean: + cleanterm = '' + for i in range(0,len(term)): + c = term[i] + if c.isalnum(): + cleanterm += c + if len(cleanterm)>0: + all_terms.append(cleanterm) + + + # maybe we haven't received a single key word, no need to loop again over prefs then + if len(all_terms)==0: + return + + termdb = TermDBHandler.getInstance() + searchdb = SearchDBHandler.getInstance() + + # insert all unknown terms NOW so we can rebuild the index at once + termdb.bulkInsertTerms(all_terms) + + # get local term ids for terms. + foreign2local = dict([(str(foreign_term), termdb.getTermID(foreign_term)) + for foreign_term + in all_terms]) + + # process torrent data + for pref in prefs: + torrent_id = pref.get('torrent_id', None) + search_terms = pref.get('search_terms', []) + + if search_terms==[]: + continue + if not torrent_id: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "torrent_id not set, retrieving manually!" + torrent_id = TorrentDBHandler.getInstance().getTorrentID(infohash) + + term_ids = [foreign2local[str(foreign)] for foreign in search_terms] + searchdb.storeKeywordsByID(peer_id, torrent_id, term_ids, commit=False) + if commit: + searchdb.commit() + + def getAllEntries(self): + """use with caution,- for testing purposes""" + return self.getAll("rowid, peer_id, torrent_id, click_position,reranking_strategy", order_by="peer_id, torrent_id") + + + def getRecentPeersPrefs(self, key, num=None): + # get the recently seen peers' preference. used by buddycast + sql = "select peer_id,torrent_id from Preference where peer_id in (select peer_id from Peer order by %s desc)"%key + if num is not None: + sql = sql[:-1] + " limit %d)"%num + res = self._db.fetchall(sql) + return res + + def getPositionScore(self, torrent_id, keywords): + """returns a tuple (num, positionScore) stating how many times the torrent id was found in preferences, + and the average position score, where each click at position i receives 1-(1/i) points""" + + if not keywords: + return (0,0) + + term_db = TermDBHandler.getInstance() + term_ids = [term_db.getTermID(keyword) for keyword in keywords] + s_term_ids = str(term_ids).replace("[","(").replace("]",")").replace("L","") + + # we're not really interested in the peer_id here, + # just make sure we don't count twice if we hit more than one keyword in a search + # ... 
one might treat keywords a bit more strictly here anyway (AND instead of OR) + sql = """ +SELECT DISTINCT Preference.peer_id, Preference.click_position +FROM Preference +INNER JOIN ClicklogSearch +ON + Preference.torrent_id = ClicklogSearch.torrent_id + AND + Preference.peer_id = ClicklogSearch.peer_id +WHERE + ClicklogSearch.term_id IN %s + AND + ClicklogSearch.torrent_id = %s""" % (s_term_ids, torrent_id) + res = self._db.fetchall(sql) + scores = [1.0-1.0/float(click_position+1) + for (peer_id, click_position) + in res + if click_position>-1] + if len(scores)==0: + return (0,0) + score = float(sum(scores))/len(scores) + return (len(scores), score) + + +class TorrentDBHandler(BasicDBHandler): + + __single = None # used for multithreaded singletons pattern + lock = threading.Lock() + + def getInstance(*args, **kw): + # Singleton pattern with double-checking + if TorrentDBHandler.__single is None: + TorrentDBHandler.lock.acquire() + try: + if TorrentDBHandler.__single is None: + TorrentDBHandler(*args, **kw) + finally: + TorrentDBHandler.lock.release() + return TorrentDBHandler.__single + + getInstance = staticmethod(getInstance) + + def __init__(self): + if TorrentDBHandler.__single is not None: + raise RuntimeError, "TorrentDBHandler is singleton" + TorrentDBHandler.__single = self + db = SQLiteCacheDB.getInstance() + BasicDBHandler.__init__(self,db, 'Torrent') ## self,db,torrent + + self.mypref_db = MyPreferenceDBHandler.getInstance() + + self.status_table = {'good':1, 'unknown':0, 'dead':2} + self.status_table.update(self._db.getTorrentStatusTable()) + self.id2status = dict([(x,y) for (y,x) in self.status_table.items()]) + self.torrent_dir = None + # 0 - unknown + # 1 - good + # 2 - dead + + self.category_table = {'Video':1, + 'VideoClips':2, + 'Audio':3, + 'Compressed':4, + 'Document':5, + 'Picture':6, + 'xxx':7, + 'other':8,} + self.category_table.update(self._db.getTorrentCategoryTable()) + self.category_table['unknown'] = 0 + self.id2category = dict([(x,y) for (y,x) in self.category_table.items()]) + # 1 - Video + # 2 - VideoClips + # 3 - Audio + # 4 - Compressed + # 5 - Document + # 6 - Picture + # 7 - xxx + # 8 - other + + self.src_table = self._db.getTorrentSourceTable() + self.id2src = dict([(x,y) for (y,x) in self.src_table.items()]) + # 0 - '' # local added + # 1 - BC + # 2,3,4... 
- URL of RSS feed + self.keys = ['torrent_id', 'name', 'torrent_file_name', + 'length', 'creation_date', 'num_files', 'thumbnail', + 'insert_time', 'secret', 'relevance', + 'source_id', 'category_id', 'status_id', + 'num_seeders', 'num_leechers', 'comment'] + self.existed_torrents = Set() + + + self.value_name = ['C.torrent_id', 'category_id', 'status_id', 'name', 'creation_date', 'num_files', + 'num_leechers', 'num_seeders', 'length', + 'secret', 'insert_time', 'source_id', 'torrent_file_name', + 'relevance', 'infohash', 'tracker', 'last_check'] + + def register(self, category, torrent_dir): + self.category = category + self.torrent_dir = torrent_dir + + def getTorrentID(self, infohash): + return self._db.getTorrentID(infohash) + + def getInfohash(self, torrent_id): + return self._db.getInfohash(torrent_id) + + def hasTorrent(self, infohash): + if infohash in self.existed_torrents: #to do: not thread safe + return True + infohash_str = bin2str(infohash) + existed = self._db.getOne('CollectedTorrent', 'torrent_id', infohash=infohash_str) + if existed is None: + return False + else: + self.existed_torrents.add(infohash) + return True + + def addExternalTorrent(self, filename, source='BC', extra_info={}, metadata=None): + infohash, torrent = self._readTorrentData(filename, source, extra_info, metadata) + if infohash is None: + return torrent + if not self.hasTorrent(infohash): + self._addTorrentToDB(infohash, torrent, commit=True) + self.notifier.notify(NTFY_TORRENTS, NTFY_INSERT, infohash) + + return torrent + + def _readTorrentData(self, filename, source='BC', extra_info={}, metadata=None): + # prepare data to insert into database + try: + if metadata is None: + f = open(filename, 'rb') + metadata = f.read() + f.close() + + metainfo = bdecode(metadata) + except Exception,msg: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", Exception,msg,`metadata` + return None,None + + namekey = name2unicode(metainfo) # convert info['name'] to type(unicode) + info = metainfo['info'] + infohash = sha(bencode(info)).digest() + + torrent = {'infohash': infohash} + torrent['torrent_file_name'] = os.path.split(filename)[1] + torrent['name'] = info.get(namekey, '') + + length = 0 + nf = 0 + if info.has_key('length'): + length = info.get('length', 0) + nf = 1 + elif info.has_key('files'): + for li in info['files']: + nf += 1 + if li.has_key('length'): + length += li['length'] + torrent['length'] = length + torrent['num_files'] = nf + torrent['announce'] = metainfo.get('announce', '') + torrent['announce-list'] = metainfo.get('announce-list', '') + torrent['creation_date'] = metainfo.get('creation date', 0) + + torrent['comment'] = metainfo.get('comment', None) + + torrent["ignore_number"] = 0 + torrent["retry_number"] = 0 + torrent["num_seeders"] = extra_info.get('seeder', -1) + torrent["num_leechers"] = extra_info.get('leecher', -1) + other_last_check = extra_info.get('last_check_time', -1) + if other_last_check >= 0: + torrent["last_check_time"] = int(time()) - other_last_check + else: + torrent["last_check_time"] = 0 + torrent["status"] = self._getStatusID(extra_info.get('status', "unknown")) + + torrent["source"] = self._getSourceID(source) + torrent["insert_time"] = long(time()) + + torrent['category'] = self._getCategoryID(self.category.calculateCategory(metainfo, torrent['name'])) + torrent['secret'] = 0 # to do: check if torrent is secret + torrent['relevance'] = 0.0 + thumbnail = 0 + if 'azureus_properties' in metainfo and 'Content' in metainfo['azureus_properties']: + if 
metainfo['azureus_properties']['Content'].get('Thumbnail',''): + thumbnail = 1 + torrent['thumbnail'] = thumbnail + + #if (torrent['category'] != []): + # print '### one torrent added from MetadataHandler: ' + str(torrent['category']) + ' ' + torrent['torrent_name'] + '###' + return infohash, torrent + + def addInfohash(self, infohash, commit=True): + if self._db.getTorrentID(infohash) is None: + self._db.insert('Torrent', commit=commit, infohash=bin2str(infohash)) + + def _getStatusID(self, status): + return self.status_table.get(status.lower(), 0) + + def _getCategoryID(self, category_list): + if len(category_list) > 0: + category = category_list[0].lower() + cat_int = self.category_table[category] + else: + cat_int = 0 + return cat_int + + def _getSourceID(self, src): + if src in self.src_table: + src_int = self.src_table[src] + else: + src_int = self._insertNewSrc(src) # add a new src, e.g., a RSS feed + self.src_table[src] = src_int + self.id2src[src_int] = src + return src_int + + def _addTorrentToDB(self, infohash, data, commit=True): + torrent_id = self._db.getTorrentID(infohash) + if torrent_id is None: # not in db + infohash_str = bin2str(infohash) + self._db.insert('Torrent', + commit=True, # must commit to get the torrent id + infohash = infohash_str, + name = dunno2unicode(data['name']), + torrent_file_name = data['torrent_file_name'], + length = data['length'], + creation_date = data['creation_date'], + num_files = data['num_files'], + thumbnail = data['thumbnail'], + insert_time = data['insert_time'], + secret = data['secret'], + relevance = data['relevance'], + source_id = data['source'], + category_id = data['category'], + status_id = data['status'], + num_seeders = data['num_seeders'], + num_leechers = data['num_leechers'], + comment = dunno2unicode(data['comment'])) + torrent_id = self._db.getTorrentID(infohash) + else: # infohash in db + where = 'torrent_id = %d'%torrent_id + self._db.update('Torrent', where = where, + commit=False, + name = dunno2unicode(data['name']), + torrent_file_name = data['torrent_file_name'], + length = data['length'], + creation_date = data['creation_date'], + num_files = data['num_files'], + thumbnail = data['thumbnail'], + insert_time = data['insert_time'], + secret = data['secret'], + relevance = data['relevance'], + source_id = data['source'], + category_id = data['category'], + status_id = data['status'], + num_seeders = data['num_seeders'], + num_leechers = data['num_leechers'], + comment = dunno2unicode(data['comment'])) + + self._addTorrentTracker(torrent_id, data, commit=False) + if commit: + self.commit() + self._db.show_execute = False + return torrent_id + + def _insertNewSrc(self, src, commit=True): + desc = '' + if src.startswith('http') and src.endswith('xml'): + desc = 'RSS' + self._db.insert('TorrentSource', commit=commit, name=src, description=desc) + src_id = self._db.getOne('TorrentSource', 'source_id', name=src) + return src_id + + def _addTorrentTracker(self, torrent_id, data, add_all=False, commit=True): + # Set add_all to True if you want to put all multi-trackers into db. + # In the current version (4.2) only the main tracker is used. 
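
A condensed sketch of the name-to-id lookup idiom that _getStatusID, _getCategoryID and _getSourceID above rely on; the reverse maps mirror how id2status/id2category are built in __init__, and the table contents here are illustrative only, not the full Tribler tables.

    status_table = {'good': 1, 'unknown': 0, 'dead': 2}           # illustrative subset
    id2status = dict((sid, name) for (name, sid) in status_table.items())

    def get_status_id(status):
        # unknown strings fall back to 0 ('unknown'), as _getStatusID() does
        return status_table.get(status.lower(), 0)

    def get_category_id(category_list, category_table):
        # _getCategoryID() only uses the first category in the list;
        # an empty list maps to 0 ('unknown')
        return category_table[category_list[0].lower()] if category_list else 0

    assert get_status_id('Good') == 1
    assert id2status[2] == 'dead'
    assert get_category_id(['Video'], {'video': 1}) == 1
    assert get_category_id([], {'video': 1}) == 0
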
+ exist = self._db.getOne('TorrentTracker', 'tracker', torrent_id=torrent_id) + if exist: + return + + announce = data['announce'] + ignore_number = data['ignore_number'] + retry_number = data['retry_number'] + last_check_time = data['last_check_time'] + + announce_list = data['announce-list'] + + sql_insert_torrent_tracker = """ + INSERT INTO TorrentTracker + (torrent_id, tracker, announce_tier, + ignored_times, retried_times, last_check) + VALUES (?,?,?, ?,?,?) + """ + + values = [(torrent_id, announce, 1, ignore_number, retry_number, last_check_time)] + # each torrent only has one announce with tier number 1 + tier_num = 2 + trackers = {announce:None} + if add_all: + for tier in announce_list: + for tracker in tier: + if tracker in trackers: + continue + value = (torrent_id, tracker, tier_num, 0, 0, 0) + values.append(value) + trackers[tracker] = None + tier_num += 1 + + self._db.executemany(sql_insert_torrent_tracker, values, commit=commit) + + def updateTorrent(self, infohash, commit=True, **kw): # watch the schema of database + if 'category' in kw: + cat_id = self._getCategoryID(kw.pop('category')) + kw['category_id'] = cat_id + if 'status' in kw: + status_id = self._getStatusID(kw.pop('status')) + kw['status_id'] = status_id + if 'progress' in kw: + self.mypref_db.updateProgress(infohash, kw.pop('progress'), commit=False)# commit at end of function + if 'seeder' in kw: + kw['num_seeders'] = kw.pop('seeder') + if 'leecher' in kw: + kw['num_leechers'] = kw.pop('leecher') + if 'last_check_time' in kw or 'ignore_number' in kw or 'retry_number' in kw \ + or 'retried_times' in kw or 'ignored_times' in kw: + self.updateTracker(infohash, kw, commit=False) + + for key in kw.keys(): + if key not in self.keys: + kw.pop(key) + + if len(kw) > 0: + infohash_str = bin2str(infohash) + where = "infohash='%s'"%infohash_str + self._db.update(self.table_name, where, commit=False, **kw) + + if commit: + self.commit() + # to.do: update the torrent panel's number of seeders/leechers + self.notifier.notify(NTFY_TORRENTS, NTFY_UPDATE, infohash) + + def updateTracker(self, infohash, kw, tier=1, tracker=None, commit=True): + torrent_id = self._db.getTorrentID(infohash) + if torrent_id is None: + return + update = {} + assert type(kw) == dict and kw, 'updateTracker error: kw should be filled dict, but is: %s' % kw + if 'last_check_time' in kw: + update['last_check'] = kw.pop('last_check_time') + if 'ignore_number' in kw: + update['ignored_times'] = kw.pop('ignore_number') + if 'ignored_times' in kw: + update['ignored_times'] = kw.pop('ignored_times') + if 'retry_number' in kw: + update['retried_times'] = kw.pop('retry_number') + if 'retried_times' in kw: + update['retried_times'] = kw.pop('retried_times') + + if tracker is None: + where = 'torrent_id=%d AND announce_tier=%d'%(torrent_id, tier) + else: + where = 'torrent_id=%d AND tracker=%s'%(torrent_id, repr(tracker)) + self._db.update('TorrentTracker', where, commit=commit, **update) + + def deleteTorrent(self, infohash, delete_file=False, commit = True): + if not self.hasTorrent(infohash): + return False + + if self.mypref_db.hasMyPreference(infohash): # don't remove torrents in my pref + return False + + if delete_file: + deleted = self.eraseTorrentFile(infohash) + else: + deleted = True + + if deleted: + self._deleteTorrent(infohash, commit=commit) + + self.notifier.notify(NTFY_TORRENTS, NTFY_DELETE, infohash) + return deleted + + def _deleteTorrent(self, infohash, keep_infohash=True, commit=True): + torrent_id = self._db.getTorrentID(infohash) + if 
torrent_id is not None: + if keep_infohash: + self._db.update(self.table_name, where="torrent_id=%d"%torrent_id, commit=commit, torrent_file_name=None) + else: + self._db.delete(self.table_name, commit=commit, torrent_id=torrent_id) + if infohash in self.existed_torrents: + self.existed_torrents.remove(infohash) + self._db.delete('TorrentTracker', commit=commit, torrent_id=torrent_id) + #print '******* delete torrent', torrent_id, `infohash`, self.hasTorrent(infohash) + + def eraseTorrentFile(self, infohash): + torrent_id = self._db.getTorrentID(infohash) + if torrent_id is not None: + torrent_dir = self.getTorrentDir() + torrent_name = self.getOne('torrent_file_name', torrent_id=torrent_id) + src = os.path.join(torrent_dir, torrent_name) + if not os.path.exists(src): # already removed + return True + + try: + os.remove(src) + except Exception, msg: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "cachedbhandler: failed to erase torrent", src, Exception, msg + return False + + return True + + def getTracker(self, infohash, tier=0): + torrent_id = self._db.getTorrentID(infohash) + if torrent_id is not None: + sql = "SELECT tracker, announce_tier FROM TorrentTracker WHERE torrent_id==%d"%torrent_id + if tier > 0: + sql += " AND announce_tier<=%d"%tier + return self._db.fetchall(sql) + + def getTorrentDir(self): + return self.torrent_dir + + + def getTorrent(self, infohash, keys=None, include_mypref=True): + # to do: replace keys like source -> source_id and status-> status_id ?? + + if keys is None: + keys = deepcopy(self.value_name) + #('torrent_id', 'category_id', 'status_id', 'name', 'creation_date', 'num_files', + # 'num_leechers', 'num_seeders', 'length', + # 'secret', 'insert_time', 'source_id', 'torrent_file_name', + # 'relevance', 'infohash', 'torrent_id') + else: + keys = list(keys) + where = 'C.torrent_id = T.torrent_id and announce_tier=1 ' + + res = self._db.getOne('CollectedTorrent C, TorrentTracker T', keys, where=where, infohash=bin2str(infohash)) + if not res: + return None + torrent = dict(zip(keys, res)) + if 'source_id' in torrent: + torrent['source'] = self.id2src[torrent['source_id']] + del torrent['source_id'] + if 'category_id' in torrent: + torrent['category'] = [self.id2category[torrent['category_id']]] + del torrent['category_id'] + if 'status_id' in torrent: + torrent['status'] = self.id2status[torrent['status_id']] + del torrent['status_id'] + torrent['infohash'] = infohash + if 'last_check' in torrent: + torrent['last_check_time'] = torrent['last_check'] + del torrent['last_check'] + + if include_mypref: + tid = torrent['C.torrent_id'] + stats = self.mypref_db.getMyPrefStats(tid) + del torrent['C.torrent_id'] + if stats: + torrent['myDownloadHistory'] = True + torrent['creation_time'] = stats[tid][0] + torrent['progress'] = stats[tid][1] + torrent['destination_path'] = stats[tid][2] + + + return torrent + + def getNumberTorrents(self, category_name = 'all', library = False): + table = 'CollectedTorrent' + value = 'count(torrent_id)' + where = '1 ' + + if category_name != 'all': + where += ' and category_id= %d' % self.category_table.get(category_name.lower(), -1) # unkown category_name returns no torrents + if library: + where += ' and torrent_id in (select torrent_id from MyPreference where destination_path != "")' + else: + where += ' and status_id=%d ' % self.status_table['good'] + # add familyfilter + where += self.category.get_family_filter_sql(self._getCategoryID) + + number = self._db.getOne(table, value, where) + if not number: + 
number = 0 + return number + + def getTorrents(self, category_name = 'all', range = None, library = False, sort = None, reverse = False): + """ + get Torrents of some category and with alive status (opt. not in family filter) + + @return Returns a list of dicts with keys: + torrent_id, infohash, name, category, status, creation_date, num_files, num_leechers, num_seeders, + length, secret, insert_time, source, torrent_filename, relevance, simRank, tracker, last_check + (if in library: myDownloadHistory, download_started, progress, dest_dir) + + """ + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'TorrentDBHandler: getTorrents(%s, %s, %s, %s, %s)' % (category_name, range, library, sort, reverse) + s = time() + + value_name = deepcopy(self.value_name) + + where = 'T.torrent_id = C.torrent_id and announce_tier=1 ' + + if category_name != 'all': + where += ' and category_id= %d' % self.category_table.get(category_name.lower(), -1) # unkown category_name returns no torrents + if library: + if sort in value_name: + where += ' and C.torrent_id in (select torrent_id from MyPreference where destination_path != "")' + else: + value_name[0] = 'C.torrent_id' + where += ' and C.torrent_id = M.torrent_id and announce_tier=1' + else: + where += ' and status_id=%d ' % self.status_table['good'] # if not library, show only good files + # add familyfilter + where += self.category.get_family_filter_sql(self._getCategoryID) + if range: + offset= range[0] + limit = range[1] - range[0] + else: + limit = offset = None + if sort: + # Arno, 2008-10-6: buggy: not reverse??? + desc = (reverse) and 'desc' or '' + if sort in ('name'): + order_by = ' lower(%s) %s' % (sort, desc) + else: + order_by = ' %s %s' % (sort, desc) + else: + order_by = None + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentDBHandler: GET TORRENTS val",value_name,"where",where,"limit",limit,"offset",offset,"order",order_by + #print_stack + + # Must come before query + ranks = self.getRanks() + + #self._db.show_execute = True + if library and sort not in value_name: + res_list = self._db.getAll('CollectedTorrent C, MyPreference M, TorrentTracker T', value_name, where, limit=limit, offset=offset, order_by=order_by) + else: + res_list = self._db.getAll('CollectedTorrent C, TorrentTracker T', value_name, where, limit=limit, offset=offset, order_by=order_by) + #self._db.show_execute = False + + mypref_stats = self.mypref_db.getMyPrefStats() + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentDBHandler: getTorrents: getAll returned ###################",len(res_list) + + torrent_list = self.valuelist2torrentlist(value_name,res_list,ranks,mypref_stats) + del res_list + del mypref_stats + return torrent_list + + def valuelist2torrentlist(self,value_name,res_list,ranks,mypref_stats): + + torrent_list = [] + for item in res_list: + value_name[0] = 'torrent_id' + torrent = dict(zip(value_name, item)) + + try: + torrent['source'] = self.id2src[torrent['source_id']] + except: + print_exc() + # Arno: RSS subscription and id2src issue + torrent['source'] = 'http://some/RSS/feed' + + torrent['category'] = [self.id2category[torrent['category_id']]] + torrent['status'] = self.id2status[torrent['status_id']] + torrent['simRank'] = ranksfind(ranks,torrent['infohash']) + torrent['infohash'] = str2bin(torrent['infohash']) + #torrent['num_swarm'] = torrent['num_seeders'] + torrent['num_leechers'] + torrent['last_check_time'] = torrent['last_check'] + del torrent['last_check'] + del 
torrent['source_id'] + del torrent['category_id'] + del torrent['status_id'] + torrent_id = torrent['torrent_id'] + if mypref_stats is not None and torrent_id in mypref_stats: + # add extra info for torrent in mypref + torrent['myDownloadHistory'] = True + data = mypref_stats[torrent_id] #(create_time,progress,destdir) + torrent['download_started'] = data[0] + torrent['progress'] = data[1] + torrent['destdir'] = data[2] + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentDBHandler: GET TORRENTS",`torrent` + + torrent_list.append(torrent) + return torrent_list + + def getRanks(self,): + value_name = 'infohash' + order_by = 'relevance desc' + rankList_size = 20 + where = 'status_id=%d ' % self.status_table['good'] + res_list = self._db.getAll('Torrent', value_name, where = where, limit=rankList_size, order_by=order_by) + return [a[0] for a in res_list] + + def getNumberCollectedTorrents(self): + #return self._db.size('CollectedTorrent') + return self._db.getOne('CollectedTorrent', 'count(torrent_id)') + + def freeSpace(self, torrents2del): +# if torrents2del > 100: # only delete so many torrents each time +# torrents2del = 100 + sql = """ + select torrent_file_name, torrent_id, infohash, relevance, + min(relevance,2500) + min(500,num_leechers) + 4*min(500,num_seeders) - (max(0,min(500,(%d-creation_date)/86400)) ) as weight + from CollectedTorrent + where torrent_id not in (select torrent_id from MyPreference) + order by weight + limit %d + """ % (int(time()), torrents2del) + res_list = self._db.fetchall(sql) + if len(res_list) == 0: + return False + + # delete torrents from db + sql_del_torrent = "delete from Torrent where torrent_id=?" + sql_del_tracker = "delete from TorrentTracker where torrent_id=?" + sql_del_pref = "delete from Preference where torrent_id=?" + tids = [(torrent_id,) for torrent_file_name, torrent_id, infohash, relevance, weight in res_list] + + self._db.executemany(sql_del_torrent, tids, commit=False) + self._db.executemany(sql_del_tracker, tids, commit=False) + self._db.executemany(sql_del_pref, tids, commit=False) + + self._db.commit() + + # but keep the infohash in db to maintain consistence with preference db + #torrent_id_infohashes = [(torrent_id,infohash_str,relevance) for torrent_file_name, torrent_id, infohash_str, relevance, weight in res_list] + #sql_insert = "insert into Torrent (torrent_id, infohash, relevance) values (?,?,?)" + #self._db.executemany(sql_insert, torrent_id_infohashes, commit=True) + + torrent_dir = self.getTorrentDir() + deleted = 0 # deleted any file? 
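
For reference, the weight expression in the freeSpace() query above can be read as the standalone sketch below; rows are ordered by ascending weight, so the lowest-scoring collected torrents are removed first. The example numbers are illustrative, not taken from any database.

    def collect_weight(relevance, num_leechers, num_seeders, creation_date, now):
        # mirrors the SQL: capped relevance + capped leechers + 4x capped seeders,
        # minus an age penalty capped at 500 days
        age_days = (now - creation_date) / 86400
        return (min(relevance, 2500)
                + min(500, num_leechers)
                + 4 * min(500, num_seeders)
                - max(0, min(500, age_days)))

    # illustrative: a week-old torrent, relevance 100, 10 seeders, 3 leechers
    # scores 100 + 3 + 40 - 7 = 136
    print(collect_weight(100, 3, 10, 0, 7 * 86400))
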
+ for torrent_file_name, torrent_id, infohash, relevance, weight in res_list: + torrent_path = os.path.join(torrent_dir, torrent_file_name) + try: + os.remove(torrent_path) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Erase torrent:", os.path.basename(torrent_path) + deleted += 1 + except Exception, msg: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Error in erase torrent", Exception, msg + pass + + self.notifier.notify(NTFY_TORRENTS, NTFY_DELETE, str2bin(infohash)) # refresh gui + + return deleted + + def hasMetaData(self, infohash): + return self.hasTorrent(infohash) + + def getTorrentRelevances(self, tids): + sql = 'SELECT torrent_id, relevance from Torrent WHERE torrent_id in ' + str(tuple(tids)) + return self._db.fetchall(sql) + + def updateTorrentRelevance(self, infohash, relevance): + self.updateTorrent(infohash, relevance=relevance) + + def updateTorrentRelevances(self, tid_rel_pairs, commit=True): + if len(tid_rel_pairs) > 0: + sql_update_sims = 'UPDATE Torrent SET relevance=? WHERE torrent_id=?' + self._db.executemany(sql_update_sims, tid_rel_pairs, commit=commit) + + def searchNames(self,kws): + """ Get all torrents (good and bad) that have the specified keywords in + their name. Return a list of dictionaries. Each dict is in the + NEWDBSTANDARD format. + @param kws A list of keyword strings + @return A list of dictionaries. + """ + + mypref_stats = self.mypref_db.getMyPrefStats() + + where = 'C.torrent_id = T.torrent_id and announce_tier=1' + for i in range(len(kws)): + kw = kws[i] + # Strip special chars. Note that s.translate() does special stuff for Unicode, which we don't want + cleankw = '' + for i in range(0,len(kw)): + c = kw[i] + if c.isalnum(): + cleankw += c + + where += ' and name like "%'+cleankw+'%"' + + value_name = copy(self.value_name) + if 'torrent_id' in value_name: + index = value_name.index('torrent_id') + value_name.remove('torrent_id') + value_name.insert(index, 'C.torrent_id') + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","torrent_db: searchNames: where",where + res_list = self._db.getAll('CollectedTorrent C, TorrentTracker T', value_name, where) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","torrent_db: searchNames: res",`res_list` + + torrent_list = self.valuelist2torrentlist(value_name,res_list,None,mypref_stats) + del res_list + del mypref_stats + + return torrent_list + + + def selectTorrentToCollect(self, permid, candidate_list=None): + """ select a torrent to collect from a given candidate list + If candidate_list is not present or None, all torrents of + this peer will be used for sampling. + Return: the infohashed of selected torrent + """ + + if candidate_list is None: + sql = """ + select infohash + from Torrent,Peer,Preference + where Peer.permid==? 
+ and Peer.peer_id==Preference.peer_id + and Torrent.torrent_id==Preference.torrent_id + and torrent_file_name is NULL + order by relevance desc + """ + permid_str = bin2str(permid) + res = self._db.fetchone(sql, (permid_str,)) + else: + cand_str = [bin2str(infohash) for infohash in candidate_list] + s = repr(cand_str).replace('[','(').replace(']',')') + sql = 'select infohash from Torrent where torrent_file_name is NULL and infohash in ' + s + sql += ' order by relevance desc' + res = self._db.fetchone(sql) + if res is None: + return None + return str2bin(res) + + def selectTorrentToCheck(self, policy='random', infohash=None, return_value=None): # for tracker checking + """ select a torrent to update tracker info (number of seeders and leechers) + based on the torrent checking policy. + RETURN: a dictionary containing all useful info. + + Policy 1: Random [policy='random'] + Randomly select a torrent to collect (last_check < 5 min ago) + + Policy 2: Oldest (unknown) first [policy='oldest'] + Select the non-dead torrent which was not been checked for the longest time (last_check < 5 min ago) + + Policy 3: Popular first [policy='popular'] + Select the non-dead most popular (3*num_seeders+num_leechers) one which has not been checked in last N seconds + (The default N = 4 hours, so at most 4h/torrentchecking_interval popular peers) + """ + + #import threading + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "****** selectTorrentToCheck", threading.currentThread().getName() + + if infohash is None: + # create a view? + sql = """select T.torrent_id, ignored_times, retried_times, torrent_file_name, infohash, status_id, num_seeders, num_leechers, last_check + from CollectedTorrent T, TorrentTracker TT + where TT.torrent_id=T.torrent_id and announce_tier=1 """ + if policy.lower() == 'random': + ntorrents = self.getNumberCollectedTorrents() + if ntorrents == 0: + rand_pos = 0 + else: + rand_pos = randint(0, ntorrents-1) + last_check_threshold = int(time()) - 300 + sql += """and last_check < %d + limit 1 offset %d """%(last_check_threshold, rand_pos) + elif policy.lower() == 'oldest': + last_check_threshold = int(time()) - 300 + sql += """ and last_check < %d and status_id <> 2 + order by last_check + limit 1 """%last_check_threshold + elif policy.lower() == 'popular': + last_check_threshold = int(time()) - 4*60*60 + sql += """ and last_check < %d and status_id <> 2 + order by 3*num_seeders+num_leechers desc + limit 1 """%last_check_threshold + res = self._db.fetchone(sql) + else: + sql = """select T.torrent_id, ignored_times, retried_times, torrent_file_name, infohash, status_id, num_seeders, num_leechers, last_check + from CollectedTorrent T, TorrentTracker TT + where TT.torrent_id=T.torrent_id and announce_tier=1 + and infohash=? + """ + infohash_str = bin2str(infohash) + res = self._db.fetchone(sql, (infohash_str,)) + + if res: + torrent_file_name = res[3] + torrent_dir = self.getTorrentDir() + torrent_path = os.path.join(torrent_dir, torrent_file_name) + if res is not None: + res = {'torrent_id':res[0], + 'ignored_times':res[1], + 'retried_times':res[2], + 'torrent_path':torrent_path, + 'infohash':str2bin(res[4]) + } + return_value['torrent'] = res + return_value['event'].set() + + + def getTorrentsFromSource(self,source): + """ Get all torrents from the specified Subscription source. + Return a list of dictionaries. Each dict is in the NEWDBSTANDARD format. 
+ """ + id = self._getSourceID(source) + + where = 'C.source_id = %d and C.torrent_id = T.torrent_id and announce_tier=1' % (id) + # add familyfilter + where += self.category.get_family_filter_sql(self._getCategoryID) + + value_name = deepcopy(self.value_name) + + res_list = self._db.getAll('Torrent C, TorrentTracker T', value_name, where) + + torrent_list = self.valuelist2torrentlist(value_name,res_list,None,None) + del res_list + + return torrent_list + + + def setSecret(self,infohash,secret): + kw = {'secret': secret} + self.updateTorrent(infohash, updateFlag=True, **kw) + + +class MyPreferenceDBHandler(BasicDBHandler): + + __single = None # used for multithreaded singletons pattern + lock = threading.Lock() + + def getInstance(*args, **kw): + # Singleton pattern with double-checking + if MyPreferenceDBHandler.__single is None: + MyPreferenceDBHandler.lock.acquire() + try: + if MyPreferenceDBHandler.__single is None: + MyPreferenceDBHandler(*args, **kw) + finally: + MyPreferenceDBHandler.lock.release() + return MyPreferenceDBHandler.__single + + getInstance = staticmethod(getInstance) + + def __init__(self): + if MyPreferenceDBHandler.__single is not None: + raise RuntimeError, "MyPreferenceDBHandler is singleton" + MyPreferenceDBHandler.__single = self + db = SQLiteCacheDB.getInstance() + BasicDBHandler.__init__(self,db, 'MyPreference') ## self,db,'MyPreference' + + self.status_table = {'good':1, 'unknown':0, 'dead':2} + self.status_table.update(self._db.getTorrentStatusTable()) + self.status_good = self.status_table['good'] + self.recent_preflist = None + self.recent_preflist_with_clicklog = None + self.rlock = threading.RLock() + + + def loadData(self): + self.rlock.acquire() + try: + self.recent_preflist = self._getRecentLivePrefList() + self.recent_preflist_with_clicklog = self._getRecentLivePrefListWithClicklog() + finally: + self.rlock.release() + + def getMyPrefList(self, order_by=None): + res = self.getAll('torrent_id', order_by=order_by) + return [p[0] for p in res] + + def getMyPrefListInfohash(self): + sql = 'select infohash from Torrent where torrent_id in (select torrent_id from MyPreference)' + res = self._db.fetchall(sql) + return [str2bin(p[0]) for p in res] + + def getMyPrefStats(self, torrent_id=None): + # get the full {torrent_id:(create_time,progress,destdir)} + value_name = ('torrent_id','creation_time','progress','destination_path') + if torrent_id is not None: + where = 'torrent_id=%s' % torrent_id + else: + where = None + res = self.getAll(value_name, where) + mypref_stats = {} + for pref in res: + torrent_id,creation_time,progress,destination_path = pref + mypref_stats[torrent_id] = (creation_time,progress,destination_path) + return mypref_stats + + def getCreationTime(self, infohash): + torrent_id = self._db.getTorrentID(infohash) + if torrent_id is not None: + ct = self.getOne('creation_time', torrent_id=torrent_id) + return ct + else: + return None + + def getRecentLivePrefListWithClicklog(self, num=0): + """returns OL 8 style preference list: a list of lists, with each of the inner lists + containing infohash, search terms, click position, and reranking strategy""" + + if self.recent_preflist_with_clicklog is None: + self.rlock.acquire() + try: + if self.recent_preflist_with_clicklog is None: + self.recent_preflist_with_clicklog = self._getRecentLivePrefListWithClicklog() + finally: + self.rlock.release() + if num > 0: + return self.recent_preflist_with_clicklog[:num] + else: + return self.recent_preflist_with_clicklog + + + def getRecentLivePrefList(self, 
num=0): + if self.recent_preflist is None: + self.rlock.acquire() + try: + if self.recent_preflist is None: + self.recent_preflist = self._getRecentLivePrefList() + finally: + self.rlock.release() + if num > 0: + return self.recent_preflist[:num] + else: + return self.recent_preflist + + + + def addClicklogToMyPreference(self, infohash, clicklog_data, commit=True): + torrent_id = self._db.getTorrentID(infohash) + clicklog_already_stored = False # equivalent to hasMyPreference TODO + if torrent_id is None or clicklog_already_stored: + return False + + d = {} + # copy those elements of the clicklog data which are used in the update command + for clicklog_key in ["click_position", "reranking_strategy"]: + if clicklog_key in clicklog_data: + d[clicklog_key] = clicklog_data[clicklog_key] + + if d=={}: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "no updatable information given to addClicklogToMyPreference" + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "addClicklogToMyPreference: updatable clicklog data: %s" % d + self._db.update(self.table_name, 'torrent_id=%d' % torrent_id, commit=commit, **d) + + # have keywords stored by SearchDBHandler + if 'keywords' in clicklog_data: + if not clicklog_data['keywords']==[]: + searchdb = SearchDBHandler.getInstance() + searchdb.storeKeywords(peer_id=0, + torrent_id=torrent_id, + terms=clicklog_data['keywords'], + commit=commit) + + + + + + + + + def _getRecentLivePrefListWithClicklog(self, num=0): + """returns a list containing a list for each torrent: [infohash, [seach terms], click position, reranking strategy]""" + + sql = """ + select infohash, click_position, reranking_strategy, m.torrent_id from MyPreference m, Torrent t + where m.torrent_id == t.torrent_id + and status_id == %d + order by creation_time desc + """ % self.status_good + + recent_preflist_with_clicklog = self._db.fetchall(sql) + if recent_preflist_with_clicklog is None: + recent_preflist_with_clicklog = [] + else: + recent_preflist_with_clicklog = [[str2bin(t[0]), + t[3], # insert search terms in next step, only for those actually required, store torrent id for now + t[1], # click position + t[2]] # reranking strategy + for t in recent_preflist_with_clicklog] + + if num != 0: + recent_preflist_with_clicklog = recent_preflist_with_clicklog[:num] + + # now that we only have those torrents left in which we are actually interested, + # replace torrent id by user's search terms for torrent id + termdb = TermDBHandler.getInstance() + searchdb = SearchDBHandler.getInstance() + for pref in recent_preflist_with_clicklog: + torrent_id = pref[1] + search_terms = searchdb.getMyTorrentSearchTerms(torrent_id) + pref[1] = [termdb.getTerm(search_term) for search_term in search_terms] + + return recent_preflist_with_clicklog + + + def _getRecentLivePrefList(self, num=0): # num = 0: all files + # get recent and live torrents + sql = """ + select infohash from MyPreference m, Torrent t + where m.torrent_id == t.torrent_id + and status_id == %d + order by creation_time desc + """ % self.status_good + + recent_preflist = self._db.fetchall(sql) + if recent_preflist is None: + recent_preflist = [] + else: + recent_preflist = [str2bin(t[0]) for t in recent_preflist] + + if num != 0: + return recent_preflist[:num] + else: + return recent_preflist + + def hasMyPreference(self, infohash): + torrent_id = self._db.getTorrentID(infohash) + if torrent_id is None: + return False + res = self.getOne('torrent_id', torrent_id=torrent_id) + if 
res is not None: + return True + else: + return False + + def addMyPreference(self, infohash, data, commit=True): + # keys in data: destination_path, progress, creation_time, torrent_id + torrent_id = self._db.getTorrentID(infohash) + if torrent_id is None or self.hasMyPreference(infohash): + # Arno, 2009-03-09: Torrent already exists in myrefs. + # Hack for hiding from lib while keeping in myprefs. + # see standardOverview.removeTorrentFromLibrary() + # + self.updateDestDir(infohash,data.get('destination_path'),commit=commit) + return False + d = {} + d['destination_path'] = data.get('destination_path') + d['progress'] = data.get('progress', 0) + d['creation_time'] = data.get('creation_time', int(time())) + d['torrent_id'] = torrent_id + self._db.insert(self.table_name, commit=commit, **d) + self.notifier.notify(NTFY_MYPREFERENCES, NTFY_INSERT, infohash) + self.rlock.acquire() + try: + if self.recent_preflist is None: + self.recent_preflist = self._getRecentLivePrefList() + else: + self.recent_preflist.insert(0, infohash) + finally: + self.rlock.release() + return True + + def deletePreference(self, infohash, commit=True): + # Arno: when deleting a preference, you may also need to do + # some stuff in BuddyCast: see delMyPref() + torrent_id = self._db.getTorrentID(infohash) + if torrent_id is None: + return + self._db.delete(self.table_name, commit=commit, **{'torrent_id':torrent_id}) + self.notifier.notify(NTFY_MYPREFERENCES, NTFY_DELETE, infohash) + self.rlock.acquire() + try: + if self.recent_preflist is not None and infohash in self.recent_preflist: + self.recent_preflist.remove(infohash) + finally: + self.rlock.release() + + + def updateProgress(self, infohash, progress, commit=True): + torrent_id = self._db.getTorrentID(infohash) + if torrent_id is None: + return + self._db.update(self.table_name, 'torrent_id=%d'%torrent_id, commit=commit, progress=progress) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '********* update progress', `infohash`, progress, commit + + def getAllEntries(self): + """use with caution,- for testing purposes""" + return self.getAll("torrent_id, click_position, reranking_strategy", order_by="torrent_id") + + def updateDestDir(self, infohash, destdir, commit=True): + torrent_id = self._db.getTorrentID(infohash) + if torrent_id is None: + return + self._db.update(self.table_name, 'torrent_id=%d'%torrent_id, commit=commit, destination_path=destdir) + + +# def getAllTorrentCoccurrence(self): +# # should be placed in PreferenceDBHandler, but put here to be convenient for TorrentCollecting +# sql = """select torrent_id, count(torrent_id) as coocurrency from Preference where peer_id in +# (select peer_id from Preference where torrent_id in +# (select torrent_id from MyPreference)) and torrent_id not in +# (select torrent_id from MyPreference) +# group by torrent_id +# """ +# coccurrence = dict(self._db.fetchall(sql)) +# return coccurrence + + +class BarterCastDBHandler(BasicDBHandler): + + __single = None # used for multithreaded singletons pattern + lock = threading.Lock() + + def getInstance(*args, **kw): + + if BarterCastDBHandler.__single is None: + BarterCastDBHandler.lock.acquire() + try: + if BarterCastDBHandler.__single is None: + BarterCastDBHandler(*args, **kw) + finally: + BarterCastDBHandler.lock.release() + return BarterCastDBHandler.__single + + getInstance = staticmethod(getInstance) + + def __init__(self): + BarterCastDBHandler.__single = self + db = SQLiteCacheDB.getInstance() + BasicDBHandler.__init__(self, db,'BarterCast') 
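
The getInstance()/__init__ pairs above all follow the same double-checked-locking singleton idiom; a condensed, self-contained sketch (modernised slightly: a with-block instead of explicit acquire/release, a decorator instead of the staticmethod(...) call, and an illustrative Handler class standing in for a concrete DB handler).

    import threading

    class Handler(object):                 # illustrative stand-in for a DB handler
        __single = None
        lock = threading.Lock()

        @staticmethod
        def getInstance(*args, **kw):
            # check once without the lock, then again under it, so calls after
            # the first construction return the cached instance without locking
            if Handler.__single is None:
                with Handler.lock:
                    if Handler.__single is None:
                        Handler(*args, **kw)   # __init__ registers __single
            return Handler.__single

        def __init__(self):
            if Handler.__single is not None:
                raise RuntimeError("Handler is singleton")
            Handler.__single = self

    assert Handler.getInstance() is Handler.getInstance()
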
## self,db,'BarterCast' + self.peer_db = PeerDBHandler.getInstance() + + # create the maxflow network + self.network = Network({}) + self.update_network() + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bartercastdb: MyPermid is ", self.my_permid + + + ##def registerSession(self, session): + ## self.session = session + + # Retrieve MyPermid + ## self.my_permid = session.get_permid() + + + def registerSession(self, session): + self.session = session + + # Retrieve MyPermid + self.my_permid = session.get_permid() + + if self.my_permid is None: + raise ValueError('Cannot get permid from Session') + + # Keep administration of total upload and download + # (to include in BarterCast message) + self.my_peerid = self.getPeerID(self.my_permid) + + if self.my_peerid != None: + where = "peer_id_from=%s" % (self.my_peerid) + item = self.getOne(('sum(uploaded)', 'sum(downloaded)'), where=where) + else: + item = None + + if item != None and len(item) == 2 and item[0] != None and item[1] != None: + self.total_up = int(item[0]) + self.total_down = int(item[1]) + else: + self.total_up = 0 + self.total_down = 0 + +# if DEBUG: +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "My reputation: ", self.getMyReputation() + + + def getTotals(self): + return (self.total_up, self.total_down) + + def getName(self, permid): + + if permid == 'non-tribler': + return "non-tribler" + elif permid == self.my_permid: + return "local_tribler" + + name = self.peer_db.getPeer(permid, 'name') + + if name == None or name == '': + return 'peer %s' % show_permid_shorter(permid) + else: + return name + + def getNameByID(self, peer_id): + permid = self.getPermid(peer_id) + return self.getName(permid) + + + def getPermid(self, peer_id): + + # by convention '-1' is the id of non-tribler peers + if peer_id == -1: + return 'non-tribler' + else: + return self.peer_db.getPermid(peer_id) + + + def getPeerID(self, permid): + + # by convention '-1' is the id of non-tribler peers + if permid == "non-tribler": + return -1 + else: + return self.peer_db.getPeerID(permid) + + def getItem(self, (permid_from, permid_to), default=False): + + # ARNODB: now converting back to dbid! just did reverse in getItemList + peer_id1 = self.getPeerID(permid_from) + peer_id2 = self.getPeerID(permid_to) + + if peer_id1 is None: + self._db.insertPeer(permid_from) # ARNODB: database write + peer_id1 = self.getPeerID(permid_from) # ARNODB: database write + + if peer_id2 is None: + self._db.insertPeer(permid_to) + peer_id2 = self.getPeerID(permid_to) + + return self.getItemByIDs((peer_id1,peer_id2),default=default) + + + def getItemByIDs(self, (peer_id_from, peer_id_to), default=False): + if peer_id_from is not None and peer_id_to is not None: + + where = "peer_id_from=%s and peer_id_to=%s" % (peer_id_from, peer_id_to) + item = self.getOne(('downloaded', 'uploaded', 'last_seen'), where=where) + + if item is None: + return None + + if len(item) != 3: + return None + + itemdict = {} + itemdict['downloaded'] = item[0] + itemdict['uploaded'] = item[1] + itemdict['last_seen'] = item[2] + itemdict['peer_id_from'] = peer_id_from + itemdict['peer_id_to'] = peer_id_to + + return itemdict + + else: + return None + + + def getItemList(self): # get the list of all peers' permid + + keys = self.getAll(('peer_id_from','peer_id_to')) + # ARNODB: this dbid -> permid translation is more efficiently done + # on the final top-N list. 
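
A small sketch of the peer-id convention used by getPermid()/getPeerID() above, where the reserved id -1 stands for the pseudo-permid 'non-tribler'; the plain dicts below are illustrative stand-ins for the PeerDBHandler lookups.

    NON_TRIBLER_ID = -1

    def to_permid(peer_id, peer_db):
        # peer_db: illustrative {peer_id: permid} mapping
        return 'non-tribler' if peer_id == NON_TRIBLER_ID else peer_db[peer_id]

    def to_peer_id(permid, permid_db):
        # permid_db: illustrative {permid: peer_id} mapping
        return NON_TRIBLER_ID if permid == 'non-tribler' else permid_db[permid]

    assert to_permid(-1, {}) == 'non-tribler'
    assert to_peer_id('non-tribler', {}) == -1
    assert to_permid(7, {7: 'PERMID_7'}) == 'PERMID_7'
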
+ keys = map(lambda (id_from, id_to): (self.getPermid(id_from), self.getPermid(id_to)), keys) + return keys + + + def addItem(self, (permid_from, permid_to), item, commit=True): + +# if value.has_key('last_seen'): # get the latest last_seen +# old_last_seen = 0 +# old_data = self.getPeer(permid) +# if old_data: +# old_last_seen = old_data.get('last_seen', 0) +# last_seen = value['last_seen'] +# value['last_seen'] = max(last_seen, old_last_seen) + + # get peer ids + peer_id1 = self.getPeerID(permid_from) + peer_id2 = self.getPeerID(permid_to) + + # check if they already exist in database; if not: add + if peer_id1 is None: + self._db.insertPeer(permid_from) + peer_id1 = self.getPeerID(permid_from) + if peer_id2 is None: + self._db.insertPeer(permid_to) + peer_id2 = self.getPeerID(permid_to) + + item['peer_id_from'] = peer_id1 + item['peer_id_to'] = peer_id2 + + self._db.insert(self.table_name, commit=commit, **item) + + def updateItem(self, (permid_from, permid_to), key, value, commit=True): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bartercastdb: update (%s, %s) [%s] += %s" % (self.getName(permid_from), self.getName(permid_to), key, str(value)) + + itemdict = self.getItem((permid_from, permid_to)) + + # if item doesn't exist: add it + if itemdict == None: + self.addItem((permid_from, permid_to), {'uploaded':0, 'downloaded': 0, 'last_seen': int(time())}, commit=True) + itemdict = self.getItem((permid_from, permid_to)) + + # get peer ids + peer_id1 = itemdict['peer_id_from'] + peer_id2 = itemdict['peer_id_to'] + + if key in itemdict.keys(): + + where = "peer_id_from=%s and peer_id_to=%s" % (peer_id1, peer_id2) + item = {key: value} + self._db.update(self.table_name, where = where, commit=commit, **item) + + def incrementItem(self, (permid_from, permid_to), key, value, commit=True): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bartercastdb: increment (%s, %s) [%s] += %s" % (self.getName(permid_from), self.getName(permid_to), key, str(value)) + + # adjust total_up and total_down + if permid_from == self.my_permid: + if key == 'uploaded': + self.total_up += int(value) + if key == 'downloaded': + self.total_down += int(value) + + itemdict = self.getItem((permid_from, permid_to)) + + # if item doesn't exist: add it + if itemdict == None: + self.addItem((permid_from, permid_to), {'uploaded':0, 'downloaded': 0, 'last_seen': int(time())}, commit=True) + itemdict = self.getItem((permid_from, permid_to)) + + # get peer ids + peer_id1 = itemdict['peer_id_from'] + peer_id2 = itemdict['peer_id_to'] + + if key in itemdict.keys(): + old_value = itemdict[key] + new_value = old_value + value + + where = "peer_id_from=%s and peer_id_to=%s" % (peer_id1, peer_id2) + + item = {key: new_value} + self._db.update(self.table_name, where = where, commit=commit, **item) + return new_value + + return None + + def addPeersBatch(self,permids): + """ Add unknown permids as batch -> single transaction """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bartercastdb: addPeersBatch: n=",len(permids) + + for permid in permids: + peer_id = self.getPeerID(permid) + # check if they already exist in database; if not: add + if peer_id is None: + self._db.insertPeer(permid,commit=False) + self._db.commit() + + def updateULDL(self, (permid_from, permid_to), ul, dl, commit=True): + """ Add ul/dl record to database as a single write """ + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 
"bartercastdb: updateULDL (%s, %s) ['ul'] += %s ['dl'] += %s" % (self.getName(permid_from), self.getName(permid_to), str(ul), str(dl)) + + itemdict = self.getItem((permid_from, permid_to)) + + # if item doesn't exist: add it + if itemdict == None: + itemdict = {'uploaded':ul, 'downloaded': dl, 'last_seen': int(time())} + self.addItem((permid_from, permid_to), itemdict, commit=commit) + return + + # get peer ids + peer_id1 = itemdict['peer_id_from'] + peer_id2 = itemdict['peer_id_to'] + + if 'uploaded' in itemdict.keys() and 'downloaded' in itemdict.keys(): + where = "peer_id_from=%s and peer_id_to=%s" % (peer_id1, peer_id2) + item = {'uploaded': ul, 'downloaded':dl} + self._db.update(self.table_name, where = where, commit=commit, **item) + + def getPeerIDPairs(self): + keys = self.getAll(('peer_id_from','peer_id_to')) + return keys + + def getTopNPeers(self, n, local_only = False): + """ + Return (sorted) list of the top N peers with the highest (combined) + values for the given keys. This version uses batched reads and peer_ids + in calculation + @return a dict containing a 'top' key with a list of (permid,up,down) + tuples, a 'total_up', 'total_down', 'tribler_up', 'tribler_down' field. + Sizes are in kilobytes. + """ + + # TODO: this won't scale to many interactions, as the size of the DB + # is NxN + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bartercastdb: getTopNPeers: local = ", local_only + #print_stack() + + n = max(1, n) + my_peer_id = self.getPeerID(self.my_permid) + total_up = {} + total_down = {} + # Arno, 2008-10-30: I speculate this is to count transfers only once, + # i.e. the DB stored (a,b) and (b,a) and we want to count just one. + + processed = Set() + + + value_name = '*' + increment = 500 + + nrecs = self.size() + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","NEXTtopN: size is",nrecs + + for offset in range(0,nrecs,increment): + if offset+increment > nrecs: + limit = nrecs-offset + else: + limit = increment + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","NEXTtopN: get",offset,limit + + reslist = self.getAll(value_name, offset=offset, limit=limit) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","NEXTtopN: res len is",len(reslist),`reslist` + for res in reslist: + (peer_id_from,peer_id_to,downloaded,uploaded,last_seen,value) = res + + if local_only: + if not (peer_id_to == my_peer_id or peer_id_from == my_peer_id): + # get only items of my local dealings + continue + + if (not (peer_id_to, peer_id_from) in processed) and (not peer_id_to == peer_id_from): + #if (not peer_id_to == peer_id_from): + + up = uploaded *1024 # make into bytes + down = downloaded *1024 + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bartercastdb: getTopNPeers: DB entry: (%s, %s) up = %d down = %d" % (self.getNameByID(peer_id_from), self.getNameByID(peer_id_to), up, down) + + processed.add((peer_id_from, peer_id_to)) + + # fix for multiple my_permids + if peer_id_from == -1: # 'non-tribler': + peer_id_to = my_peer_id + if peer_id_to == -1: # 'non-tribler': + peer_id_from = my_peer_id + + # process peer_id_from + total_up[peer_id_from] = total_up.get(peer_id_from, 0) + up + total_down[peer_id_from] = total_down.get(peer_id_from, 0) + down + + # process peer_id_to + total_up[peer_id_to] = total_up.get(peer_id_to, 0) + down + total_down[peer_id_to] = total_down.get(peer_id_to, 0) + up + + + # create top N peers + top = [] + min = 0 + + for peer_id in 
total_up.keys(): + + up = total_up[peer_id] + down = total_down[peer_id] + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bartercastdb: getTopNPeers: total of %s: up = %d down = %d" % (self.getName(peer_id), up, down) + + # we know rank on total upload? + value = up + + # check if peer belongs to current top N + if peer_id != -1 and peer_id != my_peer_id and (len(top) < n or value > min): + + top.append((peer_id, up, down)) + + # sort based on value + top.sort(cmp = lambda (p1, u1, d1), (p2, u2, d2): cmp(u2, u1)) + + # if list contains more than N elements: remove the last (=lowest value) + if len(top) > n: + del top[-1] + + # determine new minimum of values + min = top[-1][1] + + # Now convert to permid + permidtop = [] + for peer_id,up,down in top: + permid = self.getPermid(peer_id) + permidtop.append((permid,up,down)) + + result = {} + + result['top'] = permidtop + + # My total up and download, including interaction with non-tribler peers + result['total_up'] = total_up.get(my_peer_id, 0) + result['total_down'] = total_down.get(my_peer_id, 0) + + # My up and download with tribler peers only + result['tribler_up'] = result['total_up'] - total_down.get(-1, 0) # -1 = 'non-tribler' + result['tribler_down'] = result['total_down'] - total_up.get(-1, 0) # -1 = 'non-tribler' + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", result + + return result + + + ################################ + def update_network(self): + + + keys = self.getPeerIDPairs() #getItemList() + + + ################################ + def getMyReputation(self, alpha = ALPHA): + + rep = atan((self.total_up - self.total_down) * alpha)/(0.5 * pi) + return rep + + + + + + + +class ModerationCastDBHandler(BasicDBHandler): + + __single = None # used for multithreaded singletons pattern + lock = threading.Lock() + + def getInstance(*args, **kw): + + if ModerationCastDBHandler.__single is None: + ModerationCastDBHandler.lock.acquire() + try: + if ModerationCastDBHandler.__single is None: + ModerationCastDBHandler(*args, **kw) + finally: + ModerationCastDBHandler.lock.release() + return ModerationCastDBHandler.__single + + getInstance = staticmethod(getInstance) + + def __init__(self): + ModerationCastDBHandler.__single = self + try: + db = SQLiteCacheDB.getInstance() + BasicDBHandler.__init__(self,db,'ModerationCast') + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "modcast: DB made" + except: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "modcast: couldn't create DB table" + self.peer_db = PeerDBHandler.getInstance() + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "MODERATIONCAST: MyPermid is ", self.my_permid + + def registerSession(self, session): + self.session = session + self.my_permid = session.get_permid() + + def __len__(self): + return sum([db._size() for db in self.dbs]) + + def getAll(self): + sql = 'select * from ModerationCast' + records = self._db.fetchall(sql) + return records + + def getAllModerations(self, permid): + sql = 'select * from ModerationCast where mod_id==?' + records = self._db.fetchall(sql, (permid,)) + return records + + def getModeration(self, infohash): + #assert validInfohash(infohash) + sql = 'select * from ModerationCast where infohash==?' #and time_stamp in (select max(time_stamp) latest FROM ModerationCast where infohash==? 
group by infohash)' + item = self._db.fetchone(sql,(infohash,)) + return item + + + def hasModeration(self, infohash): + """ Returns True iff there is a moderation for infohash infohash """ + sql = 'select mod_id from ModerationCast where infohash==?' + item = self._db.fetchone(sql,(infohash,)) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","MCDB: hasModeration: infohash:",infohash," ; item:",item + if item is None: + return False + else: + return True + + def hasModerator(self, permid): + """ Returns True iff there is a moderator for PermID permid in the moderatorDB """ + sql = "Select mod_id from Moderators where mod_id==?" + args = permid + + item = self._db.fetchone(sql,(permid,)) + if item is None: + return False + else: + return True + + def getModerator(self, permid): + sql = 'select * from Moderators where mod_id==?'# + str(permid) + item = self._db.fetchone(sql,(permid,)) + return item + + def getModeratorPermids(self): + sql = 'select mod_id from Moderators' + item = self._db.fetchall(sql) + return item + + def getAllModerators(self): + sql = 'select * from Moderators' + item = self._db.fetchall(sql) + return item + + + def getVotedModerators(self): + sql = 'select * from Moderators where status != 0' + item = self._db.fetchall(sql) + return item + + + def getForwardModeratorPermids(self): + sql = 'select mod_id from Moderators where status==1' + permid_strs = self._db.fetchall(sql) + return permid_strs + + def getBlockedModeratorPermids(self): + sql = 'select mod_id from Moderators where status==-1' + item = self._db.fetchall(sql) + return item + #CALL VOTECAST TABLES and return the value + #return [permid for permid in self.moderator_db.getKeys() if permid['blocked']] + + def getTopModeratorPermids(self, top=10): + withmod = [permid for permid in self.moderator_db.getKeys() if permid.has_key('moderations') and permid['moderations'] != []] + + def topSort(moda, modb): + return len(moda['moderations'])-len(modb['moderations']) + + return withmod.sort(topSort)[0:top] + + def updateModeration(self, moderation): + assert type(moderation) == dict + assert moderation.has_key('time_stamp') and validTimestamp(moderation['time_stamp']) + assert moderation.has_key('mod_id') and validPermid(moderation['mod_id']) + self.validSignature(moderation) + infohash = moderation['infohash'] + moderator = moderation['mod_id'] + if self.hasModerator(moderator) and moderator in self.getBlockedModeratorPermids(): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Got moderation from blocked moderator", show_permid_short(moderator)+", hence we drop this moderation!" 
+ return + + if not self.hasModeration(infohash) or self.getModeration(infohash)[3] < moderation['time_stamp']: + self.addModeration(moderation) + + def addOwnModeration(self, mod, clone=False): + assert type(mod) == dict + assert mod.has_key('infohash') + assert validInfohash(mod['infohash']) + + moderation = mod + moderation['mod_name'] = self.session.get_nickname() + #Add current time as a timestamp + moderation['time_stamp'] = now() + moderation['mod_id'] = bin2str(self.my_permid) + #Add permid and signature: + self._sign(moderation) + + self.addModeration(moderation, clone=False) + + def addModeration(self, moderation, clone=True): + if self.hasModeration(moderation['infohash']): + if self.getModeration(moderation['infohash'])[3] < moderation['time_stamp']: + self.deleteModeration(moderation['infohash']) + else: + return + + self._db.insert(self.table_name, **moderation) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Moderation inserted:", repr(moderation) + + if self.getModeratorPermids() is None or not self.hasModerator(moderation['mod_id']): + new = {} + new['mod_id'] = moderation['mod_id'] + #change it later RAMEEZ + new['status'] = 0 + new['time_stamp'] = now() + self._db.insert('Moderators', **new) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "New Moderator inserted:", repr(new) + + def deleteModeration(self, infohash): + sql = 'Delete From ModerationCast where infohash==?' + self._db.execute_write(sql,(infohash,)) + + def deleteModerations(self, permid): + sql = 'Delete From ModerationCast where mod_id==?' + self._db.execute_write(sql,(permid,)) + + def deleteModerator(self, permid): + """ Deletes moderator with permid permid from database """ + sql = 'Delete From Moderators where mod_id==?' + self._db.execute_write(sql,(permid,)) + + self.deleteModerations(permid) + + def blockModerator(self, permid, blocked=True): + """ Blocks/unblocks moderator with permid permid """ + if blocked: + + self.deleteModerations(permid) + sql = 'Update Moderators set status = -1, time_stamp=' + str(now()) + ' where mod_id==?' + self._db.execute_write(sql,(permid,)) + else: + self.forwardModerator(permid) + + ################################ + def maxflow(self, peerid, max_distance = MAXFLOW_DISTANCE): + + self.update_network() + upflow = self.network.maxflow(peerid, self.my_peerid, max_distance) + downflow = self.network.maxflow(self.my_peerid, peerid, max_distance) + + return (upflow, downflow) + + ################################ + def getReputationByID(self, peerid, max_distance = MAXFLOW_DISTANCE, alpha = ALPHA): + + (upflow, downflow) = self.maxflow(peerid, max_distance) + rep = atan((upflow - downflow) * alpha)/(0.5 * pi) + return rep + + + ################################ + def getReputation(self, permid, max_distance = MAXFLOW_DISTANCE, alpha = ALPHA): + + peerid = self.getPeerID(permid) + return self.reputationByID(peerid, max_distance, alpha) + + + ################################ + def getMyReputation(self, alpha = ALPHA): + + rep = atan((self.total_up - self.total_down) * alpha)/(0.5 * pi) + return rep + + def forwardModerator(self, permid, forward=True): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Before updating Moderator's status..", repr(self.getModerator(permid)) + sql = 'Update Moderators set status = 1, time_stamp=' + str(now()) + ' where mod_id==?' 
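
The reputation computed by getReputationByID()/getMyReputation() above squashes the up/down flow difference into the open interval (-1, 1) with atan; a runnable sketch, using an illustrative alpha rather than the module's ALPHA constant.

    from math import atan, pi

    def reputation(upflow, downflow, alpha):
        # atan maps the (possibly huge) flow difference into (-pi/2, pi/2);
        # dividing by pi/2 rescales it to (-1, 1)
        return atan((upflow - downflow) * alpha) / (0.5 * pi)

    ALPHA_EXAMPLE = 0.001                         # illustrative only
    print(reputation(0, 0, ALPHA_EXAMPLE))        # 0.0   (balanced peer)
    print(reputation(10000, 0, ALPHA_EXAMPLE))    # ~0.94 (net uploader)
    print(reputation(0, 10000, ALPHA_EXAMPLE))    # ~-0.94 (net downloader)
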
+ self._db.execute_write(sql,(permid,)) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Updated Moderator's status..", repr(self.getModerator(permid)) + + def getName(self, permid): + + name = self.peer_db.getPeer(permid, 'name') + + if name == None or name == '': + return 'peer %s' % show_permid_shorter(permid) + else: + return name + + def getPermid(self, peer_id): + + # by convention '-1' is the id of non-tribler peers + if peer_id == -1: + return 'non-tribler' + else: + return self.peer_db.getPermid(peer_id) + + + def getPeerID(self, permid): + # by convention '-1' is the id of non-tribler peers + if permid == "non-tribler": + return -1 + else: + return self.peer_db.getPeerID(permid) + + + def hasPeer(self, permid): + return self.peer_db.hasPeer(permid) + + + def recentOwnModerations(self, nr=13): + """ Returns the most recent nr moderations (if existing) that you have created """ + + + #List of our moderations + if not self.hasModerator(bin2str(self.my_permid)): + return [] + + forwardable = self.getAllModerations(bin2str(self.my_permid)) + + #Sort the infohashes in this list based on timestamp + forwardable.sort(self._compareFunction) + + #Return most recent, forwardable, moderations (max nr) + return forwardable[0:nr] + + def randomOwnModerations(self, nr=12): + """ Returns nr random moderations (if existing) that you have created """ + + #List of our moderations + if not self.hasModerator(bin2str(self.my_permid)): + return [] + + forwardable = self.getAllModerations(bin2str(self.my_permid)) + + if len(forwardable) > nr: + #Return random subset of size nr + return sample(forwardable, nr) + else: + #Return complete set + return forwardable + + def recentModerations(self, nr=13): + """ Returns the most recent nr moderations (if existing), for moderators that you selected to forward for """ + forwardable = [] + + #Create a list of infohashes that we are willing to forward + keys = self.getModeratorPermids() + for key in keys: + moderator = self.getModerator(key[0]) + if moderator[1] == 1: + forwardable.extend(self.getAllModerations(key[0])) + + + #Sort the infohashes in this list based on timestamp + forwardable.sort(self._compareFunction) + + #Return most recent, forwardable, moderations (max nr) + return forwardable[0:nr] + + + + + def randomModerations(self, nr=12): + """ Returns nr random moderations (if existing), for moderators that you selected to forward for """ + forwardable = [] + + #Create a list of infohashes that we are willing to forward + keys = self.getModeratorPermids() + for key in keys: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "what is the average now baby?????????", key[0] + moderator = self.getModerator(key[0]) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "what is the average now my sooooonnnn", moderator[1] + if moderator[1] == 1: + forwardable.extend(self.getAllModerations(key[0])) + + if len(forwardable) > nr: + #Return random subset of size nr + return sample(forwardable, nr) + else: + #Return complete set + return forwardable + + + def getModerationInfohashes(self): + return self.moderation_db.getKeys() + + + def _compareFunction(self,moderationx,moderationy): + if moderationx[3] > moderationy[3]: + return 1 + if moderationx[3] == moderationy[3]: + return 0 + return -1 + + '''def _compareFunction(self,infohashx,infohashy): + """ Compare function to sort an infohash-list based on the moderation-timestamps """ + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", 
localtime())," ", "what's it all about ?????????????", infohashx[0], infohashy[0] + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "i am a great great man ;-)", infohashx,"?????????????",infohashy + tx = self.getModeration(infohashx[3]) + ty = self.getModeration(infohashy[3]) + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "i am a great great man ;-)", tx,"?????????????",ty + + if tx > ty: + return 1 + if tx == ty: + return 0 + return -1''' + + + def _sign(self, moderation): + assert moderation is not None + assert type(moderation) == dict + assert not moderation.has_key('signature') #This would corrupt the signature + moderation['mod_id'] = bin2str(self.my_permid) + bencoding = bencode(moderation) + moderation['signature'] = bin2str(sign_data(bencoding, self.session.keypair)) + + def validSignature(self,moderation): + blob = str2bin(moderation['signature']) + permid = str2bin(moderation['mod_id']) + #Plaintext excludes signature: + del moderation['signature'] + plaintext = bencode(moderation) + moderation['signature'] = bin2str(blob) + + r = verify_data(plaintext, permid, blob) + if not r: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","modcastdb: Invalid signature >>>>>>" + return r + + +#end moderation +class VoteCastDBHandler(BasicDBHandler): + + __single = None # used for multithreaded singletons pattern + lock = threading.Lock() + + def getInstance(*args, **kw): + + if VoteCastDBHandler.__single is None: + VoteCastDBHandler.lock.acquire() + try: + if VoteCastDBHandler.__single is None: + VoteCastDBHandler(*args, **kw) + finally: + VoteCastDBHandler.lock.release() + return VoteCastDBHandler.__single + + getInstance = staticmethod(getInstance) + + def __init__(self): + VoteCastDBHandler.__single = self + try: + db = SQLiteCacheDB.getInstance() + BasicDBHandler.__init__(self,db,'VoteCast') + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "votecast: DB made" + except: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "votecast: couldn't make the table" + + self.peer_db = PeerDBHandler.getInstance() + self.moderationcast_db = ModerationCastDBHandler.getInstance() + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "votecast: My permid is",`self.my_permid` + + def registerSession(self, session): + self.session = session + self.my_permid = session.get_permid() + + def __len__(self): + return sum([db._size() for db in self.dbs]) + + def getAllVotes(self, permid): + sql = 'select * from VoteCast where mod_id==?' 
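The getInstance()/lock idiom used by VoteCastDBHandler above, and by the other handlers in this patch, is double-checked locking: the lock is only taken when the singleton has not been created yet, and the class attribute is filled in as a side effect of __init__ rather than by getInstance() itself. A stripped-down sketch of the same pattern, with a hypothetical handler name and nothing Tribler-specific:

import threading

class ExampleDBHandler:    # hypothetical name, for illustration only
    __single = None        # holds the one and only instance
    lock = threading.Lock()

    def getInstance(*args, **kw):
        # The first check avoids locking on the common path; the second
        # check, under the lock, stops two threads racing past it.
        if ExampleDBHandler.__single is None:
            ExampleDBHandler.lock.acquire()
            try:
                if ExampleDBHandler.__single is None:
                    ExampleDBHandler(*args, **kw)   # __init__ stores the instance
            finally:
                ExampleDBHandler.lock.release()
        return ExampleDBHandler.__single
    getInstance = staticmethod(getInstance)

    def __init__(self):
        if ExampleDBHandler.__single is not None:
            raise RuntimeError("ExampleDBHandler is singleton")
        ExampleDBHandler.__single = self

handler = ExampleDBHandler.getInstance()
assert handler is ExampleDBHandler.getInstance()

Because getInstance() only returns what __init__ stored, constructing the class directly bypasses the lock; that is why most handlers in this patch raise RuntimeError on a second construction.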
+ + records = self._db.fetchall(sql, (permid,)) + return records + + def getAll(self): + sql = 'select * from VoteCast' + + records = self._db.fetchall(sql) + return records + + + def getAverageVotes(self): + moderators = self.moderationcast_db.getModeratorPermids() + if len(moderators) == 0: + return 0 + + total_votes = 0.0 + + for mod in moderators: + votes = self.getAllVotes(mod[0]) + total_votes += len(votes) + + + avg = total_votes/len(moderators) + return avg + + + def getAverageRank(self): + moderators = self.moderationcast_db.getModeratorPermids() + if len(moderators) == 0: + return 0 + avg = 0.0 + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "number of moderatosr has increased ", len(moderators) + for mod in moderators: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "moderators ####: ", mod + votes = self.getPosNegVotes(mod) + pos = votes[0] + neg = votes[1] + if pos + neg == 0: + rank = 0 + else: + rank = pos/(pos+neg) + avg +=rank + + value = avg/len(moderators) + return value + + def getPosNegVotes(self, permid): + sql = 'select * from VoteCast where mod_id==?' + + records = self._db.fetchall(sql, (permid[0],)) + pos_votes = 0 + neg_votes = 0 + + if records is None: + return(pos_votes,neg_votes) + + for vote in records: + + if vote[2] == "1": + pos_votes +=1 + else: + neg_votes +=1 + return (pos_votes, neg_votes) + + + def getAllVotesByVoter(self, permid): + #assert validInfohash(infohash) + sql = 'select * from VoteCast where voter_id==?' #and time_stamp in (select max(time_stamp) latest FROM ModerationCast where infohash==? group by infohash)' + item = self._db.fetchone(sql,(self.getPeerID(permid),)) + return item + + + def hasVote(self, permid, voter_peerid): + """ Returns True iff there is a moderation for infohash infohash """ + sql = 'select mod_id, voter_id from VoteCast where mod_id==? and voter_id==?' + item = self._db.fetchone(sql,(permid,voter_peerid,)) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","well well well",infohash," sdd",item + if item is None: + return False + else: + return True + + def getBallotBox(self): + sql = 'select * from VoteCast' + items = self._db.fetchall(sql) + return items + + + def getVote(self,permid,peerid): + sql = 'select * from VoteCast where mod_id==? and voter_id==?' + item = self._db.fetchone(sql,(permid,peerid,)) + return item + + def addVote(self, vote, clone=True): + vote['time_stamp'] = now() + if self.hasVote(vote['mod_id'],vote['voter_id']): + self.deleteVote(vote['mod_id'],vote['voter_id']) + self._db.insert(self.table_name, **vote) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Vote added:",repr(vote) + + def deleteVotes(self, permid): + sql = 'Delete From VoteCast where mod_id==?' + self._db.execute_write(sql,(permid,)) + + def deleteVote(self, permid, voter_id): + sql = 'Delete From VoteCast where mod_id==? and voter_id==?' 
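getAverageRank() above computes rank = pos/(pos+neg) from the integer counters returned by getPosNegVotes(); under Python 2 integer division that ratio truncates to 0 whenever at least one vote is negative, so a float cast is needed if a fractional approval rate is intended. A small sketch of the tally, assuming (as getPosNegVotes() does) that column 2 of a VoteCast row holds "1" for a positive vote:

def approval_rate(votes):
    # votes: list of VoteCast rows; column 2 holds "1" for a positive
    # vote, anything else counts as negative.
    pos = sum(1 for row in votes if row[2] == "1")
    neg = len(votes) - pos
    if pos + neg == 0:
        return 0.0
    # float() avoids Python 2 integer truncation (3/5 == 0, but 3.0/5 == 0.6)
    return float(pos) / (pos + neg)

assert approval_rate([]) == 0.0
assert approval_rate([(None, None, "1"), (None, None, "1"), (None, None, "-1")]) == 2.0 / 3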
+ self._db.execute_write(sql,(permid,voter_id,)) + + def getPermid(self, peer_id): + + # by convention '-1' is the id of non-tribler peers + if peer_id == -1: + return 'non-tribler' + else: + return self.peer_db.getPermid(peer_id) + + + def getPeerID(self, permid): + # by convention '-1' is the id of non-tribler peers + if permid == "non-tribler": + return -1 + else: + return self.peer_db.getPeerID(permid) + + + def hasPeer(self, permid): + return self.peer_db.hasPeer(permid) + + def recentVotes(self, nr=25): + """ Returns the most recent nr moderations (if existing), for moderators that you selected to forward for """ + forwardable = [] + + #Create a list of infohashes that we are willing to forward + keys = self.moderationcast_db.getVotedModerators() + + for key in keys: + forwardable.append(key) + + forwardable.sort(self._compareFunction) + return forwardable[0:nr] + + def randomVotes(self, nr=25): + """ Returns nr random moderations (if existing), for moderators that you selected to forward for """ + forwardable = [] + + #Create a list of infohashes that we are willing to forward + keys = self.moderationcast_db.getVotedModerators() + + for key in keys: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "votes i don't know ", key + forwardable.append(key) + + if len(forwardable) > nr: + #Return random subset of size nr + return sample(forwardable, nr) + else: + #Return complete set + return forwardable + + def _compareFunction(self,moderatorx, moderatory): + """ Compare function to sort an infohash-list based on the moderation-timestamps """ + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "what are you comparing", moderatorx,"sdfafdsfds", moderatory + + if moderatorx[2] > moderatory[2]: + return 1 + + if moderatorx[2] == moderatory[2]: + return 0 + return -1 + +#end votes + + + + +class GUIDBHandler: + """ All the functions of this class are only (or mostly) used by GUI. + It is not associated with any db table, but will use any of them + """ + + __single = None # used for multithreaded singletons pattern + lock = threading.Lock() + + def getInstance(*args, **kw): + # Singleton pattern with double-checking + if GUIDBHandler.__single is None: + GUIDBHandler.lock.acquire() + try: + if GUIDBHandler.__single is None: + GUIDBHandler(*args, **kw) + finally: + GUIDBHandler.lock.release() + return GUIDBHandler.__single + + getInstance = staticmethod(getInstance) + + def __init__(self): + if GUIDBHandler.__single is not None: + raise RuntimeError, "GUIDBHandler is singleton" + self._db = SQLiteCacheDB.getInstance() + self.notifier = Notifier.getInstance() + GUIDBHandler.__single = self + + def getCommonFiles(self, permid): + peer_id = self._db.getPeerID(permid) + if peer_id is None: + return [] + + sql_get_common_files = """select name from CollectedTorrent where torrent_id in ( + select torrent_id from Preference where peer_id=? + and torrent_id in (select torrent_id from MyPreference) + ) and status_id <> 2 + """ + self.get_family_filter_sql() + res = self._db.fetchall(sql_get_common_files, (peer_id,)) + return [t[0] for t in res] + + def getOtherFiles(self, permid): + peer_id = self._db.getPeerID(permid) + if peer_id is None: + return [] + + sql_get_other_files = """select infohash,name from CollectedTorrent where torrent_id in ( + select torrent_id from Preference where peer_id=? 
+ and torrent_id not in (select torrent_id from MyPreference) + ) and status_id <> 2 + """ + self.get_family_filter_sql() + res = self._db.fetchall(sql_get_other_files, (peer_id,)) + return [(str2bin(t[0]),t[1]) for t in res] + + def getSimItems(self, infohash, limit): + # recommendation based on collaborative filtering + torrent_id = self._db.getTorrentID(infohash) + if torrent_id is None: + return [] + + sql_get_sim_files = """ + select infohash, name, status_id, count(P2.torrent_id) c + from Preference as P1, Preference as P2, CollectedTorrent as T + where P1.peer_id=P2.peer_id and T.torrent_id=P2.torrent_id + and P2.torrent_id <> P1.torrent_id + and P1.torrent_id=? + and P2.torrent_id not in (select torrent_id from MyPreference) + %s + group by P2.torrent_id + order by c desc + limit ? + """ % self.get_family_filter_sql('T') + + res = self._db.fetchall(sql_get_sim_files, (torrent_id,limit)) + return [(str2bin(t[0]),t[1], t[2], t[3]) for t in res] + + def getSimilarTitles(self, name, limit, infohash, prefix_len=5): + # recommendation based on similar titles + name = name.replace("'","`") + sql_get_sim_files = """ + select infohash, name, status_id from Torrent + where name like '%s%%' + and infohash <> '%s' + and torrent_id not in (select torrent_id from MyPreference) + %s + order by name + limit ? + """ % (name[:prefix_len], bin2str(infohash), self.get_family_filter_sql()) + + res = self._db.fetchall(sql_get_sim_files, (limit,)) + return [(str2bin(t[0]),t[1], t[2]) for t in res] + + def _how_many_prefix(self): + """ test how long the prefix is enough to find similar titles """ + # Jie: I found 5 is the best value. + + sql = "select name from Torrent where name is not NULL order by name" + names = self._db.fetchall(sql) + + for top in range(3, 10): + sta = {} + for line in names: + prefix = line[0][:top] + if prefix not in sta: + sta[prefix] = 1 + else: + sta[prefix] += 1 + + res = [(v,k) for k,v in sta.items()] + res.sort() + res.reverse() + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '------------', top, '-------------' + for k in res[:10]: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", k + + def get_family_filter_sql(self, table_name=''): + torrent_db_handler = TorrentDBHandler.getInstance() + return torrent_db_handler.category.get_family_filter_sql(torrent_db_handler._getCategoryID, table_name=table_name) + + + +class TermDBHandler(BasicDBHandler): + + __single = None # used for multithreaded singletons pattern + lock = threading.Lock() + + def getInstance(*args, **kw): + # Singleton pattern with double-checking + if TermDBHandler.__single is None: + TermDBHandler.lock.acquire() + try: + if TermDBHandler.__single is None: + TermDBHandler(*args, **kw) + finally: + TermDBHandler.lock.release() + return TermDBHandler.__single + getInstance = staticmethod(getInstance) + + def __init__(self): + if TermDBHandler.__single is not None: + raise RuntimeError, "TermDBHandler is singleton" + TermDBHandler.__single = self + db = SQLiteCacheDB.getInstance() + BasicDBHandler.__init__(self,db, 'ClicklogTerm') + + + def getNumTerms(self): + """returns number of terms stored""" + return self.getOne("count(*)") + + + + def bulkInsertTerms(self, terms, commit=True): + for term in terms: + term_id = self.getTermIDNoInsert(term) + if not term_id: + self.insertTerm(term, commit=False) # this HAS to commit, otherwise last_insert_row_id() won't work. 
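getSimItems() above does item-based collaborative filtering in a single query: Preference is joined to itself on peer_id to find torrents downloaded by the same peers that downloaded the given torrent, the user's own downloads are excluded, and candidates are ranked by how many co-downloaders they share. A standalone sqlite3 sketch of the same self-join idea, on a simplified preference table rather than Tribler's real schema:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE preference (peer_id INTEGER, torrent_id INTEGER)")
conn.executemany("INSERT INTO preference VALUES (?, ?)",
                 [(1, 10), (1, 11), (2, 10), (2, 11), (2, 12), (3, 10), (3, 12)])

# "Peers who downloaded torrent 10 also downloaded ...", ranked by the
# number of co-downloaders and excluding torrent 10 itself.
rows = conn.execute("""
    SELECT p2.torrent_id, COUNT(p2.peer_id) AS c
    FROM preference p1 JOIN preference p2 ON p1.peer_id = p2.peer_id
    WHERE p1.torrent_id = ? AND p2.torrent_id <> p1.torrent_id
    GROUP BY p2.torrent_id
    ORDER BY c DESC
    LIMIT ?
""", (10, 5)).fetchall()

# Torrents 11 and 12 each share two co-downloaders with torrent 10.
assert sorted(rows) == [(11, 2), (12, 2)]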
+ # if you want to avoid committing too often, use bulkInsertTerm + if commit: + self.commit() + + def getTermIDNoInsert(self, term): + return self.getOne('term_id', term=term[:MAX_KEYWORD_LENGTH].lower()) + + def getTermID(self, term): + """returns the ID of term in table ClicklogTerm; creates a new entry if necessary""" + term_id = self.getTermIDNoInsert(term) + if term_id: + return term_id + else: + self.insertTerm(term, commit=True) # this HAS to commit, otherwise last_insert_row_id() won't work. + return self.getOne("last_insert_rowid()") + + def insertTerm(self, term, commit=True): + """creates a new entry for term in table Term""" + self._db.insert(self.table_name, commit=commit, term=term[:MAX_KEYWORD_LENGTH]) + + def getTerm(self, term_id): + """returns the term for a given term_id""" + return self.getOne("term", term_id=term_id) + # if term_id==-1: + # return "" + # term = self.getOne('term', term_id=term_id) + # try: + # return str2bin(term) + # except: + # return term + + def getTermsStartingWith(self, beginning, num=10): + """returns num most frequently encountered terms starting with beginning""" + + # request twice the amount of hits because we need to apply + # the familiy filter... + terms = self.getAll('term', + term=("like", u"%s%%" % beginning), + order_by="times_seen DESC", + limit=num * 2) + + if terms: + # terms is a list containing lists. We only want the first + # item of the inner lists. + terms = [term for (term,) in terms] + + catobj = Category.getInstance() + if catobj.family_filter_enabled(): + return filter(lambda term: not catobj.xxx_filter.foundXXXTerm(term), terms)[:num] + else: + return terms[:num] + + else: + return [] + + def getAllEntries(self): + """use with caution,- for testing purposes""" + return self.getAll("term_id, term", order_by="term_id") + + +class SearchDBHandler(BasicDBHandler): + + __single = None # used for multithreaded singletons pattern + lock = threading.Lock() + + def getInstance(*args, **kw): + # Singleton pattern with double-checking + if SearchDBHandler.__single is None: + SearchDBHandler.lock.acquire() + try: + if SearchDBHandler.__single is None: + SearchDBHandler(*args, **kw) + finally: + SearchDBHandler.lock.release() + return SearchDBHandler.__single + getInstance = staticmethod(getInstance) + + def __init__(self): + if SearchDBHandler.__single is not None: + raise RuntimeError, "SearchDBHandler is singleton" + SearchDBHandler.__single = self + db = SQLiteCacheDB.getInstance() + BasicDBHandler.__init__(self,db, 'ClicklogSearch') ## self,db,'Search' + + + ### write methods + + def storeKeywordsByID(self, peer_id, torrent_id, term_ids, commit=True): + sql_insert_search = u"INSERT INTO ClicklogSearch (peer_id, torrent_id, term_id, term_order) values (?, ?, ?, ?)" + + if len(term_ids)>MAX_KEYWORDS_STORED: + term_ids= term_ids[0:MAX_KEYWORDS_STORED] + + # TODO before we insert, we should delete all potentially existing entries + # with these exact values + # otherwise, some strange attacks might become possible + # and again we cannot assume that user/torrent/term only occurs once + + # create insert data + values = [(peer_id, torrent_id, term_id, term_order) + for (term_id, term_order) + in zip(term_ids, range(len(term_ids)))] + self._db.executemany(sql_insert_search, values, commit=commit) + + # update term popularity + sql_update_term_popularity= u"UPDATE ClicklogTerm SET times_seen = times_seen+1 WHERE term_id=?" 
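getTermID() above is a get-or-create: look the term up and, if it is missing, insert it and read back SELECT last_insert_rowid(). A self-contained sqlite3 sketch of the same pattern; it uses cursor.lastrowid, which exposes the rowid of the just-inserted row on the same connection (the schema and truncation length below are illustrative, not Tribler's):

import sqlite3

MAX_KEYWORD_LENGTH = 30   # illustrative limit, mirroring the truncation above

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE term (term_id INTEGER PRIMARY KEY AUTOINCREMENT,"
             " term TEXT UNIQUE, times_seen INTEGER DEFAULT 0)")

def get_term_id(conn, term):
    # Normalise exactly like the handler: truncate and lower-case.
    term = term[:MAX_KEYWORD_LENGTH].lower()
    row = conn.execute("SELECT term_id FROM term WHERE term = ?", (term,)).fetchone()
    if row:
        return row[0]
    cur = conn.execute("INSERT INTO term (term) VALUES (?)", (term,))
    return cur.lastrowid   # rowid of the row this cursor just inserted

first = get_term_id(conn, "Ubuntu")
assert get_term_id(conn, "ubuntu") == first   # lookups are case-insensitive via lower()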
+ self._db.executemany(sql_update_term_popularity, [[term_id] for term_id in term_ids], commit=commit) + + def storeKeywords(self, peer_id, torrent_id, terms, commit=True): + """creates a single entry in Search with peer_id and torrent_id for every term in terms""" + terms = [term.strip() for term in terms if len(term.strip())>0] + term_db = TermDBHandler.getInstance() + term_ids = [term_db.getTermID(term) for term in terms] + self.storeKeywordsByID(peer_id, torrent_id, term_ids, commit) + + def getAllEntries(self): + """use with caution,- for testing purposes""" + return self.getAll("rowid, peer_id, torrent_id, term_id, term_order ", order_by="rowid") + + def getAllOwnEntries(self): + """use with caution,- for testing purposes""" + return self.getAll("rowid, peer_id, torrent_id, term_id, term_order ", where="peer_id=0", order_by="rowid") + + + + ### read methods + + def getNumTermsPerTorrent(self, torrent_id): + """returns the number of terms associated with a given torrent""" + return self.getOne("COUNT (DISTINCT term_id)", torrent_id=torrent_id) + + def getNumTorrentsPerTerm(self, term_id): + """returns the number of torrents stored with a given term.""" + return self.getOne("COUNT (DISTINCT torrent_id)", term_id=term_id) + + def getNumTorrentTermCooccurrences(self, term_id, torrent_id): + """returns the number of times a torrent has been associated with a term""" + return self.getOne("COUNT (*)", term_id=term_id, torrent_id=torrent_id) + + def getRelativeTermFrequency(self, term_id, torrent_id): + """returns the relative importance of a term for a torrent + This is basically tf/idf + term frequency tf = # keyword used per torrent/# keywords used with torrent at all + inverse document frequency = # of torrents associated with term at all + + normalization in tf ensures that a torrent cannot get most important for all keywords just + by, e.g., poisoning the db with a lot of keywords for this torrent + idf normalization ensures that returned values are meaningful across several keywords + """ + + terms_per_torrent = self.getNumTermsPerTorrent(torrent_id) + if terms_per_torrent==0: + return 0 + + torrents_per_term = self.getNumTorrentsPerTerm(term_id) + if torrents_per_term == 0: + return 0 + + coocc = self.getNumTorrentTermCooccurrences(term_id, torrent_id) + + tf = coocc/float(terms_per_torrent) + idf = 1.0/math.log(torrents_per_term+1) + + return tf*idf + + + def getTorrentSearchTerms(self, torrent_id, peer_id): + return self.getAll("term_id", "torrent_id=%d AND peer_id=%s" % (torrent_id, peer_id), order_by="term_order") + + def getMyTorrentSearchTerms(self, torrent_id): + return [x[0] for x in self.getTorrentSearchTerms(torrent_id, peer_id=0)] + + + ### currently unused + + def numSearchesWithTerm(self, term_id): + """returns the number of searches stored with a given term. + I feel like I might miss something, but this should simply be the number of rows containing + the term""" + return self.getOne("COUNT (*)", term_id=term_id) + + def getNumTorrentPeers(self, torrent_id): + """returns the number of users for a given torrent. 
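getRelativeTermFrequency() combines a normalised term frequency with an inverse document frequency: tf = cooccurrences / terms_per_torrent keeps a torrent from dominating just by having many keywords attached, and idf = 1 / log(torrents_per_term + 1) discounts terms that are attached to many torrents. A small worked example with made-up counts:

import math

def relative_term_frequency(coocc, terms_per_torrent, torrents_per_term):
    # Mirrors the tf*idf combination described above; returns 0 when
    # either count is missing, as the handler does.
    if terms_per_torrent == 0 or torrents_per_term == 0:
        return 0.0
    tf = coocc / float(terms_per_torrent)
    idf = 1.0 / math.log(torrents_per_term + 1)
    return tf * idf

# A term covering 2 of a torrent's 10 keywords but attached to 99 torrents
# overall scores lower than a rarer term with the same term frequency.
common = relative_term_frequency(2, 10, 99)   # ~ 0.2 / log(100) ~= 0.043
rare = relative_term_frequency(2, 10, 4)      # ~ 0.2 / log(5)   ~= 0.124
assert rare > common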
if this should be used + extensively, an index on torrent_id might be in order""" + return self.getOne("COUNT (DISTINCT peer_id)", torrent_id=torrent_id) + + def removeKeywords(self, peer_id, torrent_id, commit=True): + """removes records of keywords used by peer_id to find torrent_id""" + # TODO + # would need to be called by deletePreference + pass + + + + +def doPeerSearchNames(self,dbname,kws): + """ Get all peers that have the specified keywords in their name. + Return a list of dictionaries. Each dict is in the NEWDBSTANDARD format. + """ + if dbname == 'Peer': + where = '(Peer.last_connected>0 or Peer.friend=1) and ' + elif dbname == 'Friend': + where = '' + else: + raise Exception('unknown dbname: %s' % dbname) + + # Must come before query + ranks = self.getRanks() + + for i in range(len(kws)): + kw = kws[i] + where += ' name like "%'+kw+'%"' + if (i+1) != len(kws): + where += ' and' + + # See getGUIPeers() + value_name = PeerDBHandler.gui_value_name + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","peer_db: searchNames: sql",where + res_list = self._db.getAll(dbname, value_name, where) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","peer_db: searchNames: res",res_list + + peer_list = [] + for item in res_list: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","peer_db: searchNames: Got Record",`item` + peer = dict(zip(value_name, item)) + peer['name'] = dunno2unicode(peer['name']) + peer['simRank'] = ranksfind(ranks,peer['permid']) + peer['permid'] = str2bin(peer['permid']) + peer_list.append(peer) + return peer_list + +def ranksfind(ranks,key): + if ranks is None: + return -1 + try: + return ranks.index(key)+1 + except: + return -1 + + + + diff --git a/tribler-mod/Tribler/Core/CacheDB/SqliteFriendshipStatsCacheDB.py b/tribler-mod/Tribler/Core/CacheDB/SqliteFriendshipStatsCacheDB.py new file mode 100644 index 0000000..669f5ae --- /dev/null +++ b/tribler-mod/Tribler/Core/CacheDB/SqliteFriendshipStatsCacheDB.py @@ -0,0 +1,201 @@ +from time import localtime, strftime +# Written by Ali Abbas +# see LICENSE.txt for license information + +import sys +import os +import threading + +from Tribler.__init__ import LIBRARYNAME +from Tribler.Core.CacheDB.sqlitecachedb import * +from Tribler.Core.CacheDB.SqliteCacheDBHandler import BasicDBHandler + +CREATE_FRIENDSHIP_STATS_SQL_FILE = None +CREATE_FRIENDSHIP_STATS_SQL_FILE_POSTFIX = os.path.join(LIBRARYNAME, 'Core', 'Statistics', 'tribler_friendship_stats_sdb.sql') +DB_FILE_NAME = 'tribler_friendship_stats.sdb' +DB_DIR_NAME = 'sqlite' # db file path = DB_DIR_NAME/DB_FILE_NAME +CURRENT_DB_VERSION = 2 + +DEBUG = False + +def init_friendship_stats(config, db_exception_handler = None): + """ create friendship statistics database """ + global CREATE_FRIENDSHIP_STATS_SQL_FILE + config_dir = config['state_dir'] + install_dir = config['install_dir'] + CREATE_FRIENDSHIP_STATS_SQL_FILE = os.path.join(install_dir,CREATE_FRIENDSHIP_STATS_SQL_FILE_POSTFIX) + sqlitedb = SQLiteFriendshipStatsCacheDB.getInstance(db_exception_handler) + sqlite_db_path = os.path.join(config_dir, DB_DIR_NAME, DB_FILE_NAME) + sqlitedb.initDB(sqlite_db_path, CREATE_FRIENDSHIP_STATS_SQL_FILE,current_db_version=CURRENT_DB_VERSION) # the first place to create db in Tribler + return sqlitedb + + +class FSCacheDBBaseV2(SQLiteCacheDBBase): + """ See Tribler/Core/Statistics/tribler_friendship_stats_sdb.sql + for a description of the various versions + """ + + def updateDB(self,fromver,tover): + print >>sys.stderr, 
strftime("%d-%m-%Y %H:%M:%S", localtime())," ","fscachedb2: Upgrading",fromver,tover + if fromver == 1 and tover == 2: + # Do ALTER TABLE stuff to add crawler_permid field. + sql = "ALTER TABLE FriendshipStatistics ADD COLUMN crawled_permid TEXT DEFAULT client NOT NULL;" + self.execute_write(sql, commit=False) + # updating version stepwise so if this works, we store it + # regardless of later, potentially failing updates + self.writeDBVersion(2, commit=False) + self.commit() + + +class SQLiteFriendshipStatsCacheDB(FSCacheDBBaseV2): + __single = None # used for multithreaded singletons pattern + lock = threading.RLock() + + @classmethod + def getInstance(cls, *args, **kw): + # Singleton pattern with double-checking to ensure that it can only create one object + if cls.__single is None: + cls.lock.acquire() + try: + if cls.__single is None: + cls.__single = cls(*args, **kw) + finally: + cls.lock.release() + return cls.__single + + def __init__(self, *args, **kw): + # always use getInstance() to create this object + if self.__single != None: + raise RuntimeError, "SQLiteFriendshipStatsCacheDB is singleton" + + FSCacheDBBaseV2.__init__(self, *args, **kw) + + + +class FriendshipStatisticsDBHandler(BasicDBHandler): + + __single = None # used for multi-threaded singletons pattern + lock = threading.Lock() + + def getInstance(*args, **kw): + # Singleton pattern with double-checking + if FriendshipStatisticsDBHandler.__single is None: + FriendshipStatisticsDBHandler.lock.acquire() + try: + if FriendshipStatisticsDBHandler.__single is None: + FriendshipStatisticsDBHandler(*args, **kw) + finally: + FriendshipStatisticsDBHandler.lock.release() + return FriendshipStatisticsDBHandler.__single + + getInstance = staticmethod(getInstance) + + def __init__(self): + if FriendshipStatisticsDBHandler.__single is not None: + raise RuntimeError, "FriendshipStatisticsDBHandler is singleton" + FriendshipStatisticsDBHandler.__single = self + db = SQLiteFriendshipStatsCacheDB.getInstance() + BasicDBHandler.__init__(self, db, 'FriendshipStatistics') + #BasicDBHandler.__init__(self, 'Peer') + #self.tableName = 'FriendshipStatistics' + + + def getAllFriendshipStatistics(self, permid, last_update_time = None, range = None, sort = None, reverse = False): + + """ + db keys: 'source_permid', 'target_permid', 'isForwarder', 'request_time', 'response_time', + 'no_of_attempts', 'no_of_helpers' + + @in: get_online: boolean: if true, give peers a key 'online' if there is a connection now + """ + + value_name = ('source_permid', 'target_permid', 'isForwarder', 'request_time', 'response_time', 'no_of_attempts', + 'no_of_helpers', 'modified_on') + where = 'request_time > '+str(last_update_time) # source_permid done below + + if range: + offset= range[0] + limit = range[1] - range[0] + else: + limit = offset = None + if sort: + desc = (not reverse) and 'desc' or '' + if sort in ('name'): + order_by = ' lower(%s) %s' % (sort, desc) + else: + order_by = ' %s %s' % (sort, desc) + else: + order_by = None + + permidstr = bin2str(permid) + res_list = self.getAll(value_name, where=where, offset= offset, limit=limit, order_by=order_by, source_permid=permidstr) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","FriendshipStatisticsDBHandler: getAll: result is",res_list + + return res_list + + def saveFriendshipStatisticData (self, data): + + self._db.insertMany('FriendshipStatistics', data) + + def insertFriendshipStatistics(self, my_permid, target_permid, current_time, isForwarder = 0, no_of_attempts = 0, 
no_of_helpers = 0, commit = True): + +# db keys: 'source_permid', 'target_permid', 'isForwarder', 'request_time', 'response_time', +# 'no_of_attempts', 'no_of_helpers' +# self._db.insert(self.table_name, entry=key, value=value) + + sql_insert_friendstatistics = "INSERT INTO FriendshipStatistics (source_permid, target_permid, isForwarder, request_time, response_time, no_of_attempts, no_of_helpers, modified_on) VALUES ('"+my_permid+"','"+target_permid+"',"+str(isForwarder)+","+str(current_time)+", 0 , "+str(no_of_attempts)+","+str(no_of_helpers)+","+str(current_time)+")" + + self._db.execute_write(sql_insert_friendstatistics,commit=commit) + + def updateFriendshipStatistics(self, my_permid, target_permid, current_time, isForwarder = 0, no_of_attempts = 0, no_of_helpers = 0, commit = True): + + sql_insert_friendstatistics = "UPDATE FriendshipStatistics SET request_time = "+str(current_time) +", no_of_attempts = "+str(no_of_attempts)+", no_of_helpers = "+str(no_of_helpers)+", modified_on = "+str(current_time)+" where source_permid = '"+my_permid+"' and target_permid = '"+target_permid+"'" + + self._db.execute_write(sql_insert_friendstatistics,commit=commit) + + def updateFriendshipResponseTime(self, my_permid, target_permid, current_time, commit = True): + + + sql_insert_friendstatistics = "UPDATE FriendshipStatistics SET response_time = "+str(current_time)+ ", modified_on = "+str(current_time)+" where source_permid = '"+my_permid+"' and target_permid = '"+target_permid+"'" + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", sql_insert_friendstatistics + + self._db.execute_write(sql_insert_friendstatistics,commit=commit) + + def insertOrUpdateFriendshipStatistics(self, my_permid, target_permid, current_time, isForwarder = 0, no_of_attempts = 0, no_of_helpers = 0, commit = True): + +# sql_entry_exists_of_the_peer = "SELECT souce_permid FROM FriendshipStatistics where source_permid = " + my_permid + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'Friendship record being inserted of permid' + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", target_permid + res = self._db.getOne('FriendshipStatistics', 'target_permid', target_permid=target_permid) + + if not res: + sql_insert_friendstatistics = "INSERT INTO FriendshipStatistics (source_permid, target_permid, isForwarder, request_time, response_time, no_of_attempts, no_of_helpers, modified_on) VALUES ('"+my_permid+"','"+target_permid+"',"+str(isForwarder)+","+str(current_time)+", 0 , "+str(no_of_attempts)+","+str(no_of_helpers)+","+str(current_time)+")" + else: + sql_insert_friendstatistics = "UPDATE FriendshipStatistics SET no_of_attempts = "+str(no_of_attempts)+", no_of_helpers = "+str(no_of_helpers)+", modified_on = "+str(current_time)+" where source_permid = '"+my_permid+"' and target_permid = '"+target_permid+"'" + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'result is ', res + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", sql_insert_friendstatistics + + try: + self._db.execute_write(sql_insert_friendstatistics,commit=commit) + except: + print >> sys.stderr + + + def getLastUpdateTimeOfThePeer(self, permid): + + res = self._db.getAll('FriendshipStatistics', 'source_permid', order_by='modified_on desc', limit=1) + + if not res: + return 0 + else: + # todo! + return 0 # bug??? 
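insertOrUpdateFriendshipStatistics() above splices permids and counters directly into the SQL text. The same check-then-insert-or-update flow can be written with bound parameters, as other handlers in this patch do with execute_write(sql, (args,)), which sidesteps quoting problems if a value ever contains a quote character. A standalone sqlite3 sketch on a simplified table (column set is illustrative):

import sqlite3
import time

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE friendship_stats (source_permid TEXT, target_permid TEXT,"
             " no_of_attempts INTEGER, no_of_helpers INTEGER, modified_on INTEGER)")

def insert_or_update(conn, source, target, attempts, helpers):
    now = int(time.time())
    row = conn.execute("SELECT 1 FROM friendship_stats"
                       " WHERE source_permid = ? AND target_permid = ?",
                       (source, target)).fetchone()
    if row is None:
        conn.execute("INSERT INTO friendship_stats VALUES (?, ?, ?, ?, ?)",
                     (source, target, attempts, helpers, now))
    else:
        conn.execute("UPDATE friendship_stats SET no_of_attempts = ?, no_of_helpers = ?,"
                     " modified_on = ? WHERE source_permid = ? AND target_permid = ?",
                     (attempts, helpers, now, source, target))

insert_or_update(conn, "permid-a", "permid-b", 1, 0)
insert_or_update(conn, "permid-a", "permid-b", 2, 1)
assert conn.execute("SELECT no_of_attempts FROM friendship_stats").fetchall() == [(2,)]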
res['modified_on'] + + diff --git a/tribler-mod/Tribler/Core/CacheDB/SqliteFriendshipStatsCacheDB.py.bak b/tribler-mod/Tribler/Core/CacheDB/SqliteFriendshipStatsCacheDB.py.bak new file mode 100644 index 0000000..09f5462 --- /dev/null +++ b/tribler-mod/Tribler/Core/CacheDB/SqliteFriendshipStatsCacheDB.py.bak @@ -0,0 +1,200 @@ +# Written by Ali Abbas +# see LICENSE.txt for license information + +import sys +import os +import threading + +from Tribler.__init__ import LIBRARYNAME +from Tribler.Core.CacheDB.sqlitecachedb import * +from Tribler.Core.CacheDB.SqliteCacheDBHandler import BasicDBHandler + +CREATE_FRIENDSHIP_STATS_SQL_FILE = None +CREATE_FRIENDSHIP_STATS_SQL_FILE_POSTFIX = os.path.join(LIBRARYNAME, 'Core', 'Statistics', 'tribler_friendship_stats_sdb.sql') +DB_FILE_NAME = 'tribler_friendship_stats.sdb' +DB_DIR_NAME = 'sqlite' # db file path = DB_DIR_NAME/DB_FILE_NAME +CURRENT_DB_VERSION = 2 + +DEBUG = False + +def init_friendship_stats(config, db_exception_handler = None): + """ create friendship statistics database """ + global CREATE_FRIENDSHIP_STATS_SQL_FILE + config_dir = config['state_dir'] + install_dir = config['install_dir'] + CREATE_FRIENDSHIP_STATS_SQL_FILE = os.path.join(install_dir,CREATE_FRIENDSHIP_STATS_SQL_FILE_POSTFIX) + sqlitedb = SQLiteFriendshipStatsCacheDB.getInstance(db_exception_handler) + sqlite_db_path = os.path.join(config_dir, DB_DIR_NAME, DB_FILE_NAME) + sqlitedb.initDB(sqlite_db_path, CREATE_FRIENDSHIP_STATS_SQL_FILE,current_db_version=CURRENT_DB_VERSION) # the first place to create db in Tribler + return sqlitedb + + +class FSCacheDBBaseV2(SQLiteCacheDBBase): + """ See Tribler/Core/Statistics/tribler_friendship_stats_sdb.sql + for a description of the various versions + """ + + def updateDB(self,fromver,tover): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","fscachedb2: Upgrading",fromver,tover + if fromver == 1 and tover == 2: + # Do ALTER TABLE stuff to add crawler_permid field. 
+ sql = "ALTER TABLE FriendshipStatistics ADD COLUMN crawled_permid TEXT DEFAULT client NOT NULL;" + self.execute_write(sql, commit=False) + # updating version stepwise so if this works, we store it + # regardless of later, potentially failing updates + self.writeDBVersion(2, commit=False) + self.commit() + + +class SQLiteFriendshipStatsCacheDB(FSCacheDBBaseV2): + __single = None # used for multithreaded singletons pattern + lock = threading.RLock() + + @classmethod + def getInstance(cls, *args, **kw): + # Singleton pattern with double-checking to ensure that it can only create one object + if cls.__single is None: + cls.lock.acquire() + try: + if cls.__single is None: + cls.__single = cls(*args, **kw) + finally: + cls.lock.release() + return cls.__single + + def __init__(self, *args, **kw): + # always use getInstance() to create this object + if self.__single != None: + raise RuntimeError, "SQLiteFriendshipStatsCacheDB is singleton" + + FSCacheDBBaseV2.__init__(self, *args, **kw) + + + +class FriendshipStatisticsDBHandler(BasicDBHandler): + + __single = None # used for multi-threaded singletons pattern + lock = threading.Lock() + + def getInstance(*args, **kw): + # Singleton pattern with double-checking + if FriendshipStatisticsDBHandler.__single is None: + FriendshipStatisticsDBHandler.lock.acquire() + try: + if FriendshipStatisticsDBHandler.__single is None: + FriendshipStatisticsDBHandler(*args, **kw) + finally: + FriendshipStatisticsDBHandler.lock.release() + return FriendshipStatisticsDBHandler.__single + + getInstance = staticmethod(getInstance) + + def __init__(self): + if FriendshipStatisticsDBHandler.__single is not None: + raise RuntimeError, "FriendshipStatisticsDBHandler is singleton" + FriendshipStatisticsDBHandler.__single = self + db = SQLiteFriendshipStatsCacheDB.getInstance() + BasicDBHandler.__init__(self, db, 'FriendshipStatistics') + #BasicDBHandler.__init__(self, 'Peer') + #self.tableName = 'FriendshipStatistics' + + + def getAllFriendshipStatistics(self, permid, last_update_time = None, range = None, sort = None, reverse = False): + + """ + db keys: 'source_permid', 'target_permid', 'isForwarder', 'request_time', 'response_time', + 'no_of_attempts', 'no_of_helpers' + + @in: get_online: boolean: if true, give peers a key 'online' if there is a connection now + """ + + value_name = ('source_permid', 'target_permid', 'isForwarder', 'request_time', 'response_time', 'no_of_attempts', + 'no_of_helpers', 'modified_on') + where = 'request_time > '+str(last_update_time) # source_permid done below + + if range: + offset= range[0] + limit = range[1] - range[0] + else: + limit = offset = None + if sort: + desc = (not reverse) and 'desc' or '' + if sort in ('name'): + order_by = ' lower(%s) %s' % (sort, desc) + else: + order_by = ' %s %s' % (sort, desc) + else: + order_by = None + + permidstr = bin2str(permid) + res_list = self.getAll(value_name, where=where, offset= offset, limit=limit, order_by=order_by, source_permid=permidstr) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","FriendshipStatisticsDBHandler: getAll: result is",res_list + + return res_list + + def saveFriendshipStatisticData (self, data): + + self._db.insertMany('FriendshipStatistics', data) + + def insertFriendshipStatistics(self, my_permid, target_permid, current_time, isForwarder = 0, no_of_attempts = 0, no_of_helpers = 0, commit = True): + +# db keys: 'source_permid', 'target_permid', 'isForwarder', 'request_time', 'response_time', +# 'no_of_attempts', 'no_of_helpers' +# 
self._db.insert(self.table_name, entry=key, value=value) + + sql_insert_friendstatistics = "INSERT INTO FriendshipStatistics (source_permid, target_permid, isForwarder, request_time, response_time, no_of_attempts, no_of_helpers, modified_on) VALUES ('"+my_permid+"','"+target_permid+"',"+str(isForwarder)+","+str(current_time)+", 0 , "+str(no_of_attempts)+","+str(no_of_helpers)+","+str(current_time)+")" + + self._db.execute_write(sql_insert_friendstatistics,commit=commit) + + def updateFriendshipStatistics(self, my_permid, target_permid, current_time, isForwarder = 0, no_of_attempts = 0, no_of_helpers = 0, commit = True): + + sql_insert_friendstatistics = "UPDATE FriendshipStatistics SET request_time = "+str(current_time) +", no_of_attempts = "+str(no_of_attempts)+", no_of_helpers = "+str(no_of_helpers)+", modified_on = "+str(current_time)+" where source_permid = '"+my_permid+"' and target_permid = '"+target_permid+"'" + + self._db.execute_write(sql_insert_friendstatistics,commit=commit) + + def updateFriendshipResponseTime(self, my_permid, target_permid, current_time, commit = True): + + + sql_insert_friendstatistics = "UPDATE FriendshipStatistics SET response_time = "+str(current_time)+ ", modified_on = "+str(current_time)+" where source_permid = '"+my_permid+"' and target_permid = '"+target_permid+"'" + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", sql_insert_friendstatistics + + self._db.execute_write(sql_insert_friendstatistics,commit=commit) + + def insertOrUpdateFriendshipStatistics(self, my_permid, target_permid, current_time, isForwarder = 0, no_of_attempts = 0, no_of_helpers = 0, commit = True): + +# sql_entry_exists_of_the_peer = "SELECT souce_permid FROM FriendshipStatistics where source_permid = " + my_permid + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'Friendship record being inserted of permid' + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", target_permid + res = self._db.getOne('FriendshipStatistics', 'target_permid', target_permid=target_permid) + + if not res: + sql_insert_friendstatistics = "INSERT INTO FriendshipStatistics (source_permid, target_permid, isForwarder, request_time, response_time, no_of_attempts, no_of_helpers, modified_on) VALUES ('"+my_permid+"','"+target_permid+"',"+str(isForwarder)+","+str(current_time)+", 0 , "+str(no_of_attempts)+","+str(no_of_helpers)+","+str(current_time)+")" + else: + sql_insert_friendstatistics = "UPDATE FriendshipStatistics SET no_of_attempts = "+str(no_of_attempts)+", no_of_helpers = "+str(no_of_helpers)+", modified_on = "+str(current_time)+" where source_permid = '"+my_permid+"' and target_permid = '"+target_permid+"'" + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'result is ', res + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", sql_insert_friendstatistics + + try: + self._db.execute_write(sql_insert_friendstatistics,commit=commit) + except: + print >> sys.stderr + + + def getLastUpdateTimeOfThePeer(self, permid): + + res = self._db.getAll('FriendshipStatistics', 'source_permid', order_by='modified_on desc', limit=1) + + if not res: + return 0 + else: + # todo! + return 0 # bug??? 
res['modified_on'] + + diff --git a/tribler-mod/Tribler/Core/CacheDB/SqliteSeedingStatsCacheDB.py b/tribler-mod/Tribler/Core/CacheDB/SqliteSeedingStatsCacheDB.py new file mode 100644 index 0000000..7eb773b --- /dev/null +++ b/tribler-mod/Tribler/Core/CacheDB/SqliteSeedingStatsCacheDB.py @@ -0,0 +1,203 @@ +from time import localtime, strftime +# Written by Boxun Zhang +# see LICENSE.txt for license information + +import os +from time import time +import threading +from traceback import print_exc + +from Tribler.__init__ import LIBRARYNAME +from Tribler.Core.CacheDB.sqlitecachedb import * +from Tribler.Core.CacheDB.SqliteCacheDBHandler import BasicDBHandler +from Tribler.Core.simpledefs import * + +CREATE_SEEDINGSTATS_SQL_FILE = None +CREATE_SEEDINGSTATS_SQL_FILE_POSTFIX = os.path.join(LIBRARYNAME, 'Core', 'Statistics', 'tribler_seedingstats_sdb.sql') +DB_FILE_NAME = 'tribler_seedingstats.sdb' +DB_DIR_NAME = 'sqlite' # db file path = DB_DIR_NAME/DB_FILE_NAME +CURRENT_DB_VERSION = 1 +DEFAULT_BUSY_TIMEOUT = 10000 +MAX_SQL_BATCHED_TO_TRANSACTION = 1000 # don't change it unless carefully tested. A transaction with 1000 batched updates took 1.5 seconds +SHOW_ALL_EXECUTE = False +costs = [] +cost_reads = [] + +DEBUG = False + +def init_seeding_stats(config, db_exception_handler = None): + """ create SeedingStats database """ + global CREATE_SEEDINGSTATS_SQL_FILE + config_dir = config['state_dir'] + install_dir = config['install_dir'] + CREATE_SEEDINGSTATS_SQL_FILE = os.path.join(install_dir,CREATE_SEEDINGSTATS_SQL_FILE_POSTFIX) + sqlitedb = SQLiteSeedingStatsCacheDB.getInstance(db_exception_handler) + sqlite_db_path = os.path.join(config_dir, DB_DIR_NAME, DB_FILE_NAME) + sqlitedb.initDB(sqlite_db_path, CREATE_SEEDINGSTATS_SQL_FILE) # the first place to create db in Tribler + return sqlitedb + +class SQLiteSeedingStatsCacheDB(SQLiteCacheDBBase): + __single = None # used for multithreaded singletons pattern + lock = threading.RLock() + + @classmethod + def getInstance(cls, *args, **kw): + # Singleton pattern with double-checking to ensure that it can only create one object + if cls.__single is None: + cls.lock.acquire() + try: + if cls.__single is None: + cls.__single = cls(*args, **kw) + finally: + cls.lock.release() + return cls.__single + + def __init__(self, *args, **kw): + # always use getInstance() to create this object + if self.__single != None: + raise RuntimeError, "SQLiteSeedingStatsCacheDB is singleton" + + SQLiteCacheDBBase.__init__(self, *args, **kw) + + +class SeedingStatsDBHandler(BasicDBHandler): + + __single = None # used for multithreaded singletons pattern + lock = threading.Lock() + + def getInstance(*args, **kw): + # Singleton pattern with double-checking + if SeedingStatsDBHandler.__single is None: + SeedingStatsDBHandler.lock.acquire() + try: + if SeedingStatsDBHandler.__single is None: + SeedingStatsDBHandler(*args, **kw) + finally: + SeedingStatsDBHandler.lock.release() + return SeedingStatsDBHandler.__single + + getInstance = staticmethod(getInstance) + + def __init__(self): + if SeedingStatsDBHandler.__single is not None: + raise RuntimeError, "SeedingStatDBHandler is singleton" + SeedingStatsDBHandler.__single = self + db = SQLiteSeedingStatsCacheDB.getInstance() + BasicDBHandler.__init__(self, db, 'SeedingStats') + + def updateSeedingStats(self, permID, reputation, dslist, interval): + permID = bin2str(permID) + + seedings = [] + + for item in dslist: + if item.get_status() == DLSTATUS_SEEDING: + seedings.append(item) + + commit = False + for i in range(0, 
len(seedings)): + ds = seedings[i] + + infohash = bin2str(ds.get_download().get_def().get_infohash()) + + stats = ds.stats['stats'] + ul = stats.upTotal + + if i == len(seedings)-1: + commit = True + + res = self.existedInfoHash(infohash) + + if res is not None: + # res is list of ONE tuple + #self.updateSeedingStat(infohash, reputation, res[0][0], interval, commit) + + # NAT/Firewall & Seeding Behavior + # Store upload amount instead peer reputation + self.updateSeedingStat(infohash, ul, res[0][0], interval, commit) + else: + # Insert new record + #self.insertSeedingStat(infohash, permID, reputation, interval, commit) + + # NAT/Firewall & Seeding Behavior + # Store upload amount instead peer reputation + self.insertSeedingStat(infohash, permID, ul, interval, commit) + + + def existedInfoHash(self, infohash): + + sql = "SELECT seeding_time FROM SeedingStats WHERE info_hash='%s' and crawled=0"%infohash + + try: + cursor = self._db.execute_read(sql) + if cursor: + res = list(cursor) + + if len(res) > 0: + return res + else: + return None + else: + # something wrong, throw an exception? + return None + except: + return None + + def updateSeedingStat(self, infohash, reputation, seedingtime, interval, commit): + try: + sql_update = "UPDATE SeedingStats SET seeding_time=%s, reputation=%s WHERE info_hash='%s' AND crawled=0"%(seedingtime + interval, reputation, infohash) + self._db.execute_write(sql_update, None, commit) + except: + print_exc() + + def insertSeedingStat(self, infohash, permID, reputation, interval, commit): + try: + sql_insert = "INSERT INTO SeedingStats VALUES(%s, '%s', '%s', %s, %s, %s)"%(time(), permID, infohash, interval, reputation, 0) + self._db.execute_write(sql_insert, None, commit) + except: + print_exc() + + +class SeedingStatsSettingsDBHandler(BasicDBHandler): + + __single = None # used for multithreaded singletons pattern + lock = threading.Lock() + + def getInstance(*args, **kw): + # Singleton pattern with double-checking + if SeedingStatsSettingsDBHandler.__single is None: + SeedingStatsSettingsDBHandler.lock.acquire() + try: + if SeedingStatsSettingsDBHandler.__single is None: + SeedingStatsSettingsDBHandler(*args, **kw) + finally: + SeedingStatsSettingsDBHandler.lock.release() + return SeedingStatsSettingsDBHandler.__single + + getInstance = staticmethod(getInstance) + + def __init__(self): + if SeedingStatsSettingsDBHandler.__single is not None: + raise RuntimeError, "SeedingStatDBHandler is singleton" + SeedingStatsSettingsDBHandler.__single = self + db = SQLiteSeedingStatsCacheDB.getInstance() + BasicDBHandler.__init__(self, db, 'CrawlingSettings') + + def loadCrawlingSettings(self): + try: + sql_query = "SELECT * FROM SeedingStatsSettings" + cursor = self._db.execute_read(sql_query) + + if cursor: + return list(cursor) + else: + return None + except: + print_exc() + + def updateCrawlingSettings(self, args): + try: + sql_update = "UPDATE SeedingStatsSettings SET crawling_interval=%s, crawling_enabled=%s WHERE version=1"%(args[0], args[1]) + cursor = self._db.execute_write(sql_update) + except: + print_exc() diff --git a/tribler-mod/Tribler/Core/CacheDB/SqliteSeedingStatsCacheDB.py.bak b/tribler-mod/Tribler/Core/CacheDB/SqliteSeedingStatsCacheDB.py.bak new file mode 100644 index 0000000..61a0635 --- /dev/null +++ b/tribler-mod/Tribler/Core/CacheDB/SqliteSeedingStatsCacheDB.py.bak @@ -0,0 +1,202 @@ +# Written by Boxun Zhang +# see LICENSE.txt for license information + +import os +from time import time +import threading +from traceback import print_exc + +from 
Tribler.__init__ import LIBRARYNAME +from Tribler.Core.CacheDB.sqlitecachedb import * +from Tribler.Core.CacheDB.SqliteCacheDBHandler import BasicDBHandler +from Tribler.Core.simpledefs import * + +CREATE_SEEDINGSTATS_SQL_FILE = None +CREATE_SEEDINGSTATS_SQL_FILE_POSTFIX = os.path.join(LIBRARYNAME, 'Core', 'Statistics', 'tribler_seedingstats_sdb.sql') +DB_FILE_NAME = 'tribler_seedingstats.sdb' +DB_DIR_NAME = 'sqlite' # db file path = DB_DIR_NAME/DB_FILE_NAME +CURRENT_DB_VERSION = 1 +DEFAULT_BUSY_TIMEOUT = 10000 +MAX_SQL_BATCHED_TO_TRANSACTION = 1000 # don't change it unless carefully tested. A transaction with 1000 batched updates took 1.5 seconds +SHOW_ALL_EXECUTE = False +costs = [] +cost_reads = [] + +DEBUG = False + +def init_seeding_stats(config, db_exception_handler = None): + """ create SeedingStats database """ + global CREATE_SEEDINGSTATS_SQL_FILE + config_dir = config['state_dir'] + install_dir = config['install_dir'] + CREATE_SEEDINGSTATS_SQL_FILE = os.path.join(install_dir,CREATE_SEEDINGSTATS_SQL_FILE_POSTFIX) + sqlitedb = SQLiteSeedingStatsCacheDB.getInstance(db_exception_handler) + sqlite_db_path = os.path.join(config_dir, DB_DIR_NAME, DB_FILE_NAME) + sqlitedb.initDB(sqlite_db_path, CREATE_SEEDINGSTATS_SQL_FILE) # the first place to create db in Tribler + return sqlitedb + +class SQLiteSeedingStatsCacheDB(SQLiteCacheDBBase): + __single = None # used for multithreaded singletons pattern + lock = threading.RLock() + + @classmethod + def getInstance(cls, *args, **kw): + # Singleton pattern with double-checking to ensure that it can only create one object + if cls.__single is None: + cls.lock.acquire() + try: + if cls.__single is None: + cls.__single = cls(*args, **kw) + finally: + cls.lock.release() + return cls.__single + + def __init__(self, *args, **kw): + # always use getInstance() to create this object + if self.__single != None: + raise RuntimeError, "SQLiteSeedingStatsCacheDB is singleton" + + SQLiteCacheDBBase.__init__(self, *args, **kw) + + +class SeedingStatsDBHandler(BasicDBHandler): + + __single = None # used for multithreaded singletons pattern + lock = threading.Lock() + + def getInstance(*args, **kw): + # Singleton pattern with double-checking + if SeedingStatsDBHandler.__single is None: + SeedingStatsDBHandler.lock.acquire() + try: + if SeedingStatsDBHandler.__single is None: + SeedingStatsDBHandler(*args, **kw) + finally: + SeedingStatsDBHandler.lock.release() + return SeedingStatsDBHandler.__single + + getInstance = staticmethod(getInstance) + + def __init__(self): + if SeedingStatsDBHandler.__single is not None: + raise RuntimeError, "SeedingStatDBHandler is singleton" + SeedingStatsDBHandler.__single = self + db = SQLiteSeedingStatsCacheDB.getInstance() + BasicDBHandler.__init__(self, db, 'SeedingStats') + + def updateSeedingStats(self, permID, reputation, dslist, interval): + permID = bin2str(permID) + + seedings = [] + + for item in dslist: + if item.get_status() == DLSTATUS_SEEDING: + seedings.append(item) + + commit = False + for i in range(0, len(seedings)): + ds = seedings[i] + + infohash = bin2str(ds.get_download().get_def().get_infohash()) + + stats = ds.stats['stats'] + ul = stats.upTotal + + if i == len(seedings)-1: + commit = True + + res = self.existedInfoHash(infohash) + + if res is not None: + # res is list of ONE tuple + #self.updateSeedingStat(infohash, reputation, res[0][0], interval, commit) + + # NAT/Firewall & Seeding Behavior + # Store upload amount instead peer reputation + self.updateSeedingStat(infohash, ul, res[0][0], interval, 
commit) + else: + # Insert new record + #self.insertSeedingStat(infohash, permID, reputation, interval, commit) + + # NAT/Firewall & Seeding Behavior + # Store upload amount instead peer reputation + self.insertSeedingStat(infohash, permID, ul, interval, commit) + + + def existedInfoHash(self, infohash): + + sql = "SELECT seeding_time FROM SeedingStats WHERE info_hash='%s' and crawled=0"%infohash + + try: + cursor = self._db.execute_read(sql) + if cursor: + res = list(cursor) + + if len(res) > 0: + return res + else: + return None + else: + # something wrong, throw an exception? + return None + except: + return None + + def updateSeedingStat(self, infohash, reputation, seedingtime, interval, commit): + try: + sql_update = "UPDATE SeedingStats SET seeding_time=%s, reputation=%s WHERE info_hash='%s' AND crawled=0"%(seedingtime + interval, reputation, infohash) + self._db.execute_write(sql_update, None, commit) + except: + print_exc() + + def insertSeedingStat(self, infohash, permID, reputation, interval, commit): + try: + sql_insert = "INSERT INTO SeedingStats VALUES(%s, '%s', '%s', %s, %s, %s)"%(time(), permID, infohash, interval, reputation, 0) + self._db.execute_write(sql_insert, None, commit) + except: + print_exc() + + +class SeedingStatsSettingsDBHandler(BasicDBHandler): + + __single = None # used for multithreaded singletons pattern + lock = threading.Lock() + + def getInstance(*args, **kw): + # Singleton pattern with double-checking + if SeedingStatsSettingsDBHandler.__single is None: + SeedingStatsSettingsDBHandler.lock.acquire() + try: + if SeedingStatsSettingsDBHandler.__single is None: + SeedingStatsSettingsDBHandler(*args, **kw) + finally: + SeedingStatsSettingsDBHandler.lock.release() + return SeedingStatsSettingsDBHandler.__single + + getInstance = staticmethod(getInstance) + + def __init__(self): + if SeedingStatsSettingsDBHandler.__single is not None: + raise RuntimeError, "SeedingStatDBHandler is singleton" + SeedingStatsSettingsDBHandler.__single = self + db = SQLiteSeedingStatsCacheDB.getInstance() + BasicDBHandler.__init__(self, db, 'CrawlingSettings') + + def loadCrawlingSettings(self): + try: + sql_query = "SELECT * FROM SeedingStatsSettings" + cursor = self._db.execute_read(sql_query) + + if cursor: + return list(cursor) + else: + return None + except: + print_exc() + + def updateCrawlingSettings(self, args): + try: + sql_update = "UPDATE SeedingStatsSettings SET crawling_interval=%s, crawling_enabled=%s WHERE version=1"%(args[0], args[1]) + cursor = self._db.execute_write(sql_update) + except: + print_exc() diff --git a/tribler-mod/Tribler/Core/CacheDB/SqliteVideoPlaybackStatsCacheDB.py b/tribler-mod/Tribler/Core/CacheDB/SqliteVideoPlaybackStatsCacheDB.py new file mode 100644 index 0000000..23cbeba --- /dev/null +++ b/tribler-mod/Tribler/Core/CacheDB/SqliteVideoPlaybackStatsCacheDB.py @@ -0,0 +1,177 @@ +from time import localtime, strftime +# Written by Boudewijn +# see LICENSE.txt for license information + +""" +Database wrapper to add and retrieve Video playback statistics +""" + +import sys +import os +import thread +from time import time + +from Tribler.__init__ import LIBRARYNAME +from Tribler.Core.CacheDB.sqlitecachedb import SQLiteCacheDBBase +from Tribler.Core.CacheDB.SqliteCacheDBHandler import BasicDBHandler + +CREATE_VIDEOPLAYBACK_STATS_SQL_FILE = None +CREATE_VIDEOPLAYBACK_STATS_SQL_FILE_POSTFIX = os.path.join(LIBRARYNAME, 'Core', 'Statistics', "tribler_videoplayback_stats.sql") +DB_FILE_NAME = 'tribler_videoplayback_stats.sdb' +DB_DIR_NAME = 'sqlite' # 
db file path = DB_DIR_NAME/DB_FILE_NAME +CURRENT_DB_VERSION = 1 + +DEBUG = False + +def init_videoplayback_stats(config, db_exception_handler = None): + """ create VideoPlayback statistics database """ + global CREATE_VIDEOPLAYBACK_STATS_SQL_FILE + config_dir = config['state_dir'] + install_dir = config['install_dir'] + CREATE_VIDEOPLAYBACK_STATS_SQL_FILE = os.path.join(install_dir,CREATE_VIDEOPLAYBACK_STATS_SQL_FILE_POSTFIX) + sqlitedb = SQLiteVideoPlaybackStatsCacheDB.get_instance(db_exception_handler) + sqlite_db_path = os.path.join(config_dir, DB_DIR_NAME, DB_FILE_NAME) + sqlitedb.initDB(sqlite_db_path, CREATE_VIDEOPLAYBACK_STATS_SQL_FILE,current_db_version=CURRENT_DB_VERSION) # the first place to create db in Tribler + return sqlitedb + +class SQLiteVideoPlaybackStatsCacheDB(SQLiteCacheDBBase): + """ + Wrapper around Database engine. Used to perform raw SQL queries + and ensure that Database schema is correct. + """ + + __single = None # used for multithreaded singletons pattern + lock = thread.allocate_lock() + + @classmethod + def get_instance(cls, *args, **kw): + # Singleton pattern with double-checking to ensure that it can only create one object + if cls.__single is None: + cls.lock.acquire() + try: + if cls.__single is None: + cls.__single = cls(*args, **kw) + finally: + cls.lock.release() + return cls.__single + + def __init__(self, *args, **kw): + # always use get_instance() to create this object + if self.__single != None: + raise RuntimeError, "SQLiteVideoPlaybackStatsCacheDB is singleton" + SQLiteCacheDBBase.__init__(self, *args, **kw) + +class VideoPlaybackInfoDBHandler(BasicDBHandler): + """ + Interface to add and retrieve info from database. + + Manages the playback_info table. This table contains one entry + with info for each playback. This info contains things like: + piecesize, nat/firewall status, etc. + """ + + __single = None # used for multi-threaded singletons pattern + lock = thread.allocate_lock() + + @classmethod + def get_instance(cls, *args, **kw): + # Singleton pattern with double-checking to ensure that it can only create one object + if cls.__single is None: + cls.lock.acquire() + try: + if cls.__single is None: + cls.__single = cls(*args, **kw) + finally: + cls.lock.release() + return cls.__single + + def __init__(self): + if VideoPlaybackInfoDBHandler.__single is not None: + raise RuntimeError, "VideoPlaybackInfoDBHandler is singleton" + BasicDBHandler.__init__(self, SQLiteVideoPlaybackStatsCacheDB.get_instance(), 'playback_info') + + def create_entry(self, key, piece_size=0, num_pieces=0, bitrate=0, nat="", unique=False): + """ + Create an entry that can be updated using subsequent + set_... calls. + + When UNIQUE we assume that KEY does not yet exist in the + database. Otherwise a check is made. 
+ """ + assert type(key) is str, type(key) + assert type(piece_size) is int, type(piece_size) + assert type(num_pieces) is int, type(num_pieces) + assert type(bitrate) in (int, float), type(bitrate) + assert type(nat) is str, type(nat) + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "SqliteVideoPlaybackStatsCacheDB create_entry", key + if unique: + self._db.execute_write("INSERT INTO %s (key, timestamp, piece_size, num_pieces, bitrate, nat) VALUES ('%s', %s, %d, %d, %d, '%s')" % (self.table_name, key, time(), piece_size, num_pieces, bitrate, nat)) + return True + else: + (count,) = self._db.execute_read("SELECT COUNT(*) FROM %s WHERE key = '%s'" % (self.table_name, key)).next() + if count == 0: + return self.create_entry(key, piece_size=piece_size, num_pieces=num_pieces, bitrate=bitrate, nat=nat, unique=True) + else: + return False + + def set_piecesize(self, key, piece_size): + assert type(key) is str + assert type(piece_size) is int + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "SqliteVideoPlaybackStatsCacheDB set_piecesize", key, piece_size + self._db.execute_write("UPDATE %s SET piece_size = %d WHERE key = '%s'" % (self.table_name, piece_size, key)) + + def set_num_pieces(self, key, num_pieces): + assert type(key) is str + assert type(num_pieces) is int + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "SqliteVideoPlaybackStatsCacheDB set_num_pieces", key, num_pieces + self._db.execute_write("UPDATE %s SET num_pieces = %d WHERE key = '%s'" % (self.table_name, num_pieces, key)) + + def set_bitrate(self, key, bitrate): + assert type(key) is str + assert type(bitrate) in (int, float) + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "SqliteVideoPlaybackStatsCacheDB set_bitrate", key, bitrate + self._db.execute_write("UPDATE %s SET bitrate = %d WHERE key = '%s'" % (self.table_name, bitrate, key)) + + def set_nat(self, key, nat): + assert type(key) is str + assert type(nat) is str + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "SqliteVideoPlaybackStatsCacheDB set_nat", key, nat + self._db.execute_write("UPDATE %s SET nat = '%s' WHERE key = '%s'" % (self.table_name, nat, key)) + +class VideoPlaybackEventDBHandler(BasicDBHandler): + """ + Interface to add and retrieve events from the database. + + Manages the playback_event table. This table may contain several + entries for events that occur during playback such as when it was + started and when it was paused. 
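create_entry() above takes a fast path when the caller promises the key is new and otherwise runs a COUNT(*) check before inserting. With a UNIQUE constraint on the key column, the same insert-if-absent behaviour can be expressed in one statement with SQLite's INSERT OR IGNORE; a sketch on a simplified playback_info-like table (the column set is illustrative, not the real schema):

import sqlite3
import time

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE playback_info (key TEXT UNIQUE, timestamp REAL,"
             " piece_size INTEGER, num_pieces INTEGER, bitrate REAL, nat TEXT)")

def create_entry(conn, key, piece_size=0, num_pieces=0, bitrate=0, nat=""):
    before = conn.total_changes
    # INSERT OR IGNORE leaves an existing row untouched instead of failing.
    conn.execute("INSERT OR IGNORE INTO playback_info VALUES (?, ?, ?, ?, ?, ?)",
                 (key, time.time(), piece_size, num_pieces, bitrate, nat))
    # True only if a new row was actually created.
    return conn.total_changes > before

assert create_entry(conn, "session-1", piece_size=16384) is True
assert create_entry(conn, "session-1") is False   # already present, left as-is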
+ """ + + __single = None # used for multi-threaded singletons pattern + lock = thread.allocate_lock() + + @classmethod + def get_instance(cls, *args, **kw): + # Singleton pattern with double-checking to ensure that it can only create one object + if cls.__single is None: + cls.lock.acquire() + try: + if cls.__single is None: + cls.__single = cls(*args, **kw) + finally: + cls.lock.release() + return cls.__single + + def __init__(self): + if VideoPlaybackEventDBHandler.__single is not None: + raise RuntimeError, "VideoPlaybackEventDBHandler is singleton" + BasicDBHandler.__init__(self, SQLiteVideoPlaybackStatsCacheDB.get_instance(), 'playback_event') + + def add_event(self, key, event, origin): + assert type(key) is str + assert type(event) is str + assert type(origin) is str + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "SqliteVideoPlaybackStatsCacheDB add_event", key, event, origin + self._db.execute_write("INSERT INTO %s (key, timestamp, event, origin) VALUES ('%s', %s, '%s', '%s')" % (self.table_name, key, time(), event, origin)) + + diff --git a/tribler-mod/Tribler/Core/CacheDB/SqliteVideoPlaybackStatsCacheDB.py.bak b/tribler-mod/Tribler/Core/CacheDB/SqliteVideoPlaybackStatsCacheDB.py.bak new file mode 100644 index 0000000..2472353 --- /dev/null +++ b/tribler-mod/Tribler/Core/CacheDB/SqliteVideoPlaybackStatsCacheDB.py.bak @@ -0,0 +1,176 @@ +# Written by Boudewijn +# see LICENSE.txt for license information + +""" +Database wrapper to add and retrieve Video playback statistics +""" + +import sys +import os +import thread +from time import time + +from Tribler.__init__ import LIBRARYNAME +from Tribler.Core.CacheDB.sqlitecachedb import SQLiteCacheDBBase +from Tribler.Core.CacheDB.SqliteCacheDBHandler import BasicDBHandler + +CREATE_VIDEOPLAYBACK_STATS_SQL_FILE = None +CREATE_VIDEOPLAYBACK_STATS_SQL_FILE_POSTFIX = os.path.join(LIBRARYNAME, 'Core', 'Statistics', "tribler_videoplayback_stats.sql") +DB_FILE_NAME = 'tribler_videoplayback_stats.sdb' +DB_DIR_NAME = 'sqlite' # db file path = DB_DIR_NAME/DB_FILE_NAME +CURRENT_DB_VERSION = 1 + +DEBUG = False + +def init_videoplayback_stats(config, db_exception_handler = None): + """ create VideoPlayback statistics database """ + global CREATE_VIDEOPLAYBACK_STATS_SQL_FILE + config_dir = config['state_dir'] + install_dir = config['install_dir'] + CREATE_VIDEOPLAYBACK_STATS_SQL_FILE = os.path.join(install_dir,CREATE_VIDEOPLAYBACK_STATS_SQL_FILE_POSTFIX) + sqlitedb = SQLiteVideoPlaybackStatsCacheDB.get_instance(db_exception_handler) + sqlite_db_path = os.path.join(config_dir, DB_DIR_NAME, DB_FILE_NAME) + sqlitedb.initDB(sqlite_db_path, CREATE_VIDEOPLAYBACK_STATS_SQL_FILE,current_db_version=CURRENT_DB_VERSION) # the first place to create db in Tribler + return sqlitedb + +class SQLiteVideoPlaybackStatsCacheDB(SQLiteCacheDBBase): + """ + Wrapper around Database engine. Used to perform raw SQL queries + and ensure that Database schema is correct. 
+ """ + + __single = None # used for multithreaded singletons pattern + lock = thread.allocate_lock() + + @classmethod + def get_instance(cls, *args, **kw): + # Singleton pattern with double-checking to ensure that it can only create one object + if cls.__single is None: + cls.lock.acquire() + try: + if cls.__single is None: + cls.__single = cls(*args, **kw) + finally: + cls.lock.release() + return cls.__single + + def __init__(self, *args, **kw): + # always use get_instance() to create this object + if self.__single != None: + raise RuntimeError, "SQLiteVideoPlaybackStatsCacheDB is singleton" + SQLiteCacheDBBase.__init__(self, *args, **kw) + +class VideoPlaybackInfoDBHandler(BasicDBHandler): + """ + Interface to add and retrieve info from database. + + Manages the playback_info table. This table contains one entry + with info for each playback. This info contains things like: + piecesize, nat/firewall status, etc. + """ + + __single = None # used for multi-threaded singletons pattern + lock = thread.allocate_lock() + + @classmethod + def get_instance(cls, *args, **kw): + # Singleton pattern with double-checking to ensure that it can only create one object + if cls.__single is None: + cls.lock.acquire() + try: + if cls.__single is None: + cls.__single = cls(*args, **kw) + finally: + cls.lock.release() + return cls.__single + + def __init__(self): + if VideoPlaybackInfoDBHandler.__single is not None: + raise RuntimeError, "VideoPlaybackInfoDBHandler is singleton" + BasicDBHandler.__init__(self, SQLiteVideoPlaybackStatsCacheDB.get_instance(), 'playback_info') + + def create_entry(self, key, piece_size=0, num_pieces=0, bitrate=0, nat="", unique=False): + """ + Create an entry that can be updated using subsequent + set_... calls. + + When UNIQUE we assume that KEY does not yet exist in the + database. Otherwise a check is made. 
+ """ + assert type(key) is str, type(key) + assert type(piece_size) is int, type(piece_size) + assert type(num_pieces) is int, type(num_pieces) + assert type(bitrate) in (int, float), type(bitrate) + assert type(nat) is str, type(nat) + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "SqliteVideoPlaybackStatsCacheDB create_entry", key + if unique: + self._db.execute_write("INSERT INTO %s (key, timestamp, piece_size, num_pieces, bitrate, nat) VALUES ('%s', %s, %d, %d, %d, '%s')" % (self.table_name, key, time(), piece_size, num_pieces, bitrate, nat)) + return True + else: + (count,) = self._db.execute_read("SELECT COUNT(*) FROM %s WHERE key = '%s'" % (self.table_name, key)).next() + if count == 0: + return self.create_entry(key, piece_size=piece_size, num_pieces=num_pieces, bitrate=bitrate, nat=nat, unique=True) + else: + return False + + def set_piecesize(self, key, piece_size): + assert type(key) is str + assert type(piece_size) is int + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "SqliteVideoPlaybackStatsCacheDB set_piecesize", key, piece_size + self._db.execute_write("UPDATE %s SET piece_size = %d WHERE key = '%s'" % (self.table_name, piece_size, key)) + + def set_num_pieces(self, key, num_pieces): + assert type(key) is str + assert type(num_pieces) is int + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "SqliteVideoPlaybackStatsCacheDB set_num_pieces", key, num_pieces + self._db.execute_write("UPDATE %s SET num_pieces = %d WHERE key = '%s'" % (self.table_name, num_pieces, key)) + + def set_bitrate(self, key, bitrate): + assert type(key) is str + assert type(bitrate) in (int, float) + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "SqliteVideoPlaybackStatsCacheDB set_bitrate", key, bitrate + self._db.execute_write("UPDATE %s SET bitrate = %d WHERE key = '%s'" % (self.table_name, bitrate, key)) + + def set_nat(self, key, nat): + assert type(key) is str + assert type(nat) is str + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "SqliteVideoPlaybackStatsCacheDB set_nat", key, nat + self._db.execute_write("UPDATE %s SET nat = '%s' WHERE key = '%s'" % (self.table_name, nat, key)) + +class VideoPlaybackEventDBHandler(BasicDBHandler): + """ + Interface to add and retrieve events from the database. + + Manages the playback_event table. This table may contain several + entries for events that occur during playback such as when it was + started and when it was paused. 
+ """ + + __single = None # used for multi-threaded singletons pattern + lock = thread.allocate_lock() + + @classmethod + def get_instance(cls, *args, **kw): + # Singleton pattern with double-checking to ensure that it can only create one object + if cls.__single is None: + cls.lock.acquire() + try: + if cls.__single is None: + cls.__single = cls(*args, **kw) + finally: + cls.lock.release() + return cls.__single + + def __init__(self): + if VideoPlaybackEventDBHandler.__single is not None: + raise RuntimeError, "VideoPlaybackEventDBHandler is singleton" + BasicDBHandler.__init__(self, SQLiteVideoPlaybackStatsCacheDB.get_instance(), 'playback_event') + + def add_event(self, key, event, origin): + assert type(key) is str + assert type(event) is str + assert type(origin) is str + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "SqliteVideoPlaybackStatsCacheDB add_event", key, event, origin + self._db.execute_write("INSERT INTO %s (key, timestamp, event, origin) VALUES ('%s', %s, '%s', '%s')" % (self.table_name, key, time(), event, origin)) + + diff --git a/tribler-mod/Tribler/Core/CacheDB/__init__.py b/tribler-mod/Tribler/Core/CacheDB/__init__.py new file mode 100644 index 0000000..b3799c4 --- /dev/null +++ b/tribler-mod/Tribler/Core/CacheDB/__init__.py @@ -0,0 +1,3 @@ +from time import localtime, strftime +# Written by Jie Yang +# see LICENSE.txt for license information diff --git a/tribler-mod/Tribler/Core/CacheDB/__init__.py.bak b/tribler-mod/Tribler/Core/CacheDB/__init__.py.bak new file mode 100644 index 0000000..57fd4af --- /dev/null +++ b/tribler-mod/Tribler/Core/CacheDB/__init__.py.bak @@ -0,0 +1,2 @@ +# Written by Jie Yang +# see LICENSE.txt for license information diff --git a/tribler-mod/Tribler/Core/CacheDB/bsdcachedb.py b/tribler-mod/Tribler/Core/CacheDB/bsdcachedb.py new file mode 100644 index 0000000..ea45472 --- /dev/null +++ b/tribler-mod/Tribler/Core/CacheDB/bsdcachedb.py @@ -0,0 +1,1137 @@ +from time import localtime, strftime +# Written by Jie Yang +# see LICENSE.txt for license information +# +# ! WARNING ! +# Arno: The database schemas used should be versioned such that a specific DB +# version number denotes a specific set of schemas. So when updating the schemas +# i.e. adding a field, you MUST provide an updateDB() method that converts the +# records in the old schema to the new schema. If you are the first to add a new +# field for a release, you must also increase the database version number. +# ! WARNING ! +# +# +## TODO: update database V3: +# TorrentDB: clean relevance, insert time +# PeerDB: clean similarity, insert time +# PreferenceDB: clean permid:torrent_id:{} + +""" +Database design +Value in bracket is the default value +Don't use None as a default value + +MyDB - (PeerDB) + mydata.bsd: # future keys: pictures, + version: int (curr_version) # required + permid: str # required + ip: str ('') + port: int (0) + name: str ('Tribler') + torrent_path: str ('') # default path to store torrents + prefxchg_queue: list ([]) # not used + bootstrapping: int (1) # not used + max_num_torrents: int (100000) + max_num_my_preferences: int (1000) + superpeers: Set([permid]) + friends: Set([permid]) + +PeerDB - (MyFriendDB, PreferenceDB, OwnerDB) + peers.bsd: # future keys: sys_trust, reliablity, speed, personal_info, .. + permid:{ + ip: str ('') + port: int (0) # listening port, even behind firewall + name: str ('unknown') + last_seen: int (0) # last seen of this peer by anyone. 
This info could be copied from other peers + similarity: int (0) # [0, 1000] + oversion: int(0) # overlay version, added in 3.7.1, overlay version 4 + connected_times: int(0) # times to connect the peer successfully + #tried_times: int(0) # times to attempt to connect the peer, removed from 3.7.1 + buddycast_times: int(0) # times to receive buddycast message + last_buddycast_time: int (0) # from buddycast 3/tribler 3.7 + #relability (uptime, IP fixed/changing) + #trust: int (0) # [0, 100] + #icon: str ('') # name + '_' + permid[-4:] + npeers: int(0) # added in 4.1, overlay version 6, DB version 4 + ntorrents: int(0) # added in 4.1, overlay version 6, DB version 4 + nprefs: int(0) # added in 4.1, overlay version 6, DB version 4 + nqueries: int(0) # added in 4.1, overlay version 6, DB version 4 + last_connected: int(0) # last time I connected to the peer. added in 4.1.3, overlay version 6, DB version 5 + } + +TorrentDB - (PreferenceDB, MyPreference, OwnerDB) + torrents.bsd: # future keys: names, tags, trackers, .. + infohash:{ + relevance: int (0) # [0, 1000] + torrent_name: str ('') # torrent name + torrent_dir: str ('') # path of the torrent (without the file name). '\x01' for default path + info: dict ({}) # {name, length, announce, creation date, comment, announce-list, num_files} + # new keys in database version 2 + leecher: int (-1) + seeder: int (-1) + category: list () + ignore_number: int (0) + last_check_time: long (time()) + retry_number: int (0) + status: str ("unknown") + source: str("") + inserttime: long (time()) + progress: float + destdir: str("") + } + +PreferenceDB - (PeerDB, TorrentDB) # other peers' preferences + preferences.bsd: + permid:{ + torrent_id:{ + # 'relevance': int (0), 'rank': int (0), removed from 3.6 + } + } + +MyPreferenceDB - (TorrentDB) + mypreferences.bsd: # future keys: speed + infohash:{ + created_time: int (0) # time to start download/upload the torrent + content_name: str ('') # real file name in disk, may be different with info['name'] + content_dir: str ('') # content_dir + content_name = full path + rank: int (0) # [-1, 5], # -1 means it is a fake torrent + last_seen: int (0) + } + +OwnerDB - (PeerDB, TorrentDB) + owner.bsd: + infohash: Set([permid]) # future keys: tags, name + +""" + +import os, sys +from time import time +from copy import deepcopy +from sets import Set +from traceback import print_exc +from threading import currentThread + +from Tribler.Core.BitTornado.bencode import bencode + +#from Tribler.utilities import isValidPermid, isValidInfohash + +def isValidIP(ip): + try: + return validIP(ip) + except: + return False + +import socket +def validIP(ip): + try: + try: + # Is IPv4 addr? + socket.inet_aton(ip) + return True + except socket.error: + # Is hostname / IPv6? 
+ socket.getaddrinfo(ip, None) + return True + except: + print_exc() + raise RuntimeError, "invalid IP address: " + ip + + +try: + # For Python 2.3 + from bsddb import db, dbshelve, dbutils +except ImportError: + # For earlier Pythons w/distutils pybsddb + from bsddb3 import db, dbshelve, dbutils + +dbutils._deadlock_VerboseFile = sys.stderr + + +#permid_len = 0 #112 +#infohash_len = 20 +# + +home_dir = 'bsddb' +# Database schema versions (for all databases) +# 1 = First +# 2 = Added keys to TorrentDB: leecher,seeder,category,ignore_number,last_check_time,retry_number,status +# 3 = Added keys to TorrentDB: source,inserttime +# 4 = Added keys to PeerDB: npeers, ntorrents, nprefs, nqueries +# 5 = Added keys to PeerDB: last_connected +curr_version = 5 +permid_length = 112 +infohash_length = 20 +torrent_id_length = 20 +MAX_RETRIES = 12 +STRICT_CHECK = False +DEBUG = False + +def isValidPermid(permid): # validate permid in outer layer + return True + +def isValidInfohash(infohash): + return True + +def init(config_dir, myinfo, db_exception_handler = None): + """ create all databases """ + + global home_dir + home_dir = make_filename(config_dir, 'bsddb') + if DEBUG: + print "Init database at", home_dir + BasicDB.exception_handler = db_exception_handler + MyDB.getInstance(myinfo, home_dir) + PeerDB.getInstance(home_dir) + TorrentDB.getInstance(home_dir) + PreferenceDB.getInstance(home_dir) + MyPreferenceDB.getInstance(home_dir) + OwnerDB.getInstance(home_dir) + MyDB.updateDBVersion(curr_version) + +def done(config_dir): + MyDB.getInstance().close() + MyPreferenceDB.getInstance().close() + OwnerDB.getInstance().close() + PeerDB.getInstance().close() + PreferenceDB.getInstance().close() + TorrentDB.getInstance().close() + + +def make_filename(config_dir,filename): + if config_dir is None: + return filename + else: + return os.path.join(config_dir,filename) + +def setDBPath(db_dir = ''): + if not db_dir: + db_dir = '.' + if not os.access(db_dir, os.F_OK): + try: + os.mkdir(db_dir) + except os.error, msg: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bsdcachedb: cannot set db path:", msg + db_dir = '.' + return db_dir + +def open_db2(filename, db_dir='', filetype=db.DB_BTREE): # backup + global home_dir + if not db_dir: + db_dir = home_dir + dir = setDBPath(db_dir) + path = os.path.join(dir, filename) + try: + d = dbshelve.open(path, filetype=filetype) + except Exception, msg: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bsdcachedb: cannot open dbshelve on", path, msg + d = dbshelve.open(filename, filetype=filetype) + return d + +def open_db(filename, db_dir='', filetype=db.DB_BTREE, writeback=False): + global home_dir + if not db_dir: + db_dir = home_dir + dir = setDBPath(db_dir) + path = os.path.join(dir, filename) + env = db.DBEnv() + # Concurrent Data Store + env.open(dir, db.DB_THREAD|db.DB_INIT_CDB|db.DB_INIT_MPOOL|db.DB_CREATE|db.DB_PRIVATE) + #d = db.DB(env) + #d.open(path, filetype, db.DB_THREAD|db.DB_CREATE) + #_db = BsdDbShelf(d, writeback=writeback) + _db = dbshelve.open(filename, flags=db.DB_THREAD|db.DB_CREATE, + filetype=filetype, dbenv=env) + return _db, dir + +def validDict(data, keylen=0): # basic requirement for a data item in DB + if not isinstance(data, dict): + return False + for key in data: + if not isinstance(key, str): + return False + if STRICT_CHECK and keylen and len(key) != keylen: + return False + return True + + +# Abstract base calss +class BasicDB: # Should we use delegation instead of inheritance? 
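
[Editor's note, illustrative only — not part of the patch.] The BasicDB base class whose body follows funnels every bsddb shelve operation through dbutils.DeadlockWrap(func, ..., max_retries=MAX_RETRIES), so transient deadlocks in the concurrent data store are retried instead of surfacing to callers. Independent of bsddb, the retry idea reduces to a sketch like the one below; the exception type and function name are invented for illustration:

    import time

    class TransientDBError(Exception):
        """Stand-in for the deadlock errors bsddb raises under contention."""

    def retry_on_deadlock(func, *args, **kw):
        # Same shape as dbutils.DeadlockWrap: call func, retrying a bounded number of times.
        max_retries = kw.pop('max_retries', 12)   # 12 matches MAX_RETRIES in this module
        for attempt in range(max_retries):
            try:
                return func(*args, **kw)
            except TransientDBError:
                if attempt == max_retries - 1:
                    raise
                time.sleep(0.01 * (attempt + 1))  # brief back-off before retrying
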
+ + exception_handler = None + + def __init__(self, db_dir=''): + self.default_item = {} #{'d':1, 'e':'abc', 'f':{'k':'v'}, 'g':[1,'2']} # for test + if self.__class__ == BasicDB: + self.db_name = 'basic.bsd' # for testing + self.opened = True + + self.db_dir = db_dir + self.filetype = db.DB_BTREE + self._data, self.db_dir = open_db(self.db_name, self.db_dir, filetype=self.filetype) + + #raise NotImplementedError, "Cannot create object of class BasicDB" + +#------------ Basic interfaces, used by member func and handlers -------------# + def __del__(self): + self.close() + + threadnames = {} + + def _put(self, key, value): # write + try: + if DEBUG: + name = currentThread().getName() + if name not in self.threadnames: + self.threadnames[name] = 0 + self.threadnames[name] += 1 + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bsdcachedb: put", len(self.threadnames), name, \ + self.threadnames[name], time(), self.__class__.__name__ + if not value and type(value) == dict: + raise Exception('Warning someone tries to insert empty data in db: %s:%s'% (key, value)) + + dbutils.DeadlockWrap(self._data.put, key, value, max_retries=MAX_RETRIES) + #self._data.put(key, value) + except: + pass + + def _has_key(self, key): # find a key + try: + return dbutils.DeadlockWrap(self._data.has_key, key, max_retries=MAX_RETRIES) + #return self._data.has_key(key) + except Exception, e: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bsdcachedb: _has_key EXCEPTION BY",currentThread().getName(), Exception, e, self.db_name, `key` + return False + + def _get(self, key, value=None): # read + try: + return dbutils.DeadlockWrap(self._data.get, key, value, max_retries=MAX_RETRIES) + #return self._data.get(key, value) +# except db.DBRunRecoveryError, e: +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bsdcachedb: Sorry, meet DBRunRecoveryError at get, have to remove the whole database", self.db_name +# self.report_exception(e) +# self._recover_db() # have to clear the whole database + except Exception,e: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bsdcachedb: _get EXCEPTION BY",currentThread().getName(), Exception, e, self.db_name, `key`, value + if value is not None: + return value + self.report_exception(e) + return None + +#=============================================================================== +# def _recover_db(self): +# path = os.path.join(self.db_dir, self.db_name) +# try: +# self._data.close() +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bsdcachedb: closed and removing database", path +# os.remove(path) +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bsdcachedb: removed database", path +# self._data, self.db_dir = open_db(self.db_name, self.db_dir, filetype=self.filetype) # reopen +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bsdcachedb: database is removed and reopened successfully", path +# except Exception, msg: +# print_exc() +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bsdcachedb: cannot remove the database", path, Exception, msg +#=============================================================================== + + def _updateItem(self, key, data): + try: + x = self._get(key) + if isinstance(x, dict): + x.update(data) + else: + x = data + self._put(key, x) + except: + print_exc() + + def _delete(self, key): + try: + if DEBUG: + name = currentThread().getName() + if name not in self.threadnames: + 
self.threadnames[name] = 0 + self.threadnames[name] += 1 + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bsdcachedb: del", len(self.threadnames), name, \ + self.threadnames[name], time(), self.__class__.__name__ + + dbutils.DeadlockWrap(self._data.delete, key, max_retries=MAX_RETRIES) + #self._data.delete(key) + except: + pass + + def _sync(self): # write data from mem to disk + try: + dbutils.DeadlockWrap(self._data.sync, max_retries=MAX_RETRIES) +# except db.DBRunRecoveryError, e: +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bsdcachedb: Sorry, meet DBRunRecoveryError at sync, have to remove the whole database", self.db_name +# self.report_exception(e) +# self._recover_db() # have to clear the whole database + except Exception, e: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bsdcachedb: synchronize db error", self.db_name, Exception, e + self.report_exception(e) + + def _clear(self): + dbutils.DeadlockWrap(self._data.clear, max_retries=MAX_RETRIES) + #self._data.clear() + + + def _keys(self): + try: + return dbutils.DeadlockWrap(self._data.keys, max_retries=MAX_RETRIES) + #return self._data.keys() + except Exception,e: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bsdcachedb: _keys EXCEPTION BY", currentThread().getName(), self.db_name + #print_exc() + self.report_exception(e) + return [] + + def _values(self): + return dbutils.DeadlockWrap(self._data.values, max_retries=MAX_RETRIES) + #return self._data.values() + + def _items(self): + return dbutils.DeadlockWrap(self._data.items, max_retries=MAX_RETRIES) + #return self._data.items() + + def _size(self): + try: + return dbutils.DeadlockWrap(len, self._data, max_retries=MAX_RETRIES) + #return len(self._data) + except: + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bsdcachedb: bsdcachedb.BasicDB._size error", self.__class__.__name__ + return 0 + + def _iteritems(self): + try: + return dbutils.DeadlockWrap(self._data.iteritems, max_retries=MAX_RETRIES) + except: + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bsdcachedb: bsdcachedb.BasicDB._iteritems error", self.__class__.__name__ + + def close(self): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bsdcachedb: Closing database",self.db_name,currentThread().getName() + if self.opened: + try: + self._sync() + dbutils.DeadlockWrap(self._data.close, max_retries=MAX_RETRIES) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bsdcachedb: Done waiting for database close",self.db_name,currentThread().getName() + #self._data.close() + except: + print_exc() + self.opened = False + + def updateDB(self, old_version): + pass + + def setDefaultItem(self, item): + df = deepcopy(self.default_item) + df.update(item) + return df + + def report_exception(self,e): + #return # Jie: don't show the error window to bother users + if BasicDB.exception_handler is not None: + BasicDB.exception_handler(e) + + +class MyDB(BasicDB): + + __single = None + + def __init__(self, myinfo=None, db_dir=''): + if MyDB.__single: + raise RuntimeError, "MyDB is singleton" + self.db_name = 'mydata.bsd' + self.opened = True + + self.db_dir = db_dir + self.filetype = db.DB_HASH + self._data, self.db_dir = open_db(self.db_name, self.db_dir, filetype=self.filetype) + + MyDB.__single = self + self.default_data = { + 'version':curr_version, + 'permid':'', + 'ip':'', + 'port':0, + 'name':'Tribler', 
+ 'torrent_path':'', + 'prefxchg_queue':[], + 'bootstrapping':1, + 'max_num_torrents':100000, + 'max_num_my_preferences':1000, + 'superpeers':Set(), + 'friends':Set(), + } + self.preload_keys = ['ip', 'torrent_path', 'permid'] # these keys can be changed at each bootstrap + self.initData(myinfo) + self.friend_set = Set(self._get('friends')) + + def getInstance(*args, **kw): + if MyDB.__single is None: + MyDB(*args, **kw) + if MyDB.__single._size() < len(MyDB.__single.default_data): + MyDB.__single.initData() + return MyDB.__single + getInstance = staticmethod(getInstance) + + def setDefault(self, data): # it is only used by validData() + dd = deepcopy(self.default_data) + dd.update(data) + return dd + + def initData(self, myinfo=None): + MyDB.checkVersion(self) + if not myinfo: + myinfo = {} + myinfo = self.setDefault(myinfo) + self.load(myinfo) + + def load(self, myinfo): + for key in myinfo: + if not self._has_key(key) or key in self.preload_keys: # right? + self._put(key, myinfo[key]) + + def checkVersion(db): + if not MyDB.__single: + MyDB() # it should never be entered + old_version = MyDB.__single._get('version') + if not old_version: + MyDB.__single._put('version', curr_version) + elif old_version < curr_version: + db.updateDB(old_version) + #elif old_version > curr_version: + #FIXME: user first install 3.4.0, then 3.5.0. Now he cannot reinstall 3.4.0 anymore + # raise RuntimeError, "The version of database is too high. Please update the software." + checkVersion = staticmethod(checkVersion) + + def updateDBVersion(db): + MyDB.__single._put('version', curr_version) + MyDB.__single._sync() + updateDBVersion = staticmethod(updateDBVersion) + + # superpeers + def addSuperPeer(self, permid): + if isValidPermid(permid): + sp = self._get('superpeers') + sp.add(permid) + self._put('superpeers', sp) + + def deleteSuperPeer(self, permid): + if isValidPermid(permid): + try: + sp = self._get('superpeers') + sp.remove(permid) + self._put('superpeers', sp) + except: + pass + + def isSuperPeer(self, permid): + return permid in self._get('superpeers') + + def getSuperPeers(self): + superpeers = self._get('superpeers') + if superpeers is not None: + return list(superpeers) + else: + return [] + + # friends + def addFriend(self, permid): + if isValidPermid(permid): + if not 'friends' in self._keys(): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bsdcachedb: addFriend key error", self._keys() + fr = self._get('friends') + fr.add(permid) + self._put('friends', fr) + self.friend_set = Set(fr) + + def deleteFriend(self, permid): + try: + fr = self._get('friends') + fr.remove(permid) + self._put('friends', fr) + self.friend_set = Set(fr) + except: + pass + + def isFriend(self, permid): + return permid in self.friend_set + + def getFriends(self): + friends = self._get('friends') + if friends is not None: + return list(friends) + else: + return [] + + +class PeerDB(BasicDB): + """ List of Peers, e.g. 
Host Cache """ + + __single = None + + def __init__(self, db_dir=''): + if PeerDB.__single: + raise RuntimeError, "PeerDB is singleton" + self.db_name = 'peers.bsd' + self.opened = True + + self.db_dir = db_dir + self.filetype = db.DB_BTREE + self._data, self.db_dir = open_db(self.db_name, self.db_dir, filetype=self.filetype) + MyDB.checkVersion(self) + PeerDB.__single = self + self.default_item = { + 'ip':'', + 'port':0, + 'name':'', + 'last_seen':0, + 'similarity':0, + 'connected_times':0, + 'oversion':0, # overlay version + 'buddycast_times':0, + 'last_buddycast_time':0, + #'trust':50, + #'reliability': + #'icon':'', + 'npeers':0, + 'ntorrents':0, + 'nprefs':0, + 'nqueries':0, + 'last_connected':0 + } + + def getInstance(*args, **kw): + if PeerDB.__single is None: + PeerDB(*args, **kw) + return PeerDB.__single + getInstance = staticmethod(getInstance) + + def updateItem(self, permid, item={}, update_dns=True, update_time=True): # insert a peer; update it if existed +# if item.has_key('name'): +# assert item['name'] != 'qfqf' + if isValidPermid(permid) and validDict(item): + if self._has_key(permid): + _item = self.getItem(permid) + if _item is None: # database error, the key exists, but the data ain't there + return + if not update_dns: + if item.has_key('ip'): + item.pop('ip') + if item.has_key('port'): + item.pop('port') + _item.update(item) + if update_time: + _item.update({'last_seen':int(time())}) + self._updateItem(permid, _item) + else: + item = self.setDefaultItem(item) + if update_time: + item.update({'last_seen':int(time())}) + self._put(permid, item) + + def deleteItem(self, permid): + self._delete(permid) + + def getItem(self, permid, default=False): + """ Arno: At the moment we keep a copy of the PeerDB in memory, + see Tribler.vwxGUI.peermanager. This class, however, already converts + the records using the save-memory by sharing key strings trick (see + TorrentDB) so there's no need to have that here. """ + ret = self._get(permid, None) + if ret is None and default: + ret = deepcopy(self.default_item) + return ret + + def hasItem(self, permid): + return self._has_key(permid) + + def updateDB(self, old_version): + def_newitem = {} + + if old_version < 4: + add_newitem = { + 'oversion':0, + 'npeers': 0, + 'ntorrents': 0, + 'nprefs': 0, + 'nqueries':0 + } + def_newitem.update(add_newitem) + + if old_version < 5: + add_newitem = { + 'last_connected':0 + } + def_newitem.update(add_newitem) + + keys = self._keys() + for key in keys: + item = self.getItem(key) + if item: + newitem = deepcopy(def_newitem) + newitem.update(item) # keep the old info if it exists + # copy last_seen value to last_connected + if 'last_seen' not in newitem: + newitem['last_seen'] = 0 # according to bug report + elif newitem['last_connected'] == 0 and newitem['last_seen'] > 0: + newitem['last_connected'] = newitem['last_seen'] + self._put(key, newitem) + self._sync() + + +class TorrentDB(BasicDB): + """ Database of all torrent files, including the torrents I don't have yet """ + + __single = None + + def __init__(self, db_dir=''): + if TorrentDB.__single: + raise RuntimeError, "TorrentDB is singleton" + self.db_name = 'torrents.bsd' + self.opened = True + + self.db_dir = db_dir + self.filetype = db.DB_BTREE + self._data, self.db_dir = open_db(self.db_name, self.db_dir, filetype=self.filetype) + + MyDB.checkVersion(self) + TorrentDB.__single = self + self.default_item = { + 'relevance':0, + 'torrent_name':'', # name of the torrent + 'torrent_dir':'', # dir+name=full path. 
Default path if the value is '\x01' + 'info':{}, # {name, length, announce, creation date, comment} + 'leecher': -1, + 'seeder': -1, + 'category': [], + 'ignore_number': 0, + 'last_check_time': 0, + 'retry_number': 0, + 'status': 'unknown', + 'source': '', + 'inserttime': 0, + 'progress': 0.0, + 'destdir':'', + 'secret':False # download secretly + } + self.infokey = 'info' + self.infokeys = ['name','creation date','num_files','length','announce','announce-list'] +# self.num_metadatalive = -100 + + def getInstance(*args, **kw): + if TorrentDB.__single is None: + TorrentDB(*args, **kw) + return TorrentDB.__single + getInstance = staticmethod(getInstance) + + def updateItem(self, infohash, item={}): # insert a torrent; update it if existed + + if isValidInfohash(infohash) and validDict(item): + if self._has_key(infohash): + _item = self.getItem(infohash) + if not _item: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bsdcachedb: Error in bsdcachedb.TorrentDB.updateItem: database inconsistant!", self._has_key(infohash), self.getItem(infohash) + return + _item.update(item) + self._updateItem(infohash, _item) + else: + item = self.setDefaultItem(item) + self._put(infohash, item) + + def deleteItem(self, infohash): + self._delete(infohash) + + def getItem(self, infohash, default=False,savemem=False): + """ Arno: At the moment we keep a copy of the TorrentDB in memory, + see Tribler.vwxGUI.torrentManager. A lot of memory can be saved + by reusing/sharing the strings of the keys in the database records (=dicts). + When the savemem option is enabled, the dict returned will have the + key strings of the self.default_item. """ + ret = self._get(infohash, None) + if ret is None and default: + ret = deepcopy(self.default_item) + if savemem: + newret = {} + for key in self.default_item: + newret[key] = ret.get(key) + newinfo = {} + hiskeys = ret['info'].keys() + for key in self.infokeys: + if key in hiskeys: + newinfo[key] = ret['info'][key] + newret[self.infokey] = newinfo + return newret + return ret + + def updateDB(self, old_version): + if old_version == 1: + def_newitem = { + 'category': ['?'], + 'ignore_number': 0, + 'last_check_time': long(time()), + 'retry_number': 0, + 'seeder': -1, + 'leecher': -1, + 'status': "unknown"} + keys = self._keys() + for key in keys: + self._updateItem(key, def_newitem) + if old_version == 1 or old_version == 2: + def_newitem = { + 'source': '', + 'inserttime': 0, + 'progress': 0.0, + 'destdir':''} + keys = self._keys() + for key in keys: + self._updateItem(key, def_newitem) + + +class PreferenceDB(BasicDB): + """ Peer * Torrent """ + + __single = None + + def __init__(self, db_dir=''): + if PreferenceDB.__single: + raise RuntimeError, "PreferenceDB is singleton" + self.db_name = 'preferences.bsd' + self.opened = True + + self.db_dir = db_dir + self.filetype = db.DB_BTREE + self._data, self.db_dir = open_db(self.db_name, self.db_dir, filetype=self.filetype) + + MyDB.checkVersion(self) + PreferenceDB.__single = self + self.default_item = { # subitem actually + #'relevance':0, # relevance from the owner of this torrent + #'rank':0 + } + + def getInstance(*args, **kw): + if PreferenceDB.__single is None: + PreferenceDB(*args, **kw) + return PreferenceDB.__single + getInstance = staticmethod(getInstance) + + def addPreference(self, permid, infohash, data={}): # add or update pref + if not isValidPermid(permid) or not isValidInfohash(infohash): + return + + if not self._has_key(permid): + data = self.setDefaultItem(data) + item = {infohash:data} + 
else: + if self.hasPreference(permid, infohash): + _data = self.getPreference(permid, infohash) + _data.update(data) + else: + _data = self.setDefaultItem(data) + _item = {infohash:_data} + item = self.getItem(permid) + item.update(_item) + self._put(permid, item) + + def deletePreference(self, permid, infohash): + if self._has_key(permid): + preferences = self._get(permid) + preferences.pop(infohash) + self._put(permid, preferences) + + def getPreference(self, permid, infohash): + if self._has_key(permid): + preferences = self._get(permid) + if preferences.has_key(infohash): + return preferences[infohash] + return None + + def hasPreference(self, permid, infohash): + if self._has_key(permid): + return infohash in self._get(permid) + else: + return False + + def deleteItem(self, permid): + self._delete(permid) + + def getItem(self, permid): + return self._get(permid, {}) + + +class MyPreferenceDB(BasicDB): # = FileDB + + __single = None + + def __init__(self, db_dir=''): + if MyPreferenceDB.__single: + raise RuntimeError, "TorrentDB is singleton" + self.db_name = 'mypreferences.bsd' + self.opened = True + + self.db_dir = db_dir + self.filetype = db.DB_BTREE + self._data, self.db_dir = open_db(self.db_name, self.db_dir, filetype=self.filetype) + + MyDB.checkVersion(self) + MyPreferenceDB.__single = self + self.default_item = { + 'created_time':0, + 'rank':0, # -1 ~ 5, as a recommendation degree to others + 'content_name':'', # real file name in disk, may be different with info['name'] + 'content_dir':'', # dir + name = full path + 'last_seen':0, + } + + def getInstance(*args, **kw): + if MyPreferenceDB.__single is None: + MyPreferenceDB(*args, **kw) + return MyPreferenceDB.__single + getInstance = staticmethod(getInstance) + + def updateItem(self, infohash, item={}): # insert a torrent; update it if existed + if isValidInfohash(infohash) and validDict(item): + if self._has_key(infohash): + _item = self.getItem(infohash) + _item.update(item) + _item.update({'last_seen':int(time())}) + self._updateItem(infohash, _item) + else: + self.default_item['created_time'] = self.default_item['last_seen'] = int(time()) + item = self.setDefaultItem(item) + self._put(infohash, item) + self._sync() + + def deleteItem(self, infohash): + self._delete(infohash) + self._sync() + + def getItem(self, infohash, default=False): + ret = self._get(infohash, None) + if ret is None and default: + ret = deepcopy(self.default_item) + return ret + + def hasPreference(self, infohash): + return self._has_key(infohash) + + def getRank(self, infohash): + v = self._get(infohash) + if not v: + return 0 + return v.get('rank', 0) + + +class OwnerDB(BasicDB): + """ Torrent * Peer """ + + __single = None + + def __init__(self, db_dir=''): + if OwnerDB.__single: + raise RuntimeError, "OwnerDB is singleton" + self.db_name = 'owners.bsd' + self.opened = True + + self.db_dir = db_dir + self.filetype = db.DB_BTREE + self._data, self.db_dir = open_db(self.db_name, self.db_dir, filetype=self.filetype) + + OwnerDB.__single = self + + def getInstance(*args, **kw): + if OwnerDB.__single is None: + OwnerDB(*args, **kw) + return OwnerDB.__single + getInstance = staticmethod(getInstance) + + def getNumOwners(self, infohash): + owners = self._get(infohash) + if owners is not None: + n = len(owners) + else: + n = 0 + #print n, `infohash`, owners + return n + + + def addOwner(self, infohash, permid): + if isValidPermid(permid) and isValidInfohash(infohash): + if self._has_key(infohash): + owners = self._get(infohash) + owners.add(permid) + 
self._put(infohash, owners) + else: + self._put(infohash, Set([permid])) + + def deleteOwner(self, infohash, permid): + try: + owners = self._get(infohash) + owners.remove(permid) + if not owners: # remove the item if it is empty + self._delete(infohash) + else: + self._put(infohash, owners) + except: + pass + + def isOwner(self, permid, infohash): + if self._has_key(infohash): + owners = self._get(infohash) + return permid in owners + else: + return False + + def deleteItem(self, infohash): + self._delete(infohash) + + def getItem(self, infohash): + owners = self._get(infohash) + if owners is not None: + return list(owners) + else: + return [] + +class IP2PermIDDB(BasicDB): + """ IP * Peer """ + + __single = None + + def __init__(self, db_dir=''): + if IP2PermIDDB.__single: + raise RuntimeError, "IP2PermIDDB is singleton" + self.db_name = 'ip2permid.bsd' + self.opened = True + + self.filetype = db.DB_BTREE + self._data, self.db_dir = open_db(self.db_name, db_dir, filetype=self.filetype) + self.peer_db = PeerDB.getInstance(db_dir=db_dir) + IP2PermIDDB.__single = self + + def getInstance(*args, **kw): + if IP2PermIDDB.__single is None: + IP2PermIDDB(*args, **kw) + return IP2PermIDDB.__single + getInstance = staticmethod(getInstance) + + + def addIP(self, ip, permid): + if not isValidPermid(permid) or not isValidIP(ip): + return + + self._put(ip,permid) + + def getPermIDByIP(self, ip): + if not isValidIP(ip): + return None + + if not self._has_key(ip): + return None + else: + return self._get(ip) + + def deletePermID(self, permid, ip=None): + # Jie: This function was amended otherwise it takes too long to perform + if not ip: + data = self.peer_db._get(permid, {}) + ip = data.get('ip',None) + permid2 = self._get(ip) + if permid == permid2: + self._delete(ip) + + +# DB extension for BarterCast statistics +class BarterCastDB(BasicDB): + + __single = None + + def __init__(self, db_dir=''): + if BarterCastDB.__single: + raise RuntimeError, "BarterCastDB is singleton" + self.db_name = 'bartercast.bsd' + self.opened = True + + self.db_dir = db_dir + self.filetype = db.DB_BTREE + self._data, self.db_dir = open_db(self.db_name, self.db_dir, filetype=self.filetype) + + MyDB.checkVersion(self) + BarterCastDB.__single = self + self.default_item = { + 'last_seen':0, + 'value': 0, + 'downloaded': 0, + 'uploaded': 0, + } + + def getInstance(*args, **kw): + if BarterCastDB.__single is None: + BarterCastDB(*args, **kw) + return BarterCastDB.__single + getInstance = staticmethod(getInstance) + + def updateItem(self, (permid_from, permid_to), item={}, update_time=True): # insert a peer; update it if existed + + if isValidPermid(permid_from) and isValidPermid(permid_to) and validDict(item): + + key = bencode((permid_from, permid_to)) + if self._has_key(key): + _item = self.getItem((permid_from, permid_to)) + if _item is None: # database error, the key exists, but the data ain't there + return + _item.update(item) + if update_time: + _item.update({'last_seen':int(time())}) + self._updateItem(key, _item) + else: + item = self.setDefaultItem(item) + if update_time: + item.update({'last_seen':int(time())}) + self._put(key, item) + + def deleteItem(self, (permid_from, permid_to)): + key = bencode((permid_from, permid_to)) + self._delete(key) + + def getItem(self, (permid_from, permid_to), default=False): + key = bencode((permid_from, permid_to)) + ret = self._get(key, None) + if ret is None and default: + ret = deepcopy(self.default_item) + return ret + + def hasItem(self, (permid_from, permid_to)): + key = 
bencode((permid_from, permid_to)) + return self._has_key(key) + +#=============================================================================== +# class ActionDB(BasicDB): +# +# __single = None +# +# def __init__(self, db_dir=''): +# if ActionDB.__single: +# raise RuntimeError, "ActionDB is singleton" +# self.db_name = 'actions.bsd' +# self.opened = True +# env = db.DBEnv() +# # Concurrent Data Store +# env.open(db_dir, db.DB_THREAD|db.DB_INIT_CDB|db.DB_INIT_MPOOL|db.DB_CREATE|db.DB_PRIVATE) +# self._data = db.DB(dbEnv=env) +# self._data.open(self.filename, db.DB_RECNO, db.DB_CREATE) +# ActionDB.__single = self +# +# def getInstance(*args, **kw): +# if ActionDB.__single is None: +# ActionDB(*args, **kw) +# return ActionDB.__single +# getInstance = staticmethod(getInstance) +#=============================================================================== + + + + diff --git a/tribler-mod/Tribler/Core/CacheDB/bsdcachedb.py.bak b/tribler-mod/Tribler/Core/CacheDB/bsdcachedb.py.bak new file mode 100644 index 0000000..0e50bdf --- /dev/null +++ b/tribler-mod/Tribler/Core/CacheDB/bsdcachedb.py.bak @@ -0,0 +1,1136 @@ +# Written by Jie Yang +# see LICENSE.txt for license information +# +# ! WARNING ! +# Arno: The database schemas used should be versioned such that a specific DB +# version number denotes a specific set of schemas. So when updating the schemas +# i.e. adding a field, you MUST provide an updateDB() method that converts the +# records in the old schema to the new schema. If you are the first to add a new +# field for a release, you must also increase the database version number. +# ! WARNING ! +# +# +## TODO: update database V3: +# TorrentDB: clean relevance, insert time +# PeerDB: clean similarity, insert time +# PreferenceDB: clean permid:torrent_id:{} + +""" +Database design +Value in bracket is the default value +Don't use None as a default value + +MyDB - (PeerDB) + mydata.bsd: # future keys: pictures, + version: int (curr_version) # required + permid: str # required + ip: str ('') + port: int (0) + name: str ('Tribler') + torrent_path: str ('') # default path to store torrents + prefxchg_queue: list ([]) # not used + bootstrapping: int (1) # not used + max_num_torrents: int (100000) + max_num_my_preferences: int (1000) + superpeers: Set([permid]) + friends: Set([permid]) + +PeerDB - (MyFriendDB, PreferenceDB, OwnerDB) + peers.bsd: # future keys: sys_trust, reliablity, speed, personal_info, .. + permid:{ + ip: str ('') + port: int (0) # listening port, even behind firewall + name: str ('unknown') + last_seen: int (0) # last seen of this peer by anyone. This info could be copied from other peers + similarity: int (0) # [0, 1000] + oversion: int(0) # overlay version, added in 3.7.1, overlay version 4 + connected_times: int(0) # times to connect the peer successfully + #tried_times: int(0) # times to attempt to connect the peer, removed from 3.7.1 + buddycast_times: int(0) # times to receive buddycast message + last_buddycast_time: int (0) # from buddycast 3/tribler 3.7 + #relability (uptime, IP fixed/changing) + #trust: int (0) # [0, 100] + #icon: str ('') # name + '_' + permid[-4:] + npeers: int(0) # added in 4.1, overlay version 6, DB version 4 + ntorrents: int(0) # added in 4.1, overlay version 6, DB version 4 + nprefs: int(0) # added in 4.1, overlay version 6, DB version 4 + nqueries: int(0) # added in 4.1, overlay version 6, DB version 4 + last_connected: int(0) # last time I connected to the peer. 
added in 4.1.3, overlay version 6, DB version 5 + } + +TorrentDB - (PreferenceDB, MyPreference, OwnerDB) + torrents.bsd: # future keys: names, tags, trackers, .. + infohash:{ + relevance: int (0) # [0, 1000] + torrent_name: str ('') # torrent name + torrent_dir: str ('') # path of the torrent (without the file name). '\x01' for default path + info: dict ({}) # {name, length, announce, creation date, comment, announce-list, num_files} + # new keys in database version 2 + leecher: int (-1) + seeder: int (-1) + category: list () + ignore_number: int (0) + last_check_time: long (time()) + retry_number: int (0) + status: str ("unknown") + source: str("") + inserttime: long (time()) + progress: float + destdir: str("") + } + +PreferenceDB - (PeerDB, TorrentDB) # other peers' preferences + preferences.bsd: + permid:{ + torrent_id:{ + # 'relevance': int (0), 'rank': int (0), removed from 3.6 + } + } + +MyPreferenceDB - (TorrentDB) + mypreferences.bsd: # future keys: speed + infohash:{ + created_time: int (0) # time to start download/upload the torrent + content_name: str ('') # real file name in disk, may be different with info['name'] + content_dir: str ('') # content_dir + content_name = full path + rank: int (0) # [-1, 5], # -1 means it is a fake torrent + last_seen: int (0) + } + +OwnerDB - (PeerDB, TorrentDB) + owner.bsd: + infohash: Set([permid]) # future keys: tags, name + +""" + +import os, sys +from time import time +from copy import deepcopy +from sets import Set +from traceback import print_exc +from threading import currentThread + +from Tribler.Core.BitTornado.bencode import bencode + +#from Tribler.utilities import isValidPermid, isValidInfohash + +def isValidIP(ip): + try: + return validIP(ip) + except: + return False + +import socket +def validIP(ip): + try: + try: + # Is IPv4 addr? + socket.inet_aton(ip) + return True + except socket.error: + # Is hostname / IPv6? 
+ socket.getaddrinfo(ip, None) + return True + except: + print_exc() + raise RuntimeError, "invalid IP address: " + ip + + +try: + # For Python 2.3 + from bsddb import db, dbshelve, dbutils +except ImportError: + # For earlier Pythons w/distutils pybsddb + from bsddb3 import db, dbshelve, dbutils + +dbutils._deadlock_VerboseFile = sys.stderr + + +#permid_len = 0 #112 +#infohash_len = 20 +# + +home_dir = 'bsddb' +# Database schema versions (for all databases) +# 1 = First +# 2 = Added keys to TorrentDB: leecher,seeder,category,ignore_number,last_check_time,retry_number,status +# 3 = Added keys to TorrentDB: source,inserttime +# 4 = Added keys to PeerDB: npeers, ntorrents, nprefs, nqueries +# 5 = Added keys to PeerDB: last_connected +curr_version = 5 +permid_length = 112 +infohash_length = 20 +torrent_id_length = 20 +MAX_RETRIES = 12 +STRICT_CHECK = False +DEBUG = False + +def isValidPermid(permid): # validate permid in outer layer + return True + +def isValidInfohash(infohash): + return True + +def init(config_dir, myinfo, db_exception_handler = None): + """ create all databases """ + + global home_dir + home_dir = make_filename(config_dir, 'bsddb') + if DEBUG: + print "Init database at", home_dir + BasicDB.exception_handler = db_exception_handler + MyDB.getInstance(myinfo, home_dir) + PeerDB.getInstance(home_dir) + TorrentDB.getInstance(home_dir) + PreferenceDB.getInstance(home_dir) + MyPreferenceDB.getInstance(home_dir) + OwnerDB.getInstance(home_dir) + MyDB.updateDBVersion(curr_version) + +def done(config_dir): + MyDB.getInstance().close() + MyPreferenceDB.getInstance().close() + OwnerDB.getInstance().close() + PeerDB.getInstance().close() + PreferenceDB.getInstance().close() + TorrentDB.getInstance().close() + + +def make_filename(config_dir,filename): + if config_dir is None: + return filename + else: + return os.path.join(config_dir,filename) + +def setDBPath(db_dir = ''): + if not db_dir: + db_dir = '.' + if not os.access(db_dir, os.F_OK): + try: + os.mkdir(db_dir) + except os.error, msg: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bsdcachedb: cannot set db path:", msg + db_dir = '.' + return db_dir + +def open_db2(filename, db_dir='', filetype=db.DB_BTREE): # backup + global home_dir + if not db_dir: + db_dir = home_dir + dir = setDBPath(db_dir) + path = os.path.join(dir, filename) + try: + d = dbshelve.open(path, filetype=filetype) + except Exception, msg: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bsdcachedb: cannot open dbshelve on", path, msg + d = dbshelve.open(filename, filetype=filetype) + return d + +def open_db(filename, db_dir='', filetype=db.DB_BTREE, writeback=False): + global home_dir + if not db_dir: + db_dir = home_dir + dir = setDBPath(db_dir) + path = os.path.join(dir, filename) + env = db.DBEnv() + # Concurrent Data Store + env.open(dir, db.DB_THREAD|db.DB_INIT_CDB|db.DB_INIT_MPOOL|db.DB_CREATE|db.DB_PRIVATE) + #d = db.DB(env) + #d.open(path, filetype, db.DB_THREAD|db.DB_CREATE) + #_db = BsdDbShelf(d, writeback=writeback) + _db = dbshelve.open(filename, flags=db.DB_THREAD|db.DB_CREATE, + filetype=filetype, dbenv=env) + return _db, dir + +def validDict(data, keylen=0): # basic requirement for a data item in DB + if not isinstance(data, dict): + return False + for key in data: + if not isinstance(key, str): + return False + if STRICT_CHECK and keylen and len(key) != keylen: + return False + return True + + +# Abstract base calss +class BasicDB: # Should we use delegation instead of inheritance? 
+ + exception_handler = None + + def __init__(self, db_dir=''): + self.default_item = {} #{'d':1, 'e':'abc', 'f':{'k':'v'}, 'g':[1,'2']} # for test + if self.__class__ == BasicDB: + self.db_name = 'basic.bsd' # for testing + self.opened = True + + self.db_dir = db_dir + self.filetype = db.DB_BTREE + self._data, self.db_dir = open_db(self.db_name, self.db_dir, filetype=self.filetype) + + #raise NotImplementedError, "Cannot create object of class BasicDB" + +#------------ Basic interfaces, used by member func and handlers -------------# + def __del__(self): + self.close() + + threadnames = {} + + def _put(self, key, value): # write + try: + if DEBUG: + name = currentThread().getName() + if name not in self.threadnames: + self.threadnames[name] = 0 + self.threadnames[name] += 1 + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bsdcachedb: put", len(self.threadnames), name, \ + self.threadnames[name], time(), self.__class__.__name__ + if not value and type(value) == dict: + raise Exception('Warning someone tries to insert empty data in db: %s:%s'% (key, value)) + + dbutils.DeadlockWrap(self._data.put, key, value, max_retries=MAX_RETRIES) + #self._data.put(key, value) + except: + pass + + def _has_key(self, key): # find a key + try: + return dbutils.DeadlockWrap(self._data.has_key, key, max_retries=MAX_RETRIES) + #return self._data.has_key(key) + except Exception, e: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bsdcachedb: _has_key EXCEPTION BY",currentThread().getName(), Exception, e, self.db_name, `key` + return False + + def _get(self, key, value=None): # read + try: + return dbutils.DeadlockWrap(self._data.get, key, value, max_retries=MAX_RETRIES) + #return self._data.get(key, value) +# except db.DBRunRecoveryError, e: +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bsdcachedb: Sorry, meet DBRunRecoveryError at get, have to remove the whole database", self.db_name +# self.report_exception(e) +# self._recover_db() # have to clear the whole database + except Exception,e: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bsdcachedb: _get EXCEPTION BY",currentThread().getName(), Exception, e, self.db_name, `key`, value + if value is not None: + return value + self.report_exception(e) + return None + +#=============================================================================== +# def _recover_db(self): +# path = os.path.join(self.db_dir, self.db_name) +# try: +# self._data.close() +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bsdcachedb: closed and removing database", path +# os.remove(path) +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bsdcachedb: removed database", path +# self._data, self.db_dir = open_db(self.db_name, self.db_dir, filetype=self.filetype) # reopen +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bsdcachedb: database is removed and reopened successfully", path +# except Exception, msg: +# print_exc() +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bsdcachedb: cannot remove the database", path, Exception, msg +#=============================================================================== + + def _updateItem(self, key, data): + try: + x = self._get(key) + if isinstance(x, dict): + x.update(data) + else: + x = data + self._put(key, x) + except: + print_exc() + + def _delete(self, key): + try: + if DEBUG: + name = currentThread().getName() + if name not in self.threadnames: + 
self.threadnames[name] = 0 + self.threadnames[name] += 1 + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bsdcachedb: del", len(self.threadnames), name, \ + self.threadnames[name], time(), self.__class__.__name__ + + dbutils.DeadlockWrap(self._data.delete, key, max_retries=MAX_RETRIES) + #self._data.delete(key) + except: + pass + + def _sync(self): # write data from mem to disk + try: + dbutils.DeadlockWrap(self._data.sync, max_retries=MAX_RETRIES) +# except db.DBRunRecoveryError, e: +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bsdcachedb: Sorry, meet DBRunRecoveryError at sync, have to remove the whole database", self.db_name +# self.report_exception(e) +# self._recover_db() # have to clear the whole database + except Exception, e: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bsdcachedb: synchronize db error", self.db_name, Exception, e + self.report_exception(e) + + def _clear(self): + dbutils.DeadlockWrap(self._data.clear, max_retries=MAX_RETRIES) + #self._data.clear() + + + def _keys(self): + try: + return dbutils.DeadlockWrap(self._data.keys, max_retries=MAX_RETRIES) + #return self._data.keys() + except Exception,e: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bsdcachedb: _keys EXCEPTION BY", currentThread().getName(), self.db_name + #print_exc() + self.report_exception(e) + return [] + + def _values(self): + return dbutils.DeadlockWrap(self._data.values, max_retries=MAX_RETRIES) + #return self._data.values() + + def _items(self): + return dbutils.DeadlockWrap(self._data.items, max_retries=MAX_RETRIES) + #return self._data.items() + + def _size(self): + try: + return dbutils.DeadlockWrap(len, self._data, max_retries=MAX_RETRIES) + #return len(self._data) + except: + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bsdcachedb: bsdcachedb.BasicDB._size error", self.__class__.__name__ + return 0 + + def _iteritems(self): + try: + return dbutils.DeadlockWrap(self._data.iteritems, max_retries=MAX_RETRIES) + except: + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bsdcachedb: bsdcachedb.BasicDB._iteritems error", self.__class__.__name__ + + def close(self): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bsdcachedb: Closing database",self.db_name,currentThread().getName() + if self.opened: + try: + self._sync() + dbutils.DeadlockWrap(self._data.close, max_retries=MAX_RETRIES) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bsdcachedb: Done waiting for database close",self.db_name,currentThread().getName() + #self._data.close() + except: + print_exc() + self.opened = False + + def updateDB(self, old_version): + pass + + def setDefaultItem(self, item): + df = deepcopy(self.default_item) + df.update(item) + return df + + def report_exception(self,e): + #return # Jie: don't show the error window to bother users + if BasicDB.exception_handler is not None: + BasicDB.exception_handler(e) + + +class MyDB(BasicDB): + + __single = None + + def __init__(self, myinfo=None, db_dir=''): + if MyDB.__single: + raise RuntimeError, "MyDB is singleton" + self.db_name = 'mydata.bsd' + self.opened = True + + self.db_dir = db_dir + self.filetype = db.DB_HASH + self._data, self.db_dir = open_db(self.db_name, self.db_dir, filetype=self.filetype) + + MyDB.__single = self + self.default_data = { + 'version':curr_version, + 'permid':'', + 'ip':'', + 'port':0, + 'name':'Tribler', 
+ 'torrent_path':'', + 'prefxchg_queue':[], + 'bootstrapping':1, + 'max_num_torrents':100000, + 'max_num_my_preferences':1000, + 'superpeers':Set(), + 'friends':Set(), + } + self.preload_keys = ['ip', 'torrent_path', 'permid'] # these keys can be changed at each bootstrap + self.initData(myinfo) + self.friend_set = Set(self._get('friends')) + + def getInstance(*args, **kw): + if MyDB.__single is None: + MyDB(*args, **kw) + if MyDB.__single._size() < len(MyDB.__single.default_data): + MyDB.__single.initData() + return MyDB.__single + getInstance = staticmethod(getInstance) + + def setDefault(self, data): # it is only used by validData() + dd = deepcopy(self.default_data) + dd.update(data) + return dd + + def initData(self, myinfo=None): + MyDB.checkVersion(self) + if not myinfo: + myinfo = {} + myinfo = self.setDefault(myinfo) + self.load(myinfo) + + def load(self, myinfo): + for key in myinfo: + if not self._has_key(key) or key in self.preload_keys: # right? + self._put(key, myinfo[key]) + + def checkVersion(db): + if not MyDB.__single: + MyDB() # it should never be entered + old_version = MyDB.__single._get('version') + if not old_version: + MyDB.__single._put('version', curr_version) + elif old_version < curr_version: + db.updateDB(old_version) + #elif old_version > curr_version: + #FIXME: user first install 3.4.0, then 3.5.0. Now he cannot reinstall 3.4.0 anymore + # raise RuntimeError, "The version of database is too high. Please update the software." + checkVersion = staticmethod(checkVersion) + + def updateDBVersion(db): + MyDB.__single._put('version', curr_version) + MyDB.__single._sync() + updateDBVersion = staticmethod(updateDBVersion) + + # superpeers + def addSuperPeer(self, permid): + if isValidPermid(permid): + sp = self._get('superpeers') + sp.add(permid) + self._put('superpeers', sp) + + def deleteSuperPeer(self, permid): + if isValidPermid(permid): + try: + sp = self._get('superpeers') + sp.remove(permid) + self._put('superpeers', sp) + except: + pass + + def isSuperPeer(self, permid): + return permid in self._get('superpeers') + + def getSuperPeers(self): + superpeers = self._get('superpeers') + if superpeers is not None: + return list(superpeers) + else: + return [] + + # friends + def addFriend(self, permid): + if isValidPermid(permid): + if not 'friends' in self._keys(): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bsdcachedb: addFriend key error", self._keys() + fr = self._get('friends') + fr.add(permid) + self._put('friends', fr) + self.friend_set = Set(fr) + + def deleteFriend(self, permid): + try: + fr = self._get('friends') + fr.remove(permid) + self._put('friends', fr) + self.friend_set = Set(fr) + except: + pass + + def isFriend(self, permid): + return permid in self.friend_set + + def getFriends(self): + friends = self._get('friends') + if friends is not None: + return list(friends) + else: + return [] + + +class PeerDB(BasicDB): + """ List of Peers, e.g. 
Host Cache """ + + __single = None + + def __init__(self, db_dir=''): + if PeerDB.__single: + raise RuntimeError, "PeerDB is singleton" + self.db_name = 'peers.bsd' + self.opened = True + + self.db_dir = db_dir + self.filetype = db.DB_BTREE + self._data, self.db_dir = open_db(self.db_name, self.db_dir, filetype=self.filetype) + MyDB.checkVersion(self) + PeerDB.__single = self + self.default_item = { + 'ip':'', + 'port':0, + 'name':'', + 'last_seen':0, + 'similarity':0, + 'connected_times':0, + 'oversion':0, # overlay version + 'buddycast_times':0, + 'last_buddycast_time':0, + #'trust':50, + #'reliability': + #'icon':'', + 'npeers':0, + 'ntorrents':0, + 'nprefs':0, + 'nqueries':0, + 'last_connected':0 + } + + def getInstance(*args, **kw): + if PeerDB.__single is None: + PeerDB(*args, **kw) + return PeerDB.__single + getInstance = staticmethod(getInstance) + + def updateItem(self, permid, item={}, update_dns=True, update_time=True): # insert a peer; update it if existed +# if item.has_key('name'): +# assert item['name'] != 'qfqf' + if isValidPermid(permid) and validDict(item): + if self._has_key(permid): + _item = self.getItem(permid) + if _item is None: # database error, the key exists, but the data ain't there + return + if not update_dns: + if item.has_key('ip'): + item.pop('ip') + if item.has_key('port'): + item.pop('port') + _item.update(item) + if update_time: + _item.update({'last_seen':int(time())}) + self._updateItem(permid, _item) + else: + item = self.setDefaultItem(item) + if update_time: + item.update({'last_seen':int(time())}) + self._put(permid, item) + + def deleteItem(self, permid): + self._delete(permid) + + def getItem(self, permid, default=False): + """ Arno: At the moment we keep a copy of the PeerDB in memory, + see Tribler.vwxGUI.peermanager. This class, however, already converts + the records using the save-memory by sharing key strings trick (see + TorrentDB) so there's no need to have that here. """ + ret = self._get(permid, None) + if ret is None and default: + ret = deepcopy(self.default_item) + return ret + + def hasItem(self, permid): + return self._has_key(permid) + + def updateDB(self, old_version): + def_newitem = {} + + if old_version < 4: + add_newitem = { + 'oversion':0, + 'npeers': 0, + 'ntorrents': 0, + 'nprefs': 0, + 'nqueries':0 + } + def_newitem.update(add_newitem) + + if old_version < 5: + add_newitem = { + 'last_connected':0 + } + def_newitem.update(add_newitem) + + keys = self._keys() + for key in keys: + item = self.getItem(key) + if item: + newitem = deepcopy(def_newitem) + newitem.update(item) # keep the old info if it exists + # copy last_seen value to last_connected + if 'last_seen' not in newitem: + newitem['last_seen'] = 0 # according to bug report + elif newitem['last_connected'] == 0 and newitem['last_seen'] > 0: + newitem['last_connected'] = newitem['last_seen'] + self._put(key, newitem) + self._sync() + + +class TorrentDB(BasicDB): + """ Database of all torrent files, including the torrents I don't have yet """ + + __single = None + + def __init__(self, db_dir=''): + if TorrentDB.__single: + raise RuntimeError, "TorrentDB is singleton" + self.db_name = 'torrents.bsd' + self.opened = True + + self.db_dir = db_dir + self.filetype = db.DB_BTREE + self._data, self.db_dir = open_db(self.db_name, self.db_dir, filetype=self.filetype) + + MyDB.checkVersion(self) + TorrentDB.__single = self + self.default_item = { + 'relevance':0, + 'torrent_name':'', # name of the torrent + 'torrent_dir':'', # dir+name=full path. 
Default path if the value is '\x01' + 'info':{}, # {name, length, announce, creation date, comment} + 'leecher': -1, + 'seeder': -1, + 'category': [], + 'ignore_number': 0, + 'last_check_time': 0, + 'retry_number': 0, + 'status': 'unknown', + 'source': '', + 'inserttime': 0, + 'progress': 0.0, + 'destdir':'', + 'secret':False # download secretly + } + self.infokey = 'info' + self.infokeys = ['name','creation date','num_files','length','announce','announce-list'] +# self.num_metadatalive = -100 + + def getInstance(*args, **kw): + if TorrentDB.__single is None: + TorrentDB(*args, **kw) + return TorrentDB.__single + getInstance = staticmethod(getInstance) + + def updateItem(self, infohash, item={}): # insert a torrent; update it if existed + + if isValidInfohash(infohash) and validDict(item): + if self._has_key(infohash): + _item = self.getItem(infohash) + if not _item: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bsdcachedb: Error in bsdcachedb.TorrentDB.updateItem: database inconsistant!", self._has_key(infohash), self.getItem(infohash) + return + _item.update(item) + self._updateItem(infohash, _item) + else: + item = self.setDefaultItem(item) + self._put(infohash, item) + + def deleteItem(self, infohash): + self._delete(infohash) + + def getItem(self, infohash, default=False,savemem=False): + """ Arno: At the moment we keep a copy of the TorrentDB in memory, + see Tribler.vwxGUI.torrentManager. A lot of memory can be saved + by reusing/sharing the strings of the keys in the database records (=dicts). + When the savemem option is enabled, the dict returned will have the + key strings of the self.default_item. """ + ret = self._get(infohash, None) + if ret is None and default: + ret = deepcopy(self.default_item) + if savemem: + newret = {} + for key in self.default_item: + newret[key] = ret.get(key) + newinfo = {} + hiskeys = ret['info'].keys() + for key in self.infokeys: + if key in hiskeys: + newinfo[key] = ret['info'][key] + newret[self.infokey] = newinfo + return newret + return ret + + def updateDB(self, old_version): + if old_version == 1: + def_newitem = { + 'category': ['?'], + 'ignore_number': 0, + 'last_check_time': long(time()), + 'retry_number': 0, + 'seeder': -1, + 'leecher': -1, + 'status': "unknown"} + keys = self._keys() + for key in keys: + self._updateItem(key, def_newitem) + if old_version == 1 or old_version == 2: + def_newitem = { + 'source': '', + 'inserttime': 0, + 'progress': 0.0, + 'destdir':''} + keys = self._keys() + for key in keys: + self._updateItem(key, def_newitem) + + +class PreferenceDB(BasicDB): + """ Peer * Torrent """ + + __single = None + + def __init__(self, db_dir=''): + if PreferenceDB.__single: + raise RuntimeError, "PreferenceDB is singleton" + self.db_name = 'preferences.bsd' + self.opened = True + + self.db_dir = db_dir + self.filetype = db.DB_BTREE + self._data, self.db_dir = open_db(self.db_name, self.db_dir, filetype=self.filetype) + + MyDB.checkVersion(self) + PreferenceDB.__single = self + self.default_item = { # subitem actually + #'relevance':0, # relevance from the owner of this torrent + #'rank':0 + } + + def getInstance(*args, **kw): + if PreferenceDB.__single is None: + PreferenceDB(*args, **kw) + return PreferenceDB.__single + getInstance = staticmethod(getInstance) + + def addPreference(self, permid, infohash, data={}): # add or update pref + if not isValidPermid(permid) or not isValidInfohash(infohash): + return + + if not self._has_key(permid): + data = self.setDefaultItem(data) + item = {infohash:data} + 
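+ # a record for this peer already exists: the branch below merges the new preference into it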
else: + if self.hasPreference(permid, infohash): + _data = self.getPreference(permid, infohash) + _data.update(data) + else: + _data = self.setDefaultItem(data) + _item = {infohash:_data} + item = self.getItem(permid) + item.update(_item) + self._put(permid, item) + + def deletePreference(self, permid, infohash): + if self._has_key(permid): + preferences = self._get(permid) + preferences.pop(infohash) + self._put(permid, preferences) + + def getPreference(self, permid, infohash): + if self._has_key(permid): + preferences = self._get(permid) + if preferences.has_key(infohash): + return preferences[infohash] + return None + + def hasPreference(self, permid, infohash): + if self._has_key(permid): + return infohash in self._get(permid) + else: + return False + + def deleteItem(self, permid): + self._delete(permid) + + def getItem(self, permid): + return self._get(permid, {}) + + +class MyPreferenceDB(BasicDB): # = FileDB + + __single = None + + def __init__(self, db_dir=''): + if MyPreferenceDB.__single: + raise RuntimeError, "TorrentDB is singleton" + self.db_name = 'mypreferences.bsd' + self.opened = True + + self.db_dir = db_dir + self.filetype = db.DB_BTREE + self._data, self.db_dir = open_db(self.db_name, self.db_dir, filetype=self.filetype) + + MyDB.checkVersion(self) + MyPreferenceDB.__single = self + self.default_item = { + 'created_time':0, + 'rank':0, # -1 ~ 5, as a recommendation degree to others + 'content_name':'', # real file name in disk, may be different with info['name'] + 'content_dir':'', # dir + name = full path + 'last_seen':0, + } + + def getInstance(*args, **kw): + if MyPreferenceDB.__single is None: + MyPreferenceDB(*args, **kw) + return MyPreferenceDB.__single + getInstance = staticmethod(getInstance) + + def updateItem(self, infohash, item={}): # insert a torrent; update it if existed + if isValidInfohash(infohash) and validDict(item): + if self._has_key(infohash): + _item = self.getItem(infohash) + _item.update(item) + _item.update({'last_seen':int(time())}) + self._updateItem(infohash, _item) + else: + self.default_item['created_time'] = self.default_item['last_seen'] = int(time()) + item = self.setDefaultItem(item) + self._put(infohash, item) + self._sync() + + def deleteItem(self, infohash): + self._delete(infohash) + self._sync() + + def getItem(self, infohash, default=False): + ret = self._get(infohash, None) + if ret is None and default: + ret = deepcopy(self.default_item) + return ret + + def hasPreference(self, infohash): + return self._has_key(infohash) + + def getRank(self, infohash): + v = self._get(infohash) + if not v: + return 0 + return v.get('rank', 0) + + +class OwnerDB(BasicDB): + """ Torrent * Peer """ + + __single = None + + def __init__(self, db_dir=''): + if OwnerDB.__single: + raise RuntimeError, "OwnerDB is singleton" + self.db_name = 'owners.bsd' + self.opened = True + + self.db_dir = db_dir + self.filetype = db.DB_BTREE + self._data, self.db_dir = open_db(self.db_name, self.db_dir, filetype=self.filetype) + + OwnerDB.__single = self + + def getInstance(*args, **kw): + if OwnerDB.__single is None: + OwnerDB(*args, **kw) + return OwnerDB.__single + getInstance = staticmethod(getInstance) + + def getNumOwners(self, infohash): + owners = self._get(infohash) + if owners is not None: + n = len(owners) + else: + n = 0 + #print n, `infohash`, owners + return n + + + def addOwner(self, infohash, permid): + if isValidPermid(permid) and isValidInfohash(infohash): + if self._has_key(infohash): + owners = self._get(infohash) + owners.add(permid) + 
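+ # write the updated owner Set back under this infohash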
self._put(infohash, owners) + else: + self._put(infohash, Set([permid])) + + def deleteOwner(self, infohash, permid): + try: + owners = self._get(infohash) + owners.remove(permid) + if not owners: # remove the item if it is empty + self._delete(infohash) + else: + self._put(infohash, owners) + except: + pass + + def isOwner(self, permid, infohash): + if self._has_key(infohash): + owners = self._get(infohash) + return permid in owners + else: + return False + + def deleteItem(self, infohash): + self._delete(infohash) + + def getItem(self, infohash): + owners = self._get(infohash) + if owners is not None: + return list(owners) + else: + return [] + +class IP2PermIDDB(BasicDB): + """ IP * Peer """ + + __single = None + + def __init__(self, db_dir=''): + if IP2PermIDDB.__single: + raise RuntimeError, "IP2PermIDDB is singleton" + self.db_name = 'ip2permid.bsd' + self.opened = True + + self.filetype = db.DB_BTREE + self._data, self.db_dir = open_db(self.db_name, db_dir, filetype=self.filetype) + self.peer_db = PeerDB.getInstance(db_dir=db_dir) + IP2PermIDDB.__single = self + + def getInstance(*args, **kw): + if IP2PermIDDB.__single is None: + IP2PermIDDB(*args, **kw) + return IP2PermIDDB.__single + getInstance = staticmethod(getInstance) + + + def addIP(self, ip, permid): + if not isValidPermid(permid) or not isValidIP(ip): + return + + self._put(ip,permid) + + def getPermIDByIP(self, ip): + if not isValidIP(ip): + return None + + if not self._has_key(ip): + return None + else: + return self._get(ip) + + def deletePermID(self, permid, ip=None): + # Jie: This function was amended otherwise it takes too long to perform + if not ip: + data = self.peer_db._get(permid, {}) + ip = data.get('ip',None) + permid2 = self._get(ip) + if permid == permid2: + self._delete(ip) + + +# DB extension for BarterCast statistics +class BarterCastDB(BasicDB): + + __single = None + + def __init__(self, db_dir=''): + if BarterCastDB.__single: + raise RuntimeError, "BarterCastDB is singleton" + self.db_name = 'bartercast.bsd' + self.opened = True + + self.db_dir = db_dir + self.filetype = db.DB_BTREE + self._data, self.db_dir = open_db(self.db_name, self.db_dir, filetype=self.filetype) + + MyDB.checkVersion(self) + BarterCastDB.__single = self + self.default_item = { + 'last_seen':0, + 'value': 0, + 'downloaded': 0, + 'uploaded': 0, + } + + def getInstance(*args, **kw): + if BarterCastDB.__single is None: + BarterCastDB(*args, **kw) + return BarterCastDB.__single + getInstance = staticmethod(getInstance) + + def updateItem(self, (permid_from, permid_to), item={}, update_time=True): # insert a peer; update it if existed + + if isValidPermid(permid_from) and isValidPermid(permid_to) and validDict(item): + + key = bencode((permid_from, permid_to)) + if self._has_key(key): + _item = self.getItem((permid_from, permid_to)) + if _item is None: # database error, the key exists, but the data ain't there + return + _item.update(item) + if update_time: + _item.update({'last_seen':int(time())}) + self._updateItem(key, _item) + else: + item = self.setDefaultItem(item) + if update_time: + item.update({'last_seen':int(time())}) + self._put(key, item) + + def deleteItem(self, (permid_from, permid_to)): + key = bencode((permid_from, permid_to)) + self._delete(key) + + def getItem(self, (permid_from, permid_to), default=False): + key = bencode((permid_from, permid_to)) + ret = self._get(key, None) + if ret is None and default: + ret = deepcopy(self.default_item) + return ret + + def hasItem(self, (permid_from, permid_to)): + key = 
bencode((permid_from, permid_to)) + return self._has_key(key) + +#=============================================================================== +# class ActionDB(BasicDB): +# +# __single = None +# +# def __init__(self, db_dir=''): +# if ActionDB.__single: +# raise RuntimeError, "ActionDB is singleton" +# self.db_name = 'actions.bsd' +# self.opened = True +# env = db.DBEnv() +# # Concurrent Data Store +# env.open(db_dir, db.DB_THREAD|db.DB_INIT_CDB|db.DB_INIT_MPOOL|db.DB_CREATE|db.DB_PRIVATE) +# self._data = db.DB(dbEnv=env) +# self._data.open(self.filename, db.DB_RECNO, db.DB_CREATE) +# ActionDB.__single = self +# +# def getInstance(*args, **kw): +# if ActionDB.__single is None: +# ActionDB(*args, **kw) +# return ActionDB.__single +# getInstance = staticmethod(getInstance) +#=============================================================================== + + + + diff --git a/tribler-mod/Tribler/Core/CacheDB/bsddb2sqlite.py b/tribler-mod/Tribler/Core/CacheDB/bsddb2sqlite.py new file mode 100644 index 0000000..16c3f42 --- /dev/null +++ b/tribler-mod/Tribler/Core/CacheDB/bsddb2sqlite.py @@ -0,0 +1,622 @@ +from time import localtime, strftime +# Written by Jie Yang +# see LICENSE.txt for license information + +import sys +import os +from bsdcachedb import MyDB, PeerDB, TorrentDB, PreferenceDB, MyPreferenceDB, BarterCastDB +from copy import deepcopy +from time import time +from sets import Set +from sha import sha +from base64 import encodestring, decodestring + +from Tribler.Core.BitTornado.bencode import bdecode + +LIB=0 +# 0: pysqlite, 1: APSW +if LIB == 0: + try: + import sqlite + except: + try: + from pysqlite2 import dbapi2 as sqlite + except: + from sqlite3 import dbapi2 as sqlite +elif LIB == 1: + try: + import apsw + except: + pass + +print "SQLite Wrapper:", {0:'PySQLite', 1:'APSW'}[LIB] + +def bin2str(bin): + # Full BASE64-encoded +# if bin.replace('+','').replace('/','').replace('=','').isalnum(): +# return bin # it is already BASE64-encoded +# else: + return encodestring(bin).replace("\n","") + +def str2bin(str): + try: + return decodestring(str) + except: + return str # has been decoded + + +class Bsddb2Sqlite: + def __init__(self, bsddb_dir, sqlite_dbfile_path, sql_filename): + self.bsddb_dir = bsddb_dir + self.sqlite_dbfile_path = sqlite_dbfile_path + self.sql_filename = sql_filename + self.sdb = None + self.commit_begined = 0 + self.permid_id = {} + self.progress = {} + self.infohash_id = {} + self.permid_id = {} + self.src_table = {'':0, 'BC':1} + self.icons = Set() + self.icon_dir = None + + def __del__(self): + try: + self._commit() + self.close() + except: + pass + + def close(self): + if self.sdb is not None: + self.sdb.close() + + def _fetchone(self, sql, arg=None): + find = None + if LIB==0: + if arg is None: + self.cur.execute(sql) + else: + self.cur.execute(sql, arg) + find = self.cur.fetchone() + else: + if arg is None: + for find in self.cur.execute(sql): + break + else: + for find in self.cur.execute(sql, arg): + break + if find is None: + return None + else: + if len(find)>1: + return find + else: + return find[0] + + def _getPeerID(self, peer_permid, bin=True): + if peer_permid in self.permid_id: + return self.permid_id[peer_permid] + if bin: + peer_permid_str = bin2str(peer_permid) + else: + peer_permid_str = peer_permid + sql_get_peer_id = "SELECT peer_id FROM Peer WHERE permid==?" 
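+ # permids are stored BASE64-encoded in the Peer table (bin2str above), so the
+ # lookup must use the encoded form; e.g. bin2str('\x00\x01') == 'AAE=' and
+ # str2bin('AAE=') == '\x00\x01'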
+ peer_id = self._fetchone(sql_get_peer_id, (peer_permid_str,)) + if peer_id is not None: + self.permid_id[peer_permid] = peer_id + + return peer_id + + def _getTorrentID(self, infohash, bin=True): + if bin: + infohash = bin2str(infohash) + sql_get_torrent_id = "SELECT torrent_id FROM Torrent WHERE infohash==?" + arg = (infohash,) + return self._fetchone(sql_get_torrent_id, arg) + +#=============================================================================== +# def _insertInfohash(self, infohash, bin=True): +# if bin: +# infohash = bin2str(infohash) +# sql_insert_torrent = "INSERT INTO Infohash (infohash) VALUES (?)" +# self.cur.execute(sql_insert_torrent, (infohash,)) +#=============================================================================== + + def _begin(self): + if LIB == 1: + self.commit_begined = 1 + self.cur.execute('BEGIN') + + def _commit(self): + if LIB == 0: + if self.sdb: + self.sdb.commit() + else: + if self.commit_begined == 1: + if self.cur: + self.cur.execute("COMMIT") + self.commit_begined = 0 + + def convert_PeerDB(self, limit=0): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "convert_PeerDB" + peer_db = PeerDB.getInstance(self.bsddb_dir) + npeers = 0 + for permid,db_data in peer_db._data.iteritems(): + data = { + 'ip':None, + 'port':None, + 'name':None, + 'last_seen':0, + 'similarity':0, + 'connected_times':0, + 'oversion':0, # overlay version + 'buddycast_times':0, + 'last_buddycast_time':0, + 'thumbnail':None, + 'npeers':0, + 'ntorrents':0, + 'nprefs':0, + 'nqueries':0, + 'last_connected':0, + 'friend':0, + 'superpeer':0, + } + + data.update(db_data) + iconfilename = sha(permid).hexdigest() + if iconfilename in self.icons: + icon_str = self.readIcon(iconfilename) + data['thumbnail'] = icon_str + icon_path = os.path.join(self.icon_dir, iconfilename + '.jpg') + if os.path.isfile(icon_path): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'remove', icon_path + os.remove(icon_path) + + self._addPeerToDB(permid, data) + npeers += 1 + self.permid_id[permid] = npeers + if limit and npeers >= limit: + break + #nconnpeers = self._fetchone('select count(*) from Peer where connected_times>0;') + #print "npeers", npeers, nconnpeers + + self._commit() + # delete peer icons +# for iconfilename in self.icons: +# icon_path = os.path.join(self.icon_dir, iconfilename + '.jpg') +# if os.path.isfile(icon_path): +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'remove', icon_path +# os.remove(icon_path) + + def readIcon(self, iconfilename): + # read a peer icon file and return the encoded string + icon_path = os.path.join(self.icon_dir, iconfilename + '.jpg') + data = None + try: + try: + f = open(icon_path, 'rb') + data = f.read() + data = encodestring(data).replace("\n","") + except: + data = None + finally: + f.close() + return data + + def _addPeerToDB(self, permid, data=None, bin=True): + sql_insert_peer = """ + INSERT INTO Peer + (permid, name, ip, port, thumbnail, + oversion, similarity, friend, superpeer, + last_seen, last_connected, last_buddycast, + connected_times, buddycast_times, + num_peers, num_torrents, num_prefs, num_queries + ) + VALUES (?,?,?,?,?, ?,?,?,?, ?,?,?, ?,?, ?,?,?,?) 
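+ -- 18 placeholders, one per Peer column listed above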
+ """ + if bin: + permid = bin2str(permid) + if data is None: + sql_insert_peer = 'INSERT INTO Peer (permid) VALUES (?)' + self.cur.execute(sql_insert_peer,(permid,)) + else: + self.cur.execute(sql_insert_peer, + (permid, data['name'], data['ip'], data['port'], data['thumbnail'], + data['oversion'], data['similarity'], data['friend'], data['superpeer'], + data['last_seen'], data['last_connected'], data['last_buddycast_time'], + data['connected_times'], data['buddycast_times'], + data['npeers'], data['ntorrents'], data['nprefs'], data['nqueries']) + ) + + def convert_torrent_data(self, db_data): + data = { + 'torrent_name':None, # name of the torrent + 'leecher': -1, + 'seeder': -1, + 'ignore_number': 0, + 'retry_number': 0, + 'last_check_time': 0, + 'status': 0, # status table: unknown, good, dead + + 'category': 0, # category table + 'source': 0, # source table, from buddycast, rss or others + 'thumbnail':None, # 1 - the torrent has a thumbnail + 'relevance':0, + + 'inserttime': 0, # when the torrent file is written to the disk + 'progress': 0.0, # download progress + 'secret':0, # download secretly + + 'name':None, + 'length':0, + 'creation_date':0, + 'comment':None, + 'num_files':0, + + 'ignore_number':0, + 'retry_number':0, + 'last_check_time':0, + } + + if 'info' in db_data: + info = db_data.pop('info') + data['name'] = info.get('name', None) + data['length'] = info.get('length', 0) + data['num_files'] = info.get('num_files', 0) + data['creation_date'] = info.get('creation date', 0) + data['announce'] = info.get('announce', '') + data['announce-list'] = info.get('announce-list', []) + + # change torrent dir + torrent_dir = db_data.get('torrent_dir',None) + + # change status + status = db_data.get('status', 'unknown') + status_table = {'unknown':0, 'good':1, 'dead':2} + db_data['status'] = status_table[status] + + # change category + category_list = db_data.get('category', []) + category_table = {'Picture':6, 'Document':5, 'xxx':7, 'VideoClips':2, 'other':8, 'Video':1, 'Compressed':4, 'Audio':3} + if len(category_list) > 0: + category = category_list[0] + cat_int = category_table[category] + else: + cat_int = 0 + db_data['category'] = cat_int + + # change source + src = db_data.get('source', '') + if src in self.src_table: + src_int = self.src_table[src] + else: + src_int = self.insertNewSrc(src) # add a new src, e.g., a RSS feed + self.src_table[src] = src_int + db_data['source'] = src_int + data.update(db_data) + return data + + def convert_TorrentDB(self, limit=0): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "convert_TorrentDB" + torrent_db = TorrentDB.getInstance(self.bsddb_dir) + ntorrents = 0 + + for infohash, db_data in torrent_db._data.iteritems(): + data = self.convert_torrent_data(db_data) + self._addTorrentToDB(infohash, data) + ntorrents += 1 + if limit and ntorrents >= limit: + break + + self._commit() + #self.cur.execute('select count(*) from torrent') + #print 'add torrents', self.cur.fetchone()[0] + #print "ntorrents", ntorrents + + def insertNewSrc(self, src): + insert_src_sql = """ + INSERT INTO TorrentSource (name, description) + VALUES (?,?) + """ + desc = '' + if src.startswith('http') and src.endswith('xml'): + desc = 'RSS' + self.cur.execute(insert_src_sql, (src,desc)) + get_src_id_sql = """ + SELECT source_id FROM TorrentSource WHERE name=? 
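+ -- look up the id of the source row inserted just above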
+ """ + src_id = self._fetchone(get_src_id_sql, (src,)) + assert src_id>1, src_id + return src_id + + def _addTorrentToDB(self, infohash, data=None): +# self._insertInfohash(infohash) +# torrent_id = self._getTorrentID(infohash) + infohash_str = bin2str(infohash) + if not data: + sql_insert_torrent = "INSERT INTO Torrent (infohash) VALUES (?)" + self.cur.execute(sql_insert_torrent, (infohash_str,)) + else: + if data['progress'] > 0: + self.progress[infohash] = data['progress'] + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bsddb2sqlite: _addTorrentToDB:",data['name'], data['torrent_name'] + + sql_insert_torrent = """ + INSERT INTO Torrent + (infohash, name, torrent_file_name, + length, creation_date, num_files, thumbnail, + insert_time, secret, relevance, + source_id, category_id, status_id, + num_seeders, num_leechers, comment) + VALUES (?,?,?, ?,?,?,?, ?,?,?, ?,?,?, ?,?,?) + """ + try: + self.cur.execute(sql_insert_torrent, + (infohash_str, data['name'], data['torrent_name'], + data['length'], data['creation_date'], data['num_files'], data['thumbnail'], + data['inserttime'], data['secret'], data['relevance'], + data['source'], data['category'], data['status'], + data['seeder'], data['leecher'], data['comment']) + ) + except Exception, msg: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "error input for _addTorrentToDB:", data, Exception, msg + #sys.exit(1) + + torrent_id = self._getTorrentID(infohash_str, False) + self.infohash_id[infohash] = torrent_id + if data: + self.addTorrentTracker(torrent_id, data) + + return torrent_id + + def addTorrentTracker(self, torrent_id, data): + announce = data['announce'] + ignore_number = data['ignore_number'] + retry_number = data['retry_number'] + last_check_time = data['last_check_time'] + + announce_list = data['announce-list'] + + sql_insert_torrent_tracker = """ + INSERT INTO TorrentTracker + (torrent_id, tracker, announce_tier, + ignored_times, retried_times, last_check) + VALUES (?,?,?, ?,?,?) 
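+ -- announce_tier 1 is the torrent's own announce URL; trackers from announce-list get tiers 2 and up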
+ """ + + values = [(torrent_id, announce, 1, ignore_number, retry_number, last_check_time)] + tier_num = 2 + trackers = {announce:None} + for tier in announce_list: + for tracker in tier: + if tracker in trackers: + continue + value = (torrent_id, tracker, tier_num, 0, 0, 0) + values.append(value) + trackers[tracker] = None + tier_num += 1 + try: + self.cur.executemany(sql_insert_torrent_tracker, values) + except Exception, msg: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "error input for addTorrentTracker", data, values, Exception, msg + + + def convert_PreferenceDB(self): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "convert_PreferenceDB" + #print len(self.permid_id), len(self.infohash_id) + pref_db = PreferenceDB.getInstance(self.bsddb_dir) + nprefs = 0 + npeers = 0 + for permid,prefs in pref_db._data.iteritems(): + if not prefs: + continue + if permid in self.permid_id: + pid = self.permid_id[permid] + else: + continue + for infohash in prefs: + if infohash not in self.infohash_id: + self._addTorrentToDB(infohash) + tid = self._getTorrentID(infohash) + self.infohash_id[infohash] = tid + else: + tid = self.infohash_id[infohash] + self._addPeerPreference(pid, tid) + nprefs += 1 + npeers += 1 + self._commit() + #print "nprefs", nprefs, "npeers", npeers + + def _addPeerPreference(self, peer_id, torrent_id): + sql_insert_peer_torrent = "INSERT INTO Preference (peer_id, torrent_id) VALUES (?,?)" + try: + self.cur.execute(sql_insert_peer_torrent, (peer_id, torrent_id)) + except sqlite.IntegrityError, msg: # duplicated + #print Exception, msg + pass + + def convert_MyPreferenceDB(self): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "convert_MyPreferenceDB" + + mypref_db = MyPreferenceDB.getInstance(self.bsddb_dir) + nprefs = 0 + """ CREATE TABLE MyPreference (torrent_id INTEGER PRIMARY KEY, + download_name TEXT NOT NULL, download_dir TEXT NOT NULL, + progress NUMERIC DEFAULT 0, creation_time INTEGER NOT NULL, + rank INTEGER, last_access NUMERIC NOT NULL);""" + sql = """ + insert into MyPreference (torrent_id, destination_path, progress, creation_time) + values (?,?,?,?) 
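+ -- n.b. the column names here (destination_path) differ from the CREATE TABLE quoted in the docstring above;
+ -- progress is filled from the per-torrent progress values cached while converting TorrentDB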
+ """ + + for infohash, data in mypref_db._data.iteritems(): + torrent_id = self._getTorrentID(infohash) + if not torrent_id: # not found in torrent db, insert it to torrent db first + torrent_id = self._addTorrentToDB(infohash) + download_name = data.get('content_name', '') + download_dir = data.get('content_dir', '') + # Arno, 2008-10-23: destdir should be topdir + dest_path = download_dir #os.path.join(download_dir, download_name) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bsddb2sqlite: mypreferences: Setting destdir to",dest_path + creation_time = data.get('created_time', 0) + prog = self.progress.get(infohash, 0) + self.cur.execute(sql, (torrent_id, dest_path, prog, creation_time)) + #self.cur.execute('select count(*) from MyPreference') + #print 'add MyPreferenceDB', self.cur.fetchone()[0] + + def addFriend(self, permid): + peer_id = self._getPeerID(permid) + if not peer_id: + self._addPeerToDB(permid) + peer_id = self._getPeerID(permid) + sql = 'update Peer set friend=1 where peer_id=%d'%peer_id + self.cur.execute(sql) + + def addSuperPeer(self, permid): + peer_id = self._getPeerID(permid) + if not peer_id: + self._addPeerToDB(permid) + peer_id = self._getPeerID(permid) + sql = 'update Peer set superpeer=1 where peer_id=%d'%peer_id + self.cur.execute(sql) + + def convert_MyDB(self, torrent_dir=None): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "convert MyDB to MyInfo" + + mydb = MyDB.getInstance(self.bsddb_dir) + + friends = mydb.getFriends() + for permid in friends: + self.addFriend(permid) + + superpeers = mydb.getSuperPeers() + for permid in superpeers: + self.addSuperPeer(permid) + + self._commit() + + #self.cur.execute('select count(*) from peer where friend==1') + #print 'add friends', self.cur.fetchone()[0] + #self.cur.execute('select count(*) from peer where superpeer==1') + #print 'add superpeers', self.cur.fetchone()[0] + + def create_sqlite(self, file_path, sql_file, autocommit=0): + if os.path.exists(file_path): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "sqlite db already exists", os.path.abspath(file_path) + return False + db_dir = os.path.dirname(os.path.abspath(file_path)) + if not os.path.exists(db_dir): + os.makedirs(db_dir) + + self.sdb = sqlite.connect(file_path, isolation_level=None) # auto-commit + self.cur = self.sdb.cursor() + + f = open(sql_file) + sql_create_tables = f.read() + f.close() + + sql_statements = sql_create_tables.split(';') + for sql in sql_statements: + self.cur.execute(sql) + + self._commit() + self.sdb.close() + + self.sdb = sqlite.connect(file_path) # auto-commit + self.cur = self.sdb.cursor() + + return True + + def convert_BartercastDB(self): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "convert_BartercastDB" + + db_path = os.path.join(self.bsddb_dir, 'bartercast.bsd') + if not os.path.isfile(db_path): + return + bc_db = BarterCastDB.getInstance(self.bsddb_dir) + insert_bc_sql = """ + INSERT INTO BarterCast + (peer_id_from, peer_id_to, downloaded, uploaded, last_seen, value) + VALUES (?,?,?,?,?,?) 
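+ -- both permids are resolved to peer_ids below, inserting bare Peer rows first when they are unknown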
+ """ + values = [] + for key,db_data in bc_db._data.iteritems(): + try: + permid_from, permid_to = bdecode(key) + permid_id_from = self._getPeerID(permid_from) + if permid_id_from is None: + self._addPeerToDB(permid_from) + permid_id_from = self._getPeerID(permid_from) + permid_id_to = self._getPeerID(permid_to) + if permid_id_to is None: + self._addPeerToDB(permid_to) + permid_id_to = self._getPeerID(permid_to) + downloaded = db_data.get('downloaded', 0) + uploaded = db_data.get('uploaded', 0) + last_seen = db_data.get('last_seen', 0) + value = db_data.get('value', 0) + values.append((permid_id_from, permid_id_to, downloaded, uploaded, last_seen, value)) + except Exception, msg: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "error input for convert_BartercastDB:", key, db_data, Exception, msg + self.cur.executemany(insert_bc_sql, values) + self._commit() + #print "converted bartercast db", len(values) + + def scan_PeerIcons(self, icon_dir): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "scan_PeerIcons", icon_dir + if not icon_dir or not os.path.isdir(icon_dir): + return + self.icon_dir = icon_dir + for file_name in os.listdir(icon_dir): + if file_name.endswith('.jpg') and len(file_name)==44: + self.icons.add(file_name[:-4]) + + def run(self, peer_limit=0, torrent_limit=0, torrent_dir=None, icon_dir=None): + if self.create_sqlite(self.sqlite_dbfile_path, self.sql_filename): + self.scan_PeerIcons(icon_dir) + + MyDB.getInstance(None, self.bsddb_dir) + self.convert_PeerDB(peer_limit) + self.convert_MyDB(torrent_dir) + self.convert_BartercastDB() + + self.convert_TorrentDB(torrent_limit) + self.convert_MyPreferenceDB() + self.convert_PreferenceDB() + + self.sdb.close() + #self.remove_bsddb() + + return True + else: + if self.sdb: + self.sdb.close() + return False + + def remove_bsddb(self): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", self.bsddb_dir + for filename in os.listdir(self.bsddb_dir): + if filename.endswith('.bsd'): + db_path = os.path.abspath(os.path.join(self.bsddb_dir,filename)) + if os.path.isfile(db_path): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "delete", db_path + os.remove(db_path) + + +if __name__ == '__main__': + bsddb_dir = sys.argv[1] + bsddb2sqlite = Bsddb2Sqlite(bsddb_dir, 'tribler.sdb', '../../tribler_sdb_v1.sql') + start = time() + peer_limit = torrent_limit = 0 + if len(sys.argv)>2: + peer_limit = int(sys.argv[2]) + if len(sys.argv)>3: + torrent_limit = int(sys.argv[3]) + print "limit", peer_limit, torrent_limit + bsddb2sqlite.run(peer_limit, torrent_limit) + print "cost time", time()-start + + + diff --git a/tribler-mod/Tribler/Core/CacheDB/bsddb2sqlite.py.bak b/tribler-mod/Tribler/Core/CacheDB/bsddb2sqlite.py.bak new file mode 100644 index 0000000..9a40fff --- /dev/null +++ b/tribler-mod/Tribler/Core/CacheDB/bsddb2sqlite.py.bak @@ -0,0 +1,621 @@ +# Written by Jie Yang +# see LICENSE.txt for license information + +import sys +import os +from bsdcachedb import MyDB, PeerDB, TorrentDB, PreferenceDB, MyPreferenceDB, BarterCastDB +from copy import deepcopy +from time import time +from sets import Set +from sha import sha +from base64 import encodestring, decodestring + +from Tribler.Core.BitTornado.bencode import bdecode + +LIB=0 +# 0: pysqlite, 1: APSW +if LIB == 0: + try: + import sqlite + except: + try: + from pysqlite2 import dbapi2 as sqlite + except: + from sqlite3 import dbapi2 as sqlite +elif LIB == 1: + try: + import apsw + except: + pass + +print "SQLite 
Wrapper:", {0:'PySQLite', 1:'APSW'}[LIB] + +def bin2str(bin): + # Full BASE64-encoded +# if bin.replace('+','').replace('/','').replace('=','').isalnum(): +# return bin # it is already BASE64-encoded +# else: + return encodestring(bin).replace("\n","") + +def str2bin(str): + try: + return decodestring(str) + except: + return str # has been decoded + + +class Bsddb2Sqlite: + def __init__(self, bsddb_dir, sqlite_dbfile_path, sql_filename): + self.bsddb_dir = bsddb_dir + self.sqlite_dbfile_path = sqlite_dbfile_path + self.sql_filename = sql_filename + self.sdb = None + self.commit_begined = 0 + self.permid_id = {} + self.progress = {} + self.infohash_id = {} + self.permid_id = {} + self.src_table = {'':0, 'BC':1} + self.icons = Set() + self.icon_dir = None + + def __del__(self): + try: + self._commit() + self.close() + except: + pass + + def close(self): + if self.sdb is not None: + self.sdb.close() + + def _fetchone(self, sql, arg=None): + find = None + if LIB==0: + if arg is None: + self.cur.execute(sql) + else: + self.cur.execute(sql, arg) + find = self.cur.fetchone() + else: + if arg is None: + for find in self.cur.execute(sql): + break + else: + for find in self.cur.execute(sql, arg): + break + if find is None: + return None + else: + if len(find)>1: + return find + else: + return find[0] + + def _getPeerID(self, peer_permid, bin=True): + if peer_permid in self.permid_id: + return self.permid_id[peer_permid] + if bin: + peer_permid_str = bin2str(peer_permid) + else: + peer_permid_str = peer_permid + sql_get_peer_id = "SELECT peer_id FROM Peer WHERE permid==?" + peer_id = self._fetchone(sql_get_peer_id, (peer_permid_str,)) + if peer_id is not None: + self.permid_id[peer_permid] = peer_id + + return peer_id + + def _getTorrentID(self, infohash, bin=True): + if bin: + infohash = bin2str(infohash) + sql_get_torrent_id = "SELECT torrent_id FROM Torrent WHERE infohash==?" 
+ arg = (infohash,) + return self._fetchone(sql_get_torrent_id, arg) + +#=============================================================================== +# def _insertInfohash(self, infohash, bin=True): +# if bin: +# infohash = bin2str(infohash) +# sql_insert_torrent = "INSERT INTO Infohash (infohash) VALUES (?)" +# self.cur.execute(sql_insert_torrent, (infohash,)) +#=============================================================================== + + def _begin(self): + if LIB == 1: + self.commit_begined = 1 + self.cur.execute('BEGIN') + + def _commit(self): + if LIB == 0: + if self.sdb: + self.sdb.commit() + else: + if self.commit_begined == 1: + if self.cur: + self.cur.execute("COMMIT") + self.commit_begined = 0 + + def convert_PeerDB(self, limit=0): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "convert_PeerDB" + peer_db = PeerDB.getInstance(self.bsddb_dir) + npeers = 0 + for permid,db_data in peer_db._data.iteritems(): + data = { + 'ip':None, + 'port':None, + 'name':None, + 'last_seen':0, + 'similarity':0, + 'connected_times':0, + 'oversion':0, # overlay version + 'buddycast_times':0, + 'last_buddycast_time':0, + 'thumbnail':None, + 'npeers':0, + 'ntorrents':0, + 'nprefs':0, + 'nqueries':0, + 'last_connected':0, + 'friend':0, + 'superpeer':0, + } + + data.update(db_data) + iconfilename = sha(permid).hexdigest() + if iconfilename in self.icons: + icon_str = self.readIcon(iconfilename) + data['thumbnail'] = icon_str + icon_path = os.path.join(self.icon_dir, iconfilename + '.jpg') + if os.path.isfile(icon_path): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'remove', icon_path + os.remove(icon_path) + + self._addPeerToDB(permid, data) + npeers += 1 + self.permid_id[permid] = npeers + if limit and npeers >= limit: + break + #nconnpeers = self._fetchone('select count(*) from Peer where connected_times>0;') + #print "npeers", npeers, nconnpeers + + self._commit() + # delete peer icons +# for iconfilename in self.icons: +# icon_path = os.path.join(self.icon_dir, iconfilename + '.jpg') +# if os.path.isfile(icon_path): +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'remove', icon_path +# os.remove(icon_path) + + def readIcon(self, iconfilename): + # read a peer icon file and return the encoded string + icon_path = os.path.join(self.icon_dir, iconfilename + '.jpg') + data = None + try: + try: + f = open(icon_path, 'rb') + data = f.read() + data = encodestring(data).replace("\n","") + except: + data = None + finally: + f.close() + return data + + def _addPeerToDB(self, permid, data=None, bin=True): + sql_insert_peer = """ + INSERT INTO Peer + (permid, name, ip, port, thumbnail, + oversion, similarity, friend, superpeer, + last_seen, last_connected, last_buddycast, + connected_times, buddycast_times, + num_peers, num_torrents, num_prefs, num_queries + ) + VALUES (?,?,?,?,?, ?,?,?,?, ?,?,?, ?,?, ?,?,?,?) 
+ """ + if bin: + permid = bin2str(permid) + if data is None: + sql_insert_peer = 'INSERT INTO Peer (permid) VALUES (?)' + self.cur.execute(sql_insert_peer,(permid,)) + else: + self.cur.execute(sql_insert_peer, + (permid, data['name'], data['ip'], data['port'], data['thumbnail'], + data['oversion'], data['similarity'], data['friend'], data['superpeer'], + data['last_seen'], data['last_connected'], data['last_buddycast_time'], + data['connected_times'], data['buddycast_times'], + data['npeers'], data['ntorrents'], data['nprefs'], data['nqueries']) + ) + + def convert_torrent_data(self, db_data): + data = { + 'torrent_name':None, # name of the torrent + 'leecher': -1, + 'seeder': -1, + 'ignore_number': 0, + 'retry_number': 0, + 'last_check_time': 0, + 'status': 0, # status table: unknown, good, dead + + 'category': 0, # category table + 'source': 0, # source table, from buddycast, rss or others + 'thumbnail':None, # 1 - the torrent has a thumbnail + 'relevance':0, + + 'inserttime': 0, # when the torrent file is written to the disk + 'progress': 0.0, # download progress + 'secret':0, # download secretly + + 'name':None, + 'length':0, + 'creation_date':0, + 'comment':None, + 'num_files':0, + + 'ignore_number':0, + 'retry_number':0, + 'last_check_time':0, + } + + if 'info' in db_data: + info = db_data.pop('info') + data['name'] = info.get('name', None) + data['length'] = info.get('length', 0) + data['num_files'] = info.get('num_files', 0) + data['creation_date'] = info.get('creation date', 0) + data['announce'] = info.get('announce', '') + data['announce-list'] = info.get('announce-list', []) + + # change torrent dir + torrent_dir = db_data.get('torrent_dir',None) + + # change status + status = db_data.get('status', 'unknown') + status_table = {'unknown':0, 'good':1, 'dead':2} + db_data['status'] = status_table[status] + + # change category + category_list = db_data.get('category', []) + category_table = {'Picture':6, 'Document':5, 'xxx':7, 'VideoClips':2, 'other':8, 'Video':1, 'Compressed':4, 'Audio':3} + if len(category_list) > 0: + category = category_list[0] + cat_int = category_table[category] + else: + cat_int = 0 + db_data['category'] = cat_int + + # change source + src = db_data.get('source', '') + if src in self.src_table: + src_int = self.src_table[src] + else: + src_int = self.insertNewSrc(src) # add a new src, e.g., a RSS feed + self.src_table[src] = src_int + db_data['source'] = src_int + data.update(db_data) + return data + + def convert_TorrentDB(self, limit=0): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "convert_TorrentDB" + torrent_db = TorrentDB.getInstance(self.bsddb_dir) + ntorrents = 0 + + for infohash, db_data in torrent_db._data.iteritems(): + data = self.convert_torrent_data(db_data) + self._addTorrentToDB(infohash, data) + ntorrents += 1 + if limit and ntorrents >= limit: + break + + self._commit() + #self.cur.execute('select count(*) from torrent') + #print 'add torrents', self.cur.fetchone()[0] + #print "ntorrents", ntorrents + + def insertNewSrc(self, src): + insert_src_sql = """ + INSERT INTO TorrentSource (name, description) + VALUES (?,?) + """ + desc = '' + if src.startswith('http') and src.endswith('xml'): + desc = 'RSS' + self.cur.execute(insert_src_sql, (src,desc)) + get_src_id_sql = """ + SELECT source_id FROM TorrentSource WHERE name=? 
+ """ + src_id = self._fetchone(get_src_id_sql, (src,)) + assert src_id>1, src_id + return src_id + + def _addTorrentToDB(self, infohash, data=None): +# self._insertInfohash(infohash) +# torrent_id = self._getTorrentID(infohash) + infohash_str = bin2str(infohash) + if not data: + sql_insert_torrent = "INSERT INTO Torrent (infohash) VALUES (?)" + self.cur.execute(sql_insert_torrent, (infohash_str,)) + else: + if data['progress'] > 0: + self.progress[infohash] = data['progress'] + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bsddb2sqlite: _addTorrentToDB:",data['name'], data['torrent_name'] + + sql_insert_torrent = """ + INSERT INTO Torrent + (infohash, name, torrent_file_name, + length, creation_date, num_files, thumbnail, + insert_time, secret, relevance, + source_id, category_id, status_id, + num_seeders, num_leechers, comment) + VALUES (?,?,?, ?,?,?,?, ?,?,?, ?,?,?, ?,?,?) + """ + try: + self.cur.execute(sql_insert_torrent, + (infohash_str, data['name'], data['torrent_name'], + data['length'], data['creation_date'], data['num_files'], data['thumbnail'], + data['inserttime'], data['secret'], data['relevance'], + data['source'], data['category'], data['status'], + data['seeder'], data['leecher'], data['comment']) + ) + except Exception, msg: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "error input for _addTorrentToDB:", data, Exception, msg + #sys.exit(1) + + torrent_id = self._getTorrentID(infohash_str, False) + self.infohash_id[infohash] = torrent_id + if data: + self.addTorrentTracker(torrent_id, data) + + return torrent_id + + def addTorrentTracker(self, torrent_id, data): + announce = data['announce'] + ignore_number = data['ignore_number'] + retry_number = data['retry_number'] + last_check_time = data['last_check_time'] + + announce_list = data['announce-list'] + + sql_insert_torrent_tracker = """ + INSERT INTO TorrentTracker + (torrent_id, tracker, announce_tier, + ignored_times, retried_times, last_check) + VALUES (?,?,?, ?,?,?) 
+ """ + + values = [(torrent_id, announce, 1, ignore_number, retry_number, last_check_time)] + tier_num = 2 + trackers = {announce:None} + for tier in announce_list: + for tracker in tier: + if tracker in trackers: + continue + value = (torrent_id, tracker, tier_num, 0, 0, 0) + values.append(value) + trackers[tracker] = None + tier_num += 1 + try: + self.cur.executemany(sql_insert_torrent_tracker, values) + except Exception, msg: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "error input for addTorrentTracker", data, values, Exception, msg + + + def convert_PreferenceDB(self): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "convert_PreferenceDB" + #print len(self.permid_id), len(self.infohash_id) + pref_db = PreferenceDB.getInstance(self.bsddb_dir) + nprefs = 0 + npeers = 0 + for permid,prefs in pref_db._data.iteritems(): + if not prefs: + continue + if permid in self.permid_id: + pid = self.permid_id[permid] + else: + continue + for infohash in prefs: + if infohash not in self.infohash_id: + self._addTorrentToDB(infohash) + tid = self._getTorrentID(infohash) + self.infohash_id[infohash] = tid + else: + tid = self.infohash_id[infohash] + self._addPeerPreference(pid, tid) + nprefs += 1 + npeers += 1 + self._commit() + #print "nprefs", nprefs, "npeers", npeers + + def _addPeerPreference(self, peer_id, torrent_id): + sql_insert_peer_torrent = "INSERT INTO Preference (peer_id, torrent_id) VALUES (?,?)" + try: + self.cur.execute(sql_insert_peer_torrent, (peer_id, torrent_id)) + except sqlite.IntegrityError, msg: # duplicated + #print Exception, msg + pass + + def convert_MyPreferenceDB(self): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "convert_MyPreferenceDB" + + mypref_db = MyPreferenceDB.getInstance(self.bsddb_dir) + nprefs = 0 + """ CREATE TABLE MyPreference (torrent_id INTEGER PRIMARY KEY, + download_name TEXT NOT NULL, download_dir TEXT NOT NULL, + progress NUMERIC DEFAULT 0, creation_time INTEGER NOT NULL, + rank INTEGER, last_access NUMERIC NOT NULL);""" + sql = """ + insert into MyPreference (torrent_id, destination_path, progress, creation_time) + values (?,?,?,?) 
+ """ + + for infohash, data in mypref_db._data.iteritems(): + torrent_id = self._getTorrentID(infohash) + if not torrent_id: # not found in torrent db, insert it to torrent db first + torrent_id = self._addTorrentToDB(infohash) + download_name = data.get('content_name', '') + download_dir = data.get('content_dir', '') + # Arno, 2008-10-23: destdir should be topdir + dest_path = download_dir #os.path.join(download_dir, download_name) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bsddb2sqlite: mypreferences: Setting destdir to",dest_path + creation_time = data.get('created_time', 0) + prog = self.progress.get(infohash, 0) + self.cur.execute(sql, (torrent_id, dest_path, prog, creation_time)) + #self.cur.execute('select count(*) from MyPreference') + #print 'add MyPreferenceDB', self.cur.fetchone()[0] + + def addFriend(self, permid): + peer_id = self._getPeerID(permid) + if not peer_id: + self._addPeerToDB(permid) + peer_id = self._getPeerID(permid) + sql = 'update Peer set friend=1 where peer_id=%d'%peer_id + self.cur.execute(sql) + + def addSuperPeer(self, permid): + peer_id = self._getPeerID(permid) + if not peer_id: + self._addPeerToDB(permid) + peer_id = self._getPeerID(permid) + sql = 'update Peer set superpeer=1 where peer_id=%d'%peer_id + self.cur.execute(sql) + + def convert_MyDB(self, torrent_dir=None): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "convert MyDB to MyInfo" + + mydb = MyDB.getInstance(self.bsddb_dir) + + friends = mydb.getFriends() + for permid in friends: + self.addFriend(permid) + + superpeers = mydb.getSuperPeers() + for permid in superpeers: + self.addSuperPeer(permid) + + self._commit() + + #self.cur.execute('select count(*) from peer where friend==1') + #print 'add friends', self.cur.fetchone()[0] + #self.cur.execute('select count(*) from peer where superpeer==1') + #print 'add superpeers', self.cur.fetchone()[0] + + def create_sqlite(self, file_path, sql_file, autocommit=0): + if os.path.exists(file_path): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "sqlite db already exists", os.path.abspath(file_path) + return False + db_dir = os.path.dirname(os.path.abspath(file_path)) + if not os.path.exists(db_dir): + os.makedirs(db_dir) + + self.sdb = sqlite.connect(file_path, isolation_level=None) # auto-commit + self.cur = self.sdb.cursor() + + f = open(sql_file) + sql_create_tables = f.read() + f.close() + + sql_statements = sql_create_tables.split(';') + for sql in sql_statements: + self.cur.execute(sql) + + self._commit() + self.sdb.close() + + self.sdb = sqlite.connect(file_path) # auto-commit + self.cur = self.sdb.cursor() + + return True + + def convert_BartercastDB(self): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "convert_BartercastDB" + + db_path = os.path.join(self.bsddb_dir, 'bartercast.bsd') + if not os.path.isfile(db_path): + return + bc_db = BarterCastDB.getInstance(self.bsddb_dir) + insert_bc_sql = """ + INSERT INTO BarterCast + (peer_id_from, peer_id_to, downloaded, uploaded, last_seen, value) + VALUES (?,?,?,?,?,?) 
+ """ + values = [] + for key,db_data in bc_db._data.iteritems(): + try: + permid_from, permid_to = bdecode(key) + permid_id_from = self._getPeerID(permid_from) + if permid_id_from is None: + self._addPeerToDB(permid_from) + permid_id_from = self._getPeerID(permid_from) + permid_id_to = self._getPeerID(permid_to) + if permid_id_to is None: + self._addPeerToDB(permid_to) + permid_id_to = self._getPeerID(permid_to) + downloaded = db_data.get('downloaded', 0) + uploaded = db_data.get('uploaded', 0) + last_seen = db_data.get('last_seen', 0) + value = db_data.get('value', 0) + values.append((permid_id_from, permid_id_to, downloaded, uploaded, last_seen, value)) + except Exception, msg: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "error input for convert_BartercastDB:", key, db_data, Exception, msg + self.cur.executemany(insert_bc_sql, values) + self._commit() + #print "converted bartercast db", len(values) + + def scan_PeerIcons(self, icon_dir): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "scan_PeerIcons", icon_dir + if not icon_dir or not os.path.isdir(icon_dir): + return + self.icon_dir = icon_dir + for file_name in os.listdir(icon_dir): + if file_name.endswith('.jpg') and len(file_name)==44: + self.icons.add(file_name[:-4]) + + def run(self, peer_limit=0, torrent_limit=0, torrent_dir=None, icon_dir=None): + if self.create_sqlite(self.sqlite_dbfile_path, self.sql_filename): + self.scan_PeerIcons(icon_dir) + + MyDB.getInstance(None, self.bsddb_dir) + self.convert_PeerDB(peer_limit) + self.convert_MyDB(torrent_dir) + self.convert_BartercastDB() + + self.convert_TorrentDB(torrent_limit) + self.convert_MyPreferenceDB() + self.convert_PreferenceDB() + + self.sdb.close() + #self.remove_bsddb() + + return True + else: + if self.sdb: + self.sdb.close() + return False + + def remove_bsddb(self): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", self.bsddb_dir + for filename in os.listdir(self.bsddb_dir): + if filename.endswith('.bsd'): + db_path = os.path.abspath(os.path.join(self.bsddb_dir,filename)) + if os.path.isfile(db_path): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "delete", db_path + os.remove(db_path) + + +if __name__ == '__main__': + bsddb_dir = sys.argv[1] + bsddb2sqlite = Bsddb2Sqlite(bsddb_dir, 'tribler.sdb', '../../tribler_sdb_v1.sql') + start = time() + peer_limit = torrent_limit = 0 + if len(sys.argv)>2: + peer_limit = int(sys.argv[2]) + if len(sys.argv)>3: + torrent_limit = int(sys.argv[3]) + print "limit", peer_limit, torrent_limit + bsddb2sqlite.run(peer_limit, torrent_limit) + print "cost time", time()-start + + + diff --git a/tribler-mod/Tribler/Core/CacheDB/cachedb.py b/tribler-mod/Tribler/Core/CacheDB/cachedb.py new file mode 100644 index 0000000..6e43629 --- /dev/null +++ b/tribler-mod/Tribler/Core/CacheDB/cachedb.py @@ -0,0 +1,8 @@ +from time import localtime, strftime +# Written by Jie Yang +# see LICENSE.txt for license information + +from sqlitecachedb import * +from SqliteSeedingStatsCacheDB import * +from SqliteFriendshipStatsCacheDB import * +from SqliteVideoPlaybackStatsCacheDB import * diff --git a/tribler-mod/Tribler/Core/CacheDB/cachedb.py.bak b/tribler-mod/Tribler/Core/CacheDB/cachedb.py.bak new file mode 100644 index 0000000..ff18cf4 --- /dev/null +++ b/tribler-mod/Tribler/Core/CacheDB/cachedb.py.bak @@ -0,0 +1,7 @@ +# Written by Jie Yang +# see LICENSE.txt for license information + +from sqlitecachedb import * +from SqliteSeedingStatsCacheDB import * +from 
SqliteFriendshipStatsCacheDB import * +from SqliteVideoPlaybackStatsCacheDB import * diff --git a/tribler-mod/Tribler/Core/CacheDB/friends.py b/tribler-mod/Tribler/Core/CacheDB/friends.py new file mode 100644 index 0000000..13cc248 --- /dev/null +++ b/tribler-mod/Tribler/Core/CacheDB/friends.py @@ -0,0 +1,143 @@ +from time import localtime, strftime +# Written by Jie Yang +# see LICENSE.txt for license information +import sys +from time import time +import os +import base64 +from traceback import print_exc + +from Tribler.Core.Utilities.utilities import validIP, validPort, validPermid, validName, show_permid +from CacheDBHandler import FriendDBHandler +from Tribler.Core.simpledefs import NTFY_FRIENDS,NTFY_PEERS + +default_friend_file = 'friends.txt' + +DEBUG = False + +def init(session): + friend_db = session.open_dbhandler(NTFY_FRIENDS) + peer_db = session.open_dbhandler(NTFY_PEERS) + filename = make_filename(session.get_state_dir(), default_friend_file) + ExternalFriendList(friend_db,peer_db,filename).updateFriendList() + +def done(session): + friend_db = session.open_dbhandler(NTFY_FRIENDS) + peer_db = session.open_dbhandler(NTFY_PEERS) + filename = make_filename(session.get_state_dir(), default_friend_file) + ExternalFriendList(friend_db,peer_db,filename).writeFriendList() + +def make_filename(config_dir,filename): + if config_dir is None: + return filename + else: + return os.path.join(config_dir,filename) + +class ExternalFriendList: + def __init__(self,friend_db,peer_db,friend_file=default_friend_file): + self.friend_file = friend_file + self.friend_db = friend_db + self.peer_db = peer_db + + def clean(self): # delete friend file + try: + os.remove(self.friend_file) + except Exception: + pass + + def updateFriendList(self, friend_file=''): + if not friend_file: + friend_file = self.friend_file + self.friend_list = self.readFriendList(friend_file) + self.updateDB(self.friend_list) + #self.clean() + + def updateDB(self, friend_list): + if not friend_list: + return + for friend in friend_list: + self.friend_db.addExternalFriend(friend) + + def getFriends(self): + friends = [] + permids = self.friend_db.getFriends() + for permid in permids: + friend = self.peer_db.getPeer(permid) + friends.append(friend) + return friends + + def deleteFriend(self, permid): + self.friend_db.deleteFriend(permid) + + def readFriendList(self, filename=''): + """ read (name, permid, friend_ip, friend_port) lines from a text file """ + + if not filename: + filename = self.friend_file + try: + file = open(filename, "r") + friends = file.readlines() + file.close() + except IOError: # create a new file + file = open(filename, "w") + file.close() + return [] + + friends_info = [] + for friend in friends: + if friend.strip().startswith("#"): # skip commended lines + continue + friend_line = friend.split(',') + friend_info = [] + for i in range(len(friend_line)): + friend_info.append(friend_line[i].strip()) + try: + friend_info[1] = base64.decodestring( friend_info[1]+'\n' ) + except: + continue + if self.validFriendList(friend_info): + friend = {'name':friend_info[0], 'permid':friend_info[1], + 'ip':friend_info[2], 'port':int(friend_info[3])} + friends_info.append(friend) + return friends_info + + def validFriendList(self, friend_info): + try: + if len(friend_info) < 4: + raise RuntimeError, "one line in friends.txt can only contain at least 4 elements" + validName(friend_info[0]) + validPermid(friend_info[1]) + validIP(friend_info[2]) + validPort(int(friend_info[3])) + except Exception, msg: + if DEBUG: + print 
"======== reading friend list error ========" + print friend_info + print msg + print "===========================================" + return False + else: + return True + + def writeFriendList(self, filename=''): + if not filename: + filename = self.friend_file + try: + file = open(filename, "w") + except IOError: + print_exc() + return + + friends = self.getFriends() + friends_to_write = self.formatForText(friends) + file.writelines(friends_to_write) + file.close() + + def formatForText(self, friends): + lines = [] + for friend in friends: + permid = show_permid(friend['permid']) + line = ', '.join([friend['name'], permid, friend['ip'], str(friend['port'])]) + line += '\n' + lines.append(line) + return lines \ No newline at end of file diff --git a/tribler-mod/Tribler/Core/CacheDB/friends.py.bak b/tribler-mod/Tribler/Core/CacheDB/friends.py.bak new file mode 100644 index 0000000..fcbc355 --- /dev/null +++ b/tribler-mod/Tribler/Core/CacheDB/friends.py.bak @@ -0,0 +1,142 @@ +# Written by Jie Yang +# see LICENSE.txt for license information +import sys +from time import time +import os +import base64 +from traceback import print_exc + +from Tribler.Core.Utilities.utilities import validIP, validPort, validPermid, validName, show_permid +from CacheDBHandler import FriendDBHandler +from Tribler.Core.simpledefs import NTFY_FRIENDS,NTFY_PEERS + +default_friend_file = 'friends.txt' + +DEBUG = False + +def init(session): + friend_db = session.open_dbhandler(NTFY_FRIENDS) + peer_db = session.open_dbhandler(NTFY_PEERS) + filename = make_filename(session.get_state_dir(), default_friend_file) + ExternalFriendList(friend_db,peer_db,filename).updateFriendList() + +def done(session): + friend_db = session.open_dbhandler(NTFY_FRIENDS) + peer_db = session.open_dbhandler(NTFY_PEERS) + filename = make_filename(session.get_state_dir(), default_friend_file) + ExternalFriendList(friend_db,peer_db,filename).writeFriendList() + +def make_filename(config_dir,filename): + if config_dir is None: + return filename + else: + return os.path.join(config_dir,filename) + +class ExternalFriendList: + def __init__(self,friend_db,peer_db,friend_file=default_friend_file): + self.friend_file = friend_file + self.friend_db = friend_db + self.peer_db = peer_db + + def clean(self): # delete friend file + try: + os.remove(self.friend_file) + except Exception: + pass + + def updateFriendList(self, friend_file=''): + if not friend_file: + friend_file = self.friend_file + self.friend_list = self.readFriendList(friend_file) + self.updateDB(self.friend_list) + #self.clean() + + def updateDB(self, friend_list): + if not friend_list: + return + for friend in friend_list: + self.friend_db.addExternalFriend(friend) + + def getFriends(self): + friends = [] + permids = self.friend_db.getFriends() + for permid in permids: + friend = self.peer_db.getPeer(permid) + friends.append(friend) + return friends + + def deleteFriend(self, permid): + self.friend_db.deleteFriend(permid) + + def readFriendList(self, filename=''): + """ read (name, permid, friend_ip, friend_port) lines from a text file """ + + if not filename: + filename = self.friend_file + try: + file = open(filename, "r") + friends = file.readlines() + file.close() + except IOError: # create a new file + file = open(filename, "w") + file.close() + return [] + + friends_info = [] + for friend in friends: + if friend.strip().startswith("#"): # skip commended lines + continue + friend_line = friend.split(',') + friend_info = [] + for i in range(len(friend_line)): + 
friend_info.append(friend_line[i].strip()) + try: + friend_info[1] = base64.decodestring( friend_info[1]+'\n' ) + except: + continue + if self.validFriendList(friend_info): + friend = {'name':friend_info[0], 'permid':friend_info[1], + 'ip':friend_info[2], 'port':int(friend_info[3])} + friends_info.append(friend) + return friends_info + + def validFriendList(self, friend_info): + try: + if len(friend_info) < 4: + raise RuntimeError, "one line in friends.txt can only contain at least 4 elements" + validName(friend_info[0]) + validPermid(friend_info[1]) + validIP(friend_info[2]) + validPort(int(friend_info[3])) + except Exception, msg: + if DEBUG: + print "======== reading friend list error ========" + print friend_info + print msg + print "===========================================" + return False + else: + return True + + def writeFriendList(self, filename=''): + if not filename: + filename = self.friend_file + try: + file = open(filename, "w") + except IOError: + print_exc() + return + + friends = self.getFriends() + friends_to_write = self.formatForText(friends) + file.writelines(friends_to_write) + file.close() + + def formatForText(self, friends): + lines = [] + for friend in friends: + permid = show_permid(friend['permid']) + line = ', '.join([friend['name'], permid, friend['ip'], str(friend['port'])]) + line += '\n' + lines.append(line) + return lines \ No newline at end of file diff --git a/tribler-mod/Tribler/Core/CacheDB/maxflow.py b/tribler-mod/Tribler/Core/CacheDB/maxflow.py new file mode 100644 index 0000000..0a04888 --- /dev/null +++ b/tribler-mod/Tribler/Core/CacheDB/maxflow.py @@ -0,0 +1,163 @@ +from time import localtime, strftime +import sets + +# Computes maximal flow in a graph +# Adam Langley http://www.imperialviolet.org +# Creative Commons http://creativecommons.org/licenses/by-sa/2.0/ + +# Adapted for Tribler +from copy import deepcopy + +class Network(object): + """This class can be used to calculate the maximal flow between two points in a network/graph. + A network consists of nodes and arcs (egdes) that link them. Each arc has a capacity (the maximum flow down that arc). 
+ The iterative algorithm is described at http://carbon.cudenver.edu/~hgreenbe/glossary/notes/maxflow-FF.pdf""" + + __slots__ = ['arcs', 'backarcs', 'nodes', 'labels'] + + def __init__ (self, arcs): + + self.nodes = [] + self.labels = {} + + self.arcs = arcs + self.backarcs = {} + + for source in arcs: + + if not source in self.nodes: + self.nodes.append(source) + + if not source in self.backarcs: + self.backarcs[source] = {} + + for dest in arcs[source]: + + if not dest in self.nodes: + self.nodes.append(dest) + + if not dest in self.backarcs: + self.backarcs[dest] = {} + + self.backarcs[dest][source] = {'cap' : arcs[source][dest]['cap'], 'flow' : 0} + + + def min (a, b): + """private function""" + if (a == -1): + return b + if (b == -1): + return a + return min (a, b) + + min = staticmethod (min) + + def maxflow (self, source, sink, max_distance = 10000): + """Return the maximum flow from the source to the sink""" + + if not source in self.nodes or not sink in self.nodes: + return 0.0 + + arcscopy = deepcopy(self.arcs) + backarcscopy = deepcopy(self.backarcs) + + DEBUG = False + + while 1: + labels = {} + labels[source] = ((0, 0), -1) + + unscanned = {source: 0} # sets.Set ([source]) + scanned = sets.Set() + + while 1: + # Select any node, x, that is labeled and unscanned + + for node in unscanned: + + if DEBUG: + print "Unscanned: " + str(node) + + # To all unlabeled succ nodes + for outnode in arcscopy[node]: + + if DEBUG: + print "to ", outnode + + if (outnode in unscanned or outnode in scanned): + continue + arc = arcscopy[node][outnode] + if (arc['flow'] >= arc['cap']) or (unscanned[node] + 1) > max_distance: + continue + + labels[outnode] = ((node, 1), Network.min(labels[node][1], arc['cap'] - arc['flow'])) + + if DEBUG: + print labels[outnode] + + unscanned[outnode] = unscanned[node] + 1 + #unscanned.add(outnode) + + # To all predecessor nodes + for innode in backarcscopy[node]: + + if DEBUG: + print "from ", innode + + if (innode in unscanned or innode in scanned): + continue + arc = arcscopy[innode][node] + if (arc['flow'] == 0) or (unscanned[node] + 1) > max_distance: + continue + labels[innode] = ((node, -1), Network.min(labels[node][1], arc['flow'])) + + if DEBUG: + print labels[innode] + + unscanned[innode] = unscanned[node] + 1 + #unscanned.add(innode) + + del unscanned[node] + #unscanned.remove(node) + + scanned.add(node) + + # print labels + break + + else: + # no labels could be assigned + # total the incoming flows to the sink + sum = 0 + for innode in backarcscopy[sink]: + sum += arcscopy[innode][sink]['flow'] + return sum + + if (sink in unscanned): + # sink is labeled and unscanned + break + + # Routine B + s = sink + ((node, sense), et) = labels[s] + # print "et: " + str (et) + while 1: + if (s == source): + break + ((node, sense), epi) = labels[s] + # If the first part of the label is y+ + if (sense == 1): + # print " add " + str(node) + " " + str(s) + arcscopy[node][s]['flow'] += et + else: + # print " rm " + str(s) + " " + str(node) + arcscopy[s][node]['flow'] -= et + s = node + ##print self.arcs + +if (__name__ == "__main__"): + n = Network ({'s' : {'a': {'cap': 20, 'flow': 0}, 'x' : {'cap' : 1, 'flow' : 0}, 'y' : {'cap' : 3, 'flow' : 0}}, 'x' : {'y' : {'cap' : 1, 'flow' : 0}, 't' : {'cap' : 3, 'flow' : 0}}, 'y' : {'x' : {'cap' : 1, 'flow' : 0}, 't' : {'cap' : 1, 'flow' : 0}}, 'a': {'b': {'cap': 20, 'flow': 0}}, 'b': {'c': {'cap': 20, 'flow': 0}}, 'c': {'t': {'cap': 20, 'flow': 0}}}) + + print n.nodes + print n.maxflow ('s', 'q', max_distance = 2) + diff --git 
a/tribler-mod/Tribler/Core/CacheDB/maxflow.py.bak b/tribler-mod/Tribler/Core/CacheDB/maxflow.py.bak new file mode 100644 index 0000000..cd020a3 --- /dev/null +++ b/tribler-mod/Tribler/Core/CacheDB/maxflow.py.bak @@ -0,0 +1,162 @@ +import sets + +# Computes maximal flow in a graph +# Adam Langley http://www.imperialviolet.org +# Creative Commons http://creativecommons.org/licenses/by-sa/2.0/ + +# Adapted for Tribler +from copy import deepcopy + +class Network(object): + """This class can be used to calculate the maximal flow between two points in a network/graph. + A network consists of nodes and arcs (egdes) that link them. Each arc has a capacity (the maximum flow down that arc). + The iterative algorithm is described at http://carbon.cudenver.edu/~hgreenbe/glossary/notes/maxflow-FF.pdf""" + + __slots__ = ['arcs', 'backarcs', 'nodes', 'labels'] + + def __init__ (self, arcs): + + self.nodes = [] + self.labels = {} + + self.arcs = arcs + self.backarcs = {} + + for source in arcs: + + if not source in self.nodes: + self.nodes.append(source) + + if not source in self.backarcs: + self.backarcs[source] = {} + + for dest in arcs[source]: + + if not dest in self.nodes: + self.nodes.append(dest) + + if not dest in self.backarcs: + self.backarcs[dest] = {} + + self.backarcs[dest][source] = {'cap' : arcs[source][dest]['cap'], 'flow' : 0} + + + def min (a, b): + """private function""" + if (a == -1): + return b + if (b == -1): + return a + return min (a, b) + + min = staticmethod (min) + + def maxflow (self, source, sink, max_distance = 10000): + """Return the maximum flow from the source to the sink""" + + if not source in self.nodes or not sink in self.nodes: + return 0.0 + + arcscopy = deepcopy(self.arcs) + backarcscopy = deepcopy(self.backarcs) + + DEBUG = False + + while 1: + labels = {} + labels[source] = ((0, 0), -1) + + unscanned = {source: 0} # sets.Set ([source]) + scanned = sets.Set() + + while 1: + # Select any node, x, that is labeled and unscanned + + for node in unscanned: + + if DEBUG: + print "Unscanned: " + str(node) + + # To all unlabeled succ nodes + for outnode in arcscopy[node]: + + if DEBUG: + print "to ", outnode + + if (outnode in unscanned or outnode in scanned): + continue + arc = arcscopy[node][outnode] + if (arc['flow'] >= arc['cap']) or (unscanned[node] + 1) > max_distance: + continue + + labels[outnode] = ((node, 1), Network.min(labels[node][1], arc['cap'] - arc['flow'])) + + if DEBUG: + print labels[outnode] + + unscanned[outnode] = unscanned[node] + 1 + #unscanned.add(outnode) + + # To all predecessor nodes + for innode in backarcscopy[node]: + + if DEBUG: + print "from ", innode + + if (innode in unscanned or innode in scanned): + continue + arc = arcscopy[innode][node] + if (arc['flow'] == 0) or (unscanned[node] + 1) > max_distance: + continue + labels[innode] = ((node, -1), Network.min(labels[node][1], arc['flow'])) + + if DEBUG: + print labels[innode] + + unscanned[innode] = unscanned[node] + 1 + #unscanned.add(innode) + + del unscanned[node] + #unscanned.remove(node) + + scanned.add(node) + + # print labels + break + + else: + # no labels could be assigned + # total the incoming flows to the sink + sum = 0 + for innode in backarcscopy[sink]: + sum += arcscopy[innode][sink]['flow'] + return sum + + if (sink in unscanned): + # sink is labeled and unscanned + break + + # Routine B + s = sink + ((node, sense), et) = labels[s] + # print "et: " + str (et) + while 1: + if (s == source): + break + ((node, sense), epi) = labels[s] + # If the first part of the label is y+ 
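For reference, a minimal usage sketch of the Network class defined above, on a graph small enough to check by hand. The node names and capacities are made up for illustration, every arc needs both 'cap' and 'flow' keys as in the module's __main__ example, and the import assumes Tribler is on the Python path.

    from Tribler.Core.CacheDB.maxflow import Network

    # two disjoint augmenting paths: s->a->t (capacity 1) and s->b->t (capacity 1)
    arcs = {'s': {'a': {'cap': 2, 'flow': 0}, 'b': {'cap': 1, 'flow': 0}},
            'a': {'t': {'cap': 1, 'flow': 0}},
            'b': {'t': {'cap': 2, 'flow': 0}}}
    n = Network(arcs)
    print n.maxflow('s', 't')    # should print 2
    print n.maxflow('s', 'z')    # unknown sink, returns 0.0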
+ if (sense == 1): + # print " add " + str(node) + " " + str(s) + arcscopy[node][s]['flow'] += et + else: + # print " rm " + str(s) + " " + str(node) + arcscopy[s][node]['flow'] -= et + s = node + ##print self.arcs + +if (__name__ == "__main__"): + n = Network ({'s' : {'a': {'cap': 20, 'flow': 0}, 'x' : {'cap' : 1, 'flow' : 0}, 'y' : {'cap' : 3, 'flow' : 0}}, 'x' : {'y' : {'cap' : 1, 'flow' : 0}, 't' : {'cap' : 3, 'flow' : 0}}, 'y' : {'x' : {'cap' : 1, 'flow' : 0}, 't' : {'cap' : 1, 'flow' : 0}}, 'a': {'b': {'cap': 20, 'flow': 0}}, 'b': {'c': {'cap': 20, 'flow': 0}}, 'c': {'t': {'cap': 20, 'flow': 0}}}) + + print n.nodes + print n.maxflow ('s', 'q', max_distance = 2) + diff --git a/tribler-mod/Tribler/Core/CacheDB/read_db.py b/tribler-mod/Tribler/Core/CacheDB/read_db.py new file mode 100644 index 0000000..3472cee --- /dev/null +++ b/tribler-mod/Tribler/Core/CacheDB/read_db.py @@ -0,0 +1,201 @@ +from time import localtime, strftime +# Written by Jie Yang +# see LICENSE.txt for license information + +# -*- coding:gb2312 -*- +# A GUI to read bsddb or pickle and display the data by a tree ctrl. + +import sys +from traceback import print_exc +import os +from bsddb import dbshelve, db +from cPickle import load, loads +from sets import Set + +class DBReader: + def __init__(self): + self.open_type_list = ['bsddb.db', 'dbshelve', 'pickle', 'file'] + + def loadTreeData(self, db_path, data): + self.sb.SetStatusText('loading '+db_path, 2) + testdata = {1:'abc', 2:[1, 'a', 2.53], 3:{'a':'x', 'b':'y'}} + subroot = self.tree.AppendItem(self.root, db_path) + #self.open_type = self.db_type_rb.GetSelection() + self.addTreeNodes(subroot, data) + self.tree.SetItemPyData(subroot, data) + self.sb.SetStatusText('loaded '+db_path, 2) + self.sb.Refresh() + + def addTreeNodes(self, parentItem, items): + if isinstance(items, dict): + keys = items.keys() + keys.sort() + for key in keys: + newItem = self.tree.AppendItem(parentItem, `key`) + self.addTreeNodes(newItem, items[key]) + self.tree.SetItemPyData(newItem, items[key]) + elif isinstance(items, list) or isinstance(items, tuple) or isinstance(items, Set): + if isinstance(items, list): + items.sort() + for item in items: + self.addTreeNodes(parentItem, item) + self.tree.SetItemPyData(parentItem, items) + else: + if self.open_type == 1 and items: + unpack = None + try: + unpack = loads(items) + except: + unpack = None + if unpack is not None: + self.addTreeNodes(parentItem, unpack) + else: + self.tree.AppendItem(parentItem, `items`) + else: + self.tree.AppendItem(parentItem, `items`) + + def print_dict(self, data, level=0, comm=False): + + if isinstance(data, dict): + for i in data: + try: + show = str(i) + except: + show = repr(i) + if not show.isalpha(): + show = repr(i) + print " "*level, show + ':' + self.print_dict(data[i], level+1) + elif isinstance(data, list) or isinstance(data, Set) or isinstance(data, tuple): + data = list(data) + if not data: + print " "*level, "[]" + #else: + # print + for i in xrange(len(data)): + print " "*level, '[' + str(i) + ']:', + if isinstance(data[i], dict) or \ + isinstance(data[i], list) or \ + isinstance(data[i], Set) or \ + isinstance(data[i], tuple): + newlevel = level + 1 + print + else: + newlevel = 0 + self.print_dict(data[i], newlevel) + else: + try: + show = str(data) + except: + show = repr(data) + if not show.isalpha(): + show = repr(data) + if comm: + print " "*level, show + ':' + else: + print " "*level, show + + def openFile(self, db_path): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Try 
to open coded", repr(db_path) + data = self.openDB(db_path) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Open Type:", self.open_type_list[self.open_type] + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "File Size:", len(`data`) + #self.loadTreeData(db_path, data) + print 'open db:', repr(db_path) + print + item = data.first() + num = 0 + while item: + key,value = item + unpack = None + try: + unpack = loads(value) + except: + unpack = None + self.print_dict(key, 0, True) + self.print_dict(unpack, 1) + item = data.next() + num += 1 + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Opened items", num + + #print data + + def openDB(self, db_path): + #open_type = self.db_type_rb.GetSelection() +# if self.db_path.endswith('pickle'): +# open_type = 2 + + assert os.path.exists(db_path) + d = None + for open_type in range(4): + try: + d = self._openDB(db_path, open_type) + except: + print_exc() + continue + if d is not None: + self.open_type = open_type + break + return d + + def _openDB(self, db_path, open_type): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "..Try to open by", self.open_type_list[open_type] + d = None + if open_type == 1: # 'bsddb.dbshelve' + db_types = [db.DB_BTREE, db.DB_HASH] + for dbtype in db_types: + try: + d = dbshelve.open(db_path, filetype=dbtype) + break + except: + d = None +# except: +# print_exc() + if d is not None: + return d.cursor() +# data = dict(d.items()) +# d.close() +# return data + else: + return d + + elif open_type == 0: # 'bsddb.db' + try: + d = db.DB() + d.open(db_path, db.DB_UNKNOWN) + except: + d = None +# print_exc() + if d is not None: + return d.cursor() +# data = dict(d.items()) +# d.close() +# return data + else: + return d + + elif open_type == 2: # 'pickle' + try: + f = open(db_path) + d = load(f) + f.close() + return d + except: + return None + + else: + try: + f = open(db_path) + d = f.readlines() + f.close() + return d + except: + return None + + +if __name__ == '__main__': + filename = sys.argv[1] + dbreader = DBReader() + dbreader.openFile(filename) + + \ No newline at end of file diff --git a/tribler-mod/Tribler/Core/CacheDB/read_db.py.bak b/tribler-mod/Tribler/Core/CacheDB/read_db.py.bak new file mode 100644 index 0000000..a705d24 --- /dev/null +++ b/tribler-mod/Tribler/Core/CacheDB/read_db.py.bak @@ -0,0 +1,200 @@ +# Written by Jie Yang +# see LICENSE.txt for license information + +# -*- coding:gb2312 -*- +# A GUI to read bsddb or pickle and display the data by a tree ctrl. 
+ +import sys +from traceback import print_exc +import os +from bsddb import dbshelve, db +from cPickle import load, loads +from sets import Set + +class DBReader: + def __init__(self): + self.open_type_list = ['bsddb.db', 'dbshelve', 'pickle', 'file'] + + def loadTreeData(self, db_path, data): + self.sb.SetStatusText('loading '+db_path, 2) + testdata = {1:'abc', 2:[1, 'a', 2.53], 3:{'a':'x', 'b':'y'}} + subroot = self.tree.AppendItem(self.root, db_path) + #self.open_type = self.db_type_rb.GetSelection() + self.addTreeNodes(subroot, data) + self.tree.SetItemPyData(subroot, data) + self.sb.SetStatusText('loaded '+db_path, 2) + self.sb.Refresh() + + def addTreeNodes(self, parentItem, items): + if isinstance(items, dict): + keys = items.keys() + keys.sort() + for key in keys: + newItem = self.tree.AppendItem(parentItem, `key`) + self.addTreeNodes(newItem, items[key]) + self.tree.SetItemPyData(newItem, items[key]) + elif isinstance(items, list) or isinstance(items, tuple) or isinstance(items, Set): + if isinstance(items, list): + items.sort() + for item in items: + self.addTreeNodes(parentItem, item) + self.tree.SetItemPyData(parentItem, items) + else: + if self.open_type == 1 and items: + unpack = None + try: + unpack = loads(items) + except: + unpack = None + if unpack is not None: + self.addTreeNodes(parentItem, unpack) + else: + self.tree.AppendItem(parentItem, `items`) + else: + self.tree.AppendItem(parentItem, `items`) + + def print_dict(self, data, level=0, comm=False): + + if isinstance(data, dict): + for i in data: + try: + show = str(i) + except: + show = repr(i) + if not show.isalpha(): + show = repr(i) + print " "*level, show + ':' + self.print_dict(data[i], level+1) + elif isinstance(data, list) or isinstance(data, Set) or isinstance(data, tuple): + data = list(data) + if not data: + print " "*level, "[]" + #else: + # print + for i in xrange(len(data)): + print " "*level, '[' + str(i) + ']:', + if isinstance(data[i], dict) or \ + isinstance(data[i], list) or \ + isinstance(data[i], Set) or \ + isinstance(data[i], tuple): + newlevel = level + 1 + print + else: + newlevel = 0 + self.print_dict(data[i], newlevel) + else: + try: + show = str(data) + except: + show = repr(data) + if not show.isalpha(): + show = repr(data) + if comm: + print " "*level, show + ':' + else: + print " "*level, show + + def openFile(self, db_path): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Try to open coded", repr(db_path) + data = self.openDB(db_path) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Open Type:", self.open_type_list[self.open_type] + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "File Size:", len(`data`) + #self.loadTreeData(db_path, data) + print 'open db:', repr(db_path) + print + item = data.first() + num = 0 + while item: + key,value = item + unpack = None + try: + unpack = loads(value) + except: + unpack = None + self.print_dict(key, 0, True) + self.print_dict(unpack, 1) + item = data.next() + num += 1 + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Opened items", num + + #print data + + def openDB(self, db_path): + #open_type = self.db_type_rb.GetSelection() +# if self.db_path.endswith('pickle'): +# open_type = 2 + + assert os.path.exists(db_path) + d = None + for open_type in range(4): + try: + d = self._openDB(db_path, open_type) + except: + print_exc() + continue + if d is not None: + self.open_type = open_type + break + return d + + def _openDB(self, db_path, open_type): + print 
>>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "..Try to open by", self.open_type_list[open_type] + d = None + if open_type == 1: # 'bsddb.dbshelve' + db_types = [db.DB_BTREE, db.DB_HASH] + for dbtype in db_types: + try: + d = dbshelve.open(db_path, filetype=dbtype) + break + except: + d = None +# except: +# print_exc() + if d is not None: + return d.cursor() +# data = dict(d.items()) +# d.close() +# return data + else: + return d + + elif open_type == 0: # 'bsddb.db' + try: + d = db.DB() + d.open(db_path, db.DB_UNKNOWN) + except: + d = None +# print_exc() + if d is not None: + return d.cursor() +# data = dict(d.items()) +# d.close() +# return data + else: + return d + + elif open_type == 2: # 'pickle' + try: + f = open(db_path) + d = load(f) + f.close() + return d + except: + return None + + else: + try: + f = open(db_path) + d = f.readlines() + f.close() + return d + except: + return None + + +if __name__ == '__main__': + filename = sys.argv[1] + dbreader = DBReader() + dbreader.openFile(filename) + + \ No newline at end of file diff --git a/tribler-mod/Tribler/Core/CacheDB/sqlitecachedb.py b/tribler-mod/Tribler/Core/CacheDB/sqlitecachedb.py new file mode 100644 index 0000000..dd7d164 --- /dev/null +++ b/tribler-mod/Tribler/Core/CacheDB/sqlitecachedb.py @@ -0,0 +1,1008 @@ +from time import localtime, strftime +# Written by Jie Yang +# see LICENSE.txt for license information + +import sys +import os +from time import sleep +from base64 import encodestring, decodestring +from unicode import dunno2unicode +import threading +from traceback import print_exc, print_stack + +from Tribler.__init__ import LIBRARYNAME + +# ONLY USE APSW >= 3.5.9-r1 +import apsw +#support_version = (3,5,9) +#support_version = (3,3,13) +#apsw_version = tuple([int(r) for r in apsw.apswversion().split('-')[0].split('.')]) +##print apsw_version +#assert apsw_version >= support_version, "Required APSW Version >= %d.%d.%d."%support_version + " But your version is %d.%d.%d.\n"%apsw_version + \ +# "Please download and install it from http://code.google.com/p/apsw/" + +CURRENT_MAIN_DB_VERSION = 2 + +CREATE_SQL_FILE = None +CREATE_SQL_FILE_POSTFIX = os.path.join(LIBRARYNAME, 'tribler_sdb_v'+str(CURRENT_MAIN_DB_VERSION)+'.sql') +DB_FILE_NAME = 'tribler.sdb' +DB_DIR_NAME = 'sqlite' # db file path = DB_DIR_NAME/DB_FILE_NAME +BSDDB_DIR_NAME = 'bsddb' +DEFAULT_BUSY_TIMEOUT = 10000 +MAX_SQL_BATCHED_TO_TRANSACTION = 1000 # don't change it unless carefully tested. 
A transaction with 1000 batched updates took 1.5 seconds +NULL = None +icon_dir = None +SHOW_ALL_EXECUTE = False +costs = [] +cost_reads = [] + +class Warning(Exception): + pass + +def init(config, db_exception_handler = None): + """ create sqlite database """ + global CREATE_SQL_FILE + global icon_dir + config_dir = config['state_dir'] + install_dir = config['install_dir'] + CREATE_SQL_FILE = os.path.join(install_dir,CREATE_SQL_FILE_POSTFIX) + sqlitedb = SQLiteCacheDB.getInstance(db_exception_handler) + + if config['superpeer']: + sqlite_db_path = ':memory:' + else: + sqlite_db_path = os.path.join(config_dir, DB_DIR_NAME, DB_FILE_NAME) + + + + bsddb_path = os.path.join(config_dir, BSDDB_DIR_NAME) + icon_dir = os.path.abspath(config['peer_icon_path']) + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","cachedb: init: SQL FILE",sqlite_db_path + + sqlitedb.initDB(sqlite_db_path, CREATE_SQL_FILE, bsddb_path) # the first place to create db in Tribler + return sqlitedb + +def done(config_dir): + SQLiteCacheDB.getInstance().close() + +def make_filename(config_dir,filename): + if config_dir is None: + return filename + else: + return os.path.join(config_dir,filename) + +def bin2str(bin): + # Full BASE64-encoded + return encodestring(bin).replace("\n","") + +def str2bin(str): + return decodestring(str) + +def print_exc_plus(): + """ + Print the usual traceback information, followed by a listing of all the + local variables in each frame. + http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52215 + http://initd.org/pub/software/pysqlite/apsw/3.3.13-r1/apsw.html#augmentedstacktraces + """ + + tb = sys.exc_info()[2] + stack = [] + + while tb: + stack.append(tb.tb_frame) + tb = tb.tb_next + + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Locals by frame, innermost last" + + for frame in stack: + print >> sys.stderr + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Frame %s in %s at line %s" % (frame.f_code.co_name, + frame.f_code.co_filename, + frame.f_lineno) + for key, value in frame.f_locals.items(): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "\t%20s = " % key, + #We have to be careful not to cause a new error in our error + #printer! Calling str() on an unknown object could cause an + #error we don't want. 
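The bin2str()/str2bin() pair above exists because base64.encodestring() inserts a newline after every 76 characters of output, which would break storing binary permids and infohashes as single-line TEXT values; a quick round-trip check with a made-up value:

    from base64 import encodestring, decodestring

    permid = '\x00\x01made-up-binary-permid'             # placeholder binary value
    permid_str = encodestring(permid).replace("\n", "")  # what bin2str() does
    assert decodestring(permid_str) == permid            # str2bin() restores it exactly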
+ try: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", value + except: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "" + +class safe_dict(dict): + def __init__(self, *args, **kw): + self.lock = threading.RLock() + dict.__init__(self, *args, **kw) + + def __getitem__(self, key): + self.lock.acquire() + try: + return dict.__getitem__(self, key) + finally: + self.lock.release() + + def __setitem__(self, key, value): + self.lock.acquire() + try: + dict.__setitem__(self, key, value) + finally: + self.lock.release() + + def __delitem__(self, key): + self.lock.acquire() + try: + dict.__delitem__(self, key) + finally: + self.lock.release() + + def __contains__(self, key): + self.lock.acquire() + try: + return dict.__contains__(self, key) + finally: + self.lock.release() + + def values(self): + self.lock.acquire() + try: + return dict.values(self) + finally: + self.lock.release() + +class SQLiteCacheDBBase: + + def __init__(self,db_exception_handler=None): + self.exception_handler = db_exception_handler + self.cursor_table = safe_dict() # {thread_name:cur} + self.cache_transaction_table = safe_dict() # {thread_name:[sql] + self.class_variables = safe_dict({'db_path':None,'busytimeout':None}) # busytimeout is in milliseconds + + self.permid_id = safe_dict() + self.infohash_id = safe_dict() + self.show_execute = False + + #TODO: All global variables must be protected to be thread safe? + self.status_table = None + self.category_table = None + self.src_table = None + self.applied_pragma_sync_norm = False + + def __del__(self): + self.close() + + def close(self, clean=False): + # only close the connection object in this thread, don't close other thread's connection object + thread_name = threading.currentThread().getName() + cur = self.getCursor(create=False) + + if cur: + con = cur.getconnection() + cur.close() + con.close() + con = None + del self.cursor_table[thread_name] + if clean: # used for test suite + self.permid_id = safe_dict() + self.infohash_id = safe_dict() + self.exception_handler = None + self.class_variables = safe_dict({'db_path':None,'busytimeout':None}) + self.cursor_table = safe_dict() + self.cache_transaction_table = safe_dict() + + + # --------- static functions -------- + def getCursor(self, create=True): + thread_name = threading.currentThread().getName() + curs = self.cursor_table + cur = curs.get(thread_name, None) # return [cur, cur, lib] or None + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '-------------- getCursor::', len(curs), time(), curs.keys() + if cur is None and create: + self.openDB(self.class_variables['db_path'], self.class_variables['busytimeout']) # create a new db obj for this thread + cur = curs.get(thread_name) + + return cur + + def openDB(self, dbfile_path=None, busytimeout=DEFAULT_BUSY_TIMEOUT): + """ + Open a SQLite database. Only one and the same database can be opened. + @dbfile_path The path to store the database file. + Set dbfile_path=':memory:' to create a db in memory. + @busytimeout Set the maximum time, in milliseconds, that SQLite will wait if the database is locked. 
+ """ + + # already opened a db in this thread, reuse it + thread_name = threading.currentThread().getName() + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","sqlcachedb: openDB",dbfile_path,thread_name + if thread_name in self.cursor_table: + #assert dbfile_path == None or self.class_variables['db_path'] == dbfile_path + return self.cursor_table[thread_name] + + assert dbfile_path, "You must specify the path of database file" + + if dbfile_path.lower() != ':memory:': + db_dir,db_filename = os.path.split(dbfile_path) + if db_dir and not os.path.isdir(db_dir): + os.makedirs(db_dir) + + con = apsw.Connection(dbfile_path) + con.setbusytimeout(busytimeout) + + cur = con.cursor() + self.cursor_table[thread_name] = cur + + if not self.applied_pragma_sync_norm: + # http://www.sqlite.org/pragma.html + # When synchronous is NORMAL, the SQLite database engine will still + # pause at the most critical moments, but less often than in FULL + # mode. There is a very small (though non-zero) chance that a power + # failure at just the wrong time could corrupt the database in + # NORMAL mode. But in practice, you are more likely to suffer a + # catastrophic disk failure or some other unrecoverable hardware + # fault. + # + self.applied_pragma_sync_norm = True + cur.execute("PRAGMA synchronous = NORMAL;") + + return cur + + def createDBTable(self, sql_create_table, dbfile_path, busytimeout=DEFAULT_BUSY_TIMEOUT): + """ + Create a SQLite database. + @sql_create_table The sql statements to create tables in the database. + Every statement must end with a ';'. + @dbfile_path The path to store the database file. Set dbfile_path=':memory:' to creates a db in memory. + @busytimeout Set the maximum time, in milliseconds, that SQLite will wait if the database is locked. + Default = 10000 milliseconds + """ + cur = self.openDB(dbfile_path, busytimeout) + cur.execute(sql_create_table) # it is suggested to include begin & commit in the script + + def initDB(self, sqlite_filepath, + create_sql_filename = None, + bsddb_dirpath = None, + busytimeout = DEFAULT_BUSY_TIMEOUT, + check_version = True, + current_db_version = CURRENT_MAIN_DB_VERSION): + """ + Create and initialize a SQLite database given a sql script. + Only one db can be opened. If the given dbfile_path is different with the opened DB file, warn and exit + @configure_dir The directory containing 'bsddb' directory + @sql_filename The path of sql script to create the tables in the database + Every statement must end with a ';'. + @busytimeout Set the maximum time, in milliseconds, to wait and retry + if failed to acquire a lock. Default = 5000 milliseconds + """ + if create_sql_filename is None: + create_sql_filename=CREATE_SQL_FILE + try: + self.lock.acquire() + + # verify db path identity + class_db_path = self.class_variables['db_path'] + if sqlite_filepath is None: # reuse the opened db file? 
+ if class_db_path is not None: # yes, reuse it + # reuse the busytimeout + return self.openDB(class_db_path, self.class_variables['busytimeout']) + else: # no db file opened + raise Exception, "You must specify the path of database file when open it at the first time" + else: + if class_db_path is None: # the first time to open db path, store it + + if bsddb_dirpath != None and os.path.isdir(bsddb_dirpath) and not os.path.exists(sqlite_filepath): + self.convertFromBsd(bsddb_dirpath, sqlite_filepath, create_sql_filename) # only one chance to convert from bsddb + #print 'quit now' + #sys.exit(0) + # open the db if it exists (by converting from bsd) and is not broken, otherwise create a new one + # it will update the db if necessary by checking the version number + self.safelyOpenTriblerDB(sqlite_filepath, create_sql_filename, busytimeout, check_version=check_version, current_db_version=current_db_version) + + self.class_variables = {'db_path': sqlite_filepath, 'busytimeout': int(busytimeout)} + + return self.openDB() # return the cursor, won't reopen the db + + elif sqlite_filepath != class_db_path: # not the first time to open db path, check if it is the same + raise Exception, "Only one database file can be opened. You have opened %s and are trying to open %s." % (class_db_path, sqlite_filepath) + + finally: + self.lock.release() + + def safelyOpenTriblerDB(self, dbfile_path, sql_create, busytimeout=DEFAULT_BUSY_TIMEOUT, check_version=False, current_db_version=None): + """ + open the db if possible, otherwise create a new one + update the db if necessary by checking the version number + + safeOpenDB(): + try: + if sqlite db doesn't exist: + raise Error + open sqlite db + read sqlite_db_version + if sqlite_db_version dosen't exist: + raise Error + except: + close and delete sqlite db if possible + create new sqlite db file without sqlite_db_version + write sqlite_db_version at last + commit + open sqlite db + read sqlite_db_version + # must ensure these steps after except will not fail, otherwise force to exit + + if sqlite_db_version < current_db_version: + updateDB(sqlite_db_version, current_db_version) + commit + update sqlite_db_version at last + commit + """ + try: + if not os.path.isfile(dbfile_path): + raise Warning("No existing database found. 
Attempting to creating a new database %s" % repr(dbfile_path)) + + cur = self.openDB(dbfile_path, busytimeout) + if check_version: + sqlite_db_version = self.readDBVersion() + if sqlite_db_version == NULL or int(sqlite_db_version)<1: + raise NotImplementedError + except Exception, exception: + if isinstance(exception, Warning): + # user friendly warning to log the creation of a new database + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", exception + + else: + # user unfriendly exception message because something went wrong + print_exc() + + if os.path.isfile(dbfile_path): + self.close(clean=True) + os.remove(dbfile_path) + + if os.path.isfile(sql_create): + f = open(sql_create) + sql_create_tables = f.read() + f.close() + else: + raise Exception, "Cannot open sql script at %s" % os.path.realpath(sql_create) + + self.createDBTable(sql_create_tables, dbfile_path, busytimeout) + if check_version: + sqlite_db_version = self.readDBVersion() + + if check_version: + self.checkDB(sqlite_db_version, current_db_version) + + def report_exception(e): + #return # Jie: don't show the error window to bother users + if self.exception_handler != None: + self.exception_handler(e) + report_exception = staticmethod(report_exception) + + def checkDB(self, db_ver, curr_ver): + # read MyDB and check the version number. + if not db_ver or not curr_ver: + self.updateDB(db_ver,curr_ver) + return + db_ver = int(db_ver) + curr_ver = int(curr_ver) + #print "check db", db_ver, curr_ver + if db_ver != curr_ver: # TODO + self.updateDB(db_ver,curr_ver) + + def updateDB(self,db_ver,curr_ver): + pass #TODO + + def readDBVersion(self): + cur = self.getCursor() + sql = u"select value from MyInfo where entry='version'" + res = self.fetchone(sql) + if res: + find = list(res) + return find[0] # throw error if something wrong + else: + return None + + def writeDBVersion(self, version, commit=True): + sql = u"UPDATE MyInfo SET value=? WHERE entry='version'" + self.execute_write(sql, [version], commit=commit) + + def show_sql(self, switch): + # temporary show the sql executed + self.show_execute = switch + + # --------- generic functions ------------- + + def commit(self): + self.transaction() + + def _execute(self, sql, args=None): + cur = self.getCursor() + + if SHOW_ALL_EXECUTE or self.show_execute: + thread_name = threading.currentThread().getName() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '===', thread_name, '===\n', sql, '\n-----\n', args, '\n======\n' + try: + if args is None: + return cur.execute(sql) + else: + return cur.execute(sql, args) + except Exception, msg: + print_exc() + print_stack() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "cachedb: execute error:", Exception, msg + thread_name = threading.currentThread().getName() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '===', thread_name, '===\n', sql, '\n-----\n', args, '\n======\n' + #return None + # ARNODB: this is incorrect, it should reraise the exception + # such that _transaction can rollback or recommit. + # This bug already reported by Johan + raise msg + + + def execute_read(self, sql, args=None): + # this is only called for reading. 
If you want to write the db, always use execute_write or executemany + return self._execute(sql, args) + + def execute_write(self, sql, args=None, commit=True): + self.cache_transaction(sql, args) + if commit: + self.commit() + + def executemany(self, sql, args, commit=True): + + thread_name = threading.currentThread().getName() + if thread_name not in self.cache_transaction_table: + self.cache_transaction_table[thread_name] = [] + all = [(sql, arg) for arg in args] + self.cache_transaction_table[thread_name].extend(all) + + if commit: + self.commit() + + def cache_transaction(self, sql, args=None): + thread_name = threading.currentThread().getName() + if thread_name not in self.cache_transaction_table: + self.cache_transaction_table[thread_name] = [] + self.cache_transaction_table[thread_name].append((sql, args)) + + def transaction(self, sql=None, args=None): + if sql: + self.cache_transaction(sql, args) + + thread_name = threading.currentThread().getName() + + n = 0 + sql_full = '' + arg_list = [] + sql_queue = self.cache_transaction_table.get(thread_name,None) + if sql_queue: + while True: + try: + _sql,_args = sql_queue.pop(0) + except IndexError: + break + + _sql = _sql.strip() + if not _sql: + continue + if not _sql.endswith(';'): + _sql += ';' + sql_full += _sql + '\n' + if _args != None: + arg_list += list(_args) + n += 1 + + # if too many sql in cache, split them into batches to prevent processing and locking DB for a long time + # TODO: optimize the value of MAX_SQL_BATCHED_TO_TRANSACTION + if n % MAX_SQL_BATCHED_TO_TRANSACTION == 0: + self._transaction(sql_full, arg_list) + sql_full = '' + arg_list = [] + + self._transaction(sql_full, arg_list) + + def _transaction(self, sql, args=None): + if sql: + sql = 'BEGIN TRANSACTION; \n' + sql + 'COMMIT TRANSACTION;' + try: + self._execute(sql, args) + except Exception,e: + self.commit_retry_if_busy_or_rollback(e,0) + + def commit_retry_if_busy_or_rollback(self,e,tries): + """ + Arno: + SQL_BUSY errors happen at the beginning of the experiment, + very quickly after startup (e.g. 0.001 s), so the busy timeout + is not honoured for some reason. After the initial errors, + they no longer occur. + """ + if str(e).startswith("BusyError"): + try: + self._execute("COMMIT") + except Exception,e2: + if tries < 5: #self.max_commit_retries + # Spec is unclear whether next commit will also has + # 'busytimeout' seconds to try to get a write lock. + sleep(pow(2.0,tries+2)/100.0) + self.commit_retry_if_busy_or_rollback(e2,tries+1) + else: + self.rollback(tries) + raise Exception,e2 + else: + self.rollback(tries) + m = "cachedb: TRANSACTION ERROR "+threading.currentThread().getName()+' '+str(e) + raise Exception, m + + + def rollback(self, tries): + print_exc() + try: + self._execute("ROLLBACK") + except Exception, e: + # May be harmless, see above. Unfortunately they don't specify + # what the error is when an attempt is made to roll back + # an automatically rolled back transaction. 
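commit_retry_if_busy_or_rollback() above retries a COMMIT with an exponential backoff of pow(2, tries+2)/100 seconds (0.04 s, 0.08 s, 0.16 s, ...) for at most five attempts before rolling back. An iterative restatement of the same schedule; the function and its execute argument are illustrative, not part of this class.

    from time import sleep

    def commit_with_backoff(execute, max_tries=5):
        # retry COMMIT only on SQLite busy errors, otherwise roll back and re-raise
        for tries in range(max_tries):
            try:
                execute("COMMIT")
                return
            except Exception, e:
                if not str(e).startswith("BusyError") or tries == max_tries - 1:
                    execute("ROLLBACK")
                    raise
                sleep(pow(2.0, tries + 2) / 100.0)   # 0.04, 0.08, 0.16, 0.32 s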
+ m = "cachedb: ROLLBACK ERROR "+threading.currentThread().getName()+' '+str(e) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'SQLite Database', m + raise Exception, m + + + # -------- Write Operations -------- + def insert(self, table_name, commit=True, **argv): + if len(argv) == 1: + sql = 'INSERT INTO %s (%s) VALUES (?);'%(table_name, argv.keys()[0]) + else: + questions = '?,'*len(argv) + sql = 'INSERT INTO %s %s VALUES (%s);'%(table_name, tuple(argv.keys()), questions[:-1]) + self.execute_write(sql, argv.values(), commit) + + def insertMany(self, table_name, values, keys=None, commit=True): + """ values must be a list of tuples """ + + questions = u'?,'*len(values[0]) + if keys is None: + sql = u'INSERT INTO %s VALUES (%s);'%(table_name, questions[:-1]) + else: + sql = u'INSERT INTO %s %s VALUES (%s);'%(table_name, tuple(keys), questions[:-1]) + self.executemany(sql, values, commit=commit) + + def update(self, table_name, where=None, commit=True, **argv): + sql = u'UPDATE %s SET '%table_name + arg = [] + for k,v in argv.iteritems(): + if type(v) is tuple: + sql += u'%s %s ?,' % (k, v[0]) + arg.append(v[1]) + else: + sql += u'%s=?,' % k + arg.append(v) + sql = sql[:-1] + if where != None: + sql += u' where %s'%where + self.execute_write(sql, arg, commit) + + def delete(self, table_name, commit=True, **argv): + sql = u'DELETE FROM %s WHERE '%table_name + arg = [] + for k,v in argv.iteritems(): + if type(v) is tuple: + sql += u'%s %s ? AND ' % (k, v[0]) + arg.append(v[1]) + else: + sql += u'%s=? AND ' % k + arg.append(v) + sql = sql[:-5] + self.execute_write(sql, argv.values(), commit) + + # -------- Read Operations -------- + def size(self, table_name): + num_rec_sql = u"SELECT count(*) FROM %s;"%table_name + result = self.fetchone(num_rec_sql) + return result + + def fetchone(self, sql, args=None): + # returns NULL: if the result is null + # return None: if it doesn't found any match results + find = self.execute_read(sql, args) + if not find: + return NULL + else: + find = list(find) + if len(find) > 0: + find = find[0] + else: + return NULL + if len(find)>1: + return find + else: + return find[0] + + def fetchall(self, sql, args=None, retry=0): + res = self.execute_read(sql, args) + if res != None: + find = list(res) + return find + else: + return [] # should it return None? + + def getOne(self, table_name, value_name, where=None, conj='and', **kw): + """ value_name could be a string, a tuple of strings, or '*' + """ + + if isinstance(value_name, tuple): + value_names = u",".join(value_name) + elif isinstance(value_name, list): + value_names = u",".join(value_name) + else: + value_names = value_name + + if isinstance(table_name, tuple): + table_names = u",".join(table_name) + elif isinstance(table_name, list): + table_names = u",".join(table_name) + else: + table_names = table_name + + sql = u'select %s from %s'%(value_names, table_names) + + if where or kw: + sql += u' where ' + if where: + sql += where + if kw: + sql += u' %s '%conj + if kw: + arg = [] + for k,v in kw.iteritems(): + if type(v) is tuple: + operator = v[0] + arg.append(v[1]) + else: + operator = "=" + arg.append(v) + sql += u' %s %s ? 
' % (k, operator) + sql += conj + sql = sql[:-len(conj)] + else: + arg = None + + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'SQL: %s %s' % (sql, arg) + return self.fetchone(sql,arg) + + def getAll(self, table_name, value_name, where=None, group_by=None, having=None, order_by=None, limit=None, offset=None, conj='and', **kw): + """ value_name could be a string, or a tuple of strings + order by is represented as order_by + group by is represented as group_by + """ + if isinstance(value_name, tuple): + value_names = u",".join(value_name) + elif isinstance(value_name, list): + value_names = u",".join(value_name) + else: + value_names = value_name + + if isinstance(table_name, tuple): + table_names = u",".join(table_name) + elif isinstance(table_name, list): + table_names = u",".join(table_name) + else: + table_names = table_name + + sql = u'select %s from %s'%(value_names, table_names) + + if where or kw: + sql += u' where ' + if where: + sql += where + if kw: + sql += u' %s '%conj + if kw: + arg = [] + for k,v in kw.iteritems(): + if type(v) is tuple: + operator = v[0] + arg.append(v[1]) + else: + operator = "=" + arg.append(v) + + sql += u' %s %s ?' % (k, operator) + sql += conj + sql = sql[:-len(conj)] + else: + arg = None + + if group_by != None: + sql += u' group by ' + group_by + if having != None: + sql += u' having ' + having + if order_by != None: + sql += u' order by ' + order_by # you should add desc after order_by to reversely sort, i.e, 'last_seen desc' as order_by + if limit != None: + sql += u' limit %d'%limit + if offset != None: + sql += u' offset %d'%offset + + try: + return self.fetchall(sql, arg) or [] + except Exception, msg: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "sqldb: Wrong getAll sql statement:", sql + raise Exception, msg + + # ----- Tribler DB operations ---- + + def convertFromBsd(self, bsddb_dirpath, dbfile_path, sql_filename, delete_bsd=False): + # convert bsddb data to sqlite db. return false if cannot find or convert the db + peerdb_filepath = os.path.join(bsddb_dirpath, 'peers.bsd') + if not os.path.isfile(peerdb_filepath): + return False + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "sqldb: ************ convert bsddb to sqlite", sql_filename + converted = convert_db(bsddb_dirpath, dbfile_path, sql_filename) + if converted is True and delete_bsd is True: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "sqldb: delete bsddb directory" + for filename in os.listdir(bsddb_dirpath): + if filename.endswith('.bsd'): + abs_path = os.path.join(bsddb_dirpath, filename) + os.remove(abs_path) + try: + os.removedirs(bsddb_dirpath) + except: # the dir is not empty + pass + + + #------------- useful functions for multiple handlers ---------- + def insertPeer(self, permid, update=True, commit=True, **argv): + """ Insert a peer. permid is the binary permid. + If the peer is already in db and update is True, update the peer. 
+ """ + peer_id = self.getPeerID(permid) + peer_existed = False + if 'name' in argv: + argv['name'] = dunno2unicode(argv['name']) + if peer_id != None: + peer_existed = True + if update: + where=u'peer_id=%d'%peer_id + self.update('Peer', where, commit=commit, **argv) + else: + self.insert('Peer', permid=bin2str(permid), commit=commit, **argv) + return peer_existed + + def deletePeer(self, permid=None, peer_id=None, force=True, commit=True): + if peer_id is None: + peer_id = self.getPeerID(permid) + + deleted = False + if peer_id != None: + if force: + self.delete('Peer', peer_id=peer_id, commit=commit) + else: + self.delete('Peer', peer_id=peer_id, friend=0, superpeer=0, commit=commit) + deleted = not self.hasPeer(permid, check_db=True) + if deleted and permid in self.permid_id: + self.permid_id.pop(permid) + + return deleted + + def getPeerID(self, permid): + assert isinstance(permid, str), permid + # permid must be binary + if permid in self.permid_id: + return self.permid_id[permid] + + sql_get_peer_id = "SELECT peer_id FROM Peer WHERE permid==?" + peer_id = self.fetchone(sql_get_peer_id, (bin2str(permid),)) + if peer_id != None: + self.permid_id[permid] = peer_id + + return peer_id + + def hasPeer(self, permid, check_db=False): + if not check_db: + return bool(self.getPeerID(permid)) + else: + permid_str = bin2str(permid) + sql_get_peer_id = "SELECT peer_id FROM Peer WHERE permid==?" + peer_id = self.fetchone(sql_get_peer_id, (permid_str,)) + if peer_id is None: + return False + else: + return True + + def insertInfohash(self, infohash, check_dup=False, commit=True): + """ Insert an infohash. infohash is binary """ + + if infohash in self.infohash_id: + if check_dup: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'sqldb: infohash to insert already exists', `infohash` + return + + infohash_str = bin2str(infohash) + sql_insert_torrent = "INSERT INTO Torrent (infohash) VALUES (?)" + try: + self.execute_write(sql_insert_torrent, (infohash_str,), commit) + except sqlite.IntegrityError, msg: + if check_dup: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'sqldb:', sqlite.IntegrityError, msg, `infohash` + + def deleteInfohash(self, infohash=None, torrent_id=None, commit=True): + if torrent_id is None: + torrent_id = self.getTorrentID(infohash) + + if torrent_id != None: + self.delete('Torrent', torrent_id=torrent_id, commit=commit) + if infohash in self.infohash_id: + self.infohash_id.pop(infohash) + + def getTorrentID(self, infohash): + assert isinstance(infohash, str), infohash + if infohash in self.infohash_id: + return self.infohash_id[infohash] + + sql_get_torrent_id = "SELECT torrent_id FROM Torrent WHERE infohash==?" + tid = self.fetchone(sql_get_torrent_id, (bin2str(infohash),)) + if tid != None: + self.infohash_id[infohash] = tid + return tid + + def getInfohash(self, torrent_id): + sql_get_infohash = "SELECT infohash FROM Torrent WHERE torrent_id==?" 
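The getOne()/getAll() helpers above turn keyword arguments into a WHERE clause: a plain value becomes an equality test, and an (operator, value) tuple supplies its own comparison. A sketch of typical calls, assuming init() has already set up the database; friend, permid and name are Peer columns used elsewhere in this module, while last_seen is assumed from the order_by comment in getAll().

    from Tribler.Core.CacheDB.sqlitecachedb import SQLiteCacheDB, bin2str

    db = SQLiteCacheDB.getInstance()
    # roughly: select name,permid from Peer where last_seen > ? and friend = ?
    #          order by last_seen desc limit 10
    recent_friends = db.getAll('Peer', ('name', 'permid'),
                               last_seen=('>', 0), friend=1,
                               order_by='last_seen desc', limit=10)
    # roughly: select peer_id from Peer where permid = ?
    permid = '\x00\x01made-up-binary-permid'   # placeholder value
    peer_id = db.getOne('Peer', 'peer_id', permid=bin2str(permid))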
+ arg = (torrent_id,) + ret = self.fetchone(sql_get_infohash, arg) + ret = str2bin(ret) + return ret + + def getTorrentStatusTable(self): + if self.status_table is None: + st = self.getAll('TorrentStatus', ('lower(name)', 'status_id')) + self.status_table = dict(st) + return self.status_table + + def getTorrentCategoryTable(self): + # The key is in lower case + if self.category_table is None: + ct = self.getAll('Category', ('lower(name)', 'category_id')) + self.category_table = dict(ct) + return self.category_table + + def getTorrentSourceTable(self): + # Don't use lower case because some URLs are case sensitive + if self.src_table is None: + st = self.getAll('TorrentSource', ('name', 'source_id')) + self.src_table = dict(st) + return self.src_table + + def test(self): + res1 = self.getAll('Category', '*') + res2 = len(self.getAll('Peer', 'name', 'name is not NULL')) + return (res1, res2) + +class SQLiteCacheDBV2(SQLiteCacheDBBase): + def updateDB(self, fromver, tover): + # bring database up to version 2, if necessary + if fromver < 2: + sql = """ +-- Patch for Moderation and VoteCast + +CREATE TABLE ModerationCast ( +mod_id text, +mod_name text, +infohash text not NULL, +time_stamp integer, +media_type text, +quality text, +tags text, +signature integer +); + +CREATE INDEX moderationcast_idx +ON ModerationCast +(mod_id); + +---------------------------------------- + +CREATE TABLE Moderators ( +mod_id integer, +status integer, +time_stamp integer +); + +CREATE UNIQUE INDEX moderators_idx +ON Moderators +(mod_id); + +---------------------------------------- + +CREATE TABLE VoteCast ( +mod_id text, +voter_id integer, +vote text, +time_stamp integer +); + +CREATE UNIQUE INDEX votecast_idx +ON VoteCast +(mod_id, voter_id); + + +-- Patch for BuddyCast 4 + +ALTER TABLE MyPreference ADD COLUMN click_position INTEGER DEFAULT -1; +ALTER TABLE MyPreference ADD COLUMN reranking_strategy INTEGER DEFAULT -1; +ALTER TABLE Preference ADD COLUMN click_position INTEGER DEFAULT -1; +ALTER TABLE Preference ADD COLUMN reranking_strategy INTEGER DEFAULT -1; +CREATE TABLE ClicklogSearch ( + peer_id INTEGER DEFAULT 0, + torrent_id INTEGER DEFAULT 0, + term_id INTEGER DEFAULT 0, + term_order INTEGER DEFAULT 0 + ); +CREATE INDEX idx_search_term ON ClicklogSearch (term_id); +CREATE INDEX idx_search_torrent ON ClicklogSearch (torrent_id); + + +CREATE TABLE ClicklogTerm ( + term_id INTEGER PRIMARY KEY AUTOINCREMENT DEFAULT 0, + term VARCHAR(255) NOT NULL, + times_seen INTEGER DEFAULT 0 NOT NULL + ); +CREATE INDEX idx_terms_term ON ClicklogTerm(term); + +""" + + self.execute_write(sql, commit=False) + # updating version stepwise so if this works, we store it + # regardless of later, potentially failing updates + self.writeDBVersion(2, commit=False) + self.commit() + + +class SQLiteCacheDB(SQLiteCacheDBV2): + __single = None # used for multithreaded singletons pattern + lock = threading.RLock() + + @classmethod + def getInstance(cls, *args, **kw): + # Singleton pattern with double-checking to ensure that it can only create one object + if cls.__single is None: + cls.lock.acquire() + try: + if cls.__single is None: + cls.__single = cls(*args, **kw) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SqliteCacheDB: getInstance: created is",cls,cls.__single + finally: + cls.lock.release() + return cls.__single + + def __init__(self, *args, **kargs): + # always use getInstance() to create this object + + # ARNOCOMMENT: why isn't the lock used on this read?! 
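getInstance() above is the double-checked locking idiom: an unlocked fast path, then a second check while holding the class-level RLock so that two threads making the first call cannot both construct the object. The same shape in isolation, with an illustrative class name:

    import threading

    class Singleton(object):
        _single = None
        _lock = threading.RLock()

        @classmethod
        def getInstance(cls, *args, **kw):
            if cls._single is None:            # cheap, unlocked check
                cls._lock.acquire()
                try:
                    if cls._single is None:    # re-check under the lock
                        cls._single = cls(*args, **kw)
                finally:
                    cls._lock.release()
            return cls._single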
+ + if self.__single != None: + raise RuntimeError, "SQLiteCacheDB is singleton" + SQLiteCacheDBBase.__init__(self, *args, **kargs) + + +def convert_db(bsddb_dir, dbfile_path, sql_filename): + # Jie: here I can convert the database created by the new Core version, but + # what we should consider is to convert the database created by the old version + # under .Tribler directory. + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "sqldb: start converting db from", bsddb_dir, "to", dbfile_path + from bsddb2sqlite import Bsddb2Sqlite + bsddb2sqlite = Bsddb2Sqlite(bsddb_dir, dbfile_path, sql_filename) + global icon_dir + return bsddb2sqlite.run(icon_dir=icon_dir) + +if __name__ == '__main__': + configure_dir = sys.argv[1] + config = {} + config['state_dir'] = configure_dir + config['install_dir'] = u'.' + config['peer_icon_path'] = u'.' + sqlite_test = init(config) + sqlite_test.test() + diff --git a/tribler-mod/Tribler/Core/CacheDB/sqlitecachedb.py.bak b/tribler-mod/Tribler/Core/CacheDB/sqlitecachedb.py.bak new file mode 100644 index 0000000..00d9d03 --- /dev/null +++ b/tribler-mod/Tribler/Core/CacheDB/sqlitecachedb.py.bak @@ -0,0 +1,1007 @@ +# Written by Jie Yang +# see LICENSE.txt for license information + +import sys +import os +from time import sleep +from base64 import encodestring, decodestring +from unicode import dunno2unicode +import threading +from traceback import print_exc, print_stack + +from Tribler.__init__ import LIBRARYNAME + +# ONLY USE APSW >= 3.5.9-r1 +import apsw +#support_version = (3,5,9) +#support_version = (3,3,13) +#apsw_version = tuple([int(r) for r in apsw.apswversion().split('-')[0].split('.')]) +##print apsw_version +#assert apsw_version >= support_version, "Required APSW Version >= %d.%d.%d."%support_version + " But your version is %d.%d.%d.\n"%apsw_version + \ +# "Please download and install it from http://code.google.com/p/apsw/" + +CURRENT_MAIN_DB_VERSION = 2 + +CREATE_SQL_FILE = None +CREATE_SQL_FILE_POSTFIX = os.path.join(LIBRARYNAME, 'tribler_sdb_v'+str(CURRENT_MAIN_DB_VERSION)+'.sql') +DB_FILE_NAME = 'tribler.sdb' +DB_DIR_NAME = 'sqlite' # db file path = DB_DIR_NAME/DB_FILE_NAME +BSDDB_DIR_NAME = 'bsddb' +DEFAULT_BUSY_TIMEOUT = 10000 +MAX_SQL_BATCHED_TO_TRANSACTION = 1000 # don't change it unless carefully tested. 
A transaction with 1000 batched updates took 1.5 seconds +NULL = None +icon_dir = None +SHOW_ALL_EXECUTE = False +costs = [] +cost_reads = [] + +class Warning(Exception): + pass + +def init(config, db_exception_handler = None): + """ create sqlite database """ + global CREATE_SQL_FILE + global icon_dir + config_dir = config['state_dir'] + install_dir = config['install_dir'] + CREATE_SQL_FILE = os.path.join(install_dir,CREATE_SQL_FILE_POSTFIX) + sqlitedb = SQLiteCacheDB.getInstance(db_exception_handler) + + if config['superpeer']: + sqlite_db_path = ':memory:' + else: + sqlite_db_path = os.path.join(config_dir, DB_DIR_NAME, DB_FILE_NAME) + + + + bsddb_path = os.path.join(config_dir, BSDDB_DIR_NAME) + icon_dir = os.path.abspath(config['peer_icon_path']) + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","cachedb: init: SQL FILE",sqlite_db_path + + sqlitedb.initDB(sqlite_db_path, CREATE_SQL_FILE, bsddb_path) # the first place to create db in Tribler + return sqlitedb + +def done(config_dir): + SQLiteCacheDB.getInstance().close() + +def make_filename(config_dir,filename): + if config_dir is None: + return filename + else: + return os.path.join(config_dir,filename) + +def bin2str(bin): + # Full BASE64-encoded + return encodestring(bin).replace("\n","") + +def str2bin(str): + return decodestring(str) + +def print_exc_plus(): + """ + Print the usual traceback information, followed by a listing of all the + local variables in each frame. + http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52215 + http://initd.org/pub/software/pysqlite/apsw/3.3.13-r1/apsw.html#augmentedstacktraces + """ + + tb = sys.exc_info()[2] + stack = [] + + while tb: + stack.append(tb.tb_frame) + tb = tb.tb_next + + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Locals by frame, innermost last" + + for frame in stack: + print >> sys.stderr + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Frame %s in %s at line %s" % (frame.f_code.co_name, + frame.f_code.co_filename, + frame.f_lineno) + for key, value in frame.f_locals.items(): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "\t%20s = " % key, + #We have to be careful not to cause a new error in our error + #printer! Calling str() on an unknown object could cause an + #error we don't want. 
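# --- Illustration (not part of the patch) -------------------------------------
# print_exc_plus() collects every frame by following tb_next, then dumps each
# frame's locals, wrapping every value in try/except because rendering an
# arbitrary object can itself raise.  A minimal stand-alone version:
import sys, traceback

def dump_locals():
    tb = sys.exc_info()[2]
    stack = []
    while tb:                            # innermost frame ends up last
        stack.append(tb.tb_frame)
        tb = tb.tb_next
    traceback.print_exc()
    for frame in stack:
        print("Frame %s at line %d" % (frame.f_code.co_name, frame.f_lineno))
        for key, value in frame.f_locals.items():
            try:
                print("    %s = %r" % (key, value))
            except Exception:            # never let the error printer raise
                print("    %s = <unprintable>" % key)

if __name__ == "__main__":
    try:
        x = 42
        raise ValueError("boom")
    except ValueError:
        dump_locals()
# -------------------------------------------------------------------------------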
+ try: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", value + except: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "" + +class safe_dict(dict): + def __init__(self, *args, **kw): + self.lock = threading.RLock() + dict.__init__(self, *args, **kw) + + def __getitem__(self, key): + self.lock.acquire() + try: + return dict.__getitem__(self, key) + finally: + self.lock.release() + + def __setitem__(self, key, value): + self.lock.acquire() + try: + dict.__setitem__(self, key, value) + finally: + self.lock.release() + + def __delitem__(self, key): + self.lock.acquire() + try: + dict.__delitem__(self, key) + finally: + self.lock.release() + + def __contains__(self, key): + self.lock.acquire() + try: + return dict.__contains__(self, key) + finally: + self.lock.release() + + def values(self): + self.lock.acquire() + try: + return dict.values(self) + finally: + self.lock.release() + +class SQLiteCacheDBBase: + + def __init__(self,db_exception_handler=None): + self.exception_handler = db_exception_handler + self.cursor_table = safe_dict() # {thread_name:cur} + self.cache_transaction_table = safe_dict() # {thread_name:[sql] + self.class_variables = safe_dict({'db_path':None,'busytimeout':None}) # busytimeout is in milliseconds + + self.permid_id = safe_dict() + self.infohash_id = safe_dict() + self.show_execute = False + + #TODO: All global variables must be protected to be thread safe? + self.status_table = None + self.category_table = None + self.src_table = None + self.applied_pragma_sync_norm = False + + def __del__(self): + self.close() + + def close(self, clean=False): + # only close the connection object in this thread, don't close other thread's connection object + thread_name = threading.currentThread().getName() + cur = self.getCursor(create=False) + + if cur: + con = cur.getconnection() + cur.close() + con.close() + con = None + del self.cursor_table[thread_name] + if clean: # used for test suite + self.permid_id = safe_dict() + self.infohash_id = safe_dict() + self.exception_handler = None + self.class_variables = safe_dict({'db_path':None,'busytimeout':None}) + self.cursor_table = safe_dict() + self.cache_transaction_table = safe_dict() + + + # --------- static functions -------- + def getCursor(self, create=True): + thread_name = threading.currentThread().getName() + curs = self.cursor_table + cur = curs.get(thread_name, None) # return [cur, cur, lib] or None + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '-------------- getCursor::', len(curs), time(), curs.keys() + if cur is None and create: + self.openDB(self.class_variables['db_path'], self.class_variables['busytimeout']) # create a new db obj for this thread + cur = curs.get(thread_name) + + return cur + + def openDB(self, dbfile_path=None, busytimeout=DEFAULT_BUSY_TIMEOUT): + """ + Open a SQLite database. Only one and the same database can be opened. + @dbfile_path The path to store the database file. + Set dbfile_path=':memory:' to create a db in memory. + @busytimeout Set the maximum time, in milliseconds, that SQLite will wait if the database is locked. 
+ """ + + # already opened a db in this thread, reuse it + thread_name = threading.currentThread().getName() + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","sqlcachedb: openDB",dbfile_path,thread_name + if thread_name in self.cursor_table: + #assert dbfile_path == None or self.class_variables['db_path'] == dbfile_path + return self.cursor_table[thread_name] + + assert dbfile_path, "You must specify the path of database file" + + if dbfile_path.lower() != ':memory:': + db_dir,db_filename = os.path.split(dbfile_path) + if db_dir and not os.path.isdir(db_dir): + os.makedirs(db_dir) + + con = apsw.Connection(dbfile_path) + con.setbusytimeout(busytimeout) + + cur = con.cursor() + self.cursor_table[thread_name] = cur + + if not self.applied_pragma_sync_norm: + # http://www.sqlite.org/pragma.html + # When synchronous is NORMAL, the SQLite database engine will still + # pause at the most critical moments, but less often than in FULL + # mode. There is a very small (though non-zero) chance that a power + # failure at just the wrong time could corrupt the database in + # NORMAL mode. But in practice, you are more likely to suffer a + # catastrophic disk failure or some other unrecoverable hardware + # fault. + # + self.applied_pragma_sync_norm = True + cur.execute("PRAGMA synchronous = NORMAL;") + + return cur + + def createDBTable(self, sql_create_table, dbfile_path, busytimeout=DEFAULT_BUSY_TIMEOUT): + """ + Create a SQLite database. + @sql_create_table The sql statements to create tables in the database. + Every statement must end with a ';'. + @dbfile_path The path to store the database file. Set dbfile_path=':memory:' to creates a db in memory. + @busytimeout Set the maximum time, in milliseconds, that SQLite will wait if the database is locked. + Default = 10000 milliseconds + """ + cur = self.openDB(dbfile_path, busytimeout) + cur.execute(sql_create_table) # it is suggested to include begin & commit in the script + + def initDB(self, sqlite_filepath, + create_sql_filename = None, + bsddb_dirpath = None, + busytimeout = DEFAULT_BUSY_TIMEOUT, + check_version = True, + current_db_version = CURRENT_MAIN_DB_VERSION): + """ + Create and initialize a SQLite database given a sql script. + Only one db can be opened. If the given dbfile_path is different with the opened DB file, warn and exit + @configure_dir The directory containing 'bsddb' directory + @sql_filename The path of sql script to create the tables in the database + Every statement must end with a ';'. + @busytimeout Set the maximum time, in milliseconds, to wait and retry + if failed to acquire a lock. Default = 5000 milliseconds + """ + if create_sql_filename is None: + create_sql_filename=CREATE_SQL_FILE + try: + self.lock.acquire() + + # verify db path identity + class_db_path = self.class_variables['db_path'] + if sqlite_filepath is None: # reuse the opened db file? 
+ if class_db_path is not None: # yes, reuse it + # reuse the busytimeout + return self.openDB(class_db_path, self.class_variables['busytimeout']) + else: # no db file opened + raise Exception, "You must specify the path of database file when open it at the first time" + else: + if class_db_path is None: # the first time to open db path, store it + + if bsddb_dirpath != None and os.path.isdir(bsddb_dirpath) and not os.path.exists(sqlite_filepath): + self.convertFromBsd(bsddb_dirpath, sqlite_filepath, create_sql_filename) # only one chance to convert from bsddb + #print 'quit now' + #sys.exit(0) + # open the db if it exists (by converting from bsd) and is not broken, otherwise create a new one + # it will update the db if necessary by checking the version number + self.safelyOpenTriblerDB(sqlite_filepath, create_sql_filename, busytimeout, check_version=check_version, current_db_version=current_db_version) + + self.class_variables = {'db_path': sqlite_filepath, 'busytimeout': int(busytimeout)} + + return self.openDB() # return the cursor, won't reopen the db + + elif sqlite_filepath != class_db_path: # not the first time to open db path, check if it is the same + raise Exception, "Only one database file can be opened. You have opened %s and are trying to open %s." % (class_db_path, sqlite_filepath) + + finally: + self.lock.release() + + def safelyOpenTriblerDB(self, dbfile_path, sql_create, busytimeout=DEFAULT_BUSY_TIMEOUT, check_version=False, current_db_version=None): + """ + open the db if possible, otherwise create a new one + update the db if necessary by checking the version number + + safeOpenDB(): + try: + if sqlite db doesn't exist: + raise Error + open sqlite db + read sqlite_db_version + if sqlite_db_version dosen't exist: + raise Error + except: + close and delete sqlite db if possible + create new sqlite db file without sqlite_db_version + write sqlite_db_version at last + commit + open sqlite db + read sqlite_db_version + # must ensure these steps after except will not fail, otherwise force to exit + + if sqlite_db_version < current_db_version: + updateDB(sqlite_db_version, current_db_version) + commit + update sqlite_db_version at last + commit + """ + try: + if not os.path.isfile(dbfile_path): + raise Warning("No existing database found. 
Attempting to creating a new database %s" % repr(dbfile_path)) + + cur = self.openDB(dbfile_path, busytimeout) + if check_version: + sqlite_db_version = self.readDBVersion() + if sqlite_db_version == NULL or int(sqlite_db_version)<1: + raise NotImplementedError + except Exception, exception: + if isinstance(exception, Warning): + # user friendly warning to log the creation of a new database + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", exception + + else: + # user unfriendly exception message because something went wrong + print_exc() + + if os.path.isfile(dbfile_path): + self.close(clean=True) + os.remove(dbfile_path) + + if os.path.isfile(sql_create): + f = open(sql_create) + sql_create_tables = f.read() + f.close() + else: + raise Exception, "Cannot open sql script at %s" % os.path.realpath(sql_create) + + self.createDBTable(sql_create_tables, dbfile_path, busytimeout) + if check_version: + sqlite_db_version = self.readDBVersion() + + if check_version: + self.checkDB(sqlite_db_version, current_db_version) + + def report_exception(e): + #return # Jie: don't show the error window to bother users + if self.exception_handler != None: + self.exception_handler(e) + report_exception = staticmethod(report_exception) + + def checkDB(self, db_ver, curr_ver): + # read MyDB and check the version number. + if not db_ver or not curr_ver: + self.updateDB(db_ver,curr_ver) + return + db_ver = int(db_ver) + curr_ver = int(curr_ver) + #print "check db", db_ver, curr_ver + if db_ver != curr_ver: # TODO + self.updateDB(db_ver,curr_ver) + + def updateDB(self,db_ver,curr_ver): + pass #TODO + + def readDBVersion(self): + cur = self.getCursor() + sql = u"select value from MyInfo where entry='version'" + res = self.fetchone(sql) + if res: + find = list(res) + return find[0] # throw error if something wrong + else: + return None + + def writeDBVersion(self, version, commit=True): + sql = u"UPDATE MyInfo SET value=? WHERE entry='version'" + self.execute_write(sql, [version], commit=commit) + + def show_sql(self, switch): + # temporary show the sql executed + self.show_execute = switch + + # --------- generic functions ------------- + + def commit(self): + self.transaction() + + def _execute(self, sql, args=None): + cur = self.getCursor() + + if SHOW_ALL_EXECUTE or self.show_execute: + thread_name = threading.currentThread().getName() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '===', thread_name, '===\n', sql, '\n-----\n', args, '\n======\n' + try: + if args is None: + return cur.execute(sql) + else: + return cur.execute(sql, args) + except Exception, msg: + print_exc() + print_stack() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "cachedb: execute error:", Exception, msg + thread_name = threading.currentThread().getName() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '===', thread_name, '===\n', sql, '\n-----\n', args, '\n======\n' + #return None + # ARNODB: this is incorrect, it should reraise the exception + # such that _transaction can rollback or recommit. + # This bug already reported by Johan + raise msg + + + def execute_read(self, sql, args=None): + # this is only called for reading. 
If you want to write the db, always use execute_write or executemany + return self._execute(sql, args) + + def execute_write(self, sql, args=None, commit=True): + self.cache_transaction(sql, args) + if commit: + self.commit() + + def executemany(self, sql, args, commit=True): + + thread_name = threading.currentThread().getName() + if thread_name not in self.cache_transaction_table: + self.cache_transaction_table[thread_name] = [] + all = [(sql, arg) for arg in args] + self.cache_transaction_table[thread_name].extend(all) + + if commit: + self.commit() + + def cache_transaction(self, sql, args=None): + thread_name = threading.currentThread().getName() + if thread_name not in self.cache_transaction_table: + self.cache_transaction_table[thread_name] = [] + self.cache_transaction_table[thread_name].append((sql, args)) + + def transaction(self, sql=None, args=None): + if sql: + self.cache_transaction(sql, args) + + thread_name = threading.currentThread().getName() + + n = 0 + sql_full = '' + arg_list = [] + sql_queue = self.cache_transaction_table.get(thread_name,None) + if sql_queue: + while True: + try: + _sql,_args = sql_queue.pop(0) + except IndexError: + break + + _sql = _sql.strip() + if not _sql: + continue + if not _sql.endswith(';'): + _sql += ';' + sql_full += _sql + '\n' + if _args != None: + arg_list += list(_args) + n += 1 + + # if too many sql in cache, split them into batches to prevent processing and locking DB for a long time + # TODO: optimize the value of MAX_SQL_BATCHED_TO_TRANSACTION + if n % MAX_SQL_BATCHED_TO_TRANSACTION == 0: + self._transaction(sql_full, arg_list) + sql_full = '' + arg_list = [] + + self._transaction(sql_full, arg_list) + + def _transaction(self, sql, args=None): + if sql: + sql = 'BEGIN TRANSACTION; \n' + sql + 'COMMIT TRANSACTION;' + try: + self._execute(sql, args) + except Exception,e: + self.commit_retry_if_busy_or_rollback(e,0) + + def commit_retry_if_busy_or_rollback(self,e,tries): + """ + Arno: + SQL_BUSY errors happen at the beginning of the experiment, + very quickly after startup (e.g. 0.001 s), so the busy timeout + is not honoured for some reason. After the initial errors, + they no longer occur. + """ + if str(e).startswith("BusyError"): + try: + self._execute("COMMIT") + except Exception,e2: + if tries < 5: #self.max_commit_retries + # Spec is unclear whether next commit will also has + # 'busytimeout' seconds to try to get a write lock. + sleep(pow(2.0,tries+2)/100.0) + self.commit_retry_if_busy_or_rollback(e2,tries+1) + else: + self.rollback(tries) + raise Exception,e2 + else: + self.rollback(tries) + m = "cachedb: TRANSACTION ERROR "+threading.currentThread().getName()+' '+str(e) + raise Exception, m + + + def rollback(self, tries): + print_exc() + try: + self._execute("ROLLBACK") + except Exception, e: + # May be harmless, see above. Unfortunately they don't specify + # what the error is when an attempt is made to roll back + # an automatically rolled back transaction. 
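# --- Illustration (not part of the patch) -------------------------------------
# The write path above queues statements per thread, flushes them inside one
# BEGIN/COMMIT (in batches of MAX_SQL_BATCHED_TO_TRANSACTION), and retries the
# COMMIT with exponential backoff when SQLite reports a busy database:
# sleep(2**(tries+2)/100) seconds, i.e. 0.04 s, 0.08 s, 0.16 s, ... for at most
# five retries.  The backoff loop on its own, with commit_fn standing in for
# re-issuing "COMMIT" on the real cursor:
from time import sleep

def commit_with_backoff(commit_fn, is_busy_error, max_tries=5):
    tries = 0
    while True:
        try:
            return commit_fn()
        except Exception as e:
            if not is_busy_error(e) or tries >= max_tries:
                raise                    # give up; the caller rolls back
            sleep(pow(2.0, tries + 2) / 100.0)
            tries += 1

if __name__ == "__main__":
    steps = [Exception("BusyError"), Exception("BusyError"), "ok"]
    def commit_fn():
        step = steps.pop(0)
        if isinstance(step, Exception):
            raise step
        return step
    print(commit_with_backoff(commit_fn, lambda e: str(e).startswith("Busy")))
# -------------------------------------------------------------------------------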
+ m = "cachedb: ROLLBACK ERROR "+threading.currentThread().getName()+' '+str(e) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'SQLite Database', m + raise Exception, m + + + # -------- Write Operations -------- + def insert(self, table_name, commit=True, **argv): + if len(argv) == 1: + sql = 'INSERT INTO %s (%s) VALUES (?);'%(table_name, argv.keys()[0]) + else: + questions = '?,'*len(argv) + sql = 'INSERT INTO %s %s VALUES (%s);'%(table_name, tuple(argv.keys()), questions[:-1]) + self.execute_write(sql, argv.values(), commit) + + def insertMany(self, table_name, values, keys=None, commit=True): + """ values must be a list of tuples """ + + questions = u'?,'*len(values[0]) + if keys is None: + sql = u'INSERT INTO %s VALUES (%s);'%(table_name, questions[:-1]) + else: + sql = u'INSERT INTO %s %s VALUES (%s);'%(table_name, tuple(keys), questions[:-1]) + self.executemany(sql, values, commit=commit) + + def update(self, table_name, where=None, commit=True, **argv): + sql = u'UPDATE %s SET '%table_name + arg = [] + for k,v in argv.iteritems(): + if type(v) is tuple: + sql += u'%s %s ?,' % (k, v[0]) + arg.append(v[1]) + else: + sql += u'%s=?,' % k + arg.append(v) + sql = sql[:-1] + if where != None: + sql += u' where %s'%where + self.execute_write(sql, arg, commit) + + def delete(self, table_name, commit=True, **argv): + sql = u'DELETE FROM %s WHERE '%table_name + arg = [] + for k,v in argv.iteritems(): + if type(v) is tuple: + sql += u'%s %s ? AND ' % (k, v[0]) + arg.append(v[1]) + else: + sql += u'%s=? AND ' % k + arg.append(v) + sql = sql[:-5] + self.execute_write(sql, argv.values(), commit) + + # -------- Read Operations -------- + def size(self, table_name): + num_rec_sql = u"SELECT count(*) FROM %s;"%table_name + result = self.fetchone(num_rec_sql) + return result + + def fetchone(self, sql, args=None): + # returns NULL: if the result is null + # return None: if it doesn't found any match results + find = self.execute_read(sql, args) + if not find: + return NULL + else: + find = list(find) + if len(find) > 0: + find = find[0] + else: + return NULL + if len(find)>1: + return find + else: + return find[0] + + def fetchall(self, sql, args=None, retry=0): + res = self.execute_read(sql, args) + if res != None: + find = list(res) + return find + else: + return [] # should it return None? + + def getOne(self, table_name, value_name, where=None, conj='and', **kw): + """ value_name could be a string, a tuple of strings, or '*' + """ + + if isinstance(value_name, tuple): + value_names = u",".join(value_name) + elif isinstance(value_name, list): + value_names = u",".join(value_name) + else: + value_names = value_name + + if isinstance(table_name, tuple): + table_names = u",".join(table_name) + elif isinstance(table_name, list): + table_names = u",".join(table_name) + else: + table_names = table_name + + sql = u'select %s from %s'%(value_names, table_names) + + if where or kw: + sql += u' where ' + if where: + sql += where + if kw: + sql += u' %s '%conj + if kw: + arg = [] + for k,v in kw.iteritems(): + if type(v) is tuple: + operator = v[0] + arg.append(v[1]) + else: + operator = "=" + arg.append(v) + sql += u' %s %s ? 
' % (k, operator) + sql += conj + sql = sql[:-len(conj)] + else: + arg = None + + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'SQL: %s %s' % (sql, arg) + return self.fetchone(sql,arg) + + def getAll(self, table_name, value_name, where=None, group_by=None, having=None, order_by=None, limit=None, offset=None, conj='and', **kw): + """ value_name could be a string, or a tuple of strings + order by is represented as order_by + group by is represented as group_by + """ + if isinstance(value_name, tuple): + value_names = u",".join(value_name) + elif isinstance(value_name, list): + value_names = u",".join(value_name) + else: + value_names = value_name + + if isinstance(table_name, tuple): + table_names = u",".join(table_name) + elif isinstance(table_name, list): + table_names = u",".join(table_name) + else: + table_names = table_name + + sql = u'select %s from %s'%(value_names, table_names) + + if where or kw: + sql += u' where ' + if where: + sql += where + if kw: + sql += u' %s '%conj + if kw: + arg = [] + for k,v in kw.iteritems(): + if type(v) is tuple: + operator = v[0] + arg.append(v[1]) + else: + operator = "=" + arg.append(v) + + sql += u' %s %s ?' % (k, operator) + sql += conj + sql = sql[:-len(conj)] + else: + arg = None + + if group_by != None: + sql += u' group by ' + group_by + if having != None: + sql += u' having ' + having + if order_by != None: + sql += u' order by ' + order_by # you should add desc after order_by to reversely sort, i.e, 'last_seen desc' as order_by + if limit != None: + sql += u' limit %d'%limit + if offset != None: + sql += u' offset %d'%offset + + try: + return self.fetchall(sql, arg) or [] + except Exception, msg: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "sqldb: Wrong getAll sql statement:", sql + raise Exception, msg + + # ----- Tribler DB operations ---- + + def convertFromBsd(self, bsddb_dirpath, dbfile_path, sql_filename, delete_bsd=False): + # convert bsddb data to sqlite db. return false if cannot find or convert the db + peerdb_filepath = os.path.join(bsddb_dirpath, 'peers.bsd') + if not os.path.isfile(peerdb_filepath): + return False + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "sqldb: ************ convert bsddb to sqlite", sql_filename + converted = convert_db(bsddb_dirpath, dbfile_path, sql_filename) + if converted is True and delete_bsd is True: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "sqldb: delete bsddb directory" + for filename in os.listdir(bsddb_dirpath): + if filename.endswith('.bsd'): + abs_path = os.path.join(bsddb_dirpath, filename) + os.remove(abs_path) + try: + os.removedirs(bsddb_dirpath) + except: # the dir is not empty + pass + + + #------------- useful functions for multiple handlers ---------- + def insertPeer(self, permid, update=True, commit=True, **argv): + """ Insert a peer. permid is the binary permid. + If the peer is already in db and update is True, update the peer. 
+ """ + peer_id = self.getPeerID(permid) + peer_existed = False + if 'name' in argv: + argv['name'] = dunno2unicode(argv['name']) + if peer_id != None: + peer_existed = True + if update: + where=u'peer_id=%d'%peer_id + self.update('Peer', where, commit=commit, **argv) + else: + self.insert('Peer', permid=bin2str(permid), commit=commit, **argv) + return peer_existed + + def deletePeer(self, permid=None, peer_id=None, force=True, commit=True): + if peer_id is None: + peer_id = self.getPeerID(permid) + + deleted = False + if peer_id != None: + if force: + self.delete('Peer', peer_id=peer_id, commit=commit) + else: + self.delete('Peer', peer_id=peer_id, friend=0, superpeer=0, commit=commit) + deleted = not self.hasPeer(permid, check_db=True) + if deleted and permid in self.permid_id: + self.permid_id.pop(permid) + + return deleted + + def getPeerID(self, permid): + assert isinstance(permid, str), permid + # permid must be binary + if permid in self.permid_id: + return self.permid_id[permid] + + sql_get_peer_id = "SELECT peer_id FROM Peer WHERE permid==?" + peer_id = self.fetchone(sql_get_peer_id, (bin2str(permid),)) + if peer_id != None: + self.permid_id[permid] = peer_id + + return peer_id + + def hasPeer(self, permid, check_db=False): + if not check_db: + return bool(self.getPeerID(permid)) + else: + permid_str = bin2str(permid) + sql_get_peer_id = "SELECT peer_id FROM Peer WHERE permid==?" + peer_id = self.fetchone(sql_get_peer_id, (permid_str,)) + if peer_id is None: + return False + else: + return True + + def insertInfohash(self, infohash, check_dup=False, commit=True): + """ Insert an infohash. infohash is binary """ + + if infohash in self.infohash_id: + if check_dup: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'sqldb: infohash to insert already exists', `infohash` + return + + infohash_str = bin2str(infohash) + sql_insert_torrent = "INSERT INTO Torrent (infohash) VALUES (?)" + try: + self.execute_write(sql_insert_torrent, (infohash_str,), commit) + except sqlite.IntegrityError, msg: + if check_dup: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'sqldb:', sqlite.IntegrityError, msg, `infohash` + + def deleteInfohash(self, infohash=None, torrent_id=None, commit=True): + if torrent_id is None: + torrent_id = self.getTorrentID(infohash) + + if torrent_id != None: + self.delete('Torrent', torrent_id=torrent_id, commit=commit) + if infohash in self.infohash_id: + self.infohash_id.pop(infohash) + + def getTorrentID(self, infohash): + assert isinstance(infohash, str), infohash + if infohash in self.infohash_id: + return self.infohash_id[infohash] + + sql_get_torrent_id = "SELECT torrent_id FROM Torrent WHERE infohash==?" + tid = self.fetchone(sql_get_torrent_id, (bin2str(infohash),)) + if tid != None: + self.infohash_id[infohash] = tid + return tid + + def getInfohash(self, torrent_id): + sql_get_infohash = "SELECT infohash FROM Torrent WHERE torrent_id==?" 
+ arg = (torrent_id,) + ret = self.fetchone(sql_get_infohash, arg) + ret = str2bin(ret) + return ret + + def getTorrentStatusTable(self): + if self.status_table is None: + st = self.getAll('TorrentStatus', ('lower(name)', 'status_id')) + self.status_table = dict(st) + return self.status_table + + def getTorrentCategoryTable(self): + # The key is in lower case + if self.category_table is None: + ct = self.getAll('Category', ('lower(name)', 'category_id')) + self.category_table = dict(ct) + return self.category_table + + def getTorrentSourceTable(self): + # Don't use lower case because some URLs are case sensitive + if self.src_table is None: + st = self.getAll('TorrentSource', ('name', 'source_id')) + self.src_table = dict(st) + return self.src_table + + def test(self): + res1 = self.getAll('Category', '*') + res2 = len(self.getAll('Peer', 'name', 'name is not NULL')) + return (res1, res2) + +class SQLiteCacheDBV2(SQLiteCacheDBBase): + def updateDB(self, fromver, tover): + # bring database up to version 2, if necessary + if fromver < 2: + sql = """ +-- Patch for Moderation and VoteCast + +CREATE TABLE ModerationCast ( +mod_id text, +mod_name text, +infohash text not NULL, +time_stamp integer, +media_type text, +quality text, +tags text, +signature integer +); + +CREATE INDEX moderationcast_idx +ON ModerationCast +(mod_id); + +---------------------------------------- + +CREATE TABLE Moderators ( +mod_id integer, +status integer, +time_stamp integer +); + +CREATE UNIQUE INDEX moderators_idx +ON Moderators +(mod_id); + +---------------------------------------- + +CREATE TABLE VoteCast ( +mod_id text, +voter_id integer, +vote text, +time_stamp integer +); + +CREATE UNIQUE INDEX votecast_idx +ON VoteCast +(mod_id, voter_id); + + +-- Patch for BuddyCast 4 + +ALTER TABLE MyPreference ADD COLUMN click_position INTEGER DEFAULT -1; +ALTER TABLE MyPreference ADD COLUMN reranking_strategy INTEGER DEFAULT -1; +ALTER TABLE Preference ADD COLUMN click_position INTEGER DEFAULT -1; +ALTER TABLE Preference ADD COLUMN reranking_strategy INTEGER DEFAULT -1; +CREATE TABLE ClicklogSearch ( + peer_id INTEGER DEFAULT 0, + torrent_id INTEGER DEFAULT 0, + term_id INTEGER DEFAULT 0, + term_order INTEGER DEFAULT 0 + ); +CREATE INDEX idx_search_term ON ClicklogSearch (term_id); +CREATE INDEX idx_search_torrent ON ClicklogSearch (torrent_id); + + +CREATE TABLE ClicklogTerm ( + term_id INTEGER PRIMARY KEY AUTOINCREMENT DEFAULT 0, + term VARCHAR(255) NOT NULL, + times_seen INTEGER DEFAULT 0 NOT NULL + ); +CREATE INDEX idx_terms_term ON ClicklogTerm(term); + +""" + + self.execute_write(sql, commit=False) + # updating version stepwise so if this works, we store it + # regardless of later, potentially failing updates + self.writeDBVersion(2, commit=False) + self.commit() + + +class SQLiteCacheDB(SQLiteCacheDBV2): + __single = None # used for multithreaded singletons pattern + lock = threading.RLock() + + @classmethod + def getInstance(cls, *args, **kw): + # Singleton pattern with double-checking to ensure that it can only create one object + if cls.__single is None: + cls.lock.acquire() + try: + if cls.__single is None: + cls.__single = cls(*args, **kw) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SqliteCacheDB: getInstance: created is",cls,cls.__single + finally: + cls.lock.release() + return cls.__single + + def __init__(self, *args, **kargs): + # always use getInstance() to create this object + + # ARNOCOMMENT: why isn't the lock used on this read?! 
+ + if self.__single != None: + raise RuntimeError, "SQLiteCacheDB is singleton" + SQLiteCacheDBBase.__init__(self, *args, **kargs) + + +def convert_db(bsddb_dir, dbfile_path, sql_filename): + # Jie: here I can convert the database created by the new Core version, but + # what we should consider is to convert the database created by the old version + # under .Tribler directory. + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "sqldb: start converting db from", bsddb_dir, "to", dbfile_path + from bsddb2sqlite import Bsddb2Sqlite + bsddb2sqlite = Bsddb2Sqlite(bsddb_dir, dbfile_path, sql_filename) + global icon_dir + return bsddb2sqlite.run(icon_dir=icon_dir) + +if __name__ == '__main__': + configure_dir = sys.argv[1] + config = {} + config['state_dir'] = configure_dir + config['install_dir'] = u'.' + config['peer_icon_path'] = u'.' + sqlite_test = init(config) + sqlite_test.test() + diff --git a/tribler-mod/Tribler/Core/CacheDB/unicode.py b/tribler-mod/Tribler/Core/CacheDB/unicode.py new file mode 100644 index 0000000..43542d2 --- /dev/null +++ b/tribler-mod/Tribler/Core/CacheDB/unicode.py @@ -0,0 +1,86 @@ +from time import localtime, strftime +# Written by Jie Yang +# see LICENSE.txt for license information + +# Arno: why not use Tribler/Core/Utilities/unicode.py + +import sys + +def bin2unicode(bin,possible_encoding='utf_8'): + sysenc = sys.getfilesystemencoding() + if possible_encoding is None: + possible_encoding = sysenc + try: + return bin.decode(possible_encoding) + except: + try: + if possible_encoding == sysenc: + raise + return bin.decode(sysenc) + except: + try: + return bin.decode('utf_8') + except: + try: + return bin.decode('iso-8859-1') + except: + try: + return bin.decode(sys.getfilesystemencoding()) + except: + return bin.decode(sys.getdefaultencoding(), errors = 'replace') + + +def str2unicode(s): + try: + s = unicode(s) + except: + flag = 0 + for encoding in [sys.getfilesystemencoding(), 'utf_8', 'iso-8859-1', 'unicode-escape' ]: + try: + s = unicode(s, encoding) + flag = 1 + break + except: + pass + if flag == 0: + try: + s = unicode(s,sys.getdefaultencoding(), errors = 'replace') + except: + pass + return s + +def dunno2unicode(dunno): + newdunno = None + if isinstance(dunno,unicode): + newdunno = dunno + else: + try: + newdunno = bin2unicode(dunno) + except: + newdunno = str2unicode(dunno) + return newdunno + + +def name2unicode(metadata): + if metadata['info'].has_key('name.utf-8'): + namekey = 'name.utf-8' + else: + namekey = 'name' + if metadata.has_key('encoding'): + encoding = metadata['encoding'] + metadata['info'][namekey] = bin2unicode(metadata['info'][namekey],encoding) + else: + metadata['info'][namekey] = bin2unicode(metadata['info'][namekey]) + + # change metainfo['info']['name'] to metainfo['info'][namekey], just in case... 
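# --- Illustration (not part of the patch) -------------------------------------
# bin2unicode()/str2unicode()/dunno2unicode() above decode arbitrary byte
# strings by trying a cascade of codecs (the caller's hint, the filesystem
# encoding, utf_8, iso-8859-1) before falling back to a lossy decode with
# errors='replace'.  The same idea condensed into one loop (Python 2, like the
# module itself):
import sys

def best_effort_unicode(raw, hint=None):
    if isinstance(raw, unicode):
        return raw
    for enc in (hint, sys.getfilesystemencoding(), 'utf_8', 'iso-8859-1'):
        if not enc:
            continue
        try:
            return raw.decode(enc)
        except (UnicodeDecodeError, LookupError):
            continue
    return raw.decode(sys.getdefaultencoding(), 'replace')  # last resort, lossy

if __name__ == "__main__":
    print(repr(best_effort_unicode('caf\xc3\xa9')))            # valid UTF-8
    print(repr(best_effort_unicode('caf\xe9', hint='utf_8')))  # falls back to latin-1
# -------------------------------------------------------------------------------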
+ # roer888 TODO: Never tested the following 2 lines + if namekey != 'name': + metadata['info']['name'] = metadata['info'][namekey ] + + return namekey + + +def unicode2str(s): + if not isinstance(s,unicode): + return s + return s.encode(sys.getfilesystemencoding()) \ No newline at end of file diff --git a/tribler-mod/Tribler/Core/CacheDB/unicode.py.bak b/tribler-mod/Tribler/Core/CacheDB/unicode.py.bak new file mode 100644 index 0000000..9129fb4 --- /dev/null +++ b/tribler-mod/Tribler/Core/CacheDB/unicode.py.bak @@ -0,0 +1,85 @@ +# Written by Jie Yang +# see LICENSE.txt for license information + +# Arno: why not use Tribler/Core/Utilities/unicode.py + +import sys + +def bin2unicode(bin,possible_encoding='utf_8'): + sysenc = sys.getfilesystemencoding() + if possible_encoding is None: + possible_encoding = sysenc + try: + return bin.decode(possible_encoding) + except: + try: + if possible_encoding == sysenc: + raise + return bin.decode(sysenc) + except: + try: + return bin.decode('utf_8') + except: + try: + return bin.decode('iso-8859-1') + except: + try: + return bin.decode(sys.getfilesystemencoding()) + except: + return bin.decode(sys.getdefaultencoding(), errors = 'replace') + + +def str2unicode(s): + try: + s = unicode(s) + except: + flag = 0 + for encoding in [sys.getfilesystemencoding(), 'utf_8', 'iso-8859-1', 'unicode-escape' ]: + try: + s = unicode(s, encoding) + flag = 1 + break + except: + pass + if flag == 0: + try: + s = unicode(s,sys.getdefaultencoding(), errors = 'replace') + except: + pass + return s + +def dunno2unicode(dunno): + newdunno = None + if isinstance(dunno,unicode): + newdunno = dunno + else: + try: + newdunno = bin2unicode(dunno) + except: + newdunno = str2unicode(dunno) + return newdunno + + +def name2unicode(metadata): + if metadata['info'].has_key('name.utf-8'): + namekey = 'name.utf-8' + else: + namekey = 'name' + if metadata.has_key('encoding'): + encoding = metadata['encoding'] + metadata['info'][namekey] = bin2unicode(metadata['info'][namekey],encoding) + else: + metadata['info'][namekey] = bin2unicode(metadata['info'][namekey]) + + # change metainfo['info']['name'] to metainfo['info'][namekey], just in case... 
+ # roer888 TODO: Never tested the following 2 lines + if namekey != 'name': + metadata['info']['name'] = metadata['info'][namekey ] + + return namekey + + +def unicode2str(s): + if not isinstance(s,unicode): + return s + return s.encode(sys.getfilesystemencoding()) \ No newline at end of file diff --git a/tribler-mod/Tribler/Core/CoopDownload/Coordinator.py b/tribler-mod/Tribler/Core/CoopDownload/Coordinator.py new file mode 100644 index 0000000..6331913 --- /dev/null +++ b/tribler-mod/Tribler/Core/CoopDownload/Coordinator.py @@ -0,0 +1,274 @@ +from time import localtime, strftime +# Written by Pawel Garbacki, Arno Bakker +# see LICENSE.txt for license information +# +# TODO: when DOWNLOAD_HELP cannot be sent, mark this in the interface + +from traceback import print_exc +import copy +import sys +from threading import Lock + +from Tribler.Core.Overlay.OverlayThreadingBridge import OverlayThreadingBridge +from Tribler.Core.Utilities.utilities import show_permid_short +from Tribler.Core.BitTornado.bencode import bencode +from Tribler.Core.BitTornado.BT1.MessageID import DOWNLOAD_HELP, STOP_DOWNLOAD_HELP, PIECES_RESERVED + +DEBUG = False +MAX_ROUNDS = 137 + + +class Coordinator: + + def __init__(self, infohash, num_pieces): + self.reserved_pieces = [False] * num_pieces + self.infohash = infohash # readonly so no locking on this + + self.lock = Lock() + self.asked_helpers = [] # protected by lock + # optimization + self.reserved = [] + self.overlay_bridge = OverlayThreadingBridge.getInstance() + + # + # Interface for Core API. + # + def network_request_help(self,peerList,force = False): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlhelp: REQUESTING HELP FROM",peerList + self.lock.acquire() + try: + toask_helpers = [] + if force: + toask_helpers = peerList + else: + # Who in peerList has not been asked already? 
+ for cand in peerList: + flag = 0 + for asked in self.asked_helpers: + if self.samePeer(cand,asked): + flag = 1 + break + if flag == 0: + toask_helpers.append(cand) + + permidlist = [] + for peer in toask_helpers: + peer['round'] = 0 + permidlist.append(peer['permid']) + self.asked_helpers.extend(toask_helpers) + self.network_send_request_help(permidlist) + except Exception,e: + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helpcoord: Exception while requesting help",e + self.lock.release() + + def network_send_request_help(self,permidlist): + olthread_send_request_help_lambda = lambda:self.olthread_send_request_help(permidlist) + self.overlay_bridge.add_task(olthread_send_request_help_lambda,0) + + def olthread_send_request_help(self,permidlist): + for permid in permidlist: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlhelp: Coordinator connecting to",show_permid_short(permid),"for help" + self.overlay_bridge.connect(permid,self.olthread_request_help_connect_callback) + + def olthread_request_help_connect_callback(self,exc,dns,permid,selversion): + if exc is None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlhelp: Coordinator sending to",show_permid_short(permid) + ## Create message according to protocol version + dlhelp_request = self.infohash + self.overlay_bridge.send(permid, DOWNLOAD_HELP + dlhelp_request,self.olthread_request_help_send_callback) + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlhelp: DOWNLOAD_HELP: error connecting to",show_permid_short(permid),exc + self.olthread_remove_unreachable_helper(permid) + + def olthread_request_help_send_callback(self,exc,permid): + if exc is not None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlhelp: DOWNLOAD_HELP: error sending to",show_permid_short(permid),exc + self.olthread_remove_unreachable_helper(permid) + + + def olthread_remove_unreachable_helper(self,permid): + # Remove peer that we could not connect to from asked helpers + self.lock.acquire() + try: + newlist = [] + for peer in self.asked_helpers: + if peer['permid'] != permid: + newlist.append(peer) + self.asked_helpers = newlist + finally: + self.lock.release() + + + def network_stop_help(self,peerList, force = False): + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlhelp: STOPPING HELP FROM",peerList + self.lock.acquire() + try: + if force: + tostop_helpers = peerList + else: + # Who in the peerList is actually a helper currently? + tostop_helpers = [] + for cand in peerList: + for asked in self.asked_helpers: + if self.samePeer(cand,asked): + tostop_helpers.append(cand) + break + + # Who of the actual helpers gets to stay? 
+ tokeep_helpers = [] + for asked in self.asked_helpers: + flag = 0 + for cand in tostop_helpers: + if self.samePeer(cand,asked): + flag = 1 + break + if flag == 0: + tokeep_helpers.append(asked) + + permidlist = [] + for peer in tostop_helpers: + permidlist.append(peer['permid']) + + self.network_send_stop_help(permidlist) + self.asked_helpers = tokeep_helpers + finally: + self.lock.release() + + #def stop_all_help(self): + # self.send_stop_help(self.asked_helpers) + # self.asked_helpers = [] + + def network_send_stop_help(self,permidlist): + olthread_send_stop_help_lambda = lambda:self.olthread_send_stop_help(permidlist) + self.overlay_bridge.add_task(olthread_send_stop_help_lambda,0) + + def olthread_send_stop_help(self,permidlist): + for permid in permidlist: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlhelp: Coordinator connecting to",show_permid_short(permid),"for stopping help" + self.overlay_bridge.connect(permid,self.olthread_stop_help_connect_callback) + + def olthread_stop_help_connect_callback(self,exc,dns,permid,selversion): + if exc is None: + ## Create message according to protocol version + stop_request = self.infohash + self.overlay_bridge.send(permid,STOP_DOWNLOAD_HELP + stop_request,self.olthread_stop_help_send_callback) + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlhelp: STOP_DOWNLOAD_HELP: error connecting to",show_permid_short(permid),exc + + def olthread_stop_help_send_callback(self,exc,permid): + if exc is not None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlhelp: STOP_DOWNLOAD_HELP: error sending to",show_permid_short(permid),exc + + + def network_get_asked_helpers_copy(self): + """ Returns a COPY of the list. 
We need 'before' and 'after' info here, + so the caller is not allowed to update the current asked_helpers """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlhelp: Coordinator: Asked helpers is #",len(self.asked_helpers) + self.lock.acquire() + try: + return copy.deepcopy(self.asked_helpers) + finally: + self.lock.release() + + + def samePeer(self,a,b): + if a.has_key('permid'): + if b.has_key('permid'): + if a['permid'] == b['permid']: + return True + if a['ip'] == b['ip'] and a['port'] == b['port']: + return True + else: + return False + + + # + # Interface for CoordinatorMessageHandler + # + def network_is_helper_permid(self, permid): + """ Used by CoordinatorMessageHandler to check if RESERVE_PIECES is from good source """ + # called by overlay thread + for peer in self.asked_helpers: + if peer['permid'] == permid: + return True + return False + + def network_got_reserve_pieces(self,permid,pieces,all_or_nothing,selversion): + self.lock.acquire() + try: + reserved_pieces = self._network_reserve_pieces(pieces, all_or_nothing) + for peer in self.asked_helpers: + if peer['permid'] == permid: + peer['round'] = (peer['round'] + 1) % MAX_ROUNDS + if peer['round'] == 0: + reserved_pieces.extend(self.network_get_reserved()) + self.network_send_pieces_reserved(permid,reserved_pieces,selversion) + finally: + self.lock.release() + + def _network_reserve_pieces(self, pieces, all_or_nothing = False): + try: + new_reserved = [] + for piece in pieces: + if not self.reserved_pieces[piece]: + new_reserved.append(piece) + if not all_or_nothing: + self.reserved_pieces[piece] = True + self.reserved.append(-piece) + elif all_or_nothing: # there is no point of continuing + new_reserved = [] + break + if all_or_nothing: + for piece in new_reserved: + self.reserved_pieces[piece] = True + self.reserved.append(-piece) + except Exception, e: + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helpcoord: Exception in reserve_pieces",e + return new_reserved + + def network_get_reserved(self): + return self.reserved + + def network_send_pieces_reserved(self, permid, pieces, selversion): + olthread_send_pieces_reserved_lambda = lambda:self.olthread_send_pieces_reserved(permid,pieces,selversion) + self.overlay_bridge.add_task(olthread_send_pieces_reserved_lambda,0) + + def olthread_send_pieces_reserved(self, permid, pieces, selversion): + ## Create message according to protocol version + payload = self.infohash + bencode(pieces) + # Optimization: we know we're connected + self.overlay_bridge.send(permid, PIECES_RESERVED + payload,self.olthread_pieces_reserved_send_callback) + + def olthread_pieces_reserved_send_callback(self,exc,permid): + if exc is not None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlhelp: PIECES_RESERVED: error sending to",show_permid_short(permid),exc + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlhelp: PIECES_RESERVED: Successfully sent to",show_permid_short(permid) + + + # + # Interface for Encrypter.Connection + # + def is_helper_ip(self, ip): + """ Used by Coordinator's Downloader (via Encrypter) to see what + connections are helpers """ + # called by network thread + self.lock.acquire() + try: + for peer in self.asked_helpers: + if peer['ip'] == ip: + return True + return False + finally: + self.lock.release() diff --git a/tribler-mod/Tribler/Core/CoopDownload/Coordinator.py.bak b/tribler-mod/Tribler/Core/CoopDownload/Coordinator.py.bak new 
file mode 100644 index 0000000..63c3ef9 --- /dev/null +++ b/tribler-mod/Tribler/Core/CoopDownload/Coordinator.py.bak @@ -0,0 +1,273 @@ +# Written by Pawel Garbacki, Arno Bakker +# see LICENSE.txt for license information +# +# TODO: when DOWNLOAD_HELP cannot be sent, mark this in the interface + +from traceback import print_exc +import copy +import sys +from threading import Lock + +from Tribler.Core.Overlay.OverlayThreadingBridge import OverlayThreadingBridge +from Tribler.Core.Utilities.utilities import show_permid_short +from Tribler.Core.BitTornado.bencode import bencode +from Tribler.Core.BitTornado.BT1.MessageID import DOWNLOAD_HELP, STOP_DOWNLOAD_HELP, PIECES_RESERVED + +DEBUG = False +MAX_ROUNDS = 137 + + +class Coordinator: + + def __init__(self, infohash, num_pieces): + self.reserved_pieces = [False] * num_pieces + self.infohash = infohash # readonly so no locking on this + + self.lock = Lock() + self.asked_helpers = [] # protected by lock + # optimization + self.reserved = [] + self.overlay_bridge = OverlayThreadingBridge.getInstance() + + # + # Interface for Core API. + # + def network_request_help(self,peerList,force = False): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlhelp: REQUESTING HELP FROM",peerList + self.lock.acquire() + try: + toask_helpers = [] + if force: + toask_helpers = peerList + else: + # Who in peerList has not been asked already? + for cand in peerList: + flag = 0 + for asked in self.asked_helpers: + if self.samePeer(cand,asked): + flag = 1 + break + if flag == 0: + toask_helpers.append(cand) + + permidlist = [] + for peer in toask_helpers: + peer['round'] = 0 + permidlist.append(peer['permid']) + self.asked_helpers.extend(toask_helpers) + self.network_send_request_help(permidlist) + except Exception,e: + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helpcoord: Exception while requesting help",e + self.lock.release() + + def network_send_request_help(self,permidlist): + olthread_send_request_help_lambda = lambda:self.olthread_send_request_help(permidlist) + self.overlay_bridge.add_task(olthread_send_request_help_lambda,0) + + def olthread_send_request_help(self,permidlist): + for permid in permidlist: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlhelp: Coordinator connecting to",show_permid_short(permid),"for help" + self.overlay_bridge.connect(permid,self.olthread_request_help_connect_callback) + + def olthread_request_help_connect_callback(self,exc,dns,permid,selversion): + if exc is None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlhelp: Coordinator sending to",show_permid_short(permid) + ## Create message according to protocol version + dlhelp_request = self.infohash + self.overlay_bridge.send(permid, DOWNLOAD_HELP + dlhelp_request,self.olthread_request_help_send_callback) + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlhelp: DOWNLOAD_HELP: error connecting to",show_permid_short(permid),exc + self.olthread_remove_unreachable_helper(permid) + + def olthread_request_help_send_callback(self,exc,permid): + if exc is not None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlhelp: DOWNLOAD_HELP: error sending to",show_permid_short(permid),exc + self.olthread_remove_unreachable_helper(permid) + + + def olthread_remove_unreachable_helper(self,permid): + # Remove peer that we could not connect to from asked helpers + self.lock.acquire() + try: + 
newlist = [] + for peer in self.asked_helpers: + if peer['permid'] != permid: + newlist.append(peer) + self.asked_helpers = newlist + finally: + self.lock.release() + + + def network_stop_help(self,peerList, force = False): + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlhelp: STOPPING HELP FROM",peerList + self.lock.acquire() + try: + if force: + tostop_helpers = peerList + else: + # Who in the peerList is actually a helper currently? + tostop_helpers = [] + for cand in peerList: + for asked in self.asked_helpers: + if self.samePeer(cand,asked): + tostop_helpers.append(cand) + break + + # Who of the actual helpers gets to stay? + tokeep_helpers = [] + for asked in self.asked_helpers: + flag = 0 + for cand in tostop_helpers: + if self.samePeer(cand,asked): + flag = 1 + break + if flag == 0: + tokeep_helpers.append(asked) + + permidlist = [] + for peer in tostop_helpers: + permidlist.append(peer['permid']) + + self.network_send_stop_help(permidlist) + self.asked_helpers = tokeep_helpers + finally: + self.lock.release() + + #def stop_all_help(self): + # self.send_stop_help(self.asked_helpers) + # self.asked_helpers = [] + + def network_send_stop_help(self,permidlist): + olthread_send_stop_help_lambda = lambda:self.olthread_send_stop_help(permidlist) + self.overlay_bridge.add_task(olthread_send_stop_help_lambda,0) + + def olthread_send_stop_help(self,permidlist): + for permid in permidlist: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlhelp: Coordinator connecting to",show_permid_short(permid),"for stopping help" + self.overlay_bridge.connect(permid,self.olthread_stop_help_connect_callback) + + def olthread_stop_help_connect_callback(self,exc,dns,permid,selversion): + if exc is None: + ## Create message according to protocol version + stop_request = self.infohash + self.overlay_bridge.send(permid,STOP_DOWNLOAD_HELP + stop_request,self.olthread_stop_help_send_callback) + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlhelp: STOP_DOWNLOAD_HELP: error connecting to",show_permid_short(permid),exc + + def olthread_stop_help_send_callback(self,exc,permid): + if exc is not None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlhelp: STOP_DOWNLOAD_HELP: error sending to",show_permid_short(permid),exc + + + def network_get_asked_helpers_copy(self): + """ Returns a COPY of the list. 
We need 'before' and 'after' info here, + so the caller is not allowed to update the current asked_helpers """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlhelp: Coordinator: Asked helpers is #",len(self.asked_helpers) + self.lock.acquire() + try: + return copy.deepcopy(self.asked_helpers) + finally: + self.lock.release() + + + def samePeer(self,a,b): + if a.has_key('permid'): + if b.has_key('permid'): + if a['permid'] == b['permid']: + return True + if a['ip'] == b['ip'] and a['port'] == b['port']: + return True + else: + return False + + + # + # Interface for CoordinatorMessageHandler + # + def network_is_helper_permid(self, permid): + """ Used by CoordinatorMessageHandler to check if RESERVE_PIECES is from good source """ + # called by overlay thread + for peer in self.asked_helpers: + if peer['permid'] == permid: + return True + return False + + def network_got_reserve_pieces(self,permid,pieces,all_or_nothing,selversion): + self.lock.acquire() + try: + reserved_pieces = self._network_reserve_pieces(pieces, all_or_nothing) + for peer in self.asked_helpers: + if peer['permid'] == permid: + peer['round'] = (peer['round'] + 1) % MAX_ROUNDS + if peer['round'] == 0: + reserved_pieces.extend(self.network_get_reserved()) + self.network_send_pieces_reserved(permid,reserved_pieces,selversion) + finally: + self.lock.release() + + def _network_reserve_pieces(self, pieces, all_or_nothing = False): + try: + new_reserved = [] + for piece in pieces: + if not self.reserved_pieces[piece]: + new_reserved.append(piece) + if not all_or_nothing: + self.reserved_pieces[piece] = True + self.reserved.append(-piece) + elif all_or_nothing: # there is no point of continuing + new_reserved = [] + break + if all_or_nothing: + for piece in new_reserved: + self.reserved_pieces[piece] = True + self.reserved.append(-piece) + except Exception, e: + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helpcoord: Exception in reserve_pieces",e + return new_reserved + + def network_get_reserved(self): + return self.reserved + + def network_send_pieces_reserved(self, permid, pieces, selversion): + olthread_send_pieces_reserved_lambda = lambda:self.olthread_send_pieces_reserved(permid,pieces,selversion) + self.overlay_bridge.add_task(olthread_send_pieces_reserved_lambda,0) + + def olthread_send_pieces_reserved(self, permid, pieces, selversion): + ## Create message according to protocol version + payload = self.infohash + bencode(pieces) + # Optimization: we know we're connected + self.overlay_bridge.send(permid, PIECES_RESERVED + payload,self.olthread_pieces_reserved_send_callback) + + def olthread_pieces_reserved_send_callback(self,exc,permid): + if exc is not None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlhelp: PIECES_RESERVED: error sending to",show_permid_short(permid),exc + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlhelp: PIECES_RESERVED: Successfully sent to",show_permid_short(permid) + + + # + # Interface for Encrypter.Connection + # + def is_helper_ip(self, ip): + """ Used by Coordinator's Downloader (via Encrypter) to see what + connections are helpers """ + # called by network thread + self.lock.acquire() + try: + for peer in self.asked_helpers: + if peer['ip'] == ip: + return True + return False + finally: + self.lock.release() diff --git a/tribler-mod/Tribler/Core/CoopDownload/CoordinatorMessageHandler.py 
b/tribler-mod/Tribler/Core/CoopDownload/CoordinatorMessageHandler.py new file mode 100644 index 0000000..6b85cf0 --- /dev/null +++ b/tribler-mod/Tribler/Core/CoopDownload/CoordinatorMessageHandler.py @@ -0,0 +1,59 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information +# +# SecureOverlay message handler for a Coordinator +# +import sys + +from Tribler.Core.BitTornado.bencode import bdecode +from Tribler.Core.BitTornado.BT1.MessageID import * +from Tribler.Core.Utilities.utilities import show_permid_short +from Tribler.Core.simpledefs import * + +DEBUG = False + +class CoordinatorMessageHandler: + def __init__(self,launchmany): + self.launchmany = launchmany + + #def register(self): + + def handleMessage(self,permid,selversion,message): + t = message[0] + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helpcoord: Got",getMessageName(t) + + if t == RESERVE_PIECES: + return self.got_reserve_pieces(permid, message, selversion) + + def got_reserve_pieces(self, permid, message,selversion): + try: + infohash = message[1:21] + all_or_nothing = message[21] + pieces = bdecode(message[22:]) + except: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "warning: bad data in RESERVE_PIECES" + return False + + network_got_reserve_pieces_lambda = lambda:self.network_got_reserve_pieces(permid,infohash,pieces,all_or_nothing,selversion) + self.launchmany.rawserver.add_task(network_got_reserve_pieces_lambda,0) + + return True + + + def network_got_reserve_pieces(self,permid,infohash,pieces,all_or_nothing,selversion): + # Called by network thread + + c = self.launchmany.get_coopdl_role_object(infohash,COOPDL_ROLE_COORDINATOR) + if c is None: + return + + ## FIXME: if he's not a helper, but thinks he is, we better send him + ## a STOP_DOWNLOAD_HELP (again) + if not c.network_is_helper_permid(permid): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helpcoord: Ignoring RESERVE_PIECES from non-helper",show_permid_short(permid) + return + + c.network_got_reserve_pieces(permid, pieces, all_or_nothing, selversion) diff --git a/tribler-mod/Tribler/Core/CoopDownload/CoordinatorMessageHandler.py.bak b/tribler-mod/Tribler/Core/CoopDownload/CoordinatorMessageHandler.py.bak new file mode 100644 index 0000000..b15dd5e --- /dev/null +++ b/tribler-mod/Tribler/Core/CoopDownload/CoordinatorMessageHandler.py.bak @@ -0,0 +1,58 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information +# +# SecureOverlay message handler for a Coordinator +# +import sys + +from Tribler.Core.BitTornado.bencode import bdecode +from Tribler.Core.BitTornado.BT1.MessageID import * +from Tribler.Core.Utilities.utilities import show_permid_short +from Tribler.Core.simpledefs import * + +DEBUG = False + +class CoordinatorMessageHandler: + def __init__(self,launchmany): + self.launchmany = launchmany + + #def register(self): + + def handleMessage(self,permid,selversion,message): + t = message[0] + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helpcoord: Got",getMessageName(t) + + if t == RESERVE_PIECES: + return self.got_reserve_pieces(permid, message, selversion) + + def got_reserve_pieces(self, permid, message,selversion): + try: + infohash = message[1:21] + all_or_nothing = message[21] + pieces = bdecode(message[22:]) + except: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "warning: bad data in RESERVE_PIECES" + return False + + 
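# --- Illustration (not part of the patch) -------------------------------------
# got_reserve_pieces() above fixes the RESERVE_PIECES layout: a one-byte
# message id, a 20-byte infohash, a single all_or_nothing flag byte, then the
# bencoded piece list.  Packing and unpacking that layout, reusing the bencode
# and MessageID modules shipped elsewhere in this patch (the sender side in
# Helper.py is assumed to build the message the same way):
from Tribler.Core.BitTornado.bencode import bencode, bdecode
from Tribler.Core.BitTornado.BT1.MessageID import RESERVE_PIECES

def pack_reserve_pieces(infohash, pieces, all_or_nothing=False):
    assert len(infohash) == 20
    return RESERVE_PIECES + infohash + chr(int(all_or_nothing)) + bencode(pieces)

def unpack_reserve_pieces(message):
    infohash = message[1:21]        # same slicing as the handler above
    all_or_nothing = message[21]    # raw flag byte, left as-is like the handler
    pieces = bdecode(message[22:])
    return infohash, all_or_nothing, pieces

if __name__ == "__main__":
    msg = pack_reserve_pieces('\x00' * 20, [3, 1, 4], all_or_nothing=True)
    print(repr(unpack_reserve_pieces(msg)))
# -------------------------------------------------------------------------------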
network_got_reserve_pieces_lambda = lambda:self.network_got_reserve_pieces(permid,infohash,pieces,all_or_nothing,selversion) + self.launchmany.rawserver.add_task(network_got_reserve_pieces_lambda,0) + + return True + + + def network_got_reserve_pieces(self,permid,infohash,pieces,all_or_nothing,selversion): + # Called by network thread + + c = self.launchmany.get_coopdl_role_object(infohash,COOPDL_ROLE_COORDINATOR) + if c is None: + return + + ## FIXME: if he's not a helper, but thinks he is, we better send him + ## a STOP_DOWNLOAD_HELP (again) + if not c.network_is_helper_permid(permid): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helpcoord: Ignoring RESERVE_PIECES from non-helper",show_permid_short(permid) + return + + c.network_got_reserve_pieces(permid, pieces, all_or_nothing, selversion) diff --git a/tribler-mod/Tribler/Core/CoopDownload/Helper.py b/tribler-mod/Tribler/Core/CoopDownload/Helper.py new file mode 100644 index 0000000..3f54447 --- /dev/null +++ b/tribler-mod/Tribler/Core/CoopDownload/Helper.py @@ -0,0 +1,309 @@ +from time import localtime, strftime +# Written by Pawel Garbacki +# see LICENSE.txt for license information + +import sys +from traceback import print_exc +from time import time + + +from Tribler.Core.BitTornado.bencode import bencode +from Tribler.Core.BitTornado.BT1.MessageID import RESERVE_PIECES + +from Tribler.Core.Overlay.OverlayThreadingBridge import OverlayThreadingBridge +from Tribler.Core.CacheDB.CacheDBHandler import PeerDBHandler +from Tribler.Core.Utilities.utilities import show_permid_short + +MAX_ROUNDS = 200 +DEBUG = False + +class SingleDownloadHelperInterface: + """ This interface should contain all methods that the PiecePiecker/Helper + calls on the SingleDownload class. 
+ """ + def __init__(self): + self.frozen_by_helper = False + + def helper_set_freezing(self,val): + self.frozen_by_helper = val + + def is_frozen_by_helper(self): + return self.frozen_by_helper + + def is_choked(self): + pass + + def helper_forces_unchoke(self): + pass + + def _request_more(self, new_unchoke = False): + pass + + +class Helper: + def __init__(self, torrent_hash, num_pieces, coordinator_permid, coordinator = None): + self.overlay_bridge = OverlayThreadingBridge.getInstance() + self.torrent_hash = torrent_hash + if coordinator_permid is not None and coordinator_permid == '': + self.coordinator_permid = None + else: + self.coordinator_permid = coordinator_permid + self.coordinator_ip = None # see is_coordinator() + self.coordinator_port = -1 + + if self.coordinator_permid is not None: + peerdb = PeerDBHandler.getInstance() + peer = peerdb.getPeer(coordinator_permid) + if peer is not None: + self.coordinator_ip = peer['ip'] + self.coordinator_port = peer['port'] + + self.reserved_pieces = [False] * num_pieces + self.ignored_pieces = [False] * num_pieces + self.coordinator = coordinator + self.counter = 0 + self.completed = False + self.distr_reserved_pieces = [False] * num_pieces + self.marker = [True] * num_pieces + self.round = 0 + self.encoder = None + self.continuations = [] + self.outstanding = None + self.last_req_time = 0 + + def set_encoder(self,encoder): + self.encoder = encoder + self.encoder.set_coordinator_ip(self.coordinator_ip) + # To support a helping user stopping and restarting a torrent + if self.coordinator_permid is not None: + self.start_data_connection() + + def test(self): + result = self.reserve_piece(10,None) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","reserve piece returned: " + str(result) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Test passed" + + def _reserve_piece(self, piece): + self.reserved_pieces[piece] = True + self.distr_reserved_pieces[piece] = True + self.ignored_pieces[piece] = False + + def _ignore_piece(self, piece): + if not self.is_reserved(piece): + self.ignored_pieces[piece] = True + self.distr_reserved_pieces[piece] = True + + def is_coordinator(self,permid): + # If we could get coordinator_ip, don't help + if self.coordinator_ip is None: + return False + + if self.coordinator_permid == permid: + return True + else: + return False + + def get_coordinator_permid(self): + return self.coordinator_permid + + # + # Interface for PiecePicker and Downloader + # + # Called by network thread + def is_reserved(self, piece): + if self.reserved_pieces[piece] or (self.coordinator is not None and self.is_complete()): + return True + return self.reserved_pieces[piece] + + def is_ignored(self, piece): + if not self.ignored_pieces[piece] or (self.coordinator is not None and self.is_complete()): + return False + return self.ignored_pieces[piece] + + def is_complete(self): + if self.completed: + return True + self.round = (self.round + 1) % MAX_ROUNDS + if self.round != 0: + return False + if self.coordinator is not None: + self.completed = (self.coordinator.reserved_pieces == self.marker) + else: + self.completed = (self.distr_reserved_pieces == self.marker) + return self.completed + + def reserve_pieces(self, pieces, sdownload, all_or_nothing = False): + pieces_to_send = [] + ex = "None" + result = [] + for piece in pieces: + if self.is_reserved(piece): + result.append(piece) + elif not self.is_ignored(piece): + pieces_to_send.append(piece) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y 
%H:%M:%S", localtime())," ","helper: reserve_pieces: result is",result,"to_send is",pieces_to_send + + if pieces_to_send == []: + return result + if self.coordinator is not None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helper: reserve_pieces: I am coordinator, calling self.coordinator.reserve_pieces" + new_reserved_pieces = self.coordinator.network_reserve_pieces(pieces_to_send, all_or_nothing) + for piece in new_reserved_pieces: + self._reserve_piece(piece) + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helper: reserve_pieces: sending remote reservation request" + self.send_or_queue_reservation(sdownload,pieces_to_send,result) + return [] + + result = [] + for piece in pieces: + if self.is_reserved(piece): + result.append(piece) + else: + self._ignore_piece(piece) + return result + + def reserve_piece(self, piece, sdownload): + if self.coordinator is not None and self.is_complete(): + return True + new_reserved_pieces = self.reserve_pieces([piece],sdownload) + if new_reserved_pieces == []: + return False + else: + return True + + +## Synchronization interface + + def send_or_queue_reservation(self,sdownload,pieces_to_send,result): + """ Records the fact that a SingleDownload wants to reserve a + piece with the coordinator. If it's the first, send the + actual reservation request. + """ + if sdownload not in self.continuations: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helper: Queuing reservation for",pieces_to_send + self.continuations.append(sdownload) + sdownload.helper_set_freezing(True) + if len(self.continuations) > 0: + self.send_reservation(pieces_to_send) + + def send_reservation(self,pieces_to_send): + # Arno: I sometimes see no reply to a RESERVE_PIECE and the client + # stops acquiring new pieces. The last_req_time is supposed + # to fix this. + waited = int(time())-self.last_req_time + if self.outstanding is None or waited > 60: + self.counter += 1 + self.last_req_time = int(time()) + if DEBUG: + if self.outstanding is None: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helper: Sending reservation for",pieces_to_send,"because none" + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helper: Sending reservation for",pieces_to_send,"because timeout" + sdownload = self.continuations.pop(0) + if self.outstanding is not None: # allow bypassed conn to restart + self.outstanding.helper_set_freezing(False) + self.outstanding = sdownload + ex = "self.send_reserve_pieces(pieces_to_send)" + self.send_reserve_pieces(pieces_to_send) + + + def notify(self): + """ Called by HelperMessageHandler to "wake up" the download that's + waiting for its coordinator to reserve it a piece + """ + if self.outstanding is None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helper: notify: No continuation waiting???" + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helper: notify: Waking downloader" + sdownload = self.outstanding + self.outstanding = None # must be not before calling self.restart! + self.restart(sdownload) + + #self.send_reservation() + l = self.continuations[:] # copy just to be sure + self.continuations = [] + for sdownload in l: + self.restart(sdownload) + + def restart(self,sdownload): + # Chokes can get in while we're waiting for reply from coordinator. 
+ # But as we were called from _request_more() we were not choked + # just before, so pretend we didn't see the message yet. + if sdownload.is_choked(): + sdownload.helper_forces_unchoke() + sdownload.helper_set_freezing(False) + sdownload._request_more() + +## Coordinator comm. + def send_reserve_pieces(self, pieces, all_or_nothing = False): + # called by network thread, delegate to overlay thread + olthread_send_reserve_pieces_lambda = lambda:self.olthread_send_reserve_pieces(pieces,all_or_nothing) + self.overlay_bridge.add_task(olthread_send_reserve_pieces_lambda,0) + + def olthread_send_reserve_pieces(self, pieces, all_or_nothing = False): + # We need to create the message according to protocol version, so need + # to pass all info. + olthread_reserve_pieces_connect_callback_lambda = lambda e,d,p,s:self.olthread_reserve_pieces_connect_callback(e,d,p,s,pieces,all_or_nothing) + self.overlay_bridge.connect(self.coordinator_permid,olthread_reserve_pieces_connect_callback_lambda) + + def olthread_reserve_pieces_connect_callback(self,exc,dns,permid,selversion,pieces,all_or_nothing): + if exc is None: + ## Create message according to protocol version + if all_or_nothing: + all_or_nothing = chr(1) + else: + all_or_nothing = chr(0) + payload = self.torrent_hash + all_or_nothing + bencode(pieces) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helper: RESERVE_PIECES: Sending!!!!!!!!!!!!!",show_permid_short(permid) + + self.overlay_bridge.send(permid, RESERVE_PIECES + payload,self.olthread_reserve_pieces_send_callback) + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helper: RESERVE_PIECES: error connecting to",show_permid_short(permid),exc + + def olthread_reserve_pieces_send_callback(self,exc,permid): + if exc is not None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helper: RESERVE_PIECES: error sending to",show_permid_short(permid),exc + + + # + # Interface for HelperMessageHandler + # + # All called by network thread + # + def got_pieces_reserved(self, permid, pieces): + self.handle_pieces_reserved(pieces) + self.start_data_connection() + + def handle_pieces_reserved(self,pieces): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helper: Coordinator replied",pieces + try: + for piece in pieces: + if piece > 0: + self._reserve_piece(piece) + else: + self._ignore_piece(-piece) + self.counter -= 1 + + except Exception,e: + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helper: Exception in handle_pieces_reserved",e + + def start_data_connection(self): + # Do this always, will return quickly when connection already exists + dns = (self.coordinator_ip, self.coordinator_port) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helper: Starting data connection to coordinator",dns + self.encoder.start_connection(dns,id = None,coord_con = True) + diff --git a/tribler-mod/Tribler/Core/CoopDownload/Helper.py.bak b/tribler-mod/Tribler/Core/CoopDownload/Helper.py.bak new file mode 100644 index 0000000..5b0d80c --- /dev/null +++ b/tribler-mod/Tribler/Core/CoopDownload/Helper.py.bak @@ -0,0 +1,308 @@ +# Written by Pawel Garbacki +# see LICENSE.txt for license information + +import sys +from traceback import print_exc +from time import time + + +from Tribler.Core.BitTornado.bencode import bencode +from Tribler.Core.BitTornado.BT1.MessageID import RESERVE_PIECES + +from 
Tribler.Core.Overlay.OverlayThreadingBridge import OverlayThreadingBridge +from Tribler.Core.CacheDB.CacheDBHandler import PeerDBHandler +from Tribler.Core.Utilities.utilities import show_permid_short + +MAX_ROUNDS = 200 +DEBUG = False + +class SingleDownloadHelperInterface: + """ This interface should contain all methods that the PiecePiecker/Helper + calls on the SingleDownload class. + """ + def __init__(self): + self.frozen_by_helper = False + + def helper_set_freezing(self,val): + self.frozen_by_helper = val + + def is_frozen_by_helper(self): + return self.frozen_by_helper + + def is_choked(self): + pass + + def helper_forces_unchoke(self): + pass + + def _request_more(self, new_unchoke = False): + pass + + +class Helper: + def __init__(self, torrent_hash, num_pieces, coordinator_permid, coordinator = None): + self.overlay_bridge = OverlayThreadingBridge.getInstance() + self.torrent_hash = torrent_hash + if coordinator_permid is not None and coordinator_permid == '': + self.coordinator_permid = None + else: + self.coordinator_permid = coordinator_permid + self.coordinator_ip = None # see is_coordinator() + self.coordinator_port = -1 + + if self.coordinator_permid is not None: + peerdb = PeerDBHandler.getInstance() + peer = peerdb.getPeer(coordinator_permid) + if peer is not None: + self.coordinator_ip = peer['ip'] + self.coordinator_port = peer['port'] + + self.reserved_pieces = [False] * num_pieces + self.ignored_pieces = [False] * num_pieces + self.coordinator = coordinator + self.counter = 0 + self.completed = False + self.distr_reserved_pieces = [False] * num_pieces + self.marker = [True] * num_pieces + self.round = 0 + self.encoder = None + self.continuations = [] + self.outstanding = None + self.last_req_time = 0 + + def set_encoder(self,encoder): + self.encoder = encoder + self.encoder.set_coordinator_ip(self.coordinator_ip) + # To support a helping user stopping and restarting a torrent + if self.coordinator_permid is not None: + self.start_data_connection() + + def test(self): + result = self.reserve_piece(10,None) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","reserve piece returned: " + str(result) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Test passed" + + def _reserve_piece(self, piece): + self.reserved_pieces[piece] = True + self.distr_reserved_pieces[piece] = True + self.ignored_pieces[piece] = False + + def _ignore_piece(self, piece): + if not self.is_reserved(piece): + self.ignored_pieces[piece] = True + self.distr_reserved_pieces[piece] = True + + def is_coordinator(self,permid): + # If we could get coordinator_ip, don't help + if self.coordinator_ip is None: + return False + + if self.coordinator_permid == permid: + return True + else: + return False + + def get_coordinator_permid(self): + return self.coordinator_permid + + # + # Interface for PiecePicker and Downloader + # + # Called by network thread + def is_reserved(self, piece): + if self.reserved_pieces[piece] or (self.coordinator is not None and self.is_complete()): + return True + return self.reserved_pieces[piece] + + def is_ignored(self, piece): + if not self.ignored_pieces[piece] or (self.coordinator is not None and self.is_complete()): + return False + return self.ignored_pieces[piece] + + def is_complete(self): + if self.completed: + return True + self.round = (self.round + 1) % MAX_ROUNDS + if self.round != 0: + return False + if self.coordinator is not None: + self.completed = (self.coordinator.reserved_pieces == self.marker) + else: + self.completed = 
(self.distr_reserved_pieces == self.marker) + return self.completed + + def reserve_pieces(self, pieces, sdownload, all_or_nothing = False): + pieces_to_send = [] + ex = "None" + result = [] + for piece in pieces: + if self.is_reserved(piece): + result.append(piece) + elif not self.is_ignored(piece): + pieces_to_send.append(piece) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helper: reserve_pieces: result is",result,"to_send is",pieces_to_send + + if pieces_to_send == []: + return result + if self.coordinator is not None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helper: reserve_pieces: I am coordinator, calling self.coordinator.reserve_pieces" + new_reserved_pieces = self.coordinator.network_reserve_pieces(pieces_to_send, all_or_nothing) + for piece in new_reserved_pieces: + self._reserve_piece(piece) + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helper: reserve_pieces: sending remote reservation request" + self.send_or_queue_reservation(sdownload,pieces_to_send,result) + return [] + + result = [] + for piece in pieces: + if self.is_reserved(piece): + result.append(piece) + else: + self._ignore_piece(piece) + return result + + def reserve_piece(self, piece, sdownload): + if self.coordinator is not None and self.is_complete(): + return True + new_reserved_pieces = self.reserve_pieces([piece],sdownload) + if new_reserved_pieces == []: + return False + else: + return True + + +## Synchronization interface + + def send_or_queue_reservation(self,sdownload,pieces_to_send,result): + """ Records the fact that a SingleDownload wants to reserve a + piece with the coordinator. If it's the first, send the + actual reservation request. + """ + if sdownload not in self.continuations: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helper: Queuing reservation for",pieces_to_send + self.continuations.append(sdownload) + sdownload.helper_set_freezing(True) + if len(self.continuations) > 0: + self.send_reservation(pieces_to_send) + + def send_reservation(self,pieces_to_send): + # Arno: I sometimes see no reply to a RESERVE_PIECE and the client + # stops acquiring new pieces. The last_req_time is supposed + # to fix this. + waited = int(time())-self.last_req_time + if self.outstanding is None or waited > 60: + self.counter += 1 + self.last_req_time = int(time()) + if DEBUG: + if self.outstanding is None: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helper: Sending reservation for",pieces_to_send,"because none" + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helper: Sending reservation for",pieces_to_send,"because timeout" + sdownload = self.continuations.pop(0) + if self.outstanding is not None: # allow bypassed conn to restart + self.outstanding.helper_set_freezing(False) + self.outstanding = sdownload + ex = "self.send_reserve_pieces(pieces_to_send)" + self.send_reserve_pieces(pieces_to_send) + + + def notify(self): + """ Called by HelperMessageHandler to "wake up" the download that's + waiting for its coordinator to reserve it a piece + """ + if self.outstanding is None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helper: notify: No continuation waiting???" 
+ else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helper: notify: Waking downloader" + sdownload = self.outstanding + self.outstanding = None # must be not before calling self.restart! + self.restart(sdownload) + + #self.send_reservation() + l = self.continuations[:] # copy just to be sure + self.continuations = [] + for sdownload in l: + self.restart(sdownload) + + def restart(self,sdownload): + # Chokes can get in while we're waiting for reply from coordinator. + # But as we were called from _request_more() we were not choked + # just before, so pretend we didn't see the message yet. + if sdownload.is_choked(): + sdownload.helper_forces_unchoke() + sdownload.helper_set_freezing(False) + sdownload._request_more() + +## Coordinator comm. + def send_reserve_pieces(self, pieces, all_or_nothing = False): + # called by network thread, delegate to overlay thread + olthread_send_reserve_pieces_lambda = lambda:self.olthread_send_reserve_pieces(pieces,all_or_nothing) + self.overlay_bridge.add_task(olthread_send_reserve_pieces_lambda,0) + + def olthread_send_reserve_pieces(self, pieces, all_or_nothing = False): + # We need to create the message according to protocol version, so need + # to pass all info. + olthread_reserve_pieces_connect_callback_lambda = lambda e,d,p,s:self.olthread_reserve_pieces_connect_callback(e,d,p,s,pieces,all_or_nothing) + self.overlay_bridge.connect(self.coordinator_permid,olthread_reserve_pieces_connect_callback_lambda) + + def olthread_reserve_pieces_connect_callback(self,exc,dns,permid,selversion,pieces,all_or_nothing): + if exc is None: + ## Create message according to protocol version + if all_or_nothing: + all_or_nothing = chr(1) + else: + all_or_nothing = chr(0) + payload = self.torrent_hash + all_or_nothing + bencode(pieces) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helper: RESERVE_PIECES: Sending!!!!!!!!!!!!!",show_permid_short(permid) + + self.overlay_bridge.send(permid, RESERVE_PIECES + payload,self.olthread_reserve_pieces_send_callback) + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helper: RESERVE_PIECES: error connecting to",show_permid_short(permid),exc + + def olthread_reserve_pieces_send_callback(self,exc,permid): + if exc is not None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helper: RESERVE_PIECES: error sending to",show_permid_short(permid),exc + + + # + # Interface for HelperMessageHandler + # + # All called by network thread + # + def got_pieces_reserved(self, permid, pieces): + self.handle_pieces_reserved(pieces) + self.start_data_connection() + + def handle_pieces_reserved(self,pieces): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helper: Coordinator replied",pieces + try: + for piece in pieces: + if piece > 0: + self._reserve_piece(piece) + else: + self._ignore_piece(-piece) + self.counter -= 1 + + except Exception,e: + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helper: Exception in handle_pieces_reserved",e + + def start_data_connection(self): + # Do this always, will return quickly when connection already exists + dns = (self.coordinator_ip, self.coordinator_port) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helper: Starting data connection to coordinator",dns + self.encoder.start_connection(dns,id = None,coord_con = True) + diff --git 
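[Editor's note] The Helper above decodes the coordinator's PIECES_RESERVED reply by sign: positive piece indices were granted to this helper and are reserved locally, while negative values are pieces the coordinator has already handed out elsewhere (its reserved list is kept negated and is piggybacked onto the reply every MAX_ROUNDS requests) and are marked ignored. A minimal sketch of that convention, with an illustrative function name:

    # Sketch of the sign convention in PIECES_RESERVED replies (name is illustrative).
    def split_pieces_reserved(pieces):
        granted = []   # reserved for this helper -> Helper._reserve_piece()
        taken = []     # reserved by another helper -> Helper._ignore_piece()
        for piece in pieces:
            if piece > 0:
                granted.append(piece)
            else:
                # note: piece index 0 cannot be expressed as "granted" by this encoding
                taken.append(-piece)
        return granted, taken

    # Example: pieces 3 and 7 granted, piece 5 reported as taken elsewhere.
    print split_pieces_reserved([3, 7, -5])   # ([3, 7], [5])
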
a/tribler-mod/Tribler/Core/CoopDownload/HelperMessageHandler.py b/tribler-mod/Tribler/Core/CoopDownload/HelperMessageHandler.py new file mode 100644 index 0000000..b023a15 --- /dev/null +++ b/tribler-mod/Tribler/Core/CoopDownload/HelperMessageHandler.py @@ -0,0 +1,198 @@ +from time import localtime, strftime +# Written by Pawel Garbacki, Arno Bakker +# see LICENSE.txt for license information +# +# SecureOverlay message handler for a Helper +# + + +import sys, os +import binascii + +from Tribler.Core.TorrentDef import * +from Tribler.Core.Session import * +from Tribler.Core.simpledefs import * +from Tribler.Core.DownloadConfig import DownloadStartupConfig +from Tribler.Core.CacheDB.CacheDBHandler import TorrentDBHandler +from Tribler.Core.Utilities.utilities import show_permid_short +from Tribler.Core.BitTornado.bencode import bencode, bdecode +from Tribler.Core.BitTornado.BT1.MessageID import * + +DEBUG = False + +class HelperMessageHandler: + def __init__(self): + self.metadata_queue = {} + + def register(self,session,metadata_handler,helpdir,dlconfig): + self.session = session + self.helpdir = helpdir + # The default DownloadStartupConfig dict as set in the Session + self.dlconfig = dlconfig + self.torrent_db = TorrentDBHandler.getInstance() + self.metadata_handler = metadata_handler + + def handleMessage(self,permid,selversion,message): + t = message[0] + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helper: Got",getMessageName(t) + + if t == DOWNLOAD_HELP: + return self.got_dlhelp_request(permid, message, selversion) + elif t == STOP_DOWNLOAD_HELP: + return self.got_stop_dlhelp_request(permid, message, selversion) + elif t == PIECES_RESERVED: + return self.got_pieces_reserved(permid, message, selversion) + + + def got_dlhelp_request(self, permid, message,selversion): + try: + infohash = message[1:] + except: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helper: warning: bad data in dlhelp_request" + return False + + if len(infohash) != 20: + return False + + if not self.can_help(infohash): + return False + torrent_data = self.find_torrent(infohash) + if torrent_data: + self.do_help(infohash, torrent_data, permid) + else: + self.get_metadata(permid, infohash,selversion) + return True + + + # It is very important here that we create safe filenames, i.e., it should + # not be possible for a coordinator to send a METADATA message that causes + # important files to be overwritten + # + def do_help(self, infohash, torrent_data, permid): + + basename = binascii.hexlify(infohash)+'.torrent' # ignore .tribe stuff, not vital + torrentfilename = os.path.join(self.helpdir,basename) + + tfile = open(torrentfilename, "wb") + tfile.write(torrent_data) + tfile.close() + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helpmsg: Got metadata required for helping",show_permid_short(permid) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helpmsg: torrent: ",torrentfilename + + tdef = TorrentDef.load(torrentfilename) + if self.dlconfig is None: + dscfg = DownloadStartupConfig() + else: + dscfg = DownloadStartupConfig(self.dlconfig) + dscfg.set_coopdl_coordinator_permid(permid) + dscfg.set_dest_dir(self.helpdir) + + # Start new download + self.session.start_download(tdef,dscfg) + + def get_metadata(self, permid, infohash, selversion): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helpmsg: Don't have torrent yet, ask coordinator" + if not 
self.metadata_queue.has_key(infohash): + self.metadata_queue[infohash] = [] + self.metadata_queue[infohash].append(permid) + self.metadata_handler.send_metadata_request(permid, infohash, selversion,caller="dlhelp") + + def metadatahandler_received_torrent(self, infohash, torrent_data): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helpmsg: Metadata handler reports torrent is in." + if not self.metadata_queue.has_key(infohash) or not self.metadata_queue[infohash]: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helpmsg: Metadata handler reported a torrent we are not waiting for." + return + + for permid in self.metadata_queue[infohash]: + # only ask for metadata once + self.do_help(infohash, torrent_data, permid) + del self.metadata_queue[infohash] + + def can_help(self, infohash): #TODO: test if I can help the cordinator to download this file + return True #Future support: make the decision based on my preference + + def find_torrent(self, infohash): + torrent = self.torrent_db.getTorrent(infohash) + if torrent is None: + return None + elif 'torrent_dir' in torrent: + fn = torrent['torrent_dir'] + if os.path.isfile(fn): + f = open(fn,"rb") + data = f.read() + f.close() + return data + else: + return None + else: + return None + + + def got_stop_dlhelp_request(self, permid, message, selversion): + try: + infohash = message[1:] + except: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helper: warning: bad data in STOP_DOWNLOAD_HELP" + return False + + network_got_stop_dlhelp_lambda = lambda:self.network_got_stop_dlhelp(permid,message,selversion,infohash) + self.session.lm.rawserver.add_task(network_got_stop_dlhelp_lambda,0) + + # If the request is from a unauthorized peer, we close + # If the request is from an authorized peer (=coordinator) we close as + # well. 
So return False + return False + + + def network_got_stop_dlhelp(self,permid,message,selversion,infohash): + # Called by network thread + + h = self.session.lm.get_coopdl_role_object(infohash,COOPDL_ROLE_HELPER) + if h is None: + return + + if not h.is_coordinator(permid): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helpmsg: Got a STOP_DOWNLOAD_HELP message from non-coordinator",show_permid_short(permid) + return + + # Find and remove download + dlist = self.session.get_downloads() + for d in dlist: + if d.get_def().get_infohash() == infohash: + self.session.remove_download(d) + break + + def got_pieces_reserved(self,permid, message, selversion): + try: + infohash = message[1:21] + pieces = bdecode(message[21:]) + except: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helper: warning: bad data in PIECES_RESERVED message" + return False + + network_got_pieces_reserved_lambda = lambda:self.network_got_pieces_reserved(permid,message,selversion,infohash,pieces) + self.session.lm.rawserver.add_task(network_got_pieces_reserved_lambda,0) + + return True + + def network_got_pieces_reserved(self,permid,message,selversion,infohash,pieces): + # Called by network thread + + h = self.session.lm.get_coopdl_role_object(infohash,COOPDL_ROLE_HELPER) + if h is None: + return + + if not h.is_coordinator(permid): + return + + h.got_pieces_reserved(permid, pieces) + # Wake up download thread + h.notify() + diff --git a/tribler-mod/Tribler/Core/CoopDownload/HelperMessageHandler.py.bak b/tribler-mod/Tribler/Core/CoopDownload/HelperMessageHandler.py.bak new file mode 100644 index 0000000..8f61648 --- /dev/null +++ b/tribler-mod/Tribler/Core/CoopDownload/HelperMessageHandler.py.bak @@ -0,0 +1,197 @@ +# Written by Pawel Garbacki, Arno Bakker +# see LICENSE.txt for license information +# +# SecureOverlay message handler for a Helper +# + + +import sys, os +import binascii + +from Tribler.Core.TorrentDef import * +from Tribler.Core.Session import * +from Tribler.Core.simpledefs import * +from Tribler.Core.DownloadConfig import DownloadStartupConfig +from Tribler.Core.CacheDB.CacheDBHandler import TorrentDBHandler +from Tribler.Core.Utilities.utilities import show_permid_short +from Tribler.Core.BitTornado.bencode import bencode, bdecode +from Tribler.Core.BitTornado.BT1.MessageID import * + +DEBUG = False + +class HelperMessageHandler: + def __init__(self): + self.metadata_queue = {} + + def register(self,session,metadata_handler,helpdir,dlconfig): + self.session = session + self.helpdir = helpdir + # The default DownloadStartupConfig dict as set in the Session + self.dlconfig = dlconfig + self.torrent_db = TorrentDBHandler.getInstance() + self.metadata_handler = metadata_handler + + def handleMessage(self,permid,selversion,message): + t = message[0] + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helper: Got",getMessageName(t) + + if t == DOWNLOAD_HELP: + return self.got_dlhelp_request(permid, message, selversion) + elif t == STOP_DOWNLOAD_HELP: + return self.got_stop_dlhelp_request(permid, message, selversion) + elif t == PIECES_RESERVED: + return self.got_pieces_reserved(permid, message, selversion) + + + def got_dlhelp_request(self, permid, message,selversion): + try: + infohash = message[1:] + except: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helper: warning: bad data in dlhelp_request" + return False + + if len(infohash) != 20: + return False + + if not self.can_help(infohash): + return 
False + torrent_data = self.find_torrent(infohash) + if torrent_data: + self.do_help(infohash, torrent_data, permid) + else: + self.get_metadata(permid, infohash,selversion) + return True + + + # It is very important here that we create safe filenames, i.e., it should + # not be possible for a coordinator to send a METADATA message that causes + # important files to be overwritten + # + def do_help(self, infohash, torrent_data, permid): + + basename = binascii.hexlify(infohash)+'.torrent' # ignore .tribe stuff, not vital + torrentfilename = os.path.join(self.helpdir,basename) + + tfile = open(torrentfilename, "wb") + tfile.write(torrent_data) + tfile.close() + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helpmsg: Got metadata required for helping",show_permid_short(permid) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helpmsg: torrent: ",torrentfilename + + tdef = TorrentDef.load(torrentfilename) + if self.dlconfig is None: + dscfg = DownloadStartupConfig() + else: + dscfg = DownloadStartupConfig(self.dlconfig) + dscfg.set_coopdl_coordinator_permid(permid) + dscfg.set_dest_dir(self.helpdir) + + # Start new download + self.session.start_download(tdef,dscfg) + + def get_metadata(self, permid, infohash, selversion): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helpmsg: Don't have torrent yet, ask coordinator" + if not self.metadata_queue.has_key(infohash): + self.metadata_queue[infohash] = [] + self.metadata_queue[infohash].append(permid) + self.metadata_handler.send_metadata_request(permid, infohash, selversion,caller="dlhelp") + + def metadatahandler_received_torrent(self, infohash, torrent_data): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helpmsg: Metadata handler reports torrent is in." + if not self.metadata_queue.has_key(infohash) or not self.metadata_queue[infohash]: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helpmsg: Metadata handler reported a torrent we are not waiting for." + return + + for permid in self.metadata_queue[infohash]: + # only ask for metadata once + self.do_help(infohash, torrent_data, permid) + del self.metadata_queue[infohash] + + def can_help(self, infohash): #TODO: test if I can help the cordinator to download this file + return True #Future support: make the decision based on my preference + + def find_torrent(self, infohash): + torrent = self.torrent_db.getTorrent(infohash) + if torrent is None: + return None + elif 'torrent_dir' in torrent: + fn = torrent['torrent_dir'] + if os.path.isfile(fn): + f = open(fn,"rb") + data = f.read() + f.close() + return data + else: + return None + else: + return None + + + def got_stop_dlhelp_request(self, permid, message, selversion): + try: + infohash = message[1:] + except: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helper: warning: bad data in STOP_DOWNLOAD_HELP" + return False + + network_got_stop_dlhelp_lambda = lambda:self.network_got_stop_dlhelp(permid,message,selversion,infohash) + self.session.lm.rawserver.add_task(network_got_stop_dlhelp_lambda,0) + + # If the request is from a unauthorized peer, we close + # If the request is from an authorized peer (=coordinator) we close as + # well. 
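[Editor's note] A side note on the DOWNLOAD_HELP path shown in do_help() above: the helper derives the on-disk torrent filename solely from the hex digest of the 20-byte infohash, so a coordinator cannot choose a path and overwrite local files. A minimal sketch of that rule, with an illustrative function name:

    # Sketch of the safe-filename rule used by do_help() above.
    import os, binascii

    def safe_torrent_path(helpdir, infohash):
        assert len(infohash) == 20                      # same check as got_dlhelp_request()
        basename = binascii.hexlify(infohash) + '.torrent'
        return os.path.join(helpdir, basename)
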
So return False + return False + + + def network_got_stop_dlhelp(self,permid,message,selversion,infohash): + # Called by network thread + + h = self.session.lm.get_coopdl_role_object(infohash,COOPDL_ROLE_HELPER) + if h is None: + return + + if not h.is_coordinator(permid): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helpmsg: Got a STOP_DOWNLOAD_HELP message from non-coordinator",show_permid_short(permid) + return + + # Find and remove download + dlist = self.session.get_downloads() + for d in dlist: + if d.get_def().get_infohash() == infohash: + self.session.remove_download(d) + break + + def got_pieces_reserved(self,permid, message, selversion): + try: + infohash = message[1:21] + pieces = bdecode(message[21:]) + except: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","helper: warning: bad data in PIECES_RESERVED message" + return False + + network_got_pieces_reserved_lambda = lambda:self.network_got_pieces_reserved(permid,message,selversion,infohash,pieces) + self.session.lm.rawserver.add_task(network_got_pieces_reserved_lambda,0) + + return True + + def network_got_pieces_reserved(self,permid,message,selversion,infohash,pieces): + # Called by network thread + + h = self.session.lm.get_coopdl_role_object(infohash,COOPDL_ROLE_HELPER) + if h is None: + return + + if not h.is_coordinator(permid): + return + + h.got_pieces_reserved(permid, pieces) + # Wake up download thread + h.notify() + diff --git a/tribler-mod/Tribler/Core/CoopDownload/Logger.py b/tribler-mod/Tribler/Core/CoopDownload/Logger.py new file mode 100644 index 0000000..d66fdd1 --- /dev/null +++ b/tribler-mod/Tribler/Core/CoopDownload/Logger.py @@ -0,0 +1,8 @@ +from time import localtime, strftime +# Written by Jie Yang +# see LICENSE.txt for license information + +# Just for compatibility of Pawel's download help logger +from Tribler.Core.Statistics.Logger import create_logger, get_logger + + \ No newline at end of file diff --git a/tribler-mod/Tribler/Core/CoopDownload/Logger.py.bak b/tribler-mod/Tribler/Core/CoopDownload/Logger.py.bak new file mode 100644 index 0000000..5ba8519 --- /dev/null +++ b/tribler-mod/Tribler/Core/CoopDownload/Logger.py.bak @@ -0,0 +1,7 @@ +# Written by Jie Yang +# see LICENSE.txt for license information + +# Just for compatibility of Pawel's download help logger +from Tribler.Core.Statistics.Logger import create_logger, get_logger + + \ No newline at end of file diff --git a/tribler-mod/Tribler/Core/CoopDownload/RatePredictor.py b/tribler-mod/Tribler/Core/CoopDownload/RatePredictor.py new file mode 100644 index 0000000..e2658a6 --- /dev/null +++ b/tribler-mod/Tribler/Core/CoopDownload/RatePredictor.py @@ -0,0 +1,61 @@ +from time import localtime, strftime + +# Written by Pawel Garbacki +# see LICENSE.txt for license information + +import sys +from Tribler.Core.BitTornado.clock import clock + +MIN_CAPACITY = 0.75 +DEBUG = True #False + +class RatePredictor: + def __init__(self, raw_server, rate_measure, max_rate, probing_period = 2): + self.raw_server = raw_server + self.rate_measure = rate_measure + if max_rate == 0: + self.max_rate = 2147483647 + else: + self.max_rate = max_rate + self.probing_period = probing_period # in seconds + +class ExpSmoothRatePredictor(RatePredictor): + def __init__(self, raw_server, rate_measure, max_rate, alpha = 0.5, max_period = 30, probing_period = 2): + RatePredictor.__init__(self, raw_server, rate_measure, max_rate, probing_period) + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," 
", "RatePredictor:__init__" + self.alpha = alpha + self.max_period = max_period + self.value = None + self.timestamp = None + + def update(self): + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "RatePredictor:update" + self.raw_server.add_task(self.update, self.probing_period) + current_value = self.rate_measure.get_rate() / 1000. + current_time = clock() + if self.value is None or current_time - self.timestamp > self.max_period: + self.value = current_value + else: + self.value = self.alpha * current_value + (1 - self.alpha) * self.value + if self.max_rate > 0 and self.value > self.max_rate: + self.value = self.max_rate + self.timestamp = current_time + + # exponential smoothing prediction + def predict(self): + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "RatePredictor:predict" + # self.update() + if self.value is None: + return 0 + return self.value + + def has_capacity(self): + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "RatePredictor:has_capacity" +# return False + # self.update() + result = None + if self.value is None: + result = True + else: + result = (1. - float(self.value) / self.max_rate) > MIN_CAPACITY + return result diff --git a/tribler-mod/Tribler/Core/CoopDownload/RatePredictor.py.bak b/tribler-mod/Tribler/Core/CoopDownload/RatePredictor.py.bak new file mode 100644 index 0000000..1f241de --- /dev/null +++ b/tribler-mod/Tribler/Core/CoopDownload/RatePredictor.py.bak @@ -0,0 +1,60 @@ + +# Written by Pawel Garbacki +# see LICENSE.txt for license information + +import sys +from Tribler.Core.BitTornado.clock import clock + +MIN_CAPACITY = 0.75 +DEBUG = True #False + +class RatePredictor: + def __init__(self, raw_server, rate_measure, max_rate, probing_period = 2): + self.raw_server = raw_server + self.rate_measure = rate_measure + if max_rate == 0: + self.max_rate = 2147483647 + else: + self.max_rate = max_rate + self.probing_period = probing_period # in seconds + +class ExpSmoothRatePredictor(RatePredictor): + def __init__(self, raw_server, rate_measure, max_rate, alpha = 0.5, max_period = 30, probing_period = 2): + RatePredictor.__init__(self, raw_server, rate_measure, max_rate, probing_period) + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "RatePredictor:__init__" + self.alpha = alpha + self.max_period = max_period + self.value = None + self.timestamp = None + + def update(self): + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "RatePredictor:update" + self.raw_server.add_task(self.update, self.probing_period) + current_value = self.rate_measure.get_rate() / 1000. + current_time = clock() + if self.value is None or current_time - self.timestamp > self.max_period: + self.value = current_value + else: + self.value = self.alpha * current_value + (1 - self.alpha) * self.value + if self.max_rate > 0 and self.value > self.max_rate: + self.value = self.max_rate + self.timestamp = current_time + + # exponential smoothing prediction + def predict(self): + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "RatePredictor:predict" + # self.update() + if self.value is None: + return 0 + return self.value + + def has_capacity(self): + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "RatePredictor:has_capacity" +# return False + # self.update() + result = None + if self.value is None: + result = True + else: + result = (1. 
- float(self.value) / self.max_rate) > MIN_CAPACITY + return result diff --git a/tribler-mod/Tribler/Core/CoopDownload/__init__.py b/tribler-mod/Tribler/Core/CoopDownload/__init__.py new file mode 100644 index 0000000..b7ef832 --- /dev/null +++ b/tribler-mod/Tribler/Core/CoopDownload/__init__.py @@ -0,0 +1,3 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information diff --git a/tribler-mod/Tribler/Core/CoopDownload/__init__.py.bak b/tribler-mod/Tribler/Core/CoopDownload/__init__.py.bak new file mode 100644 index 0000000..395f8fb --- /dev/null +++ b/tribler-mod/Tribler/Core/CoopDownload/__init__.py.bak @@ -0,0 +1,2 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information diff --git a/tribler-mod/Tribler/Core/DecentralizedTracking/__init__.py b/tribler-mod/Tribler/Core/DecentralizedTracking/__init__.py new file mode 100644 index 0000000..7adef79 --- /dev/null +++ b/tribler-mod/Tribler/Core/DecentralizedTracking/__init__.py @@ -0,0 +1,3 @@ +from time import localtime, strftime +# written by Arno Bakker +# see LICENSE.txt for license information diff --git a/tribler-mod/Tribler/Core/DecentralizedTracking/__init__.py.bak b/tribler-mod/Tribler/Core/DecentralizedTracking/__init__.py.bak new file mode 100644 index 0000000..8a8e66a --- /dev/null +++ b/tribler-mod/Tribler/Core/DecentralizedTracking/__init__.py.bak @@ -0,0 +1,2 @@ +# written by Arno Bakker +# see LICENSE.txt for license information diff --git a/tribler-mod/Tribler/Core/DecentralizedTracking/mainlineDHT.py b/tribler-mod/Tribler/Core/DecentralizedTracking/mainlineDHT.py new file mode 100644 index 0000000..f0d2979 --- /dev/null +++ b/tribler-mod/Tribler/Core/DecentralizedTracking/mainlineDHT.py @@ -0,0 +1,35 @@ +from time import localtime, strftime +# written by Fabian van der Werf, Arno Bakker +# see LICENSE.txt for license information + +khashmir_imported = False +try: + from khashmir.utkhashmir import UTKhashmir + khashmir_imported = True +except: + pass + + +DEBUG = False + +dht = None + +def init(*args, **kws): + global dht + global khashmir_imported + if khashmir_imported and dht is None: + dht = UTKhashmir(*args, **kws) + # Arno: no need for separate thread, it now runs on the regular network thread + dht.addContact('router.bittorrent.com', 6881) + +def control(): + import pdb + pdb.set_trace() + +def deinit(): + global dht + if dht is not None: + try: + dht.rawserver.stop() + except: + pass diff --git a/tribler-mod/Tribler/Core/DecentralizedTracking/mainlineDHT.py.bak b/tribler-mod/Tribler/Core/DecentralizedTracking/mainlineDHT.py.bak new file mode 100644 index 0000000..b58b4ea --- /dev/null +++ b/tribler-mod/Tribler/Core/DecentralizedTracking/mainlineDHT.py.bak @@ -0,0 +1,34 @@ +# written by Fabian van der Werf, Arno Bakker +# see LICENSE.txt for license information + +khashmir_imported = False +try: + from khashmir.utkhashmir import UTKhashmir + khashmir_imported = True +except: + pass + + +DEBUG = False + +dht = None + +def init(*args, **kws): + global dht + global khashmir_imported + if khashmir_imported and dht is None: + dht = UTKhashmir(*args, **kws) + # Arno: no need for separate thread, it now runs on the regular network thread + dht.addContact('router.bittorrent.com', 6881) + +def control(): + import pdb + pdb.set_trace() + +def deinit(): + global dht + if dht is not None: + try: + dht.rawserver.stop() + except: + pass diff --git a/tribler-mod/Tribler/Core/DecentralizedTracking/mainlineDHTChecker.py 
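[Editor's note] The ExpSmoothRatePredictor above keeps a classic exponentially smoothed rate estimate: each probe sets value = alpha * current + (1 - alpha) * previous, the estimate is reset after a silence longer than max_period, it is clipped at max_rate, and has_capacity() reports True while more than MIN_CAPACITY (75%) of max_rate is still unused. The self-contained sketch below reproduces that rule without the RawServer rescheduling; names are illustrative.

    # Stand-alone sketch of the smoothing used by ExpSmoothRatePredictor.
    import time

    class SmoothedRate:
        def __init__(self, alpha=0.5, max_period=30, max_rate=2147483647):
            self.alpha = alpha
            self.max_period = max_period      # reset after this many seconds of silence
            self.max_rate = max_rate          # KB/s cap, mirrors the 2**31-1 default above
            self.value = None
            self.timestamp = None

        def update(self, current_rate_kbs):
            now = time.time()
            if self.value is None or now - self.timestamp > self.max_period:
                self.value = current_rate_kbs
            else:
                self.value = self.alpha * current_rate_kbs + (1 - self.alpha) * self.value
            self.value = min(self.value, self.max_rate)
            self.timestamp = now

        def has_capacity(self, min_capacity=0.75):
            if self.value is None:
                return True
            return 1.0 - float(self.value) / self.max_rate > min_capacity
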
b/tribler-mod/Tribler/Core/DecentralizedTracking/mainlineDHTChecker.py new file mode 100644 index 0000000..a94d9d0 --- /dev/null +++ b/tribler-mod/Tribler/Core/DecentralizedTracking/mainlineDHTChecker.py @@ -0,0 +1,52 @@ +from time import localtime, strftime +# written by Arno Bakker, Yuan Yuan +# see LICENSE.txt for license information + +import sys +from threading import currentThread +from Tribler.Core.CacheDB.CacheDBHandler import TorrentDBHandler + +DEBUG = False + +class mainlineDHTChecker: + __single = None + + def __init__(self): + if mainlineDHTChecker.__single: + raise RuntimeError, "mainlineDHTChecker is Singleton" + mainlineDHTChecker.__single = self + + self.dht = None + self.torrent_db = TorrentDBHandler.getInstance() + + def getInstance(*args, **kw): + if mainlineDHTChecker.__single is None: + mainlineDHTChecker(*args, **kw) + return mainlineDHTChecker.__single + getInstance = staticmethod(getInstance) + + def register(self,dht): + self.dht = dht + + def lookup(self,infohash): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","mainlineDHTChecker: Lookup",`infohash` + + if self.dht is not None: + func = lambda p:self.got_peers_callback(infohash,p) + self.dht.getPeers(infohash,func) + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","mainlineDHTChecker: No lookup, no DHT support loaded" + + + def got_peers_callback(self,infohash,peers): + """ Called by network thread """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","mainlineDHTChecker: Got",len(peers),"peers for torrent",`infohash`,currentThread().getName() + + alive = len(peers) > 0 + if alive: + status = "good" + kw = {'status': status} + self.torrent_db.updateTorrent(infohash, updateFlag=True, **kw) + diff --git a/tribler-mod/Tribler/Core/DecentralizedTracking/mainlineDHTChecker.py.bak b/tribler-mod/Tribler/Core/DecentralizedTracking/mainlineDHTChecker.py.bak new file mode 100644 index 0000000..a1a4e5e --- /dev/null +++ b/tribler-mod/Tribler/Core/DecentralizedTracking/mainlineDHTChecker.py.bak @@ -0,0 +1,51 @@ +# written by Arno Bakker, Yuan Yuan +# see LICENSE.txt for license information + +import sys +from threading import currentThread +from Tribler.Core.CacheDB.CacheDBHandler import TorrentDBHandler + +DEBUG = False + +class mainlineDHTChecker: + __single = None + + def __init__(self): + if mainlineDHTChecker.__single: + raise RuntimeError, "mainlineDHTChecker is Singleton" + mainlineDHTChecker.__single = self + + self.dht = None + self.torrent_db = TorrentDBHandler.getInstance() + + def getInstance(*args, **kw): + if mainlineDHTChecker.__single is None: + mainlineDHTChecker(*args, **kw) + return mainlineDHTChecker.__single + getInstance = staticmethod(getInstance) + + def register(self,dht): + self.dht = dht + + def lookup(self,infohash): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","mainlineDHTChecker: Lookup",`infohash` + + if self.dht is not None: + func = lambda p:self.got_peers_callback(infohash,p) + self.dht.getPeers(infohash,func) + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","mainlineDHTChecker: No lookup, no DHT support loaded" + + + def got_peers_callback(self,infohash,peers): + """ Called by network thread """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","mainlineDHTChecker: Got",len(peers),"peers for torrent",`infohash`,currentThread().getName() + + alive = len(peers) > 0 + if alive: + status = "good" + 
kw = {'status': status} + self.torrent_db.updateTorrent(infohash, updateFlag=True, **kw) + diff --git a/tribler-mod/Tribler/Core/DecentralizedTracking/rsconvert.py b/tribler-mod/Tribler/Core/DecentralizedTracking/rsconvert.py new file mode 100644 index 0000000..b662b94 --- /dev/null +++ b/tribler-mod/Tribler/Core/DecentralizedTracking/rsconvert.py @@ -0,0 +1,43 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information +# +# Converts betweem BiTorrent/BitTornado 3.x/0.3 RawServer and BitTorrent 5.0 +# rawserver for Khashmir +# +import sys + +DEBUG = False + +class RawServerConverter: + + def __init__(self,rawserver): + self.rs = rawserver + + def create_udpsocket(self,port,host): + return self.rs.create_udpsocket(port,host) + + def start_listening_udp(self,serversocket,handler,context=None): + return self.rs.start_listening_udp(serversocket,handler) + + def stop_listening_udp(self,serversocket): + return self.rs.stop_listening_udp(serversocket) + + def add_task(self,t,func,*args,**kwargs): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rsconvert: add_task:",func + newf = lambda:func(*args,**kwargs) + return self.rs._add_task(newf,t) + + def external_add_task(self,t,func,*args,**kwargs): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rsconvert: external_add_task:",func + newf = lambda:func(*args,**kwargs) + return self.rs.add_task(newf,t) + + def listen_forever(self): + return self.rs.listen_forever() + + def stop(self): + self.rs.doneflag.set() + \ No newline at end of file diff --git a/tribler-mod/Tribler/Core/DecentralizedTracking/rsconvert.py.bak b/tribler-mod/Tribler/Core/DecentralizedTracking/rsconvert.py.bak new file mode 100644 index 0000000..5c3f0ac --- /dev/null +++ b/tribler-mod/Tribler/Core/DecentralizedTracking/rsconvert.py.bak @@ -0,0 +1,42 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information +# +# Converts betweem BiTorrent/BitTornado 3.x/0.3 RawServer and BitTorrent 5.0 +# rawserver for Khashmir +# +import sys + +DEBUG = False + +class RawServerConverter: + + def __init__(self,rawserver): + self.rs = rawserver + + def create_udpsocket(self,port,host): + return self.rs.create_udpsocket(port,host) + + def start_listening_udp(self,serversocket,handler,context=None): + return self.rs.start_listening_udp(serversocket,handler) + + def stop_listening_udp(self,serversocket): + return self.rs.stop_listening_udp(serversocket) + + def add_task(self,t,func,*args,**kwargs): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rsconvert: add_task:",func + newf = lambda:func(*args,**kwargs) + return self.rs._add_task(newf,t) + + def external_add_task(self,t,func,*args,**kwargs): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rsconvert: external_add_task:",func + newf = lambda:func(*args,**kwargs) + return self.rs.add_task(newf,t) + + def listen_forever(self): + return self.rs.listen_forever() + + def stop(self): + self.rs.doneflag.set() + \ No newline at end of file diff --git a/tribler-mod/Tribler/Core/DecentralizedTracking/ut_pex.py b/tribler-mod/Tribler/Core/DecentralizedTracking/ut_pex.py new file mode 100644 index 0000000..e7efe62 --- /dev/null +++ b/tribler-mod/Tribler/Core/DecentralizedTracking/ut_pex.py @@ -0,0 +1,140 @@ +from time import localtime, strftime +# Written by Arno Bakker, Bram Cohen +# see LICENSE.txt for license information + +__fool_epydoc = 481 +""" +uTorrent Peer 
Exchange (PEX) Support: +------------------------------------- +As documented in + http://transmission.m0k.org/trac/browser/trunk/misc/utorrent.txt + BitTorrent-5.0.8/BitTorrent/Connector.py + +The PEX message payload is a bencoded dict with three keys: + 'added': the set of peers met since the last PEX + 'added.f': a flag for every peer, apparently with the following values: + \x00: unknown, assuming default + \x01: Prefers encryption (as suggested by LH-ABC-3.2.0/BitTorrent/BT1/Connector.py) + \x02: Is seeder (as suggested by BitTorrent-5.0.8/BitTorrent/Connector.py) + OR-ing them together is allowed as I've seen \x03 values. + 'dropped': the set of peers dropped since last PEX + +The mechanism is insecure because there is no way to know if the peer addresses +are really of some peers that are running BitTorrent, or just DoS victims. +For peer addresses that come from trackers we at least know that the peer host +ran BitTorrent and was downloading this swarm (assuming the tracker is trustworthy). + +""" +import sys +from types import DictType,StringType +from Tribler.Core.BitTornado.BT1.track import compact_peer_info +from Tribler.Core.BitTornado.bencode import bencode + +EXTEND_MSG_UTORRENT_PEX_ID = chr(1) # Can be any value, the name 'ut_pex' is standardized +EXTEND_MSG_UTORRENT_PEX = 'ut_pex' # note case sensitive + +DEBUG = False + +def create_ut_pex(addedconns,droppedconns,thisconn): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","ut_pex: create_ut_pex:",addedconns,droppedconns,thisconn + d = {} + compactedpeerstr = compact_connections(addedconns,thisconn) + d['added'] = compactedpeerstr + flags = '' + for i in range(len(addedconns)): + conn = addedconns[i] + if conn == thisconn: + continue + flag = 0 + if conn.get_extend_encryption(): + flag |= 1 + if conn.download is not None and conn.download.peer_is_complete(): + flag |= 2 + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","ut_pex: create_ut_pex: add flag",`flag` + flags += chr(flag) + d['added.f'] = flags + compactedpeerstr = compact_connections(droppedconns) + d['dropped'] = compactedpeerstr + return bencode(d) + +def check_ut_pex(d): + if type(d) != DictType: + raise ValueError('ut_pex: not a dict') + apeers = check_ut_pex_peerlist(d,'added') + dpeers = check_ut_pex_peerlist(d,'dropped') + if 'added.f' in d: + addedf = d['added.f'] + if type(addedf) != StringType: + raise ValueError('ut_pex: added.f: not string') + if len(addedf) != len(apeers) and not len(addedf) == 0: + # KTorrent sends an empty added.f, be nice + raise ValueError('ut_pex: added.f: more flags than peers') + # Arno, 2008-09-12: Be liberal in what we receive + ##else: + ##raise ValueError('ut_pex: added.f: missing') + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","ut_pex: Got",apeers + + return (apeers,dpeers) + +def check_ut_pex_peerlist(d,name): + if name not in d: + # Arno, 2008-09-12: Be liberal in what we receive, some clients + # leave out 'dropped' key + ##raise ValueError('ut_pex:'+name+': missing') + return [] + peerlist = d[name] + if type(peerlist) != StringType: + raise ValueError('ut_pex:'+name+': not string') + if len(peerlist) % 6 != 0: + raise ValueError('ut_pex:'+name+': not multiple of 6 bytes') + peers = decompact_connections(peerlist) + for ip,port in peers: + if ip == '127.0.0.1': + raise ValueError('ut_pex:'+name+': address is localhost') + return peers + +def ut_pex_get_conns_diff(currconns,prevconns): + addedconns = [] + droppedconns = [] + for conn in 
currconns: + if not (conn in prevconns): + # new conn + addedconns.append(conn) + for conn in prevconns: + if not (conn in currconns): + # old conn, was dropped + droppedconns.append(conn) + return (addedconns,droppedconns) + + +def compact_connections(conns,thisconn=None): + """ See BitTornado/BT1/track.py """ + compactpeers = [] + for conn in conns: + if conn == thisconn: + continue + ip = conn.get_ip() + port = conn.get_extend_listenport() + if port is None: + raise ValueError("ut_pex: compact: listen port unknown?!") + else: + compactpeer = compact_peer_info(ip,port) + compactpeers.append(compactpeer) + + # Create compact representation of peers + compactpeerstr = ''.join(compactpeers) + return compactpeerstr + + +def decompact_connections(p): + """ See BitTornado/BT1/Rerequester.py """ + peers = [] + for x in xrange(0, len(p), 6): + ip = '.'.join([str(ord(i)) for i in p[x:x+4]]) + port = (ord(p[x+4]) << 8) | ord(p[x+5]) + peers.append((ip, port)) + return peers + diff --git a/tribler-mod/Tribler/Core/DecentralizedTracking/ut_pex.py.bak b/tribler-mod/Tribler/Core/DecentralizedTracking/ut_pex.py.bak new file mode 100644 index 0000000..35e1ea1 --- /dev/null +++ b/tribler-mod/Tribler/Core/DecentralizedTracking/ut_pex.py.bak @@ -0,0 +1,139 @@ +# Written by Arno Bakker, Bram Cohen +# see LICENSE.txt for license information + +__fool_epydoc = 481 +""" +uTorrent Peer Exchange (PEX) Support: +------------------------------------- +As documented in + http://transmission.m0k.org/trac/browser/trunk/misc/utorrent.txt + BitTorrent-5.0.8/BitTorrent/Connector.py + +The PEX message payload is a bencoded dict with three keys: + 'added': the set of peers met since the last PEX + 'added.f': a flag for every peer, apparently with the following values: + \x00: unknown, assuming default + \x01: Prefers encryption (as suggested by LH-ABC-3.2.0/BitTorrent/BT1/Connector.py) + \x02: Is seeder (as suggested by BitTorrent-5.0.8/BitTorrent/Connector.py) + OR-ing them together is allowed as I've seen \x03 values. + 'dropped': the set of peers dropped since last PEX + +The mechanism is insecure because there is no way to know if the peer addresses +are really of some peers that are running BitTorrent, or just DoS victims. +For peer addresses that come from trackers we at least know that the peer host +ran BitTorrent and was downloading this swarm (assuming the tracker is trustworthy). 
+ +""" +import sys +from types import DictType,StringType +from Tribler.Core.BitTornado.BT1.track import compact_peer_info +from Tribler.Core.BitTornado.bencode import bencode + +EXTEND_MSG_UTORRENT_PEX_ID = chr(1) # Can be any value, the name 'ut_pex' is standardized +EXTEND_MSG_UTORRENT_PEX = 'ut_pex' # note case sensitive + +DEBUG = False + +def create_ut_pex(addedconns,droppedconns,thisconn): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","ut_pex: create_ut_pex:",addedconns,droppedconns,thisconn + d = {} + compactedpeerstr = compact_connections(addedconns,thisconn) + d['added'] = compactedpeerstr + flags = '' + for i in range(len(addedconns)): + conn = addedconns[i] + if conn == thisconn: + continue + flag = 0 + if conn.get_extend_encryption(): + flag |= 1 + if conn.download is not None and conn.download.peer_is_complete(): + flag |= 2 + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","ut_pex: create_ut_pex: add flag",`flag` + flags += chr(flag) + d['added.f'] = flags + compactedpeerstr = compact_connections(droppedconns) + d['dropped'] = compactedpeerstr + return bencode(d) + +def check_ut_pex(d): + if type(d) != DictType: + raise ValueError('ut_pex: not a dict') + apeers = check_ut_pex_peerlist(d,'added') + dpeers = check_ut_pex_peerlist(d,'dropped') + if 'added.f' in d: + addedf = d['added.f'] + if type(addedf) != StringType: + raise ValueError('ut_pex: added.f: not string') + if len(addedf) != len(apeers) and not len(addedf) == 0: + # KTorrent sends an empty added.f, be nice + raise ValueError('ut_pex: added.f: more flags than peers') + # Arno, 2008-09-12: Be liberal in what we receive + ##else: + ##raise ValueError('ut_pex: added.f: missing') + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","ut_pex: Got",apeers + + return (apeers,dpeers) + +def check_ut_pex_peerlist(d,name): + if name not in d: + # Arno, 2008-09-12: Be liberal in what we receive, some clients + # leave out 'dropped' key + ##raise ValueError('ut_pex:'+name+': missing') + return [] + peerlist = d[name] + if type(peerlist) != StringType: + raise ValueError('ut_pex:'+name+': not string') + if len(peerlist) % 6 != 0: + raise ValueError('ut_pex:'+name+': not multiple of 6 bytes') + peers = decompact_connections(peerlist) + for ip,port in peers: + if ip == '127.0.0.1': + raise ValueError('ut_pex:'+name+': address is localhost') + return peers + +def ut_pex_get_conns_diff(currconns,prevconns): + addedconns = [] + droppedconns = [] + for conn in currconns: + if not (conn in prevconns): + # new conn + addedconns.append(conn) + for conn in prevconns: + if not (conn in currconns): + # old conn, was dropped + droppedconns.append(conn) + return (addedconns,droppedconns) + + +def compact_connections(conns,thisconn=None): + """ See BitTornado/BT1/track.py """ + compactpeers = [] + for conn in conns: + if conn == thisconn: + continue + ip = conn.get_ip() + port = conn.get_extend_listenport() + if port is None: + raise ValueError("ut_pex: compact: listen port unknown?!") + else: + compactpeer = compact_peer_info(ip,port) + compactpeers.append(compactpeer) + + # Create compact representation of peers + compactpeerstr = ''.join(compactpeers) + return compactpeerstr + + +def decompact_connections(p): + """ See BitTornado/BT1/Rerequester.py """ + peers = [] + for x in xrange(0, len(p), 6): + ip = '.'.join([str(ord(i)) for i in p[x:x+4]]) + port = (ord(p[x+4]) << 8) | ord(p[x+5]) + peers.append((ip, port)) + return peers + diff --git 
a/tribler-mod/Tribler/Core/Download.py b/tribler-mod/Tribler/Core/Download.py new file mode 100644 index 0000000..c5ba7a0 --- /dev/null +++ b/tribler-mod/Tribler/Core/Download.py @@ -0,0 +1,155 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information +""" The representation of a running BT download/upload. """ + + +from Tribler.Core.simpledefs import * +from Tribler.Core.defaults import * +from Tribler.Core.exceptions import * +from Tribler.Core.Base import * +from Tribler.Core.APIImplementation.DownloadRuntimeConfig import DownloadRuntimeConfig +from Tribler.Core.APIImplementation.DownloadImpl import DownloadImpl +from Tribler.Core.APIImplementation.miscutils import * + +from Tribler.Core.osutils import * + + +class Download(DownloadRuntimeConfig,DownloadImpl): + """ + Representation of a running BT download/upload. + + A Download implements the DownloadConfigInterface which can be used to + change download parameters are runtime (for selected parameters). + + cf. libtorrent torrent_handle + """ + + # + # Internal methods + # + def __init__(self,session,tdef): + """ Internal constructor + @param session Session + @param tdef TorrentDef + """ + DownloadImpl.__init__(self,session,tdef) + # + # Public methods + # + def get_def(self): + """ + Return the read-only torrent definition (TorrentDef) for this Download. + @return A TorrentDef object. + """ + return DownloadImpl.get_def(self) + + + def set_state_callback(self,usercallback,getpeerlist=False): + """ + Set a callback for retrieving the state of the download. This callback + will be called immediately with a DownloadState object as first parameter. + The callback method must return a tuple (when,getpeerlist) where "when" + indicates whether the callback should be called again and represents a + number of seconds from now. If "when" <= 0.0 the callback will not be + called again. "getpeerlist" is a boolean that indicates whether the + DownloadState passed to the callback on the next invocation should + contain info about the set of current peers. + + The callback will be called by a popup thread which can be used + indefinitely (within reason) by the higher level code. + + @param usercallback Function that accepts DownloadState as parameter and + returns a (float,boolean) tuple. + """ + DownloadImpl.set_state_callback(self,usercallback,getpeerlist=getpeerlist) + + + def stop(self): + """ Stops the Download, i.e. closes all connections to other peers. """ + # Called by any thread + DownloadImpl.stop(self) + + def restart(self): + """ Restarts the stopped Download. """ + # Called by any thread + DownloadImpl.restart(self) + + # + # Config parameters that only exists at runtime + # + def set_max_desired_speed(self,direct,speed): + """ Sets the maximum desired upload/download speed for this Download. + @param direct The direction (UPLOAD/DOWNLOAD) + @param speed The speed in KB/s. + """ + DownloadImpl.set_max_desired_speed(self,direct,speed) + + def get_max_desired_speed(self,direct): + """ Returns the maximum desired upload/download speed for this Download. + @return The previously set speed in KB/s + """ + return DownloadImpl.get_max_desired_speed(self,direct) + + def get_dest_files(self, exts = None): + """ Returns the filenames on disk to which this Download saves + @return A list of (filename-in-torrent, disk filename) tuples. 
+ """ + return DownloadImpl.get_dest_files(self, exts) + + # + # Cooperative download + # + def ask_coopdl_helpers(self,permidlist): + """ Ask the specified list of peers to help speed up this download """ + # called by any thread + self.dllock.acquire() + try: + # ARNOCOMMENT: WE NEED PERMID+IP FOR COOP DL. How to access DB? Can't + # do it on main thread, can't do it on network thread. + + peerreclist = self.session.lm.peer_db.getPeers(permidlist, ['permid','ip','port']) + + if self.sd is not None: + ask_coopdl_helpers_lambda = lambda:self.sd is not None and self.sd.ask_coopdl_helpers(peerreclist) + self.session.lm.rawserver.add_task(ask_coopdl_helpers_lambda,0) + else: + raise OperationNotPossibleWhenStoppedException() + finally: + self.dllock.release() + + # To retrieve the list of current helpers, see DownloadState + + def stop_coopdl_helpers(self,permidlist): + """ Ask the specified list of peers to stop helping speed up this + download """ + # called by any thread + self.dllock.acquire() + try: + # ARNOCOMMENT: WE NEED PERMID+IP FOR COOP DL. How to access DB? Can't + # do it on main thread, can't do it on network thread. + peerreclist = self.session.lm.peer_db.getPeers(permidlist, ['permid','ip','port']) + + if self.sd is not None: + stop_coopdl_helpers_lambda = lambda:self.sd is not None and self.sd.stop_coopdl_helpers(peerreclist) + self.session.lm.rawserver.add_task(stop_coopdl_helpers_lambda,0) + else: + raise OperationNotPossibleWhenStoppedException() + finally: + self.dllock.release() + +# SelectiveSeeding_ + def set_seeding_policy(self,smanager): + """ Assign the seeding policy to use for this Download. + @param smanager An instance of Tribler.Policies.SeedingManager + """ + self.dllock.acquire() + try: + if self.sd is not None: + set_seeding_smanager_lambda = lambda:self.sd is not None and self.sd.get_bt1download().choker.set_seeding_manager(smanager) + self.session.lm.rawserver.add_task(set_seeding_smanager_lambda,0) + else: + raise OperationNotPossibleWhenStoppedException() + finally: + self.dllock.release() +# _SelectiveSeeding diff --git a/tribler-mod/Tribler/Core/Download.py.bak b/tribler-mod/Tribler/Core/Download.py.bak new file mode 100644 index 0000000..85493ce --- /dev/null +++ b/tribler-mod/Tribler/Core/Download.py.bak @@ -0,0 +1,154 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information +""" The representation of a running BT download/upload. """ + + +from Tribler.Core.simpledefs import * +from Tribler.Core.defaults import * +from Tribler.Core.exceptions import * +from Tribler.Core.Base import * +from Tribler.Core.APIImplementation.DownloadRuntimeConfig import DownloadRuntimeConfig +from Tribler.Core.APIImplementation.DownloadImpl import DownloadImpl +from Tribler.Core.APIImplementation.miscutils import * + +from Tribler.Core.osutils import * + + +class Download(DownloadRuntimeConfig,DownloadImpl): + """ + Representation of a running BT download/upload. + + A Download implements the DownloadConfigInterface which can be used to + change download parameters are runtime (for selected parameters). + + cf. libtorrent torrent_handle + """ + + # + # Internal methods + # + def __init__(self,session,tdef): + """ Internal constructor + @param session Session + @param tdef TorrentDef + """ + DownloadImpl.__init__(self,session,tdef) + # + # Public methods + # + def get_def(self): + """ + Return the read-only torrent definition (TorrentDef) for this Download. + @return A TorrentDef object. 
+ """ + return DownloadImpl.get_def(self) + + + def set_state_callback(self,usercallback,getpeerlist=False): + """ + Set a callback for retrieving the state of the download. This callback + will be called immediately with a DownloadState object as first parameter. + The callback method must return a tuple (when,getpeerlist) where "when" + indicates whether the callback should be called again and represents a + number of seconds from now. If "when" <= 0.0 the callback will not be + called again. "getpeerlist" is a boolean that indicates whether the + DownloadState passed to the callback on the next invocation should + contain info about the set of current peers. + + The callback will be called by a popup thread which can be used + indefinitely (within reason) by the higher level code. + + @param usercallback Function that accepts DownloadState as parameter and + returns a (float,boolean) tuple. + """ + DownloadImpl.set_state_callback(self,usercallback,getpeerlist=getpeerlist) + + + def stop(self): + """ Stops the Download, i.e. closes all connections to other peers. """ + # Called by any thread + DownloadImpl.stop(self) + + def restart(self): + """ Restarts the stopped Download. """ + # Called by any thread + DownloadImpl.restart(self) + + # + # Config parameters that only exists at runtime + # + def set_max_desired_speed(self,direct,speed): + """ Sets the maximum desired upload/download speed for this Download. + @param direct The direction (UPLOAD/DOWNLOAD) + @param speed The speed in KB/s. + """ + DownloadImpl.set_max_desired_speed(self,direct,speed) + + def get_max_desired_speed(self,direct): + """ Returns the maximum desired upload/download speed for this Download. + @return The previously set speed in KB/s + """ + return DownloadImpl.get_max_desired_speed(self,direct) + + def get_dest_files(self, exts = None): + """ Returns the filenames on disk to which this Download saves + @return A list of (filename-in-torrent, disk filename) tuples. + """ + return DownloadImpl.get_dest_files(self, exts) + + # + # Cooperative download + # + def ask_coopdl_helpers(self,permidlist): + """ Ask the specified list of peers to help speed up this download """ + # called by any thread + self.dllock.acquire() + try: + # ARNOCOMMENT: WE NEED PERMID+IP FOR COOP DL. How to access DB? Can't + # do it on main thread, can't do it on network thread. + + peerreclist = self.session.lm.peer_db.getPeers(permidlist, ['permid','ip','port']) + + if self.sd is not None: + ask_coopdl_helpers_lambda = lambda:self.sd is not None and self.sd.ask_coopdl_helpers(peerreclist) + self.session.lm.rawserver.add_task(ask_coopdl_helpers_lambda,0) + else: + raise OperationNotPossibleWhenStoppedException() + finally: + self.dllock.release() + + # To retrieve the list of current helpers, see DownloadState + + def stop_coopdl_helpers(self,permidlist): + """ Ask the specified list of peers to stop helping speed up this + download """ + # called by any thread + self.dllock.acquire() + try: + # ARNOCOMMENT: WE NEED PERMID+IP FOR COOP DL. How to access DB? Can't + # do it on main thread, can't do it on network thread. 
+ peerreclist = self.session.lm.peer_db.getPeers(permidlist, ['permid','ip','port']) + + if self.sd is not None: + stop_coopdl_helpers_lambda = lambda:self.sd is not None and self.sd.stop_coopdl_helpers(peerreclist) + self.session.lm.rawserver.add_task(stop_coopdl_helpers_lambda,0) + else: + raise OperationNotPossibleWhenStoppedException() + finally: + self.dllock.release() + +# SelectiveSeeding_ + def set_seeding_policy(self,smanager): + """ Assign the seeding policy to use for this Download. + @param smanager An instance of Tribler.Policies.SeedingManager + """ + self.dllock.acquire() + try: + if self.sd is not None: + set_seeding_smanager_lambda = lambda:self.sd is not None and self.sd.get_bt1download().choker.set_seeding_manager(smanager) + self.session.lm.rawserver.add_task(set_seeding_smanager_lambda,0) + else: + raise OperationNotPossibleWhenStoppedException() + finally: + self.dllock.release() +# _SelectiveSeeding diff --git a/tribler-mod/Tribler/Core/DownloadConfig.py b/tribler-mod/Tribler/Core/DownloadConfig.py new file mode 100644 index 0000000..e6c570d --- /dev/null +++ b/tribler-mod/Tribler/Core/DownloadConfig.py @@ -0,0 +1,828 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information +""" Controls how a TorrentDef is downloaded (rate, where on disk, etc.) """ + +import sys +import os +#import time +import copy +import pickle +from types import StringType + +from Tribler.Core.simpledefs import * +from Tribler.Core.defaults import * +from Tribler.Core.exceptions import * +from Tribler.Core.Base import * +from Tribler.Core.APIImplementation.miscutils import * +from Tribler.Core.LiveSourceAuthConfig import LiveSourceAuthConfig + +from Tribler.Core.osutils import * + +# importing get_home_dir from Tribler.Core.Session causes an +# ImportError... +def get_home_dir(): + from Tribler.Core.Session import get_home_dir as session_get_home_dir + return session_get_home_dir() + +class DownloadConfigInterface: + """ + (key,value) pair config of per-torrent runtime parameters, + e.g. destdir, file-allocation policy, etc. Also options to advocate + torrent, e.g. register in DHT, advertise via Buddycast. + + Use DownloadStartupConfig to manipulate download configs before download + startup time. This is just a parent class. + + cf. libtorrent torrent_handle + """ + def __init__(self,dlconfig=None): + + if dlconfig is not None: # copy constructor + self.dlconfig = dlconfig + return + + self.dlconfig = {} + + # Define the built-in default here + self.dlconfig.update(dldefaults) + + # Arno: Sparse as default reduces CPU usage + if sys.platform != 'win32': + self.set_alloc_type(DISKALLOC_SPARSE) + + self.dlconfig['saveas'] = get_default_dest_dir() + + + def set_dest_dir(self,path): + """ Sets the directory where to save this Download. + @param path A path of a directory. + """ + self.dlconfig['saveas'] = path + + def get_dest_dir(self): + """ Gets the directory where to save this Download. + """ + return self.dlconfig['saveas'] + + def set_video_event_callback(self,usercallback): + """ Download the torrent in Video-On-Demand mode or as live stream. + When a playback event occurs, the usercallback function will be + called, with the following list of arguments: +
+            Download,event,params
+        
+ In which event is a string, and params a dictionary. The following + events are supported: +
+        VODEVENT_START:
+            The params dictionary will contain the fields
+        
+                mimetype,stream,filename,length
+        
+            If the filename is set, the video can be read from there. If not,
+            the video can be read from the stream, which is a file-like object 
+            supporting the read(),seek(), and close() operations. The MIME type
+            of the video is given by "mimetype", the length of the stream in
+            bytes by "length" which may be None if the length is unknown (e.g.
+            when live streaming).
+        
+            To fetch a specific file from a multi-file torrent, use the 
+            set_selected_files() method. This method sets the mode to DLMODE_VOD 
+
+        VODEVENT_PAUSE:
+            The download engine would like video playback to be paused as the
+            data is not coming in fast enough / the data that is due is not
+            available yet.
+            
+            The params dictionary contains the fields
+            
+                autoresume
+                
+            "autoresume" indicates whether or not the Core will generate
+            a VODEVENT_RESUME when it is ready again, or that this is left
+            to the core user.
+                    
+        VODEVENT_RESUME:
+            The download engine would like video playback to resume.
+        
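As an illustration of the callback contract listed above, here is a minimal sketch of a user callback for set_video_event_callback(); the player hooks are hypothetical placeholders, and the VODEVENT_* constants are assumed to come from Tribler.Core.simpledefs (which this module star-imports):

    from Tribler.Core.simpledefs import VODEVENT_START, VODEVENT_PAUSE, VODEVENT_RESUME

    def my_vod_event_callback(download, event, params):
        # Runs on a pop-up thread, so it may block within reason.
        if event == VODEVENT_START:
            if params['filename'] is not None:
                start_player_from_file(params['filename'])        # hypothetical player hook
            else:
                # 'stream' is file-like (read()/seek()/close()); 'length' may be None (live)
                start_player_from_stream(params['stream'], params['mimetype'], params['length'])
        elif event == VODEVENT_PAUSE:
            pause_player()                                         # hypothetical player hook
            # params['autoresume'] says whether the Core will send VODEVENT_RESUME itself
        elif event == VODEVENT_RESUME:
            resume_player()                                        # hypothetical player hook
        # Events that are not supported are simply ignored.

    # dscfg.set_video_event_callback(my_vod_event_callback)   # also switches the mode to DLMODE_VOD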
+ The usercallback should ignore events it does not support. + + The usercallback will be called by a popup thread which can be used + indefinitely (within reason) by the higher level code. + + @param usercallback A function with the above signature. + """ + self.dlconfig['mode'] = DLMODE_VOD + self.dlconfig['vod_usercallback'] = usercallback + + def set_video_events(self,events=[]): + """ Sets which events will be supported with the usercallback set + by set_video_event_callback. Supporting the VODEVENT_START event is + mandatory, and can therefore be omitted from the list. + + @param events A list of supported VODEVENT_* events. + """ + + # create a copy to avoid loosing the info + self.dlconfig['vod_userevents'] = events[:] + + def set_video_source(self,videosource,authconfig=None): + """ Provides the live video source for this torrent from an external + source. + + @param videosource A file-like object providing the live video stream + (i.e., supports read() and close()) + @param authconfig The key information for source authentication of + packets. See LiveSourceAuthConfig and TorrentDef.create_live_torrent() + """ + self.dlconfig['video_source'] = videosource + if authconfig is None: + authconfig = LiveSourceAuthConfig(LIVE_AUTHMETHOD_NONE) + self.dlconfig['video_source_authconfig'] = authconfig + + def set_video_ratelimit(self,ratelimit): + """ Sets a limit on the speed at which the video stream is to be read. + Useful when creating a live stream from file or any other faster-than-live + data stream. + + @param ratelimit The maximum speed at which to read from the stream (bps) + """ + self.dlconfig['video_ratelimit'] = ratelimit + + def set_mode(self,mode): + """ Sets the mode of this download. + @param mode DLMODE_NORMAL/DLMODE_VOD """ + self.dlconfig['mode'] = mode + + def set_live_aux_seeders(self,seeders): + """ Sets a number of live seeders, auxiliary servers that + get high priority at the source server to distribute its content + to others. + @param seeders A list of [IP address,port] lists. + """ + self.dlconfig['live_aux_seeders'] = seeders + + def get_mode(self): + """ Returns the mode of this download. + @return DLMODE_NORMAL/DLMODE_VOD """ + return self.dlconfig['mode'] + + def get_video_event_callback(self): + """ Returns the function that was passed to set_video_event_callback(). + @return A function. + """ + return self.dlconfig['vod_usercallback'] + + def get_video_events(self): + """ Returns the function that was passed to set_video_events(). + @return A list of events. + """ + return self.dlconfig['vod_userevents'] + + def get_video_source(self): + """ Returns the object that was passed to set_video_source(). + @return A file-like object. + """ + return self.dlconfig['video_source'] + + def get_video_ratelimit(self): + """ Returns the speed at which the video stream is read (bps). + @return An integer. + """ + return self.dlconfig['video_ratelimit'] + + def get_live_aux_seeders(self): + """ Returns the aux. live seeders set. + @return A list of [IP address,port] lists. """ + return self.dlconfig['live_aux_seeders'] + + + def set_selected_files(self,files): + """ Select which files in the torrent to download. The filenames must + be the names as they appear in the torrent def. Trivially, when the + torrent contains a file 'sjaak.avi' the files parameter must + be 'sjaak.avi'. 
When the torrent contains multiple files and is named + 'filecollection', the files parameter must be + os.path.join('filecollection','sjaak.avi') + + @param files Can be a single filename or a list of filenames (e.g. + ['harry.avi','sjaak.avi']). + """ + # TODO: can't check if files exists, don't have tdef here.... bugger + if type(files) == StringType: # convenience + files = [files] + + if self.dlconfig['mode'] == DLMODE_VOD and len(files) > 1: + raise ValueError("In Video-On-Demand mode only 1 file can be selected for download") + self.dlconfig['selected_files'] = files + + + def get_selected_files(self): + """ Returns the list of files selected for download. + @return A list of strings. """ + return self.dlconfig['selected_files'] + + + + # + # Common download performance parameters + # + def set_max_speed(self,direct,speed): + """ Sets the maximum upload or download speed for this Download. + @param direct The direction (UPLOAD/DOWNLOAD) + @param speed The speed in KB/s. + """ + if direct == UPLOAD: + self.dlconfig['max_upload_rate'] = speed + else: + self.dlconfig['max_download_rate'] = speed + + def get_max_speed(self,direct): + """ Returns the configured maximum speed. + Returns the speed in KB/s. """ + if direct == UPLOAD: + return self.dlconfig['max_upload_rate'] + else: + return self.dlconfig['max_download_rate'] + + def set_max_conns_to_initiate(self,nconns): + """ Sets the maximum number of connections to initiate for this + Download. + @param nconns A number of connections. + """ + self.dlconfig['max_initiate'] = nconns + + def get_max_conns_to_initiate(self): + """ Returns the configured maximum number of connections to initiate. + @return A number of connections. + """ + return self.dlconfig['max_initiate'] + + def set_max_conns(self,nconns): + """ Sets the maximum number of connections to connections for this + Download. + @param nconns A number of connections. + """ + self.dlconfig['max_connections'] = nconns + + def get_max_conns(self): + """ Returns the configured maximum number of connections. + @return A number of connections. + """ + return self.dlconfig['max_connections'] + + # + # Cooperative Download parameters + # + def get_coopdl_role(self): + """ Returns the role which the download plays in a cooperative download, +
+        - COOPDL_ROLE_COORDINATOR: other peers help this download
+        - COOPDL_ROLE_HELPER: this download helps another peer download faster.
+        
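A short sketch of how the helper role described above is selected; the coordinator PermID is an illustrative placeholder, and COOPDL_ROLE_HELPER is assumed to be exported by Tribler.Core.simpledefs (star-imported by this module):

    from Tribler.Core.DownloadConfig import DownloadStartupConfig
    from Tribler.Core.simpledefs import COOPDL_ROLE_HELPER

    coordinator_permid = None        # placeholder: PermID of the coordinating peer
    dscfg = DownloadStartupConfig()  # the role defaults to coordinator
    dscfg.set_coopdl_coordinator_permid(coordinator_permid)
    assert dscfg.get_coopdl_role() == COOPDL_ROLE_HELPER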
+ The default is coordinator, and it is set to helper by the + set_coopdl_coordinator_permid() method. + """ + return self.dlconfig['coopdl_role'] + + def set_coopdl_coordinator_permid(self,permid): + """ Calling this method makes this download a helper in a cooperative + download, helping the peer identified by the specified permid. This peer + acts as coordinator, telling this download which parts of the content + to download. + @param permid A PermID. + """ + self.dlconfig['coopdl_role'] = COOPDL_ROLE_HELPER + self.dlconfig['coopdl_coordinator_permid'] = permid + + def get_coopdl_coordinator_permid(self): + """ Returns the configured coordinator permid. + @return A PermID + """ + return self.dlconfig['coopdl_coordinator_permid'] + + # See DownloadRuntime config for adding, removing and getting list of + # helping peers. + + + # + # Advanced download parameters + # + def set_max_uploads(self,value): + """ Set the maximum number of uploads to allow at once. + @param value A number. + """ + self.dlconfig['max_uploads'] = value + + def get_max_uploads(self): + """ Returns the maximum number of uploads. + @return A number. """ + return self.dlconfig['max_uploads'] + + def set_keepalive_interval(self,value): + """ Set the number of seconds to pause between sending keepalives. + @param value An interval """ + self.dlconfig['keepalive_interval'] = value + + def get_keepalive_interval(self): + """ Returns the keepalive interval. + @return A number of seconds. """ + return self.dlconfig['keepalive_interval'] + + def set_download_slice_size(self,value): + """ Set how many bytes to query for per request. + @param value A number of bytes. + """ + self.dlconfig['download_slice_size'] = value + + def get_download_slice_size(self): + """ Returns the number of bytes to query per request. + @return A number of bytes. """ + return self.dlconfig['download_slice_size'] + + def set_upload_unit_size(self,value): + """ When limiting upload rate, how many bytes to send at a time. + @param value A number of bytes. """ + self.dlconfig['upload_unit_size'] = value + + def get_upload_unit_size(self): + """ Returns the set upload unit size. + @returns A number of bytes. + """ + return self.dlconfig['upload_unit_size'] + + def set_request_backlog(self,value): + """ Maximum number of requests to keep in a single pipe at once. + @param value A number of requests. + """ + self.dlconfig['request_backlog'] = value + + def get_request_backlog(self): + """ Returns the request backlog. + @return A number of requests. + """ + return self.dlconfig['request_backlog'] + + def set_max_message_length(self,value): + """ Maximum message-length prefix to accept over the wire - larger + values get the connection dropped. + @param value A number of bytes. + """ + self.dlconfig['max_message_length'] = value + + def get_max_message_length(self): + """ Returns the maximum message length that is accepted. + @return A number of bytes. + """ + return self.dlconfig['max_message_length'] + + def set_max_slice_length(self,value): + """ Maximum length slice to send to peers, larger requests are ignored. + @param value A number of bytes. + """ + self.dlconfig['max_slice_length'] = value + + def get_max_slice_length(self): + """ Returns the maximum slice length that is accepted. + @return A number of bytes. + """ + return self.dlconfig['max_slice_length'] + + def set_max_rate_period(self,value): + """ Maximum amount of time to guess the current rate estimate. + @param value A number of seconds. 
""" + self.dlconfig['max_rate_period'] = value + + def get_max_rate_period(self): + """ Returns the maximum rate period. + @return A number of seconds. + """ + return self.dlconfig['max_rate_period'] + + def set_upload_rate_fudge(self,value): + """ Time equivalent of writing to kernel-level TCP buffer, for rate + adjustment. + @param value A number of seconds. + """ + self.dlconfig['upload_rate_fudge'] = value + + def get_upload_rate_fudge(self): + """ Returns the upload rate fudge. + @return A number of seconds. + """ + return self.dlconfig['upload_rate_fudge'] + + def set_tcp_ack_fudge(self,value): + """ How much TCP ACK download overhead to add to upload rate + calculations. I.e. when a message is received we add X percent + of this message to our upload rate to account for TCP ACKs that + were sent during the reception process. (0 = disabled) + @param value A percentage + """ + self.dlconfig['tcp_ack_fudge'] = value + + def get_tcp_ack_fudge(self): + """ Returns the TCP ACK fudge. + @return A percentage. + """ + return self.dlconfig['tcp_ack_fudge'] + + def set_rerequest_interval(self,value): + """ Time to wait between requesting more peers from tracker. + @param value An interval in seconds. + """ + self.dlconfig['rerequest_interval'] = value + + def get_rerequest_interval(self): + """ Returns the tracker re-request interval. + @return A number of seconds. + """ + return self.dlconfig['rerequest_interval'] + + def set_min_peers(self,value): + """ Minimum number of peers to not do rerequesting. + @param value A number of peers. + """ + self.dlconfig['min_peers'] = value + + def get_min_peers(self): + """ Returns the minimum number of peers. + @return A number of peers. + """ + return self.dlconfig['min_peers'] + + def set_http_timeout(self,value): + """ Number of seconds to wait before assuming that a HTTP connection + has timed out. + @param value A number of seconds. + """ + self.dlconfig['http_timeout'] = value + + def get_http_timeout(self): + """ Returns the HTTP timeout. + @return A number of seconds. + """ + return self.dlconfig['http_timeout'] + + def set_check_hashes(self,value): + """ Whether to check the integrit of the data on disk using the + hashes from the torrent definition. + @param value Boolean + """ + self.dlconfig['check_hashes'] = value + + def get_check_hashes(self): + """ Returns whether to check hashes. + @return Boolean. """ + return self.dlconfig['check_hashes'] + + def set_alloc_type(self,value): + """ Set disk-allocation type: +
+        * DISKALLOC_NORMAL:  Allocates space as data is received
+        * DISKALLOC_BACKGROUND: Also adds space in the background
+        * DISKALLOC_PREALLOCATE: Reserves space up front (slow)
+        * DISKALLOC_SPARSE: Only for filesystems that support sparse files
+          (the default on UNIX)
+        
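For example, the allocation policy above can be chosen per download; a sketch, assuming the DISKALLOC_* constants are exported by Tribler.Core.simpledefs as used elsewhere in this module:

    import sys
    from Tribler.Core.DownloadConfig import DownloadStartupConfig
    from Tribler.Core.simpledefs import DISKALLOC_SPARSE, DISKALLOC_PREALLOCATE

    dscfg = DownloadStartupConfig()
    if sys.platform == 'win32':
        # opt into up-front reservation; the constructor only defaults to sparse off win32
        dscfg.set_alloc_type(DISKALLOC_PREALLOCATE)
    else:
        dscfg.set_alloc_type(DISKALLOC_SPARSE)   # matches the built-in non-win32 default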
+ @param value A DISKALLOC_* policy. + """ + self.dlconfig['alloc_type'] = value + + def get_alloc_type(self): + """ Returns the disk-allocation policy. + @return DISKALLOC_* + """ + return self.dlconfig['alloc_type'] + + def set_alloc_rate(self,value): + """ Set the rate to allocate space at using background + allocation (DISKALLOC_BACKGROUND). + + @param value A rate in MB/s. + """ + self.dlconfig['alloc_rate'] = value + + def get_alloc_rate(self): + """ Returns the background disk-allocation rate. + @return A number of megabytes per second. + """ + return self.dlconfig['alloc_rate'] + + def set_buffer_reads(self,value): + """ Whether to buffer disk reads. + @param value Boolean + """ + self.dlconfig['buffer_reads'] = value + + def get_buffer_reads(self): + """ Returns whether to buffer reads. + @return Boolean. """ + return self.dlconfig['buffer_reads'] + + def set_write_buffer_size(self,value): + """ The maximum amount of space to use for buffering disk writes + (0 = disabled). + @param value A buffer size in megabytes. + """ + self.dlconfig['write_buffer_size'] = value + + def get_write_buffer_size(self): + """ Returns the write buffer size. + @return A number of megabytes. + """ + return self.dlconfig['write_buffer_size'] + + def set_breakup_seed_bitfield(self,value): + """ Whether to send an incomplete BITFIELD and then fills with HAVE + messages, in order to get around intellectually-challenged Internet + Service Provider manipulation. + @param value Boolean + """ + self.dlconfig['breakup_seed_bitfield'] = value + + def get_breakup_seed_bitfield(self): + """ Returns whether to send an incomplete BITFIELD message. + @return Boolean. """ + return self.dlconfig['breakup_seed_bitfield'] + + def set_snub_time(self,value): + """ Seconds to wait for data to come in over a connection before + assuming it's semi-permanently choked. + @param value A number of seconds. + """ + self.dlconfig['snub_time'] = value + + def get_snub_time(self): + """ Returns the snub time. + @return A number of seconds. + """ + return self.dlconfig['snub_time'] + + def set_rarest_first_cutoff(self,value): + """ Number of downloads at which to switch from random to rarest first. + @param value A number of downloads. + """ + self.dlconfig['rarest_first_cutoff'] = value + + def get_rarest_first_cutoff(self): + """ Returns the rarest first cutoff. + @return A number of downloads. + """ + return self.dlconfig['rarest_first_cutoff'] + + def set_rarest_first_priority_cutoff(self,value): + """ The number of peers which need to have a piece before other + partials take priority over rarest first policy. + @param value A number of peers. + """ + self.dlconfig['rarest_first_priority_cutoff'] = value + + def get_rarest_first_priority_cutoff(self): + """ Returns the rarest-first priority cutoff. + @return A number of peers. """ + return self.dlconfig['rarest_first_priority_cutoff'] + + def set_min_uploads(self,value): + """ The number of uploads to fill out to with extra optimistic unchokes. + @param value A number of uploads. + """ + self.dlconfig['min_uploads'] = value + + def get_min_uploads(self): + """ Returns the minimum number of uploads. + @return A number of uploads. """ + return self.dlconfig['min_uploads'] + + def set_max_files_open(self,value): + """ The maximum number of files to keep open at a time, 0 means no + limit. + @param value A number of files. + """ + self.dlconfig['max_files_open'] = value + + def get_max_files_open(self): + """ Returns the maximum number of open files. + @return A number of files. 
""" + return self.dlconfig['max_files_open'] + + def set_round_robin_period(self,value): + """ The number of seconds between the client's switching upload targets. + @param value A number of seconds. + """ + self.dlconfig['round_robin_period'] = value + + def get_round_robin_period(self): + """ Returns the round-robin period. + @return A number of seconds. """ + return self.dlconfig['round_robin_period'] + + def set_super_seeder(self,value): + """ whether to use special upload-efficiency-maximizing routines (only + for dedicated seeds). + @param value Boolean + """ + self.dlconfig['super_seeder'] = value + + def get_super_seeder(self): + """ Returns hether super seeding is enabled. + @return Boolean. """ + return self.dlconfig['super_seeder'] + + def set_security(self,value): + """ Whether to enable extra security features intended to prevent abuse, + such as checking for multiple connections from the same IP address. + @param value Boolean + """ + self.dlconfig['security'] = value + + def get_security(self): + """ Returns the security setting. + @return Boolean. """ + return self.dlconfig['security'] + + def set_auto_kick(self,value): + """ Whether to automatically kick/ban peers that send bad data. + @param value Boolean + """ + self.dlconfig['auto_kick'] = value + + def get_auto_kick(self): + """ Returns whether autokick is enabled. + @return Boolean. """ + return self.dlconfig['auto_kick'] + + def set_double_check_writes(self,value): + """ Whether to double-check data being written to the disk for errors + (may increase CPU load). + @param value Boolean + """ + self.dlconfig['double_check'] = value + + def get_double_check_writes(self): + """ Returns whether double-checking on writes is enabled. """ + return self.dlconfig['double_check'] + + def set_triple_check_writes(self,value): + """ Whether to thoroughly check data being written to the disk (may + slow disk access). + @param value Boolean """ + self.dlconfig['triple_check'] = value + + def get_triple_check_writes(self): + """ Returns whether triple-checking on writes is enabled. """ + return self.dlconfig['triple_check'] + + def set_lock_files(self,value): + """ Whether to lock files the Download is working with. + @param value Boolean """ + self.dlconfig['lock_files'] = value + + def get_lock_files(self): + """ Returns whether locking of files is enabled. """ + return self.dlconfig['lock_files'] + + def set_lock_while_reading(self,value): + """ Whether to lock access to files being read. + @param value Boolean + """ + self.dlconfig['lock_while_reading'] = value + + def get_lock_while_reading(self): + """ Returns whether locking of files for reading is enabled. + @return Boolean. """ + return self.dlconfig['lock_while_reading'] + + def set_auto_flush(self,value): + """ Minutes between automatic flushes to disk (0 = disabled). + @param value A number of minutes. + """ + self.dlconfig['auto_flush'] = value + + def get_auto_flush(self): + """ Returns the auto flush interval. + @return A number of minutes. """ + return self.dlconfig['auto_flush'] + + def set_exclude_ips(self,value): + """ Set a list of IP addresses to be excluded. + @param value A list of IP addresses in dotted notation. + """ + self.dlconfig['exclude_ips'] = value + + def get_exclude_ips(self): + """ Returns the list of excluded IP addresses. + @return A list of strings. 
""" + return self.dlconfig['exclude_ips'] + + def set_ut_pex_max_addrs_from_peer(self,value): + """ Maximum number of addresses to accept from peer via the uTorrent + Peer Exchange extension (0 = disable PEX) + @param value A number of IP addresses. + """ + self.dlconfig['ut_pex_max_addrs_from_peer'] = value + + def get_ut_pex_max_addrs_from_peer(self): + """ Returns the maximum number of IP addresses to accept from a peer + via ut_pex. + @return A number of addresses. + """ + return self.dlconfig['ut_pex_max_addrs_from_peer'] + + def set_same_nat_try_internal(self,value): + """ Whether to try to detect if a peer is behind the same NAT as + this Session and then establish a connection over the internal + network + @param value Boolean + """ + self.dlconfig['same_nat_try_internal'] = value + + def get_same_nat_try_internal(self): + """ Returns whether same NAT detection is enabled. + @return Boolean """ + return self.dlconfig['same_nat_try_internal'] + + def set_unchoke_bias_for_internal(self,value): + """ Amount to add to unchoke score for peers on the internal network. + @param value A number + """ + self.dlconfig['unchoke_bias_for_internal'] = value + + def get_unchoke_bias_for_internal(self): + """ Returns the bias for peers on the internal network. + @return A number + """ + return self.dlconfig['unchoke_bias_for_internal'] + + +class DownloadStartupConfig(DownloadConfigInterface,Serializable,Copyable): + """ + (key,value) pair config of per-torrent runtime parameters, + e.g. destdir, file-allocation policy, etc. Also options to advocate + torrent, e.g. register in DHT, advertise via Buddycast. + + cf. libtorrent torrent_handle + """ + def __init__(self,dlconfig=None): + """ Normal constructor for DownloadStartupConfig (copy constructor + used internally) """ + DownloadConfigInterface.__init__(self,dlconfig) + # + # Class method + # + def load(filename): + """ + Load a saved DownloadStartupConfig from disk. + + @param filename An absolute Unicode filename + @return DownloadStartupConfig object + """ + # Class method, no locking required + f = open(filename,"rb") + dlconfig = pickle.load(f) + dscfg = DownloadStartupConfig(dlconfig) + f.close() + return dscfg + load = staticmethod(load) + + def save(self,filename): + """ Save the DownloadStartupConfig to disk. + @param filename An absolute Unicode filename + """ + # Called by any thread + f = open(filename,"wb") + pickle.dump(self.dlconfig,f) + f.close() + + # + # Copyable interface + # + def copy(self): + config = copy.copy(self.dlconfig) + return DownloadStartupConfig(config) + + +def get_default_dest_dir(): + """ Returns the default dir to save content to. +
 
+    * For Win32/MacOS: Desktop\TriblerDownloads
+    * For UNIX: 
+        If Desktop exists: Desktop\TriblerDownloads
+        else: Home\TriblerDownloads
+    
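A short usage sketch tying this together: override the default destination directory, cap the transfer rates, and persist the config with the pickle-based save()/load() defined above (paths and numbers are illustrative; UPLOAD/DOWNLOAD are assumed to come from Tribler.Core.simpledefs):

    import os
    from Tribler.Core.DownloadConfig import DownloadStartupConfig, get_default_dest_dir
    from Tribler.Core.simpledefs import UPLOAD, DOWNLOAD

    dscfg = DownloadStartupConfig()
    dscfg.set_dest_dir(os.path.join(get_default_dest_dir(), 'movies'))   # illustrative subdir
    dscfg.set_max_speed(UPLOAD, 64)        # KB/s
    dscfg.set_max_speed(DOWNLOAD, 512)     # KB/s

    cfgfile = os.path.join(get_default_dest_dir(), 'dscfg.pickle')       # illustrative path
    dscfg.save(cfgfile)
    restored = DownloadStartupConfig.load(cfgfile)
    assert restored.get_dest_dir() == dscfg.get_dest_dir()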
+ """ + uhome = get_home_dir() + + if sys.platform == 'win32': + tempdir = os.path.join(uhome, 'Desktop', 'TriblerDownloads') + elif sys.platform == 'darwin': + tempdir = os.path.join(uhome, 'Desktop', 'TriblerDownloads') + else: + tempdir = os.path.join(uhome, 'Desktop') + if not os.path.exists(tempdir): + tempdir = os.path.join(uhome, 'Desktop', 'TriblerDownloads') + else: + tempdir = os.path.join(uhome, 'TriblerDownloads') + return tempdir + diff --git a/tribler-mod/Tribler/Core/DownloadConfig.py.bak b/tribler-mod/Tribler/Core/DownloadConfig.py.bak new file mode 100644 index 0000000..eb489a0 --- /dev/null +++ b/tribler-mod/Tribler/Core/DownloadConfig.py.bak @@ -0,0 +1,827 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information +""" Controls how a TorrentDef is downloaded (rate, where on disk, etc.) """ + +import sys +import os +#import time +import copy +import pickle +from types import StringType + +from Tribler.Core.simpledefs import * +from Tribler.Core.defaults import * +from Tribler.Core.exceptions import * +from Tribler.Core.Base import * +from Tribler.Core.APIImplementation.miscutils import * +from Tribler.Core.LiveSourceAuthConfig import LiveSourceAuthConfig + +from Tribler.Core.osutils import * + +# importing get_home_dir from Tribler.Core.Session causes an +# ImportError... +def get_home_dir(): + from Tribler.Core.Session import get_home_dir as session_get_home_dir + return session_get_home_dir() + +class DownloadConfigInterface: + """ + (key,value) pair config of per-torrent runtime parameters, + e.g. destdir, file-allocation policy, etc. Also options to advocate + torrent, e.g. register in DHT, advertise via Buddycast. + + Use DownloadStartupConfig to manipulate download configs before download + startup time. This is just a parent class. + + cf. libtorrent torrent_handle + """ + def __init__(self,dlconfig=None): + + if dlconfig is not None: # copy constructor + self.dlconfig = dlconfig + return + + self.dlconfig = {} + + # Define the built-in default here + self.dlconfig.update(dldefaults) + + # Arno: Sparse as default reduces CPU usage + if sys.platform != 'win32': + self.set_alloc_type(DISKALLOC_SPARSE) + + self.dlconfig['saveas'] = get_default_dest_dir() + + + def set_dest_dir(self,path): + """ Sets the directory where to save this Download. + @param path A path of a directory. + """ + self.dlconfig['saveas'] = path + + def get_dest_dir(self): + """ Gets the directory where to save this Download. + """ + return self.dlconfig['saveas'] + + def set_video_event_callback(self,usercallback): + """ Download the torrent in Video-On-Demand mode or as live stream. + When a playback event occurs, the usercallback function will be + called, with the following list of arguments: +
+            Download,event,params
+        
+ In which event is a string, and params a dictionary. The following + events are supported: +
+        VODEVENT_START:
+            The params dictionary will contain the fields
+        
+                mimetype,stream,filename,length
+        
+            If the filename is set, the video can be read from there. If not,
+            the video can be read from the stream, which is a file-like object 
+            supporting the read(),seek(), and close() operations. The MIME type
+            of the video is given by "mimetype", the length of the stream in
+            bytes by "length" which may be None if the length is unknown (e.g.
+            when live streaming).
+        
+            To fetch a specific file from a multi-file torrent, use the 
+            set_selected_files() method. This method sets the mode to DLMODE_VOD 
+
+        VODEVENT_PAUSE:
+            The download engine would like video playback to be paused as the
+            data is not coming in fast enough / the data that is due is not
+            available yet.
+            
+            The params dictionary contains the fields
+            
+                autoresume
+                
+            "autoresume" indicates whether or not the Core will generate
+            a VODEVENT_RESUME when it is ready again, or that this is left
+            to the core user.
+                    
+        VODEVENT_RESUME:
+            The download engine would like video playback to resume.
+        
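The live-streaming counterpart of the VOD callback sketched earlier: declare the optional events and attach an external video source, following the set_video_events()/set_video_source() descriptions; the source path and callback are illustrative:

    from Tribler.Core.DownloadConfig import DownloadStartupConfig
    from Tribler.Core.simpledefs import VODEVENT_PAUSE, VODEVENT_RESUME

    dscfg = DownloadStartupConfig()
    dscfg.set_video_event_callback(my_vod_event_callback)   # callback from the earlier sketch
    # VODEVENT_START support is mandatory and implied; list only the optional events
    dscfg.set_video_events([VODEVENT_PAUSE, VODEVENT_RESUME])

    source = open('/path/to/live.ts', 'rb')   # illustrative file-like live source
    dscfg.set_video_source(source)            # authconfig defaults to no source authentication
    dscfg.set_video_ratelimit(500000)         # cap the read rate when the source is faster than live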
+ The usercallback should ignore events it does not support. + + The usercallback will be called by a popup thread which can be used + indefinitely (within reason) by the higher level code. + + @param usercallback A function with the above signature. + """ + self.dlconfig['mode'] = DLMODE_VOD + self.dlconfig['vod_usercallback'] = usercallback + + def set_video_events(self,events=[]): + """ Sets which events will be supported with the usercallback set + by set_video_event_callback. Supporting the VODEVENT_START event is + mandatory, and can therefore be omitted from the list. + + @param events A list of supported VODEVENT_* events. + """ + + # create a copy to avoid loosing the info + self.dlconfig['vod_userevents'] = events[:] + + def set_video_source(self,videosource,authconfig=None): + """ Provides the live video source for this torrent from an external + source. + + @param videosource A file-like object providing the live video stream + (i.e., supports read() and close()) + @param authconfig The key information for source authentication of + packets. See LiveSourceAuthConfig and TorrentDef.create_live_torrent() + """ + self.dlconfig['video_source'] = videosource + if authconfig is None: + authconfig = LiveSourceAuthConfig(LIVE_AUTHMETHOD_NONE) + self.dlconfig['video_source_authconfig'] = authconfig + + def set_video_ratelimit(self,ratelimit): + """ Sets a limit on the speed at which the video stream is to be read. + Useful when creating a live stream from file or any other faster-than-live + data stream. + + @param ratelimit The maximum speed at which to read from the stream (bps) + """ + self.dlconfig['video_ratelimit'] = ratelimit + + def set_mode(self,mode): + """ Sets the mode of this download. + @param mode DLMODE_NORMAL/DLMODE_VOD """ + self.dlconfig['mode'] = mode + + def set_live_aux_seeders(self,seeders): + """ Sets a number of live seeders, auxiliary servers that + get high priority at the source server to distribute its content + to others. + @param seeders A list of [IP address,port] lists. + """ + self.dlconfig['live_aux_seeders'] = seeders + + def get_mode(self): + """ Returns the mode of this download. + @return DLMODE_NORMAL/DLMODE_VOD """ + return self.dlconfig['mode'] + + def get_video_event_callback(self): + """ Returns the function that was passed to set_video_event_callback(). + @return A function. + """ + return self.dlconfig['vod_usercallback'] + + def get_video_events(self): + """ Returns the function that was passed to set_video_events(). + @return A list of events. + """ + return self.dlconfig['vod_userevents'] + + def get_video_source(self): + """ Returns the object that was passed to set_video_source(). + @return A file-like object. + """ + return self.dlconfig['video_source'] + + def get_video_ratelimit(self): + """ Returns the speed at which the video stream is read (bps). + @return An integer. + """ + return self.dlconfig['video_ratelimit'] + + def get_live_aux_seeders(self): + """ Returns the aux. live seeders set. + @return A list of [IP address,port] lists. """ + return self.dlconfig['live_aux_seeders'] + + + def set_selected_files(self,files): + """ Select which files in the torrent to download. The filenames must + be the names as they appear in the torrent def. Trivially, when the + torrent contains a file 'sjaak.avi' the files parameter must + be 'sjaak.avi'. 
When the torrent contains multiple files and is named + 'filecollection', the files parameter must be + os.path.join('filecollection','sjaak.avi') + + @param files Can be a single filename or a list of filenames (e.g. + ['harry.avi','sjaak.avi']). + """ + # TODO: can't check if files exists, don't have tdef here.... bugger + if type(files) == StringType: # convenience + files = [files] + + if self.dlconfig['mode'] == DLMODE_VOD and len(files) > 1: + raise ValueError("In Video-On-Demand mode only 1 file can be selected for download") + self.dlconfig['selected_files'] = files + + + def get_selected_files(self): + """ Returns the list of files selected for download. + @return A list of strings. """ + return self.dlconfig['selected_files'] + + + + # + # Common download performance parameters + # + def set_max_speed(self,direct,speed): + """ Sets the maximum upload or download speed for this Download. + @param direct The direction (UPLOAD/DOWNLOAD) + @param speed The speed in KB/s. + """ + if direct == UPLOAD: + self.dlconfig['max_upload_rate'] = speed + else: + self.dlconfig['max_download_rate'] = speed + + def get_max_speed(self,direct): + """ Returns the configured maximum speed. + Returns the speed in KB/s. """ + if direct == UPLOAD: + return self.dlconfig['max_upload_rate'] + else: + return self.dlconfig['max_download_rate'] + + def set_max_conns_to_initiate(self,nconns): + """ Sets the maximum number of connections to initiate for this + Download. + @param nconns A number of connections. + """ + self.dlconfig['max_initiate'] = nconns + + def get_max_conns_to_initiate(self): + """ Returns the configured maximum number of connections to initiate. + @return A number of connections. + """ + return self.dlconfig['max_initiate'] + + def set_max_conns(self,nconns): + """ Sets the maximum number of connections to connections for this + Download. + @param nconns A number of connections. + """ + self.dlconfig['max_connections'] = nconns + + def get_max_conns(self): + """ Returns the configured maximum number of connections. + @return A number of connections. + """ + return self.dlconfig['max_connections'] + + # + # Cooperative Download parameters + # + def get_coopdl_role(self): + """ Returns the role which the download plays in a cooperative download, +
+        - COOPDL_ROLE_COORDINATOR: other peers help this download
+        - COOPDL_ROLE_HELPER: this download helps another peer download faster.
+        
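On the running Download side, the coordinator role above corresponds to the ask_coopdl_helpers()/stop_coopdl_helpers() methods of Tribler.Core.Download shown earlier in this patch; a sketch with illustrative names (the PermID list would normally come from the peer database):

    def enlist_helpers(d, helper_permids):
        # d is a started Tribler.Core.Download acting as coordinator
        d.ask_coopdl_helpers(helper_permids)    # raises OperationNotPossibleWhenStoppedException if stopped

    def dismiss_helpers(d, helper_permids):
        d.stop_coopdl_helpers(helper_permids)

    # The current set of helpers is exposed through the DownloadState, see Download.py above.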
+ The default is coordinator, and it is set to helper by the + set_coopdl_coordinator_permid() method. + """ + return self.dlconfig['coopdl_role'] + + def set_coopdl_coordinator_permid(self,permid): + """ Calling this method makes this download a helper in a cooperative + download, helping the peer identified by the specified permid. This peer + acts as coordinator, telling this download which parts of the content + to download. + @param permid A PermID. + """ + self.dlconfig['coopdl_role'] = COOPDL_ROLE_HELPER + self.dlconfig['coopdl_coordinator_permid'] = permid + + def get_coopdl_coordinator_permid(self): + """ Returns the configured coordinator permid. + @return A PermID + """ + return self.dlconfig['coopdl_coordinator_permid'] + + # See DownloadRuntime config for adding, removing and getting list of + # helping peers. + + + # + # Advanced download parameters + # + def set_max_uploads(self,value): + """ Set the maximum number of uploads to allow at once. + @param value A number. + """ + self.dlconfig['max_uploads'] = value + + def get_max_uploads(self): + """ Returns the maximum number of uploads. + @return A number. """ + return self.dlconfig['max_uploads'] + + def set_keepalive_interval(self,value): + """ Set the number of seconds to pause between sending keepalives. + @param value An interval """ + self.dlconfig['keepalive_interval'] = value + + def get_keepalive_interval(self): + """ Returns the keepalive interval. + @return A number of seconds. """ + return self.dlconfig['keepalive_interval'] + + def set_download_slice_size(self,value): + """ Set how many bytes to query for per request. + @param value A number of bytes. + """ + self.dlconfig['download_slice_size'] = value + + def get_download_slice_size(self): + """ Returns the number of bytes to query per request. + @return A number of bytes. """ + return self.dlconfig['download_slice_size'] + + def set_upload_unit_size(self,value): + """ When limiting upload rate, how many bytes to send at a time. + @param value A number of bytes. """ + self.dlconfig['upload_unit_size'] = value + + def get_upload_unit_size(self): + """ Returns the set upload unit size. + @returns A number of bytes. + """ + return self.dlconfig['upload_unit_size'] + + def set_request_backlog(self,value): + """ Maximum number of requests to keep in a single pipe at once. + @param value A number of requests. + """ + self.dlconfig['request_backlog'] = value + + def get_request_backlog(self): + """ Returns the request backlog. + @return A number of requests. + """ + return self.dlconfig['request_backlog'] + + def set_max_message_length(self,value): + """ Maximum message-length prefix to accept over the wire - larger + values get the connection dropped. + @param value A number of bytes. + """ + self.dlconfig['max_message_length'] = value + + def get_max_message_length(self): + """ Returns the maximum message length that is accepted. + @return A number of bytes. + """ + return self.dlconfig['max_message_length'] + + def set_max_slice_length(self,value): + """ Maximum length slice to send to peers, larger requests are ignored. + @param value A number of bytes. + """ + self.dlconfig['max_slice_length'] = value + + def get_max_slice_length(self): + """ Returns the maximum slice length that is accepted. + @return A number of bytes. + """ + return self.dlconfig['max_slice_length'] + + def set_max_rate_period(self,value): + """ Maximum amount of time to guess the current rate estimate. + @param value A number of seconds. 
""" + self.dlconfig['max_rate_period'] = value + + def get_max_rate_period(self): + """ Returns the maximum rate period. + @return A number of seconds. + """ + return self.dlconfig['max_rate_period'] + + def set_upload_rate_fudge(self,value): + """ Time equivalent of writing to kernel-level TCP buffer, for rate + adjustment. + @param value A number of seconds. + """ + self.dlconfig['upload_rate_fudge'] = value + + def get_upload_rate_fudge(self): + """ Returns the upload rate fudge. + @return A number of seconds. + """ + return self.dlconfig['upload_rate_fudge'] + + def set_tcp_ack_fudge(self,value): + """ How much TCP ACK download overhead to add to upload rate + calculations. I.e. when a message is received we add X percent + of this message to our upload rate to account for TCP ACKs that + were sent during the reception process. (0 = disabled) + @param value A percentage + """ + self.dlconfig['tcp_ack_fudge'] = value + + def get_tcp_ack_fudge(self): + """ Returns the TCP ACK fudge. + @return A percentage. + """ + return self.dlconfig['tcp_ack_fudge'] + + def set_rerequest_interval(self,value): + """ Time to wait between requesting more peers from tracker. + @param value An interval in seconds. + """ + self.dlconfig['rerequest_interval'] = value + + def get_rerequest_interval(self): + """ Returns the tracker re-request interval. + @return A number of seconds. + """ + return self.dlconfig['rerequest_interval'] + + def set_min_peers(self,value): + """ Minimum number of peers to not do rerequesting. + @param value A number of peers. + """ + self.dlconfig['min_peers'] = value + + def get_min_peers(self): + """ Returns the minimum number of peers. + @return A number of peers. + """ + return self.dlconfig['min_peers'] + + def set_http_timeout(self,value): + """ Number of seconds to wait before assuming that a HTTP connection + has timed out. + @param value A number of seconds. + """ + self.dlconfig['http_timeout'] = value + + def get_http_timeout(self): + """ Returns the HTTP timeout. + @return A number of seconds. + """ + return self.dlconfig['http_timeout'] + + def set_check_hashes(self,value): + """ Whether to check the integrit of the data on disk using the + hashes from the torrent definition. + @param value Boolean + """ + self.dlconfig['check_hashes'] = value + + def get_check_hashes(self): + """ Returns whether to check hashes. + @return Boolean. """ + return self.dlconfig['check_hashes'] + + def set_alloc_type(self,value): + """ Set disk-allocation type: +
+        * DISKALLOC_NORMAL:  Allocates space as data is received
+        * DISKALLOC_BACKGROUND: Also adds space in the background
+        * DISKALLOC_PREALLOCATE: Reserves space up front (slow)
+        * DISKALLOC_SPARSE: Only for filesystems that support sparse files
+          (the default on UNIX)
+        
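The Download class added earlier in this patch documents a (when, getpeerlist) contract for set_state_callback(); a sketch of a periodic progress poller (DownloadState accessors such as get_progress() are assumed from the Tribler Core API and are not part of this patch):

    import sys

    def state_callback(ds):
        # ds is a DownloadState; get_progress() is an assumed accessor returning 0.0..1.0
        print >>sys.stderr, "download progress:", ds.get_progress()
        # (when, getpeerlist): poll again in 5 seconds without the peer list;
        # returning when <= 0.0 would stop the callbacks
        return (5.0, False)

    # d.set_state_callback(state_callback, getpeerlist=False)   # d is a started Download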
+ @param value A DISKALLOC_* policy. + """ + self.dlconfig['alloc_type'] = value + + def get_alloc_type(self): + """ Returns the disk-allocation policy. + @return DISKALLOC_* + """ + return self.dlconfig['alloc_type'] + + def set_alloc_rate(self,value): + """ Set the rate to allocate space at using background + allocation (DISKALLOC_BACKGROUND). + + @param value A rate in MB/s. + """ + self.dlconfig['alloc_rate'] = value + + def get_alloc_rate(self): + """ Returns the background disk-allocation rate. + @return A number of megabytes per second. + """ + return self.dlconfig['alloc_rate'] + + def set_buffer_reads(self,value): + """ Whether to buffer disk reads. + @param value Boolean + """ + self.dlconfig['buffer_reads'] = value + + def get_buffer_reads(self): + """ Returns whether to buffer reads. + @return Boolean. """ + return self.dlconfig['buffer_reads'] + + def set_write_buffer_size(self,value): + """ The maximum amount of space to use for buffering disk writes + (0 = disabled). + @param value A buffer size in megabytes. + """ + self.dlconfig['write_buffer_size'] = value + + def get_write_buffer_size(self): + """ Returns the write buffer size. + @return A number of megabytes. + """ + return self.dlconfig['write_buffer_size'] + + def set_breakup_seed_bitfield(self,value): + """ Whether to send an incomplete BITFIELD and then fills with HAVE + messages, in order to get around intellectually-challenged Internet + Service Provider manipulation. + @param value Boolean + """ + self.dlconfig['breakup_seed_bitfield'] = value + + def get_breakup_seed_bitfield(self): + """ Returns whether to send an incomplete BITFIELD message. + @return Boolean. """ + return self.dlconfig['breakup_seed_bitfield'] + + def set_snub_time(self,value): + """ Seconds to wait for data to come in over a connection before + assuming it's semi-permanently choked. + @param value A number of seconds. + """ + self.dlconfig['snub_time'] = value + + def get_snub_time(self): + """ Returns the snub time. + @return A number of seconds. + """ + return self.dlconfig['snub_time'] + + def set_rarest_first_cutoff(self,value): + """ Number of downloads at which to switch from random to rarest first. + @param value A number of downloads. + """ + self.dlconfig['rarest_first_cutoff'] = value + + def get_rarest_first_cutoff(self): + """ Returns the rarest first cutoff. + @return A number of downloads. + """ + return self.dlconfig['rarest_first_cutoff'] + + def set_rarest_first_priority_cutoff(self,value): + """ The number of peers which need to have a piece before other + partials take priority over rarest first policy. + @param value A number of peers. + """ + self.dlconfig['rarest_first_priority_cutoff'] = value + + def get_rarest_first_priority_cutoff(self): + """ Returns the rarest-first priority cutoff. + @return A number of peers. """ + return self.dlconfig['rarest_first_priority_cutoff'] + + def set_min_uploads(self,value): + """ The number of uploads to fill out to with extra optimistic unchokes. + @param value A number of uploads. + """ + self.dlconfig['min_uploads'] = value + + def get_min_uploads(self): + """ Returns the minimum number of uploads. + @return A number of uploads. """ + return self.dlconfig['min_uploads'] + + def set_max_files_open(self,value): + """ The maximum number of files to keep open at a time, 0 means no + limit. + @param value A number of files. + """ + self.dlconfig['max_files_open'] = value + + def get_max_files_open(self): + """ Returns the maximum number of open files. + @return A number of files. 
""" + return self.dlconfig['max_files_open'] + + def set_round_robin_period(self,value): + """ The number of seconds between the client's switching upload targets. + @param value A number of seconds. + """ + self.dlconfig['round_robin_period'] = value + + def get_round_robin_period(self): + """ Returns the round-robin period. + @return A number of seconds. """ + return self.dlconfig['round_robin_period'] + + def set_super_seeder(self,value): + """ whether to use special upload-efficiency-maximizing routines (only + for dedicated seeds). + @param value Boolean + """ + self.dlconfig['super_seeder'] = value + + def get_super_seeder(self): + """ Returns hether super seeding is enabled. + @return Boolean. """ + return self.dlconfig['super_seeder'] + + def set_security(self,value): + """ Whether to enable extra security features intended to prevent abuse, + such as checking for multiple connections from the same IP address. + @param value Boolean + """ + self.dlconfig['security'] = value + + def get_security(self): + """ Returns the security setting. + @return Boolean. """ + return self.dlconfig['security'] + + def set_auto_kick(self,value): + """ Whether to automatically kick/ban peers that send bad data. + @param value Boolean + """ + self.dlconfig['auto_kick'] = value + + def get_auto_kick(self): + """ Returns whether autokick is enabled. + @return Boolean. """ + return self.dlconfig['auto_kick'] + + def set_double_check_writes(self,value): + """ Whether to double-check data being written to the disk for errors + (may increase CPU load). + @param value Boolean + """ + self.dlconfig['double_check'] = value + + def get_double_check_writes(self): + """ Returns whether double-checking on writes is enabled. """ + return self.dlconfig['double_check'] + + def set_triple_check_writes(self,value): + """ Whether to thoroughly check data being written to the disk (may + slow disk access). + @param value Boolean """ + self.dlconfig['triple_check'] = value + + def get_triple_check_writes(self): + """ Returns whether triple-checking on writes is enabled. """ + return self.dlconfig['triple_check'] + + def set_lock_files(self,value): + """ Whether to lock files the Download is working with. + @param value Boolean """ + self.dlconfig['lock_files'] = value + + def get_lock_files(self): + """ Returns whether locking of files is enabled. """ + return self.dlconfig['lock_files'] + + def set_lock_while_reading(self,value): + """ Whether to lock access to files being read. + @param value Boolean + """ + self.dlconfig['lock_while_reading'] = value + + def get_lock_while_reading(self): + """ Returns whether locking of files for reading is enabled. + @return Boolean. """ + return self.dlconfig['lock_while_reading'] + + def set_auto_flush(self,value): + """ Minutes between automatic flushes to disk (0 = disabled). + @param value A number of minutes. + """ + self.dlconfig['auto_flush'] = value + + def get_auto_flush(self): + """ Returns the auto flush interval. + @return A number of minutes. """ + return self.dlconfig['auto_flush'] + + def set_exclude_ips(self,value): + """ Set a list of IP addresses to be excluded. + @param value A list of IP addresses in dotted notation. + """ + self.dlconfig['exclude_ips'] = value + + def get_exclude_ips(self): + """ Returns the list of excluded IP addresses. + @return A list of strings. 
""" + return self.dlconfig['exclude_ips'] + + def set_ut_pex_max_addrs_from_peer(self,value): + """ Maximum number of addresses to accept from peer via the uTorrent + Peer Exchange extension (0 = disable PEX) + @param value A number of IP addresses. + """ + self.dlconfig['ut_pex_max_addrs_from_peer'] = value + + def get_ut_pex_max_addrs_from_peer(self): + """ Returns the maximum number of IP addresses to accept from a peer + via ut_pex. + @return A number of addresses. + """ + return self.dlconfig['ut_pex_max_addrs_from_peer'] + + def set_same_nat_try_internal(self,value): + """ Whether to try to detect if a peer is behind the same NAT as + this Session and then establish a connection over the internal + network + @param value Boolean + """ + self.dlconfig['same_nat_try_internal'] = value + + def get_same_nat_try_internal(self): + """ Returns whether same NAT detection is enabled. + @return Boolean """ + return self.dlconfig['same_nat_try_internal'] + + def set_unchoke_bias_for_internal(self,value): + """ Amount to add to unchoke score for peers on the internal network. + @param value A number + """ + self.dlconfig['unchoke_bias_for_internal'] = value + + def get_unchoke_bias_for_internal(self): + """ Returns the bias for peers on the internal network. + @return A number + """ + return self.dlconfig['unchoke_bias_for_internal'] + + +class DownloadStartupConfig(DownloadConfigInterface,Serializable,Copyable): + """ + (key,value) pair config of per-torrent runtime parameters, + e.g. destdir, file-allocation policy, etc. Also options to advocate + torrent, e.g. register in DHT, advertise via Buddycast. + + cf. libtorrent torrent_handle + """ + def __init__(self,dlconfig=None): + """ Normal constructor for DownloadStartupConfig (copy constructor + used internally) """ + DownloadConfigInterface.__init__(self,dlconfig) + # + # Class method + # + def load(filename): + """ + Load a saved DownloadStartupConfig from disk. + + @param filename An absolute Unicode filename + @return DownloadStartupConfig object + """ + # Class method, no locking required + f = open(filename,"rb") + dlconfig = pickle.load(f) + dscfg = DownloadStartupConfig(dlconfig) + f.close() + return dscfg + load = staticmethod(load) + + def save(self,filename): + """ Save the DownloadStartupConfig to disk. + @param filename An absolute Unicode filename + """ + # Called by any thread + f = open(filename,"wb") + pickle.dump(self.dlconfig,f) + f.close() + + # + # Copyable interface + # + def copy(self): + config = copy.copy(self.dlconfig) + return DownloadStartupConfig(config) + + +def get_default_dest_dir(): + """ Returns the default dir to save content to. +
 
+    * For Win32/MacOS: Desktop\TriblerDownloads
+    * For UNIX: 
+        If Desktop exists: Desktop/TriblerDownloads
+        else: Home/TriblerDownloads
+    
+ """ + uhome = get_home_dir() + + if sys.platform == 'win32': + tempdir = os.path.join(uhome, 'Desktop', 'TriblerDownloads') + elif sys.platform == 'darwin': + tempdir = os.path.join(uhome, 'Desktop', 'TriblerDownloads') + else: + tempdir = os.path.join(uhome, 'Desktop') + if not os.path.exists(tempdir): + tempdir = os.path.join(uhome, 'Desktop', 'TriblerDownloads') + else: + tempdir = os.path.join(uhome, 'TriblerDownloads') + return tempdir + diff --git a/tribler-mod/Tribler/Core/DownloadState.py b/tribler-mod/Tribler/Core/DownloadState.py new file mode 100644 index 0000000..45d9af6 --- /dev/null +++ b/tribler-mod/Tribler/Core/DownloadState.py @@ -0,0 +1,339 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information +""" Contains a snapshot of the state of the Download at a specific point in time. """ + +from Tribler.Core.simpledefs import * +from Tribler.Core.defaults import * +from Tribler.Core.exceptions import * +from Tribler.Core.Base import * + +DEBUG = False + +class DownloadState(Serializable): + """ + Contains a snapshot of the state of the Download at a specific + point in time. Using a snapshot instead of providing live data and + protecting access via locking should be faster. + + cf. libtorrent torrent_status + """ + def __init__(self,download,status,error,progress,stats=None,filepieceranges=None,logmsgs=None,coopdl_helpers=[],coopdl_coordinator=None,peerid=None,videoinfo=None): + """ Internal constructor. + @param download The Download this state belongs too. + @param status The status of the Download (DLSTATUS_*) + @param progress The general progress of the Download. + @param stats The BT engine statistics for the Download. + @param filepieceranges The range of pieces that we are interested in. + The get_pieces_complete() returns only completeness information about + this range. This is used for playing a video in a multi-torrent file. + @param logmsgs A list of messages from the BT engine which may be of + @param peerid Our own peer id in the BT download swarm. + interest to the user. E.g. connection to tracker failed. + @param videoinfo Dictionary with video information. 
+ """ + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","DownloadState.__init__",`download.get_def().get_name()`,"status",status,"error",error,"progress",progress,"stats",`stats` + + self.download = download + self.filepieceranges = filepieceranges # NEED CONC CONTROL IF selected_files RUNTIME SETABLE + self.logmsgs = logmsgs + self.coopdl_helpers = coopdl_helpers + self.coopdl_coordinator = coopdl_coordinator + self.peerid = peerid + self.videoinfo = videoinfo + if stats is None: + # No info available yet from download engine + self.error = error # readonly access + self.progress = progress + if self.error is not None: + self.status = DLSTATUS_STOPPED_ON_ERROR + else: + self.status = status + self.stats = None + elif error is not None: + self.error = error # readonly access + self.progress = 0.0 # really want old progress + self.status = DLSTATUS_STOPPED_ON_ERROR + self.stats = None + elif status is not None: + # For HASHCHECKING and WAITING4HASHCHECK + self.error = error + self.status = status + if self.status == DLSTATUS_WAITING4HASHCHECK: + self.progress = 0.0 + else: + self.progress = stats['frac'] + self.stats = None + else: + # Copy info from stats + self.error = None + self.progress = stats['frac'] + if stats['frac'] == 1.0: + self.status = DLSTATUS_SEEDING + else: + self.status = DLSTATUS_DOWNLOADING + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","STATS IS",stats + + # Safe to store the stats dict. The stats dict is created per + # invocation of the BT1Download returned statsfunc and contains no + # pointers. + # + self.stats = stats + + # for pieces complete + statsobj = self.stats['stats'] + if self.filepieceranges is None: + self.haveslice = statsobj.have # is copy of network engine list + else: + # Show only pieces complete for the selected ranges of files + totalpieces =0 + for t,tl,f in self.filepieceranges: + diff = tl-t + totalpieces += diff + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","DownloadState: get_pieces_complete",totalpieces + + haveslice = [False] * totalpieces + haveall = True + index = 0 + for t,tl,f in self.filepieceranges: + for piece in range(t,tl): + haveslice[index] = statsobj.have[piece] + if haveall and haveslice[index] == False: + haveall = False + index += 1 + self.haveslice = haveslice + if haveall and len(self.filepieceranges) > 0: + # we have all pieces of the selected files + self.status = DLSTATUS_SEEDING + self.progress = 1.0 + + def get_peerid(self): + """ Returns our own peer id in this swarm, or None if unknown. + @return String or None. + """ + return self.peerid + + def get_videoinfo(self): + """ Returns information about the video being streamed, or {} if unknown. + @return Dict. + """ + if not self.videoinfo: + return {} + else: + return self.videoinfo + + def get_download(self): + """ Returns the Download object of which this is the state """ + return self.download + + def get_progress(self): + """ The general progress of the Download as a percentage. When status is + * DLSTATUS_HASHCHECKING it is the percentage of already downloaded + content checked for integrity. + * DLSTATUS_DOWNLOADING/SEEDING it is the percentage downloaded. + @return Progress as a float (0..1). + """ + return self.progress + + def get_status(self): + """ Returns the status of the torrent. + @return DLSTATUS_* """ + return self.status + + def get_error(self): + """ Returns the Exception that caused the download to be moved to + DLSTATUS_STOPPED_ON_ERROR status. 
+ @return Exception + """ + return self.error + + # + # Details + # + def get_current_speed(self,direct): + """ + Returns the current up or download speed. + @return The speed in KB/s, as float. + """ + if self.stats is None: + return 0.0 + if direct == UPLOAD: + return self.stats['up']/1024.0 + else: + return self.stats['down']/1024.0 + + def get_eta(self): + """ + Returns the estimated time to finish of download. + @return The time in ?, as ?. + """ + if self.stats is None: + return 0.0 + else: + return self.stats['time'] + + def get_num_peers(self): + """ + Returns the download's number of active connections. This is used + to see if there is any progress when non-fatal errors have occured + (e.g. tracker timeout). + @return Boolean. + """ + if self.stats is None: + return 0 + + # Determine if we need statsobj to be requested, same as for spew + statsobj = self.stats['stats'] + return statsobj.numSeeds+statsobj.numPeers + + def get_num_seeds_peers(self): + """ + Returns the sum of the number of seeds and peers. This function + works only if the Download.set_state_callback() / + Session.set_download_states_callback() was called with the getpeerlist + parameter set to True, otherwise returns (None,None) + @return A tuple (num seeds, num peers) + """ + if self.stats is None or self.stats['spew'] is None: + return (None,None) + + total = len(self.stats['spew']) + seeds = len([i for i in self.stats['spew'] if i['completed'] == 1.0]) + return seeds, total-seeds + + def get_pieces_complete(self): + """ Returns a list of booleans indicating whether we have completely + received that piece of the content. The list of pieces for which + we provide this info depends on which files were selected for download + using DownloadStartupConfig.set_selected_files(). + @return A list of booleans + """ + if self.stats is None: + return [] + else: + return self.haveslice + + def get_vod_prebuffering_progress(self): + """ Returns the percentage of prebuffering for Video-On-Demand already + completed. + @return A float (0..1) """ + if self.stats is None: + if self.status == DLSTATUS_STOPPED and self.progress == 1.0: + return 1.0 + else: + return 0.0 + else: + return self.stats['vod_prebuf_frac'] + + def is_vod(self): + """ Returns if this download is currently in vod mode + + @return A Boolean""" + if self.stats is None: + return False + else: + return self.stats['vod'] + + def get_vod_playable(self): + """ Returns whether or not the Download started in Video-On-Demand + mode has sufficient prebuffer and download speed to be played out + to the user. + @return Boolean. + """ + if self.stats is None: + return False + else: + return self.stats['vod_playable'] + + def get_vod_playable_after(self): + """ Returns the estimated time until the Download started in Video-On-Demand + mode can be started to play out to the user. + @return A number of seconds. + """ + if self.stats is None: + return float(2 ** 31) + else: + return self.stats['vod_playable_after'] + +# def get_vod_duration(self): +# """ Returns video total duration if vod system managed to detect it +# +# @return int duration in seconds or None if unknown +# """ +# if self.stats is not None: +# return self.stats['vod_duration'] + + def get_vod_stats(self): + """ Returns a dictionary of collected VOD statistics. The keys contained are: +
+        'played' = number of pieces played
+        'late' = number of pieces that arrived after they were due
+        'dropped' = number of pieces lost
+        'stall' = estimation of time the player stalled, waiting for pieces (seconds)
+        'pos' = playback position
+        'prebuf' = amount of prebuffering time that was needed (seconds,
+                   set when playback starts)
+        
+        , or no keys if no VOD is in progress.
+        @return Dict.
+        """
+        if self.stats is None:
+            return {}
+        else:
+            return self.stats['vod_stats']
+
+
+
+    def get_log_messages(self):
+        """ Returns the last 10 logged non-fatal error messages.
+        @return A list of (time,msg) tuples. Time is Python time() format. """
+        if self.logmsgs is None:
+            return []
+        else:
+            return self.logmsgs
+
+    def get_peerlist(self):
+        """ Returns a list of dictionaries, one for each connected peer
+        containing the statistics for that peer. In particular, the
+        dictionary contains the keys:
+
+        'id' = PeerID or 'http seed'
+        'ip' = IP address as string or URL of httpseed
+        'optimistic' = True/False
+        'direction' = 'L'/'R' (outgoing/incoming)
+        'uprate' = Upload rate in KB/s
+        'uinterested' = Upload Interested: True/False
+        'uchoked' = Upload Choked: True/False
+        'downrate' = Download rate in KB/s
+        'dinterested' = Download interested: True/False
+        'dchoked' = Download choked: True/False
+        'snubbed' = Download snubbed: True/False
+        'utotal' = Total uploaded from peer in KB
+        'dtotal' = Total downloaded from peer in KB
+        'completed' = Fraction of download completed by peer (0-1.0) 
+        'speed' = The peer's current total download speed (estimated)
+        
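For illustration, a sketch of a state callback that consumes such a snapshot, including the per-peer dictionaries listed above; the registration line and the (delay, want-peerlist) return convention are assumptions, shown only to make the flow concrete:

import sys

def state_callback(ds):
    # ds is a DownloadState snapshot
    print >>sys.stderr, "status", ds.get_status(), "progress", ds.get_progress()
    for peer in ds.get_peerlist():
        print >>sys.stderr, peer['ip'], "up", peer['uprate'], "down", peer['downrate']
    return (30.0, True)   # assumed (delay, want-peerlist) return convention

# hypothetical registration on a Download object d:
# d.set_state_callback(state_callback, getpeerlist=True)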
+ """ + if self.stats is None or 'spew' not in self.stats: + return [] + else: + return self.stats['spew'] + + + def get_coopdl_helpers(self): + """ Returns the peers currently helping. + @return A list of PermIDs. + """ + if self.coopdl_helpers is None: + return [] + else: + return self.coopdl_helpers + + def get_coopdl_coordinator(self): + """ Returns the permid of the coordinator when helping that peer + in a cooperative download + @return A PermID. + """ + return self.coopdl_coordinator + \ No newline at end of file diff --git a/tribler-mod/Tribler/Core/DownloadState.py.bak b/tribler-mod/Tribler/Core/DownloadState.py.bak new file mode 100644 index 0000000..fa22df7 --- /dev/null +++ b/tribler-mod/Tribler/Core/DownloadState.py.bak @@ -0,0 +1,338 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information +""" Contains a snapshot of the state of the Download at a specific point in time. """ + +from Tribler.Core.simpledefs import * +from Tribler.Core.defaults import * +from Tribler.Core.exceptions import * +from Tribler.Core.Base import * + +DEBUG = False + +class DownloadState(Serializable): + """ + Contains a snapshot of the state of the Download at a specific + point in time. Using a snapshot instead of providing live data and + protecting access via locking should be faster. + + cf. libtorrent torrent_status + """ + def __init__(self,download,status,error,progress,stats=None,filepieceranges=None,logmsgs=None,coopdl_helpers=[],coopdl_coordinator=None,peerid=None,videoinfo=None): + """ Internal constructor. + @param download The Download this state belongs too. + @param status The status of the Download (DLSTATUS_*) + @param progress The general progress of the Download. + @param stats The BT engine statistics for the Download. + @param filepieceranges The range of pieces that we are interested in. + The get_pieces_complete() returns only completeness information about + this range. This is used for playing a video in a multi-torrent file. + @param logmsgs A list of messages from the BT engine which may be of + @param peerid Our own peer id in the BT download swarm. + interest to the user. E.g. connection to tracker failed. + @param videoinfo Dictionary with video information. 
+ """ + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","DownloadState.__init__",`download.get_def().get_name()`,"status",status,"error",error,"progress",progress,"stats",`stats` + + self.download = download + self.filepieceranges = filepieceranges # NEED CONC CONTROL IF selected_files RUNTIME SETABLE + self.logmsgs = logmsgs + self.coopdl_helpers = coopdl_helpers + self.coopdl_coordinator = coopdl_coordinator + self.peerid = peerid + self.videoinfo = videoinfo + if stats is None: + # No info available yet from download engine + self.error = error # readonly access + self.progress = progress + if self.error is not None: + self.status = DLSTATUS_STOPPED_ON_ERROR + else: + self.status = status + self.stats = None + elif error is not None: + self.error = error # readonly access + self.progress = 0.0 # really want old progress + self.status = DLSTATUS_STOPPED_ON_ERROR + self.stats = None + elif status is not None: + # For HASHCHECKING and WAITING4HASHCHECK + self.error = error + self.status = status + if self.status == DLSTATUS_WAITING4HASHCHECK: + self.progress = 0.0 + else: + self.progress = stats['frac'] + self.stats = None + else: + # Copy info from stats + self.error = None + self.progress = stats['frac'] + if stats['frac'] == 1.0: + self.status = DLSTATUS_SEEDING + else: + self.status = DLSTATUS_DOWNLOADING + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","STATS IS",stats + + # Safe to store the stats dict. The stats dict is created per + # invocation of the BT1Download returned statsfunc and contains no + # pointers. + # + self.stats = stats + + # for pieces complete + statsobj = self.stats['stats'] + if self.filepieceranges is None: + self.haveslice = statsobj.have # is copy of network engine list + else: + # Show only pieces complete for the selected ranges of files + totalpieces =0 + for t,tl,f in self.filepieceranges: + diff = tl-t + totalpieces += diff + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","DownloadState: get_pieces_complete",totalpieces + + haveslice = [False] * totalpieces + haveall = True + index = 0 + for t,tl,f in self.filepieceranges: + for piece in range(t,tl): + haveslice[index] = statsobj.have[piece] + if haveall and haveslice[index] == False: + haveall = False + index += 1 + self.haveslice = haveslice + if haveall and len(self.filepieceranges) > 0: + # we have all pieces of the selected files + self.status = DLSTATUS_SEEDING + self.progress = 1.0 + + def get_peerid(self): + """ Returns our own peer id in this swarm, or None if unknown. + @return String or None. + """ + return self.peerid + + def get_videoinfo(self): + """ Returns information about the video being streamed, or {} if unknown. + @return Dict. + """ + if not self.videoinfo: + return {} + else: + return self.videoinfo + + def get_download(self): + """ Returns the Download object of which this is the state """ + return self.download + + def get_progress(self): + """ The general progress of the Download as a percentage. When status is + * DLSTATUS_HASHCHECKING it is the percentage of already downloaded + content checked for integrity. + * DLSTATUS_DOWNLOADING/SEEDING it is the percentage downloaded. + @return Progress as a float (0..1). + """ + return self.progress + + def get_status(self): + """ Returns the status of the torrent. + @return DLSTATUS_* """ + return self.status + + def get_error(self): + """ Returns the Exception that caused the download to be moved to + DLSTATUS_STOPPED_ON_ERROR status. 
+ @return Exception + """ + return self.error + + # + # Details + # + def get_current_speed(self,direct): + """ + Returns the current up or download speed. + @return The speed in KB/s, as float. + """ + if self.stats is None: + return 0.0 + if direct == UPLOAD: + return self.stats['up']/1024.0 + else: + return self.stats['down']/1024.0 + + def get_eta(self): + """ + Returns the estimated time to finish of download. + @return The time in ?, as ?. + """ + if self.stats is None: + return 0.0 + else: + return self.stats['time'] + + def get_num_peers(self): + """ + Returns the download's number of active connections. This is used + to see if there is any progress when non-fatal errors have occured + (e.g. tracker timeout). + @return Boolean. + """ + if self.stats is None: + return 0 + + # Determine if we need statsobj to be requested, same as for spew + statsobj = self.stats['stats'] + return statsobj.numSeeds+statsobj.numPeers + + def get_num_seeds_peers(self): + """ + Returns the sum of the number of seeds and peers. This function + works only if the Download.set_state_callback() / + Session.set_download_states_callback() was called with the getpeerlist + parameter set to True, otherwise returns (None,None) + @return A tuple (num seeds, num peers) + """ + if self.stats is None or self.stats['spew'] is None: + return (None,None) + + total = len(self.stats['spew']) + seeds = len([i for i in self.stats['spew'] if i['completed'] == 1.0]) + return seeds, total-seeds + + def get_pieces_complete(self): + """ Returns a list of booleans indicating whether we have completely + received that piece of the content. The list of pieces for which + we provide this info depends on which files were selected for download + using DownloadStartupConfig.set_selected_files(). + @return A list of booleans + """ + if self.stats is None: + return [] + else: + return self.haveslice + + def get_vod_prebuffering_progress(self): + """ Returns the percentage of prebuffering for Video-On-Demand already + completed. + @return A float (0..1) """ + if self.stats is None: + if self.status == DLSTATUS_STOPPED and self.progress == 1.0: + return 1.0 + else: + return 0.0 + else: + return self.stats['vod_prebuf_frac'] + + def is_vod(self): + """ Returns if this download is currently in vod mode + + @return A Boolean""" + if self.stats is None: + return False + else: + return self.stats['vod'] + + def get_vod_playable(self): + """ Returns whether or not the Download started in Video-On-Demand + mode has sufficient prebuffer and download speed to be played out + to the user. + @return Boolean. + """ + if self.stats is None: + return False + else: + return self.stats['vod_playable'] + + def get_vod_playable_after(self): + """ Returns the estimated time until the Download started in Video-On-Demand + mode can be started to play out to the user. + @return A number of seconds. + """ + if self.stats is None: + return float(2 ** 31) + else: + return self.stats['vod_playable_after'] + +# def get_vod_duration(self): +# """ Returns video total duration if vod system managed to detect it +# +# @return int duration in seconds or None if unknown +# """ +# if self.stats is not None: +# return self.stats['vod_duration'] + + def get_vod_stats(self): + """ Returns a dictionary of collected VOD statistics. The keys contained are: +
+        'played' = number of pieces played
+        'late' = number of pieces that arrived after they were due
+        'dropped' = number of pieces lost
+        'stall' = estimation of time the player stalled, waiting for pieces (seconds)
+        'pos' = playback position
+        'prebuf' = amount of prebuffering time that was needed (seconds,
+                   set when playback starts)
+        
+        , or no keys if no VOD is in progress.
+        @return Dict.
+        """
+        if self.stats is None:
+            return {}
+        else:
+            return self.stats['vod_stats']
+
+
+
+    def get_log_messages(self):
+        """ Returns the last 10 logged non-fatal error messages.
+        @return A list of (time,msg) tuples. Time is Python time() format. """
+        if self.logmsgs is None:
+            return []
+        else:
+            return self.logmsgs
+
+    def get_peerlist(self):
+        """ Returns a list of dictionaries, one for each connected peer
+        containing the statistics for that peer. In particular, the
+        dictionary contains the keys:
+
+        'id' = PeerID or 'http seed'
+        'ip' = IP address as string or URL of httpseed
+        'optimistic' = True/False
+        'direction' = 'L'/'R' (outgoing/incoming)
+        'uprate' = Upload rate in KB/s
+        'uinterested' = Upload Interested: True/False
+        'uchoked' = Upload Choked: True/False
+        'downrate' = Download rate in KB/s
+        'dinterested' = Download interested: True/False
+        'dchoked' = Download choked: True/False
+        'snubbed' = Download snubbed: True/False
+        'utotal' = Total uploaded from peer in KB
+        'dtotal' = Total downloaded from peer in KB
+        'completed' = Fraction of download completed by peer (0-1.0) 
+        'speed' = The peer's current total download speed (estimated)
+        
+ """ + if self.stats is None or 'spew' not in self.stats: + return [] + else: + return self.stats['spew'] + + + def get_coopdl_helpers(self): + """ Returns the peers currently helping. + @return A list of PermIDs. + """ + if self.coopdl_helpers is None: + return [] + else: + return self.coopdl_helpers + + def get_coopdl_coordinator(self): + """ Returns the permid of the coordinator when helping that peer + in a cooperative download + @return A PermID. + """ + return self.coopdl_coordinator + \ No newline at end of file diff --git a/tribler-mod/Tribler/Core/LiveSourceAuthConfig.py b/tribler-mod/Tribler/Core/LiveSourceAuthConfig.py new file mode 100644 index 0000000..7d2cdd7 --- /dev/null +++ b/tribler-mod/Tribler/Core/LiveSourceAuthConfig.py @@ -0,0 +1,61 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information + +from Tribler.Core.simpledefs import * +from Tribler.Core.Overlay.permid import generate_keypair,read_keypair,save_keypair + +class LiveSourceAuthConfig: + """ Base class for configuring authentication methods for data from the + source in live streaming. + """ + def __init__(self,authmethod): + self.authmethod = authmethod + + def get_method(self): + return self.authmethod + + +class ECDSALiveSourceAuthConfig(LiveSourceAuthConfig): + """ Class for configuring the ECDSA authentication method for data from the + source in live streaming. The ECDSA method adds a ECDSA signature to each + piece that is generated. + """ + def __init__(self,keypair=None): + """ Constructor for LIVE_AUTHMETHOD_ECDSA authentication of the + live source. If no keypair is specified, one is generated. + + @param keypair (Optional) An M2Crypto.EC keypair. + """ + LiveSourceAuthConfig.__init__(self,LIVE_AUTHMETHOD_ECDSA) + if keypair is None: + self.keypair = generate_keypair() + else: + self.keypair = keypair + + def get_pubkey(self): + return self.keypair.pub().get_der() + + def get_keypair(self): + return self.keypair + + # + # Class method + # + def load(filename): + """ + Load a saved ECDSALiveSourceAuthConfig from disk. + + @param filename An absolute Unicode filename + @return ECDSALiveSourceAuthConfig object + """ + keypair = read_keypair(filename) + return ECDSALiveSourceAuthConfig(keypair) + load = staticmethod(load) + + def save(self,filename): + """ Save the ECDSALiveSourceAuthConfig to disk. + @param filename An absolute Unicode filename + """ + save_keypair(self.keypair,filename) + \ No newline at end of file diff --git a/tribler-mod/Tribler/Core/LiveSourceAuthConfig.py.bak b/tribler-mod/Tribler/Core/LiveSourceAuthConfig.py.bak new file mode 100644 index 0000000..d81cc91 --- /dev/null +++ b/tribler-mod/Tribler/Core/LiveSourceAuthConfig.py.bak @@ -0,0 +1,60 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information + +from Tribler.Core.simpledefs import * +from Tribler.Core.Overlay.permid import generate_keypair,read_keypair,save_keypair + +class LiveSourceAuthConfig: + """ Base class for configuring authentication methods for data from the + source in live streaming. + """ + def __init__(self,authmethod): + self.authmethod = authmethod + + def get_method(self): + return self.authmethod + + +class ECDSALiveSourceAuthConfig(LiveSourceAuthConfig): + """ Class for configuring the ECDSA authentication method for data from the + source in live streaming. The ECDSA method adds a ECDSA signature to each + piece that is generated. 
+ """ + def __init__(self,keypair=None): + """ Constructor for LIVE_AUTHMETHOD_ECDSA authentication of the + live source. If no keypair is specified, one is generated. + + @param keypair (Optional) An M2Crypto.EC keypair. + """ + LiveSourceAuthConfig.__init__(self,LIVE_AUTHMETHOD_ECDSA) + if keypair is None: + self.keypair = generate_keypair() + else: + self.keypair = keypair + + def get_pubkey(self): + return self.keypair.pub().get_der() + + def get_keypair(self): + return self.keypair + + # + # Class method + # + def load(filename): + """ + Load a saved ECDSALiveSourceAuthConfig from disk. + + @param filename An absolute Unicode filename + @return ECDSALiveSourceAuthConfig object + """ + keypair = read_keypair(filename) + return ECDSALiveSourceAuthConfig(keypair) + load = staticmethod(load) + + def save(self,filename): + """ Save the ECDSALiveSourceAuthConfig to disk. + @param filename An absolute Unicode filename + """ + save_keypair(self.keypair,filename) + \ No newline at end of file diff --git a/tribler-mod/Tribler/Core/Merkle/__init__.py b/tribler-mod/Tribler/Core/Merkle/__init__.py new file mode 100644 index 0000000..b7ef832 --- /dev/null +++ b/tribler-mod/Tribler/Core/Merkle/__init__.py @@ -0,0 +1,3 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information diff --git a/tribler-mod/Tribler/Core/Merkle/__init__.py.bak b/tribler-mod/Tribler/Core/Merkle/__init__.py.bak new file mode 100644 index 0000000..395f8fb --- /dev/null +++ b/tribler-mod/Tribler/Core/Merkle/__init__.py.bak @@ -0,0 +1,2 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information diff --git a/tribler-mod/Tribler/Core/Merkle/merkle.py b/tribler-mod/Tribler/Core/Merkle/merkle.py new file mode 100644 index 0000000..b7ba79e --- /dev/null +++ b/tribler-mod/Tribler/Core/Merkle/merkle.py @@ -0,0 +1,269 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information + +from math import log,pow,floor +from sha import sha +import sys + +DEBUG = False + +# External classes + +class MerkleTree: + + def __init__(self,piece_size,total_length,root_hash=None,hashes=None): + """ + Create a Merkle hash tree + + When creating a .torrent: + root_hash is None and hashes is not None + When creating an initial seeder: + root_hash is None and hashes is not None + (root_hash is None to allow comparison with the calculated + root hash and the one in the .torrent) + When creating a downloader: + root_hash is not None and hashes is None + """ + self.npieces = len2npieces(piece_size,total_length) + self.treeheight = get_tree_height(self.npieces) + self.tree = create_tree(self.treeheight) + if hashes is None: + self.root_hash = root_hash + else: + fill_tree(self.tree,self.treeheight,self.npieces,hashes) + # root_hash is None during .torrent generation + if root_hash is None: + self.root_hash = self.tree[0] + else: + raise AssertionError, "merkle: if hashes not None, root_hash must be" + + def get_root_hash(self): + return self.root_hash + + def compare_root_hashes(self,other): + return self.root_hash == other + + def get_hashes_for_piece(self,index): + return get_hashes_for_piece(self.tree,self.treeheight,index) + + def check_hashes(self,hashlist): + return check_tree_path(self.root_hash,self.treeheight,hashlist) + + def update_hash_admin(self,hashlist,piece_hashes): + update_hash_admin(hashlist,self.tree,self.treeheight,piece_hashes) + + def get_piece_hashes(self): + """ + Get the pieces' hashes from the bottom of the hash tree. 
Used during + a graceful restart of a client that already downloaded stuff. + """ + return get_piece_hashes(self.tree,self.treeheight,self.npieces) + +def create_fake_hashes(info): + total_length = calc_total_length(info) + npieces = len2npieces(info['piece length'],total_length) + return ['\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' ] * npieces + + +# Internal functions +# Design choice: all algoritmics have been returned into stateless functions, +# i.e. they operate on the input parameters only. This to keep them extremely +# clear. + +def len2npieces(piece_size,total_length): + npieces = total_length / piece_size + if piece_size*npieces < total_length: + npieces += 1 + return npieces + + +def calc_total_length(info): + # Merkle: Calculate total length from .torrent info + if info.has_key('length'): + return info['length'] + # multi-file torrent + files = info['files'] + total_length = 0 + for i in range(0,len(files)): + total_length += files[i]['length'] + return total_length + + +def get_tree_height(npieces): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","merkle: number of pieces is",npieces + height = log(npieces,2) + if height - floor(height) > 0.0: + height = int(height)+1 + else: + height = int(height) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","merkle: tree height is",height + return height + +def create_tree(height): + # Create tree that has enough leaves to hold all hashes + treesize = int(pow(2,height+1)-1) # subtract unused tail + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","merkle: treesize",treesize + tree = ['\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' ] * treesize + return tree + +def fill_tree(tree,height,npieces,hashes): + # 1. Fill bottom of tree with hashes + startoffset = int(pow(2,height)-1) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","merkle: bottom of tree starts at",startoffset + for offset in range(startoffset,startoffset+npieces): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","merkle: copying",offset + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","merkle: hashes[",offset-startoffset,"]=",str(hashes[offset-startoffset]) + tree[offset] = hashes[offset-startoffset] + # 2. Note that unused leaves are NOT filled. It may be a good idea to fill + # them as hashing 0 values may create a security problem. However, the + # filler values would have to be known to any initial seeder, otherwise it + # will not be able build the same hash tree as the other initial seeders. + # Assume anyone should be able to autonomously become a seeder, the filler + # must be public info. I don't know whether having public info as filler + # instead of 0s is any safer, cryptographically speaking. Hence, we stick + # with 0 for the moment + + # 3. 
Calculate higher level hashes from leaves + for level in range(height,0,-1): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","merkle: calculating level",level + for offset in range(int(pow(2,level)-1),int(pow(2,level+1)-2),2): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","merkle: data offset",offset + [ parentstartoffset, parentoffset ] = get_parent_offset(offset,level) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","merkle: parent offset",parentoffset + data = tree[offset]+tree[offset+1] + digester = sha() + digester.update(data) + digest = digester.digest() + tree[parentoffset] = digest + #for offset in range(0,treesize-1): + # print offset,"HASH",str(tree[offset]) + return tree + + +def get_hashes_for_piece(tree,height,index): + startoffset = int(pow(2,height)-1) + myoffset = startoffset+index + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","merkle: myoffset",myoffset + # 1. Add piece's own hash + hashlist = [ [myoffset,tree[myoffset]] ] + # 2. Add hash of piece's sibling, left or right + if myoffset % 2 == 0: + siblingoffset = myoffset-1 + else: + siblingoffset = myoffset+1 + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","merkle: siblingoffset",siblingoffset + if siblingoffset != -1: + hashlist.append([siblingoffset,tree[siblingoffset]]) + # 3. Add hashes of uncles + uncleoffset = myoffset + for level in range(height,0,-1): + uncleoffset = get_uncle_offset(uncleoffset,level) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","merkle: uncleoffset",uncleoffset + hashlist.append( [uncleoffset,tree[uncleoffset]] ) + return hashlist + + +def check_tree_path(root_hash,height,hashlist): + """ + The hashes should be in the right order in the hashlist, otherwise + the peer will be kicked. The hashlist parameter is assumed to be + of the right type, and contain values of the right type as well. + The exact values should be checked for validity here. 
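To make the hash-path exchange concrete, a small sketch with a hypothetical 4-piece file and dummy piece hashes (Python 2, like the module itself):

from Tribler.Core.Merkle.merkle import MerkleTree

piece_size = 2 ** 18
total_length = 4 * piece_size                    # exactly 4 pieces
piece_hashes = [chr(i) * 20 for i in range(4)]   # dummy 20-byte SHA-1 digests

seeder = MerkleTree(piece_size, total_length, hashes=piece_hashes)
root = seeder.get_root_hash()

# what the seeder sends along with piece 2: its own hash, its sibling's hash
# and the uncle hashes on the way up to the root
hashlist = seeder.get_hashes_for_piece(2)

# a downloader that only knows the root hash can verify the path
downloader = MerkleTree(piece_size, total_length, root_hash=root)
assert downloader.check_hashes(hashlist)
downloader.update_hash_admin(hashlist, [None] * 4)   # remember the verified piece hashes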
+ """ + maxoffset = int(pow(2,height+1)-2) + mystartoffset = int(pow(2,height)-1) + i=0 + a = hashlist[i] + if a[0] < 0 or a[0] > maxoffset: + return False + i += 1 + b = hashlist[i] + if b[0] < 0 or b[0] > maxoffset: + return False + i += 1 + myindex = a[0]-mystartoffset + sibindex = b[0]-mystartoffset + for level in range(height,0,-1): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","merkle: checking level",level + a = check_fork(a,b,level) + b = hashlist[i] + if b[0] < 0 or b[0] > maxoffset: + return False + i += 1 + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","merkle: ROOT HASH",`str(root_hash)`,"==",`str(a[1])` + if a[1] == root_hash: + return True + else: + return False + +def update_hash_admin(hashlist,tree,height,hashes): + mystartoffset = int(pow(2,height)-1) + for i in range(0,len(hashlist)): + if i < 2: + # me and sibling real hashes of piece data, save them + index = hashlist[i][0]-mystartoffset + # ignore siblings that are just tree filler + if index < len(hashes): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","merkle: update_hash_admin: saving hash of",index + hashes[index] = hashlist[i][1] + # put all hashes in tree, such that we incrementally learn it + # and can pass them on to others + tree[hashlist[i][0]] = hashlist[i][1] + + +def check_fork(a,b,level): + myoffset = a[0] + siblingoffset = b[0] + if myoffset > siblingoffset: + data = b[1]+a[1] + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","merkle: combining",siblingoffset,myoffset + else: + data = a[1]+b[1] + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","merkle: combining",myoffset,siblingoffset + digester = sha() + digester.update(data) + digest = digester.digest() + [parentstartoffset, parentoffset ] = get_parent_offset(myoffset,level-1) + return [parentoffset,digest] + +def get_parent_offset(myoffset,level): + parentstartoffset = int(pow(2,level)-1) + mystartoffset = int(pow(2,level+1)-1) + parentoffset = parentstartoffset + (myoffset-mystartoffset)/2 + return [parentstartoffset, parentoffset] + + +def get_uncle_offset(myoffset,level): + if level == 1: + return 0 + [parentstartoffset,parentoffset ] = get_parent_offset(myoffset,level-1) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","merkle: parent offset",parentoffset + parentindex = parentoffset-parentstartoffset + if parentoffset % 2 == 0: + uncleoffset = parentoffset-1 + else: + uncleoffset = parentoffset+1 + return uncleoffset + +def get_piece_hashes(tree,height,npieces): + startoffset = int(pow(2,height)-1) + hashes = ['\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' ] * npieces + for offset in range(startoffset,startoffset+npieces): + hashes[offset-startoffset] = tree[offset] + return hashes + diff --git a/tribler-mod/Tribler/Core/Merkle/merkle.py.bak b/tribler-mod/Tribler/Core/Merkle/merkle.py.bak new file mode 100644 index 0000000..3297226 --- /dev/null +++ b/tribler-mod/Tribler/Core/Merkle/merkle.py.bak @@ -0,0 +1,268 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information + +from math import log,pow,floor +from sha import sha +import sys + +DEBUG = False + +# External classes + +class MerkleTree: + + def __init__(self,piece_size,total_length,root_hash=None,hashes=None): + """ + Create a Merkle hash tree + + When creating a .torrent: + root_hash is None and hashes is not None + When creating an initial 
seeder: + root_hash is None and hashes is not None + (root_hash is None to allow comparison with the calculated + root hash and the one in the .torrent) + When creating a downloader: + root_hash is not None and hashes is None + """ + self.npieces = len2npieces(piece_size,total_length) + self.treeheight = get_tree_height(self.npieces) + self.tree = create_tree(self.treeheight) + if hashes is None: + self.root_hash = root_hash + else: + fill_tree(self.tree,self.treeheight,self.npieces,hashes) + # root_hash is None during .torrent generation + if root_hash is None: + self.root_hash = self.tree[0] + else: + raise AssertionError, "merkle: if hashes not None, root_hash must be" + + def get_root_hash(self): + return self.root_hash + + def compare_root_hashes(self,other): + return self.root_hash == other + + def get_hashes_for_piece(self,index): + return get_hashes_for_piece(self.tree,self.treeheight,index) + + def check_hashes(self,hashlist): + return check_tree_path(self.root_hash,self.treeheight,hashlist) + + def update_hash_admin(self,hashlist,piece_hashes): + update_hash_admin(hashlist,self.tree,self.treeheight,piece_hashes) + + def get_piece_hashes(self): + """ + Get the pieces' hashes from the bottom of the hash tree. Used during + a graceful restart of a client that already downloaded stuff. + """ + return get_piece_hashes(self.tree,self.treeheight,self.npieces) + +def create_fake_hashes(info): + total_length = calc_total_length(info) + npieces = len2npieces(info['piece length'],total_length) + return ['\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' ] * npieces + + +# Internal functions +# Design choice: all algoritmics have been returned into stateless functions, +# i.e. they operate on the input parameters only. This to keep them extremely +# clear. + +def len2npieces(piece_size,total_length): + npieces = total_length / piece_size + if piece_size*npieces < total_length: + npieces += 1 + return npieces + + +def calc_total_length(info): + # Merkle: Calculate total length from .torrent info + if info.has_key('length'): + return info['length'] + # multi-file torrent + files = info['files'] + total_length = 0 + for i in range(0,len(files)): + total_length += files[i]['length'] + return total_length + + +def get_tree_height(npieces): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","merkle: number of pieces is",npieces + height = log(npieces,2) + if height - floor(height) > 0.0: + height = int(height)+1 + else: + height = int(height) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","merkle: tree height is",height + return height + +def create_tree(height): + # Create tree that has enough leaves to hold all hashes + treesize = int(pow(2,height+1)-1) # subtract unused tail + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","merkle: treesize",treesize + tree = ['\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' ] * treesize + return tree + +def fill_tree(tree,height,npieces,hashes): + # 1. 
Fill bottom of tree with hashes + startoffset = int(pow(2,height)-1) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","merkle: bottom of tree starts at",startoffset + for offset in range(startoffset,startoffset+npieces): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","merkle: copying",offset + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","merkle: hashes[",offset-startoffset,"]=",str(hashes[offset-startoffset]) + tree[offset] = hashes[offset-startoffset] + # 2. Note that unused leaves are NOT filled. It may be a good idea to fill + # them as hashing 0 values may create a security problem. However, the + # filler values would have to be known to any initial seeder, otherwise it + # will not be able build the same hash tree as the other initial seeders. + # Assume anyone should be able to autonomously become a seeder, the filler + # must be public info. I don't know whether having public info as filler + # instead of 0s is any safer, cryptographically speaking. Hence, we stick + # with 0 for the moment + + # 3. Calculate higher level hashes from leaves + for level in range(height,0,-1): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","merkle: calculating level",level + for offset in range(int(pow(2,level)-1),int(pow(2,level+1)-2),2): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","merkle: data offset",offset + [ parentstartoffset, parentoffset ] = get_parent_offset(offset,level) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","merkle: parent offset",parentoffset + data = tree[offset]+tree[offset+1] + digester = sha() + digester.update(data) + digest = digester.digest() + tree[parentoffset] = digest + #for offset in range(0,treesize-1): + # print offset,"HASH",str(tree[offset]) + return tree + + +def get_hashes_for_piece(tree,height,index): + startoffset = int(pow(2,height)-1) + myoffset = startoffset+index + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","merkle: myoffset",myoffset + # 1. Add piece's own hash + hashlist = [ [myoffset,tree[myoffset]] ] + # 2. Add hash of piece's sibling, left or right + if myoffset % 2 == 0: + siblingoffset = myoffset-1 + else: + siblingoffset = myoffset+1 + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","merkle: siblingoffset",siblingoffset + if siblingoffset != -1: + hashlist.append([siblingoffset,tree[siblingoffset]]) + # 3. Add hashes of uncles + uncleoffset = myoffset + for level in range(height,0,-1): + uncleoffset = get_uncle_offset(uncleoffset,level) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","merkle: uncleoffset",uncleoffset + hashlist.append( [uncleoffset,tree[uncleoffset]] ) + return hashlist + + +def check_tree_path(root_hash,height,hashlist): + """ + The hashes should be in the right order in the hashlist, otherwise + the peer will be kicked. The hashlist parameter is assumed to be + of the right type, and contain values of the right type as well. + The exact values should be checked for validity here. 
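For intuition, the offset arithmetic used by the parent/uncle helpers in this module, worked on a height-2 tree (heap-style layout, root at offset 0, leaves at offsets 3..6):

from Tribler.Core.Merkle.merkle import get_parent_offset, get_uncle_offset

print get_parent_offset(5, 1)   # -> [1, 2]: level-1 nodes start at offset 1; node 5's parent is node 2
print get_uncle_offset(5, 2)    # -> 1: the sibling of that parent, i.e. node 5's uncle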
+ """ + maxoffset = int(pow(2,height+1)-2) + mystartoffset = int(pow(2,height)-1) + i=0 + a = hashlist[i] + if a[0] < 0 or a[0] > maxoffset: + return False + i += 1 + b = hashlist[i] + if b[0] < 0 or b[0] > maxoffset: + return False + i += 1 + myindex = a[0]-mystartoffset + sibindex = b[0]-mystartoffset + for level in range(height,0,-1): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","merkle: checking level",level + a = check_fork(a,b,level) + b = hashlist[i] + if b[0] < 0 or b[0] > maxoffset: + return False + i += 1 + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","merkle: ROOT HASH",`str(root_hash)`,"==",`str(a[1])` + if a[1] == root_hash: + return True + else: + return False + +def update_hash_admin(hashlist,tree,height,hashes): + mystartoffset = int(pow(2,height)-1) + for i in range(0,len(hashlist)): + if i < 2: + # me and sibling real hashes of piece data, save them + index = hashlist[i][0]-mystartoffset + # ignore siblings that are just tree filler + if index < len(hashes): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","merkle: update_hash_admin: saving hash of",index + hashes[index] = hashlist[i][1] + # put all hashes in tree, such that we incrementally learn it + # and can pass them on to others + tree[hashlist[i][0]] = hashlist[i][1] + + +def check_fork(a,b,level): + myoffset = a[0] + siblingoffset = b[0] + if myoffset > siblingoffset: + data = b[1]+a[1] + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","merkle: combining",siblingoffset,myoffset + else: + data = a[1]+b[1] + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","merkle: combining",myoffset,siblingoffset + digester = sha() + digester.update(data) + digest = digester.digest() + [parentstartoffset, parentoffset ] = get_parent_offset(myoffset,level-1) + return [parentoffset,digest] + +def get_parent_offset(myoffset,level): + parentstartoffset = int(pow(2,level)-1) + mystartoffset = int(pow(2,level+1)-1) + parentoffset = parentstartoffset + (myoffset-mystartoffset)/2 + return [parentstartoffset, parentoffset] + + +def get_uncle_offset(myoffset,level): + if level == 1: + return 0 + [parentstartoffset,parentoffset ] = get_parent_offset(myoffset,level-1) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","merkle: parent offset",parentoffset + parentindex = parentoffset-parentstartoffset + if parentoffset % 2 == 0: + uncleoffset = parentoffset-1 + else: + uncleoffset = parentoffset+1 + return uncleoffset + +def get_piece_hashes(tree,height,npieces): + startoffset = int(pow(2,height)-1) + hashes = ['\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' ] * npieces + for offset in range(startoffset,startoffset+npieces): + hashes[offset-startoffset] = tree[offset] + return hashes + diff --git a/tribler-mod/Tribler/Core/NATFirewall/ConnectionCheck.py b/tribler-mod/Tribler/Core/NATFirewall/ConnectionCheck.py new file mode 100644 index 0000000..6184242 --- /dev/null +++ b/tribler-mod/Tribler/Core/NATFirewall/ConnectionCheck.py @@ -0,0 +1,154 @@ +from time import localtime, strftime +import sys +from time import sleep +import thread +import random +from Tribler.Core.NATFirewall.NatCheck import GetNATType +from Tribler.Core.NATFirewall.TimeoutCheck import GetTimeout + +DEBUG = False + +class ConnectionCheck: + + __single = None + + def __init__(self, session): + if ConnectionCheck.__single: + raise RuntimeError, 
"ConnectionCheck is singleton" + ConnectionCheck.__single = self + self._lock = thread.allocate_lock() + self._running = False + self.session = session + self.permid = self.session.get_permid() + self.nat_type = None + self.nat_timeout = 0 + self._nat_callbacks = [] # list with callback functions that want to know the nat_type + self.natcheck_reply_callbacks = [] # list with callback functions that want to send a natcheck_reply message + + @staticmethod + def getInstance(*args, **kw): + if ConnectionCheck.__single is None: + ConnectionCheck(*args, **kw) + return ConnectionCheck.__single + + def try_start(self, reply_callback = None): + + if reply_callback: self.natcheck_reply_callbacks.append(reply_callback) + + if DEBUG: + if self._running: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "natcheckmsghandler: the thread is already running" + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "natcheckmsghandler: starting the thread" + + if not self._running: + thread.start_new_thread(self.run, ()) + + while True: + sleep(0) + if self._running: + break + + def run(self): + self._lock.acquire() + self._running = True + self._lock.release() + + try: + self.nat_discovery() + + finally: + self._lock.acquire() + self._running = False + self._lock.release() + + def timeout_check(self, pingback): + """ + Find out NAT timeout + """ + return GetTimeout(pingback) + + def natcheck(self, in_port, server1, server2): + """ + Find out NAT type and public address and port + """ + nat_type, ex_ip, ex_port, in_ip = GetNATType(in_port, server1, server2) + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", "NAT Type: " + nat_type[1] + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", "Public Address: " + ex_ip + ":" + str(ex_port) + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", "Private Address: " + in_ip + ":" + str(in_port) + return nat_type, ex_ip, ex_port, in_ip + + def get_nat_type(self, callback=None): + """ + When a callback parameter is supplied it will always be + called. When the NAT-type is already known the callback will + be made instantly. Otherwise, the callback will be made when + the NAT discovery has finished. 
+ """ + if self.nat_type: + if callback: + callback(self.nat_type) + return self.nat_type + else: + if callback: + self._nat_callbacks.append(callback) + self.try_start() + return "Unknown NAT/Firewall" + + def _perform_nat_type_notification(self): + nat_type = self.get_nat_type() + callbacks = self._nat_callbacks + self._nat_callbacks = [] + + for callback in callbacks: + try: + callback(nat_type) + except: + pass + + def nat_discovery(self): + """ + Main method of the class: launches nat discovery algorithm + """ + in_port = self.session.get_puncturing_internal_port() + stun_servers = self.session.get_stun_servers() + random.seed() + random.shuffle(stun_servers) + stun1 = stun_servers[1] + stun2 = stun_servers[0] + pingback_servers = self.session.get_pingback_servers() + random.shuffle(pingback_servers) + + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", 'Starting ConnectionCheck on %s %s %s' % (in_port, stun1, stun2) + + performed_nat_type_notification = False + + # Check what kind of NAT the peer is behind + nat_type, ex_ip, ex_port, in_ip = self.natcheck(in_port, stun1, stun2) + self.nat_type = nat_type[1] + + # notify any callbacks interested in the nat_type only + self._perform_nat_type_notification() + performed_nat_type_notification = True + + + # If there is any callback interested, check the UDP timeout of the NAT the peer is behind + if len(self.natcheck_reply_callbacks): + + if nat_type[0] > 0: + for pingback in pingback_servers: + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheck: pingback is:", pingback + self.nat_timeout = self.timeout_check(pingback) + if self.nat_timeout <= 0: break + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck: Nat UDP timeout is: ", str(self.nat_timeout) + + self.nat_params = [nat_type[1], nat_type[0], self.nat_timeout, ex_ip, int(ex_port), in_ip, in_port] + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", str(self.nat_params) + + # notify any callbacks interested in sending a natcheck_reply message + for reply_callback in self.natcheck_reply_callbacks: + reply_callback(self.nat_params) + self.natcheck_reply_callbacks = [] + + if not performed_nat_type_notification: + self._perform_nat_type_notification() diff --git a/tribler-mod/Tribler/Core/NATFirewall/ConnectionCheck.py.bak b/tribler-mod/Tribler/Core/NATFirewall/ConnectionCheck.py.bak new file mode 100644 index 0000000..278793a --- /dev/null +++ b/tribler-mod/Tribler/Core/NATFirewall/ConnectionCheck.py.bak @@ -0,0 +1,153 @@ +import sys +from time import sleep +import thread +import random +from Tribler.Core.NATFirewall.NatCheck import GetNATType +from Tribler.Core.NATFirewall.TimeoutCheck import GetTimeout + +DEBUG = False + +class ConnectionCheck: + + __single = None + + def __init__(self, session): + if ConnectionCheck.__single: + raise RuntimeError, "ConnectionCheck is singleton" + ConnectionCheck.__single = self + self._lock = thread.allocate_lock() + self._running = False + self.session = session + self.permid = self.session.get_permid() + self.nat_type = None + self.nat_timeout = 0 + self._nat_callbacks = [] # list with callback functions that want to know the nat_type + self.natcheck_reply_callbacks = [] # list with callback functions that want to send a natcheck_reply message + + @staticmethod + def getInstance(*args, **kw): + if ConnectionCheck.__single is None: + ConnectionCheck(*args, **kw) + return ConnectionCheck.__single + 
+ def try_start(self, reply_callback = None): + + if reply_callback: self.natcheck_reply_callbacks.append(reply_callback) + + if DEBUG: + if self._running: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "natcheckmsghandler: the thread is already running" + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "natcheckmsghandler: starting the thread" + + if not self._running: + thread.start_new_thread(self.run, ()) + + while True: + sleep(0) + if self._running: + break + + def run(self): + self._lock.acquire() + self._running = True + self._lock.release() + + try: + self.nat_discovery() + + finally: + self._lock.acquire() + self._running = False + self._lock.release() + + def timeout_check(self, pingback): + """ + Find out NAT timeout + """ + return GetTimeout(pingback) + + def natcheck(self, in_port, server1, server2): + """ + Find out NAT type and public address and port + """ + nat_type, ex_ip, ex_port, in_ip = GetNATType(in_port, server1, server2) + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", "NAT Type: " + nat_type[1] + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", "Public Address: " + ex_ip + ":" + str(ex_port) + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", "Private Address: " + in_ip + ":" + str(in_port) + return nat_type, ex_ip, ex_port, in_ip + + def get_nat_type(self, callback=None): + """ + When a callback parameter is supplied it will always be + called. When the NAT-type is already known the callback will + be made instantly. Otherwise, the callback will be made when + the NAT discovery has finished. + """ + if self.nat_type: + if callback: + callback(self.nat_type) + return self.nat_type + else: + if callback: + self._nat_callbacks.append(callback) + self.try_start() + return "Unknown NAT/Firewall" + + def _perform_nat_type_notification(self): + nat_type = self.get_nat_type() + callbacks = self._nat_callbacks + self._nat_callbacks = [] + + for callback in callbacks: + try: + callback(nat_type) + except: + pass + + def nat_discovery(self): + """ + Main method of the class: launches nat discovery algorithm + """ + in_port = self.session.get_puncturing_internal_port() + stun_servers = self.session.get_stun_servers() + random.seed() + random.shuffle(stun_servers) + stun1 = stun_servers[1] + stun2 = stun_servers[0] + pingback_servers = self.session.get_pingback_servers() + random.shuffle(pingback_servers) + + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", 'Starting ConnectionCheck on %s %s %s' % (in_port, stun1, stun2) + + performed_nat_type_notification = False + + # Check what kind of NAT the peer is behind + nat_type, ex_ip, ex_port, in_ip = self.natcheck(in_port, stun1, stun2) + self.nat_type = nat_type[1] + + # notify any callbacks interested in the nat_type only + self._perform_nat_type_notification() + performed_nat_type_notification = True + + + # If there is any callback interested, check the UDP timeout of the NAT the peer is behind + if len(self.natcheck_reply_callbacks): + + if nat_type[0] > 0: + for pingback in pingback_servers: + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheck: pingback is:", pingback + self.nat_timeout = self.timeout_check(pingback) + if self.nat_timeout <= 0: break + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck: Nat UDP timeout is: ", 
str(self.nat_timeout) + + self.nat_params = [nat_type[1], nat_type[0], self.nat_timeout, ex_ip, int(ex_port), in_ip, in_port] + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", str(self.nat_params) + + # notify any callbacks interested in sending a natcheck_reply message + for reply_callback in self.natcheck_reply_callbacks: + reply_callback(self.nat_params) + self.natcheck_reply_callbacks = [] + + if not performed_nat_type_notification: + self._perform_nat_type_notification() diff --git a/tribler-mod/Tribler/Core/NATFirewall/DialbackMsgHandler.py b/tribler-mod/Tribler/Core/NATFirewall/DialbackMsgHandler.py new file mode 100644 index 0000000..ba1e285 --- /dev/null +++ b/tribler-mod/Tribler/Core/NATFirewall/DialbackMsgHandler.py @@ -0,0 +1,468 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information +# +# The dialback-message extension serves to (1)~see if we're externally reachable +# and (2)~to tell us what our external IP adress is. When an overlay connection +# is made when we're in dialback mode, we will send a DIALBACK_REQUEST message +# over the overlay connection. The peer is then support to initiate a new +# BT connection with infohash 0x00 0x00 ... 0x01 and send a DIALBACK_REPLY over +# that connection. Those connections are referred to as ReturnConnections +# +# TODO: security problem: if malicious peer connects 7 times to us and tells +# 7 times the same bad external iP, we believe him. Sol: only use locally +# initiated conns + IP address check (BC2 message could be used to attack +# still) +# +# TODO: Arno,2007-09-18: Bittorrent mainline tracker e.g. +# http://tracker.publish.bittorrent.com:6969/announce +# now also returns your IP address in the reply, i.e. there is a +# {'external ip': '\x82%\xc1@'} +# in the dict. We should use this info. +# + +import sys +from time import time +from random import shuffle +from traceback import print_exc,print_stack +from threading import currentThread + +from Tribler.Core.BitTornado.BT1.MessageID import * +from Tribler.Core.BitTornado.bencode import bencode,bdecode + +from Tribler.Core.NATFirewall.ReturnConnHandler import ReturnConnHandler +from Tribler.Core.Overlay.SecureOverlay import OLPROTO_VER_THIRD +from Tribler.Core.Utilities.utilities import * +from Tribler.Core.simpledefs import * + +DEBUG = False + +# +# Constants +# + +REPLY_WAIT = 60 # seconds +REPLY_VALIDITY = 2*24*3600.0 # seconds + +# Normally, one would allow just one majority to possibly exists. However, +# as current Buddycast has a lot of stale peer addresses, let's make +# PEERS_TO_ASK not 5 but 7. +# +PEERS_TO_AGREE = 4 # peers have to say X is my IP before I believe them +YOURIP_PEERS_TO_AGREE = 16 # peers have to say X is my IP via 'yourip' in EXTEND hs before I believe them +PEERS_TO_ASK = 7 # maximum number of outstanding requests +MAX_TRIES = 35 # 5 times 7 peers + +class DialbackMsgHandler: + + __single = None + + def __init__(self): + if DialbackMsgHandler.__single: + raise RuntimeError, "DialbackMsgHandler is singleton" + DialbackMsgHandler.__single = self + + self.peers_asked = {} + self.myips = [] + self.consensusip = None # IP address according to peers + self.fromsuperpeer = False + self.dbreach = False # Did I get any DIALBACK_REPLY? + self.btenginereach = False # Did BT engine get incoming connections? 
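# A minimal illustrative sketch, separate from the patch itself, of how a
# DIALBACK_REPLY is framed as described in the comment block above (and used
# by network_returnconn_reply_connect_callback / network_handleReturnConnMessage
# further down): a one-byte message ID followed by the bencoded IP address the
# replying peer observed. The ID byte value and the one-string bencoder below
# are simplified, hypothetical stand-ins for Tribler's MessageID and bencode
# modules.

DIALBACK_REPLY_ID = chr(229)   # hypothetical value; the real one is defined in MessageID.py

def bencode_str(s):
    # bencoding of a plain string is "<length>:<bytes>"
    return "%d:%s" % (len(s), s)

def bdecode_str(data):
    length, _, rest = data.partition(":")
    return rest[:int(length)]

def build_dialback_reply(observed_ip):
    return DIALBACK_REPLY_ID + bencode_str(observed_ip)

def parse_dialback_reply(message):
    if message[0] != DIALBACK_REPLY_ID:
        raise ValueError("not a DIALBACK_REPLY")
    return bdecode_str(message[1:])

# parse_dialback_reply(build_dialback_reply("130.37.193.64")) == "130.37.193.64"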
+ self.ntries = 0 + self.active = False # Need defaults for test code + self.rawserver = None + self.launchmany = None + self.peer_db = None + self.superpeer_db = None + self.trust_superpeers = None + self.old_ext_ip = None + self.myips_according_to_yourip = [] + self.returnconnhand = ReturnConnHandler.getInstance() + + + def getInstance(*args, **kw): + if DialbackMsgHandler.__single is None: + DialbackMsgHandler(*args, **kw) + return DialbackMsgHandler.__single + getInstance = staticmethod(getInstance) + + def register(self,overlay_bridge,launchmany,rawserver,config): + """ Called by MainThread """ + self.overlay_bridge = overlay_bridge + self.rawserver = rawserver + self.launchmany = launchmany + self.peer_db = launchmany.peer_db + self.superpeer_db = launchmany.superpeer_db + self.active = config['dialback_active'], + self.trust_superpeers = config['dialback_trust_superpeers'] + self.returnconnhand.register(self.rawserver,launchmany.multihandler,launchmany.listen_port,config['overlay_max_message_length']) + self.returnconnhand.register_conns_callback(self.network_handleReturnConnConnection) + self.returnconnhand.register_recv_callback(self.network_handleReturnConnMessage) + self.returnconnhand.start_listening() + + self.old_ext_ip = launchmany.get_ext_ip() + + + def register_yourip(self,launchmany): + """ Called by MainThread """ + self.launchmany = launchmany + + + def olthread_handleSecOverlayConnection(self,exc,permid,selversion,locally_initiated): + """ + Called from OverlayApps to signal there is an overlay-connection, + see if we should ask it to dialback + """ + # Called by overlay thread + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: handleConnection",exc,"v",selversion,"local",locally_initiated + if selversion < OLPROTO_VER_THIRD: + return True + + if exc is not None: + try: + del self.peers_asked[permid] + except: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: handleConnection: Got error on connection that we didn't ask for dialback" + pass + return + + if self.consensusip is None: + self.ntries += 1 + if self.ntries >= MAX_TRIES: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: tried too many times, giving up" + return True + + if self.dbreach or self.btenginereach: + self.launchmany.set_activity(NTFY_ACT_GET_EXT_IP_FROM_PEERS) + else: + self.launchmany.set_activity(NTFY_ACT_REACHABLE) + + # Also do this when the connection is not locally initiated. + # That tells us that we're connectable, but it doesn't tell us + # our external IP address. + if self.active: + self.olthread_attempt_request_dialback(permid) + return True + + def olthread_attempt_request_dialback(self,permid): + # Called by overlay thread + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: attempt dialback request",show_permid_short(permid) + + dns = self.olthread_get_dns_from_peerdb(permid) + ipinuse = False + + # 1. Remove peers we asked but didn't succeed in connecting back + threshold = time()-REPLY_WAIT + newdict = {} + for permid2,peerrec in self.peers_asked.iteritems(): + if peerrec['reqtime'] >= threshold: + newdict[permid2] = peerrec + if peerrec['dns'][0] == dns[0]: + ipinuse = True + self.peers_asked = newdict + + # 2. Already asked? + if permid in self.peers_asked or ipinuse or len(self.peers_asked) >= PEERS_TO_ASK: + # ipinuse protects a little against attacker that want us to believe + # we have a certain IP address. 
+ if DEBUG: + pipa = permid in self.peers_asked + lpa = len(self.peers_asked) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: No request made to",show_permid_short(permid),"already asked",pipa,"IP in use",ipinuse,"nasked",lpa + + return + dns = self.olthread_get_dns_from_peerdb(permid) + + # 3. Ask him to dialback + peerrec = {'dns':dns,'reqtime':time()} + self.peers_asked[permid] = peerrec + self.overlay_bridge.connect(permid,self.olthread_request_connect_callback) + + def olthread_request_connect_callback(self,exc,dns,permid,selversion): + # Called by overlay thread + if exc is None: + if selversion >= OLPROTO_VER_THIRD: + self.overlay_bridge.send(permid, DIALBACK_REQUEST+'',self.olthread_request_send_callback) + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: DIALBACK_REQUEST: peer speaks old protocol, weird",show_permid_short(permid) + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: DIALBACK_REQUEST: error connecting to",show_permid_short(permid),exc + + + def olthread_request_send_callback(self,exc,permid): + # Called by overlay thread + if exc is not None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: DIALBACK_REQUEST error sending to",show_permid_short(permid),exc + pass + + def olthread_handleSecOverlayMessage(self,permid,selversion,message): + """ + Handle incoming DIALBACK_REQUEST messages + """ + # Called by overlay thread + t = message[0] + + if t == DIALBACK_REQUEST: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: Got DIALBACK_REQUEST",len(message),show_permid_short(permid) + return self.olthread_process_dialback_request(permid, message, selversion) + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: UNKNOWN OVERLAY MESSAGE", ord(t) + return False + + + def olthread_process_dialback_request(self,permid,message,selversion): + # Called by overlay thread + # 1. Check + if len(message) != 1: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: DIALBACK_REQUEST: message too big" + return False + + # 2. Retrieve peer's IP address + dns = self.olthread_get_dns_from_peerdb(permid) + + # 3. Send back reply + # returnconnhand uses the network thread to do stuff, so the callback + # will be made by the network thread + self.returnconnhand.connect_dns(dns,self.network_returnconn_reply_connect_callback) + + # 4. 
Message processed OK, don't know about sending of reply though + return True + + + def network_returnconn_reply_connect_callback(self,exc,dns): + # Called by network thread + + if not currentThread().getName().startswith("NetworkThread"): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: network_returnconn_reply_connect_callback: called by",currentThread().getName()," not NetworkThread" + print_stack() + + if exc is None: + hisip = dns[0] + try: + reply = bencode(hisip) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: DIALBACK_REPLY: sending to",dns + self.returnconnhand.send(dns, DIALBACK_REPLY+reply, self.network_returnconn_reply_send_callback) + except: + print_exc() + return False + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: DIALBACK_REPLY: error connecting to",dns,exc + + def network_returnconn_reply_send_callback(self,exc,dns): + # Called by network thread + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: DIALBACK_REPLY: send callback:",dns,exc + + + if exc is not None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: DIALBACK_REPLY: error sending to",dns,exc + pass + + # + # Receipt of connection that would carry DIALBACK_REPLY + # + def network_handleReturnConnConnection(self,exc,dns,locally_initiated): + # Called by network thread + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: DIALBACK_REPLY: Got connection from",dns,exc + pass + + def network_handleReturnConnMessage(self,dns,message): + # Called by network thread + t = message[0] + + if t == DIALBACK_REPLY: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: Got DIALBACK_REPLY",len(message),dns + + # Hand over processing to overlay thread + olthread_process_dialback_reply_lambda = lambda:self.olthread_process_dialback_reply(dns, message) + self.overlay_bridge.add_task(olthread_process_dialback_reply_lambda,0) + + # We're done and no longer need the return connection, so + # call close explicitly + self.returnconnhand.close(dns) + return True + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: UNKNOWN RETURNCONN MESSAGE", ord(t) + return False + + + def olthread_process_dialback_reply(self,dns,message): + # Called by overlay thread + + # 1. Yes, we're reachable, now just matter of determining ext IP + self.dbreach = True + + # 2. Authentication: did I ask this peer? + permid = self.olthread_permid_of_asked_peer(dns) + if permid is None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: DIALBACK_REPLY: Got reply from peer I didn't ask",dns + return False + + del self.peers_asked[permid] + + # 3. See what he sent us + try: + myip = bdecode(message[1:]) + except: + print_exc() + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: DIALBACK_REPLY: error becoding" + return False + if not isValidIP(myip): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: DIALBACK_REPLY: invalid IP" + return False + + + # 4. 
See if superpeer, then we're done, trusted source + if self.trust_superpeers: + superpeers = self.superpeer_db.getSuperPeers() + if permid in superpeers: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: DIALBACK_REPLY: superpeer said my IP address is",myip,"setting it to that" + self.consensusip = myip + self.fromsuperpeer = True + else: + # 5, 6. 7, 8. Record this peers opinion and see if we get a + # majority vote. + # + self.myips,consensusip = tally_opinion(myip,self.myips,PEERS_TO_AGREE) + if self.consensusip is None: + self.consensusip = consensusip + + # 8. Change IP address if different + if self.consensusip is not None: + + self.launchmany.dialback_got_ext_ip_callback(self.consensusip) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: DIALBACK_REPLY: I think my IP address is",self.old_ext_ip,"others say",self.consensusip,", setting it to latter" + + # 9. Notify GUI that we are connectable + self.launchmany.dialback_reachable_callback() + + return True + + + # + # Information from other modules + # + def network_btengine_reachable_callback(self): + """ Called by network thread """ + if self.launchmany is not None: + self.launchmany.dialback_reachable_callback() + + # network thread updating our state. Ignoring concurrency, as this is a + # one time op. + self.btenginereach = True + + def isConnectable(self): + """ Called by overlay (BuddyCast) and network (Rerequester) thread + and now also any thread via Session.get_externally_reachable() """ + + # network thread updating our state. Ignoring concurrency, as these + # variables go from False to True once and stay there, or remain False + return self.dbreach or self.btenginereach + + + def network_btengine_extend_yourip(self,myip): + """ Called by Connecter when we receive an EXTEND handshake that + contains an yourip line. + + TODO: weigh opinion based on whether we locally initiated the connection + from a trusted tracker response, or that the address came from ut_pex. 
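# A minimal illustrative sketch, separate from the patch itself, of the
# majority-vote logic behind tally_opinion() (defined further down in this
# file): every peer's claim about our external IP is timestamped, stale claims
# are dropped, and a consensus is declared once enough replies name the same
# address. The constants mirror the ones above; the function name is
# hypothetical.

import time

REPLY_VALIDITY = 2 * 24 * 3600.0    # seconds an opinion remains valid
PEERS_TO_AGREE = 4                  # replies required before an IP is believed

def tally(opinions, claimed_ip, quorum=PEERS_TO_AGREE, validity=REPLY_VALIDITY):
    now = time.time()
    opinions.append((claimed_ip, now))                                 # record this peer's claim
    opinions = [(ip, t) for ip, t in opinions if t >= now - validity]  # drop stale claims
    counts = {}
    for ip, _ in opinions:
        counts[ip] = counts.get(ip, 0) + 1
    for ip, votes in counts.items():
        if votes >= quorum:
            return opinions, ip                                        # quorum reached
    return opinions, None                                              # no consensus yet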
+ """ + self.myips_according_to_yourip, yourip_consensusip = tally_opinion(myip,self.myips_according_to_yourip,YOURIP_PEERS_TO_AGREE) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: yourip: someone said my IP is",myip + if yourip_consensusip is not None: + self.launchmany.yourip_got_ext_ip_callback(yourip_consensusip) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: yourip: I think my IP address is",self.old_ext_ip,"others via EXTEND hs say",yourip_consensusip,"recording latter as option" + + # + # Internal methods + # + def olthread_get_dns_from_peerdb(self,permid): + dns = None + peer = self.peer_db.getPeer(permid) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: get_dns_from_peerdb: Got peer",peer + if peer: + ip = self.to_real_ip(peer['ip']) + dns = (ip, int(peer['port'])) + return dns + + def to_real_ip(self,hostname_or_ip): + """ If it's a hostname convert it to IP address first """ + ip = None + try: + """ Speed up: don't go to DNS resolver unnecessarily """ + socket.inet_aton(hostname_or_ip) + ip = hostname_or_ip + except: + try: + ip = socket.gethostbyname(hostname_or_ip) + except: + print_exc() + return ip + + + def olthread_permid_of_asked_peer(self,dns): + for permid,peerrec in self.peers_asked.iteritems(): + if peerrec['dns'] == dns: + # Yes, we asked this peer + return permid + return None + + +def tally_opinion(myip,oplist,requiredquorum): + + consensusip = None + + # 5. Ordinary peer, just add his opinion + oplist.append([myip,time()]) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: DIALBACK_REPLY: peer said I have IP address",myip + + # 6. Remove stale opinions + newlist = [] + threshold = time()-REPLY_VALIDITY + for pair in oplist: + if pair[1] >= threshold: + newlist.append(pair) + oplist = newlist + + # 7. See if we have X peers that agree + opinions = {} + for pair in oplist: + ip = pair[0] + if not (ip in opinions): + opinions[ip] = 1 + else: + opinions[ip] += 1 + + for o in opinions: + if opinions[o] >= requiredquorum: + # We have a quorum + if consensusip is None: + consensusip = o + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: DIALBACK_REPLY: Got consensus on my IP address being",consensusip + else: + # Hmmmm... more than one consensus + pass + + return oplist,consensusip diff --git a/tribler-mod/Tribler/Core/NATFirewall/DialbackMsgHandler.py.bak b/tribler-mod/Tribler/Core/NATFirewall/DialbackMsgHandler.py.bak new file mode 100644 index 0000000..f27563b --- /dev/null +++ b/tribler-mod/Tribler/Core/NATFirewall/DialbackMsgHandler.py.bak @@ -0,0 +1,467 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information +# +# The dialback-message extension serves to (1)~see if we're externally reachable +# and (2)~to tell us what our external IP adress is. When an overlay connection +# is made when we're in dialback mode, we will send a DIALBACK_REQUEST message +# over the overlay connection. The peer is then support to initiate a new +# BT connection with infohash 0x00 0x00 ... 0x01 and send a DIALBACK_REPLY over +# that connection. Those connections are referred to as ReturnConnections +# +# TODO: security problem: if malicious peer connects 7 times to us and tells +# 7 times the same bad external iP, we believe him. 
Sol: only use locally +# initiated conns + IP address check (BC2 message could be used to attack +# still) +# +# TODO: Arno,2007-09-18: Bittorrent mainline tracker e.g. +# http://tracker.publish.bittorrent.com:6969/announce +# now also returns your IP address in the reply, i.e. there is a +# {'external ip': '\x82%\xc1@'} +# in the dict. We should use this info. +# + +import sys +from time import time +from random import shuffle +from traceback import print_exc,print_stack +from threading import currentThread + +from Tribler.Core.BitTornado.BT1.MessageID import * +from Tribler.Core.BitTornado.bencode import bencode,bdecode + +from Tribler.Core.NATFirewall.ReturnConnHandler import ReturnConnHandler +from Tribler.Core.Overlay.SecureOverlay import OLPROTO_VER_THIRD +from Tribler.Core.Utilities.utilities import * +from Tribler.Core.simpledefs import * + +DEBUG = False + +# +# Constants +# + +REPLY_WAIT = 60 # seconds +REPLY_VALIDITY = 2*24*3600.0 # seconds + +# Normally, one would allow just one majority to possibly exists. However, +# as current Buddycast has a lot of stale peer addresses, let's make +# PEERS_TO_ASK not 5 but 7. +# +PEERS_TO_AGREE = 4 # peers have to say X is my IP before I believe them +YOURIP_PEERS_TO_AGREE = 16 # peers have to say X is my IP via 'yourip' in EXTEND hs before I believe them +PEERS_TO_ASK = 7 # maximum number of outstanding requests +MAX_TRIES = 35 # 5 times 7 peers + +class DialbackMsgHandler: + + __single = None + + def __init__(self): + if DialbackMsgHandler.__single: + raise RuntimeError, "DialbackMsgHandler is singleton" + DialbackMsgHandler.__single = self + + self.peers_asked = {} + self.myips = [] + self.consensusip = None # IP address according to peers + self.fromsuperpeer = False + self.dbreach = False # Did I get any DIALBACK_REPLY? + self.btenginereach = False # Did BT engine get incoming connections? 
+ self.ntries = 0 + self.active = False # Need defaults for test code + self.rawserver = None + self.launchmany = None + self.peer_db = None + self.superpeer_db = None + self.trust_superpeers = None + self.old_ext_ip = None + self.myips_according_to_yourip = [] + self.returnconnhand = ReturnConnHandler.getInstance() + + + def getInstance(*args, **kw): + if DialbackMsgHandler.__single is None: + DialbackMsgHandler(*args, **kw) + return DialbackMsgHandler.__single + getInstance = staticmethod(getInstance) + + def register(self,overlay_bridge,launchmany,rawserver,config): + """ Called by MainThread """ + self.overlay_bridge = overlay_bridge + self.rawserver = rawserver + self.launchmany = launchmany + self.peer_db = launchmany.peer_db + self.superpeer_db = launchmany.superpeer_db + self.active = config['dialback_active'], + self.trust_superpeers = config['dialback_trust_superpeers'] + self.returnconnhand.register(self.rawserver,launchmany.multihandler,launchmany.listen_port,config['overlay_max_message_length']) + self.returnconnhand.register_conns_callback(self.network_handleReturnConnConnection) + self.returnconnhand.register_recv_callback(self.network_handleReturnConnMessage) + self.returnconnhand.start_listening() + + self.old_ext_ip = launchmany.get_ext_ip() + + + def register_yourip(self,launchmany): + """ Called by MainThread """ + self.launchmany = launchmany + + + def olthread_handleSecOverlayConnection(self,exc,permid,selversion,locally_initiated): + """ + Called from OverlayApps to signal there is an overlay-connection, + see if we should ask it to dialback + """ + # Called by overlay thread + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: handleConnection",exc,"v",selversion,"local",locally_initiated + if selversion < OLPROTO_VER_THIRD: + return True + + if exc is not None: + try: + del self.peers_asked[permid] + except: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: handleConnection: Got error on connection that we didn't ask for dialback" + pass + return + + if self.consensusip is None: + self.ntries += 1 + if self.ntries >= MAX_TRIES: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: tried too many times, giving up" + return True + + if self.dbreach or self.btenginereach: + self.launchmany.set_activity(NTFY_ACT_GET_EXT_IP_FROM_PEERS) + else: + self.launchmany.set_activity(NTFY_ACT_REACHABLE) + + # Also do this when the connection is not locally initiated. + # That tells us that we're connectable, but it doesn't tell us + # our external IP address. + if self.active: + self.olthread_attempt_request_dialback(permid) + return True + + def olthread_attempt_request_dialback(self,permid): + # Called by overlay thread + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: attempt dialback request",show_permid_short(permid) + + dns = self.olthread_get_dns_from_peerdb(permid) + ipinuse = False + + # 1. Remove peers we asked but didn't succeed in connecting back + threshold = time()-REPLY_WAIT + newdict = {} + for permid2,peerrec in self.peers_asked.iteritems(): + if peerrec['reqtime'] >= threshold: + newdict[permid2] = peerrec + if peerrec['dns'][0] == dns[0]: + ipinuse = True + self.peers_asked = newdict + + # 2. Already asked? + if permid in self.peers_asked or ipinuse or len(self.peers_asked) >= PEERS_TO_ASK: + # ipinuse protects a little against attacker that want us to believe + # we have a certain IP address. 
+ if DEBUG: + pipa = permid in self.peers_asked + lpa = len(self.peers_asked) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: No request made to",show_permid_short(permid),"already asked",pipa,"IP in use",ipinuse,"nasked",lpa + + return + dns = self.olthread_get_dns_from_peerdb(permid) + + # 3. Ask him to dialback + peerrec = {'dns':dns,'reqtime':time()} + self.peers_asked[permid] = peerrec + self.overlay_bridge.connect(permid,self.olthread_request_connect_callback) + + def olthread_request_connect_callback(self,exc,dns,permid,selversion): + # Called by overlay thread + if exc is None: + if selversion >= OLPROTO_VER_THIRD: + self.overlay_bridge.send(permid, DIALBACK_REQUEST+'',self.olthread_request_send_callback) + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: DIALBACK_REQUEST: peer speaks old protocol, weird",show_permid_short(permid) + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: DIALBACK_REQUEST: error connecting to",show_permid_short(permid),exc + + + def olthread_request_send_callback(self,exc,permid): + # Called by overlay thread + if exc is not None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: DIALBACK_REQUEST error sending to",show_permid_short(permid),exc + pass + + def olthread_handleSecOverlayMessage(self,permid,selversion,message): + """ + Handle incoming DIALBACK_REQUEST messages + """ + # Called by overlay thread + t = message[0] + + if t == DIALBACK_REQUEST: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: Got DIALBACK_REQUEST",len(message),show_permid_short(permid) + return self.olthread_process_dialback_request(permid, message, selversion) + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: UNKNOWN OVERLAY MESSAGE", ord(t) + return False + + + def olthread_process_dialback_request(self,permid,message,selversion): + # Called by overlay thread + # 1. Check + if len(message) != 1: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: DIALBACK_REQUEST: message too big" + return False + + # 2. Retrieve peer's IP address + dns = self.olthread_get_dns_from_peerdb(permid) + + # 3. Send back reply + # returnconnhand uses the network thread to do stuff, so the callback + # will be made by the network thread + self.returnconnhand.connect_dns(dns,self.network_returnconn_reply_connect_callback) + + # 4. 
Message processed OK, don't know about sending of reply though + return True + + + def network_returnconn_reply_connect_callback(self,exc,dns): + # Called by network thread + + if not currentThread().getName().startswith("NetworkThread"): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: network_returnconn_reply_connect_callback: called by",currentThread().getName()," not NetworkThread" + print_stack() + + if exc is None: + hisip = dns[0] + try: + reply = bencode(hisip) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: DIALBACK_REPLY: sending to",dns + self.returnconnhand.send(dns, DIALBACK_REPLY+reply, self.network_returnconn_reply_send_callback) + except: + print_exc() + return False + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: DIALBACK_REPLY: error connecting to",dns,exc + + def network_returnconn_reply_send_callback(self,exc,dns): + # Called by network thread + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: DIALBACK_REPLY: send callback:",dns,exc + + + if exc is not None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: DIALBACK_REPLY: error sending to",dns,exc + pass + + # + # Receipt of connection that would carry DIALBACK_REPLY + # + def network_handleReturnConnConnection(self,exc,dns,locally_initiated): + # Called by network thread + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: DIALBACK_REPLY: Got connection from",dns,exc + pass + + def network_handleReturnConnMessage(self,dns,message): + # Called by network thread + t = message[0] + + if t == DIALBACK_REPLY: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: Got DIALBACK_REPLY",len(message),dns + + # Hand over processing to overlay thread + olthread_process_dialback_reply_lambda = lambda:self.olthread_process_dialback_reply(dns, message) + self.overlay_bridge.add_task(olthread_process_dialback_reply_lambda,0) + + # We're done and no longer need the return connection, so + # call close explicitly + self.returnconnhand.close(dns) + return True + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: UNKNOWN RETURNCONN MESSAGE", ord(t) + return False + + + def olthread_process_dialback_reply(self,dns,message): + # Called by overlay thread + + # 1. Yes, we're reachable, now just matter of determining ext IP + self.dbreach = True + + # 2. Authentication: did I ask this peer? + permid = self.olthread_permid_of_asked_peer(dns) + if permid is None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: DIALBACK_REPLY: Got reply from peer I didn't ask",dns + return False + + del self.peers_asked[permid] + + # 3. See what he sent us + try: + myip = bdecode(message[1:]) + except: + print_exc() + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: DIALBACK_REPLY: error becoding" + return False + if not isValidIP(myip): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: DIALBACK_REPLY: invalid IP" + return False + + + # 4. 
See if superpeer, then we're done, trusted source + if self.trust_superpeers: + superpeers = self.superpeer_db.getSuperPeers() + if permid in superpeers: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: DIALBACK_REPLY: superpeer said my IP address is",myip,"setting it to that" + self.consensusip = myip + self.fromsuperpeer = True + else: + # 5, 6. 7, 8. Record this peers opinion and see if we get a + # majority vote. + # + self.myips,consensusip = tally_opinion(myip,self.myips,PEERS_TO_AGREE) + if self.consensusip is None: + self.consensusip = consensusip + + # 8. Change IP address if different + if self.consensusip is not None: + + self.launchmany.dialback_got_ext_ip_callback(self.consensusip) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: DIALBACK_REPLY: I think my IP address is",self.old_ext_ip,"others say",self.consensusip,", setting it to latter" + + # 9. Notify GUI that we are connectable + self.launchmany.dialback_reachable_callback() + + return True + + + # + # Information from other modules + # + def network_btengine_reachable_callback(self): + """ Called by network thread """ + if self.launchmany is not None: + self.launchmany.dialback_reachable_callback() + + # network thread updating our state. Ignoring concurrency, as this is a + # one time op. + self.btenginereach = True + + def isConnectable(self): + """ Called by overlay (BuddyCast) and network (Rerequester) thread + and now also any thread via Session.get_externally_reachable() """ + + # network thread updating our state. Ignoring concurrency, as these + # variables go from False to True once and stay there, or remain False + return self.dbreach or self.btenginereach + + + def network_btengine_extend_yourip(self,myip): + """ Called by Connecter when we receive an EXTEND handshake that + contains an yourip line. + + TODO: weigh opinion based on whether we locally initiated the connection + from a trusted tracker response, or that the address came from ut_pex. 
+ """ + self.myips_according_to_yourip, yourip_consensusip = tally_opinion(myip,self.myips_according_to_yourip,YOURIP_PEERS_TO_AGREE) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: yourip: someone said my IP is",myip + if yourip_consensusip is not None: + self.launchmany.yourip_got_ext_ip_callback(yourip_consensusip) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: yourip: I think my IP address is",self.old_ext_ip,"others via EXTEND hs say",yourip_consensusip,"recording latter as option" + + # + # Internal methods + # + def olthread_get_dns_from_peerdb(self,permid): + dns = None + peer = self.peer_db.getPeer(permid) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: get_dns_from_peerdb: Got peer",peer + if peer: + ip = self.to_real_ip(peer['ip']) + dns = (ip, int(peer['port'])) + return dns + + def to_real_ip(self,hostname_or_ip): + """ If it's a hostname convert it to IP address first """ + ip = None + try: + """ Speed up: don't go to DNS resolver unnecessarily """ + socket.inet_aton(hostname_or_ip) + ip = hostname_or_ip + except: + try: + ip = socket.gethostbyname(hostname_or_ip) + except: + print_exc() + return ip + + + def olthread_permid_of_asked_peer(self,dns): + for permid,peerrec in self.peers_asked.iteritems(): + if peerrec['dns'] == dns: + # Yes, we asked this peer + return permid + return None + + +def tally_opinion(myip,oplist,requiredquorum): + + consensusip = None + + # 5. Ordinary peer, just add his opinion + oplist.append([myip,time()]) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: DIALBACK_REPLY: peer said I have IP address",myip + + # 6. Remove stale opinions + newlist = [] + threshold = time()-REPLY_VALIDITY + for pair in oplist: + if pair[1] >= threshold: + newlist.append(pair) + oplist = newlist + + # 7. See if we have X peers that agree + opinions = {} + for pair in oplist: + ip = pair[0] + if not (ip in opinions): + opinions[ip] = 1 + else: + opinions[ip] += 1 + + for o in opinions: + if opinions[o] >= requiredquorum: + # We have a quorum + if consensusip is None: + consensusip = o + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dialback: DIALBACK_REPLY: Got consensus on my IP address being",consensusip + else: + # Hmmmm... 
more than one consensus + pass + + return oplist,consensusip diff --git a/tribler-mod/Tribler/Core/NATFirewall/NatCheck.py b/tribler-mod/Tribler/Core/NATFirewall/NatCheck.py new file mode 100644 index 0000000..c905e13 --- /dev/null +++ b/tribler-mod/Tribler/Core/NATFirewall/NatCheck.py @@ -0,0 +1,209 @@ +from time import localtime, strftime +# Written by Lucia D'Acunto +# see LICENSE.txt for license information + +import socket +import sys + +DEBUG = False + +def Test1(udpsock, serveraddr): + """ + The client sends a request to a server asking it to send the + response back to the address and port the request came from + """ + + retVal = {"resp":False, "ex_ip":None, "ex_port":None} + BUFSIZ = 1024 + reply = "" + request = "ping1" + + udpsock.sendto(request, serveraddr) + + try: + reply, rcvaddr = udpsock.recvfrom(BUFSIZ) + except socket.timeout: + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", "Connection attempt to %s timed out" % (serveraddr,) + return retVal + + except ValueError, (strerror): + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", "Could not receive data: %s" % (strerror) + return retVal + except socket.error, (errno, strerror): + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", "Could not receive data: %s" % (strerror) + return retVal + + ex_ip, ex_port = reply.split(":") + + retVal["resp"] = True + retVal["ex_ip"] = ex_ip + retVal["ex_port"] = ex_port + + return retVal + +def Test2(udpsock, serveraddr): + """ + The client sends a request asking to receive an echo from a + different address and a different port on the address and port the + request came from + """ + + retVal = {"resp":False} + BUFSIZ = 1024 + request = "ping2" + + udpsock.sendto(request, serveraddr) + + try: + reply, rcvaddr = udpsock.recvfrom(BUFSIZ) + except socket.timeout: + #if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", "Connection attempt to %s timed out" % (serveraddr,) + return retVal + except ValueError, (strerror): + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", "Could not receive data: %s" % (strerror) + return retVal + except socket.error, (errno, strerror): + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", "Could not receive data: %s" % (strerror) + return retVal + + retVal["resp"] = True + + return retVal + +def Test3(udpsock, serveraddr): + """ + The client sends a request asking to receive an echo from the same + address but from a different port on the address and port the + request came from + """ + + retVal = {"resp":False, "ex_ip":None, "ex_port":None} + BUFSIZ = 1024 + reply = "" + request = "ping3" + + udpsock.sendto(request, serveraddr) + + try: + reply, rcvaddr = udpsock.recvfrom(BUFSIZ) + except socket.timeout: + #if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", "Connection attempt to %s timed out" % (serveraddr,) + return retVal + except ValueError, (strerror): + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", "Could not receive data: %s" % (strerror) + return retVal + except socket.error, (errno, strerror): + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", "Could not receive data: %s" % (strerror) + return retVal + + ex_ip, ex_port = reply.split(":") + + retVal["resp"] = True + retVal["ex_ip"] = 
ex_ip + retVal["ex_port"] = ex_port + + return retVal + +# Returns information about the NAT the client is behind +def GetNATType(in_port, serveraddr1, serveraddr2): + """ + Returns the NAT type according to the STUN algorithm, as well as the external + address (ip, port) and the internal address of the host + """ + + nat_type, ex_ip, ex_port, in_ip = [-1, "Unknown"], "0.0.0.0", "0", "0.0.0.0" + + # Set up the socket + udpsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + udpsock.settimeout(5) + try: + udpsock.bind(('',in_port)) + except socket.error, err: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Couldn't bind a udp socket on port %d : %s" % (in_port, err) + return (nat_type, ex_ip, ex_port, in_ip) + try: + # Get the internal IP address + connectaddr = ('tribler.org',80) + s = socket.socket() + s.connect(connectaddr) + in_ip = s.getsockname()[0] + del s + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck: getting the internal ip address by connecting to tribler.org:80", in_ip + except socket.error, err: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Couldn't connect to %s:%i" % (connectaddr[0], connectaddr[1]) + return (nat_type, ex_ip, ex_port, in_ip) + + """ + EXECUTE THE STUN ALGORITHM + """ + + # Do Test I + ret = Test1(udpsock, serveraddr1) + + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", "Test I reported: " + str(ret) + + if ret["resp"] == False: + nat_type[1] = "Blocked" + + else: + ex_ip = ret["ex_ip"] + ex_port = ret["ex_port"] + + if ret["ex_ip"] == in_ip: # No NAT: check for firewall + + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", "No NAT" + + # Do Test II + ret = Test2(udpsock, serveraddr1) + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", "Test II reported: " + str(ret) + + if ret["resp"] == True: + nat_type[0] = 0 + nat_type[1] = "Open Internet" + else: + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", "There is a Firewall" + + # Do Test III + ret = Test3(udpsock, serveraddr1) + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", "Test III reported: " + str(ret) + + if ret["resp"] == True: + nat_type[0] = 2 + nat_type[1] = "Restricted Cone Firewall" + else: + nat_type[0] = 3 + nat_type[1] = "Port Restricted Cone Firewall" + + else: # There is a NAT + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", "There is a NAT" + + # Do Test II + ret = Test2(udpsock, serveraddr1) + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", "Test II reported: " + str(ret) + if ret["resp"] == True: + nat_type[0] = 1 + nat_type[1] = "Full Cone NAT" + else: + #Do Test I using a different echo server + ret = Test1(udpsock, serveraddr2) + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", "Test I reported: " + str(ret) + + if ex_ip == ret["ex_ip"] and ex_port == ret["ex_port"]: # Public address is constant: consistent translation + + # Do Test III + ret = Test3(udpsock, serveraddr1) + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", "Test III reported: " + str(ret) + + if ret["resp"] == True: + nat_type[0] = 2 + nat_type[1] = "Restricted Cone NAT" + else: + nat_type[0] = 3 + nat_type[1] = "Port Restricted Cone NAT" + + else: + 
nat_type[0] = -1 + nat_type[1] = "Symmetric NAT" + + udpsock.close() + return (nat_type, ex_ip, ex_port, in_ip) diff --git a/tribler-mod/Tribler/Core/NATFirewall/NatCheck.py.bak b/tribler-mod/Tribler/Core/NATFirewall/NatCheck.py.bak new file mode 100644 index 0000000..86c6b7b --- /dev/null +++ b/tribler-mod/Tribler/Core/NATFirewall/NatCheck.py.bak @@ -0,0 +1,208 @@ +# Written by Lucia D'Acunto +# see LICENSE.txt for license information + +import socket +import sys + +DEBUG = False + +def Test1(udpsock, serveraddr): + """ + The client sends a request to a server asking it to send the + response back to the address and port the request came from + """ + + retVal = {"resp":False, "ex_ip":None, "ex_port":None} + BUFSIZ = 1024 + reply = "" + request = "ping1" + + udpsock.sendto(request, serveraddr) + + try: + reply, rcvaddr = udpsock.recvfrom(BUFSIZ) + except socket.timeout: + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", "Connection attempt to %s timed out" % (serveraddr,) + return retVal + + except ValueError, (strerror): + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", "Could not receive data: %s" % (strerror) + return retVal + except socket.error, (errno, strerror): + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", "Could not receive data: %s" % (strerror) + return retVal + + ex_ip, ex_port = reply.split(":") + + retVal["resp"] = True + retVal["ex_ip"] = ex_ip + retVal["ex_port"] = ex_port + + return retVal + +def Test2(udpsock, serveraddr): + """ + The client sends a request asking to receive an echo from a + different address and a different port on the address and port the + request came from + """ + + retVal = {"resp":False} + BUFSIZ = 1024 + request = "ping2" + + udpsock.sendto(request, serveraddr) + + try: + reply, rcvaddr = udpsock.recvfrom(BUFSIZ) + except socket.timeout: + #if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", "Connection attempt to %s timed out" % (serveraddr,) + return retVal + except ValueError, (strerror): + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", "Could not receive data: %s" % (strerror) + return retVal + except socket.error, (errno, strerror): + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", "Could not receive data: %s" % (strerror) + return retVal + + retVal["resp"] = True + + return retVal + +def Test3(udpsock, serveraddr): + """ + The client sends a request asking to receive an echo from the same + address but from a different port on the address and port the + request came from + """ + + retVal = {"resp":False, "ex_ip":None, "ex_port":None} + BUFSIZ = 1024 + reply = "" + request = "ping3" + + udpsock.sendto(request, serveraddr) + + try: + reply, rcvaddr = udpsock.recvfrom(BUFSIZ) + except socket.timeout: + #if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", "Connection attempt to %s timed out" % (serveraddr,) + return retVal + except ValueError, (strerror): + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", "Could not receive data: %s" % (strerror) + return retVal + except socket.error, (errno, strerror): + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", "Could not receive data: %s" % (strerror) + return retVal + + ex_ip, ex_port = reply.split(":") + + retVal["resp"] = True 
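# A minimal illustrative sketch, separate from the patch itself, of the
# decision tree GetNATType() walks above, with the socket I/O factored out.
# Each argument stands for the outcome of one of the UDP probes
# (Test1/Test2/Test3); the function and parameter names are hypothetical.

def classify_nat(test1_ok, public_ip, local_ip, test2_ok,
                 same_mapping_on_server2, test3_ok):
    if not test1_ok:
        return (-1, "Blocked")                      # no reply from the first STUN server
    if public_ip == local_ip:                       # no translation: only a firewall can be in the way
        if test2_ok:
            return (0, "Open Internet")
        return (2, "Restricted Cone Firewall") if test3_ok else (3, "Port Restricted Cone Firewall")
    # the host is behind a NAT
    if test2_ok:
        return (1, "Full Cone NAT")                 # echo from a different IP and port got through
    if not same_mapping_on_server2:
        return (-1, "Symmetric NAT")                # mapping changes per destination
    return (2, "Restricted Cone NAT") if test3_ok else (3, "Port Restricted Cone NAT")

# e.g. classify_nat(True, "84.22.96.7", "192.168.1.5", False, True, True)
#      -> (2, "Restricted Cone NAT")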
+ retVal["ex_ip"] = ex_ip + retVal["ex_port"] = ex_port + + return retVal + +# Returns information about the NAT the client is behind +def GetNATType(in_port, serveraddr1, serveraddr2): + """ + Returns the NAT type according to the STUN algorithm, as well as the external + address (ip, port) and the internal address of the host + """ + + nat_type, ex_ip, ex_port, in_ip = [-1, "Unknown"], "0.0.0.0", "0", "0.0.0.0" + + # Set up the socket + udpsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + udpsock.settimeout(5) + try: + udpsock.bind(('',in_port)) + except socket.error, err: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Couldn't bind a udp socket on port %d : %s" % (in_port, err) + return (nat_type, ex_ip, ex_port, in_ip) + try: + # Get the internal IP address + connectaddr = ('tribler.org',80) + s = socket.socket() + s.connect(connectaddr) + in_ip = s.getsockname()[0] + del s + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck: getting the internal ip address by connecting to tribler.org:80", in_ip + except socket.error, err: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Couldn't connect to %s:%i" % (connectaddr[0], connectaddr[1]) + return (nat_type, ex_ip, ex_port, in_ip) + + """ + EXECUTE THE STUN ALGORITHM + """ + + # Do Test I + ret = Test1(udpsock, serveraddr1) + + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", "Test I reported: " + str(ret) + + if ret["resp"] == False: + nat_type[1] = "Blocked" + + else: + ex_ip = ret["ex_ip"] + ex_port = ret["ex_port"] + + if ret["ex_ip"] == in_ip: # No NAT: check for firewall + + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", "No NAT" + + # Do Test II + ret = Test2(udpsock, serveraddr1) + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", "Test II reported: " + str(ret) + + if ret["resp"] == True: + nat_type[0] = 0 + nat_type[1] = "Open Internet" + else: + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", "There is a Firewall" + + # Do Test III + ret = Test3(udpsock, serveraddr1) + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", "Test III reported: " + str(ret) + + if ret["resp"] == True: + nat_type[0] = 2 + nat_type[1] = "Restricted Cone Firewall" + else: + nat_type[0] = 3 + nat_type[1] = "Port Restricted Cone Firewall" + + else: # There is a NAT + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", "There is a NAT" + + # Do Test II + ret = Test2(udpsock, serveraddr1) + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", "Test II reported: " + str(ret) + if ret["resp"] == True: + nat_type[0] = 1 + nat_type[1] = "Full Cone NAT" + else: + #Do Test I using a different echo server + ret = Test1(udpsock, serveraddr2) + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", "Test I reported: " + str(ret) + + if ex_ip == ret["ex_ip"] and ex_port == ret["ex_port"]: # Public address is constant: consistent translation + + # Do Test III + ret = Test3(udpsock, serveraddr1) + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCheck:", "Test III reported: " + str(ret) + + if ret["resp"] == True: + nat_type[0] = 2 + nat_type[1] = "Restricted Cone NAT" + else: + nat_type[0] = 3 + nat_type[1] = "Port Restricted Cone 
NAT" + + else: + nat_type[0] = -1 + nat_type[1] = "Symmetric NAT" + + udpsock.close() + return (nat_type, ex_ip, ex_port, in_ip) diff --git a/tribler-mod/Tribler/Core/NATFirewall/NatCheckMsgHandler.py b/tribler-mod/Tribler/Core/NATFirewall/NatCheckMsgHandler.py new file mode 100644 index 0000000..c82f561 --- /dev/null +++ b/tribler-mod/Tribler/Core/NATFirewall/NatCheckMsgHandler.py @@ -0,0 +1,406 @@ +from time import localtime, strftime +# Written by Lucia D'Acunto +# see LICENSE.txt for license information + +from time import strftime +from traceback import print_exc +import datetime +import random +import socket +import sys +import thread + +from Tribler.Core.BitTornado.BT1.MessageID import CRAWLER_NATCHECK, CRAWLER_NATTRAVERSAL +from Tribler.Core.BitTornado.bencode import bencode, bdecode +from Tribler.Core.NATFirewall.ConnectionCheck import ConnectionCheck +from Tribler.Core.NATFirewall.NatTraversal import tryConnect, coordinateHolePunching +from Tribler.Core.Overlay.SecureOverlay import OLPROTO_VER_EIGHTH, OLPROTO_VER_NINE, SecureOverlay +from Tribler.Core.Statistics.Crawler import Crawler +from Tribler.Core.Utilities.utilities import show_permid, show_permid_short +from types import IntType, StringType, ListType +from Tribler.Core.simpledefs import * + +DEBUG = False + +PEERLIST_LEN = 100 + +class NatCheckMsgHandler: + + __single = None + + def __init__(self): + if NatCheckMsgHandler.__single: + raise RuntimeError, "NatCheckMsgHandler is singleton" + NatCheckMsgHandler.__single = self + self.crawler_reply_callbacks = [] + self._secure_overlay = SecureOverlay.getInstance() + + self.crawler = Crawler.get_instance() + if self.crawler.am_crawler(): + self._file = open("natcheckcrawler.txt", "a") + self._file.write("\n".join(("# " + "*" * 80, strftime("%Y/%m/%d %H:%M:%S"), "# Crawler started\n"))) + self._file.flush() + self._file2 = open("nattraversalcrawler.txt", "a") + self._file2.write("\n".join(("# " + "*" * 80, strftime("%Y/%m/%d %H:%M:%S"), "# Crawler started\n"))) + self._file2.flush() + self.peerlist = [] + self.holePunchingIP = socket.gethostbyname(socket.gethostname()) + + else: + self._file = None + + @staticmethod + def getInstance(*args, **kw): + if NatCheckMsgHandler.__single is None: + NatCheckMsgHandler(*args, **kw) + return NatCheckMsgHandler.__single + + def register(self, launchmany): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: register" + + self.session = launchmany.session + self.doNatCheckSender = None + self.registered = True + + def doNatCheck(self, target_permid, selversion, request_callback): + """ + The nat-check initiator_callback + """ + + # for Tribler versions < 4.5.0 : do nothing + # TODO: change OLPROTO_VER_EIGHTH to OLPROTO_VER_SEVENTH + if selversion < OLPROTO_VER_NINE: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: Tribler version too old for NATCHECK: do nothing" + return False + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: do NATCHECK" + + # send the message + request_callback(CRAWLER_NATCHECK, "", callback=self.doNatCheckCallback) + + return True + + def doNatCheckCallback(self, exc, permid): + + if exc is not None: + return False + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCHECK_REQUEST was sent to", show_permid_short(permid), exc + + # Register peerinfo on file + self._file.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), + "REQUEST", + 
show_permid(permid), + str(self._secure_overlay.get_dns_from_peerdb(permid)), + "\n"))) + self._file.flush() + return True + + def gotDoNatCheckMessage(self, sender_permid, selversion, channel_id, payload, reply_callback): + """ + The handle-request callback + """ + + self.doNatCheckSender = sender_permid + self.crawler_reply_callbacks.append(reply_callback) + + try: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","NatCheckMsgHandler: start_nat_type_detect()" + conn_check = ConnectionCheck.getInstance(self.session) + conn_check.try_start(self.natthreadcb_natCheckReplyCallback) + except: + print_exc() + return False + + return True + + def natthreadcb_natCheckReplyCallback(self, ncr_data): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NAT type: ", ncr_data + + # send the message to the peer who has made the NATCHECK request, if any + if self.doNatCheckSender is not None: + try: + ncr_msg = bencode(ncr_data) + except: + print_exc() + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "error ncr_data:", ncr_data + return False + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler:", ncr_data + + # todo: make sure that natthreadcb_natCheckReplyCallback is always called for a request + # send replies to all the requests that have been received so far + for reply_callback in self.crawler_reply_callbacks: + reply_callback(ncr_msg, callback=self.natCheckReplySendCallback) + self.crawler_reply_callbacks = [] + + + def natCheckReplySendCallback(self, exc, permid): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCHECK_REPLY was sent to", show_permid_short(permid), exc + if exc is not None: + return False + return True + + def gotNatCheckReplyMessage(self, permid, selversion, channel_id, error, payload, request_callback): + """ + The handle-reply callback + """ + if error: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: gotNatCheckReplyMessage" + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: error", error + + # generic error: another crawler already obtained these results + self._file.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), + " REPLY", + show_permid(permid), + str(self._secure_overlay.get_dns_from_peerdb(permid)), + "ERROR(%d)" % error, + payload, + "\n"))) + self._file.flush() + + else: + try: + recv_data = bdecode(payload) + except: + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bad encoded data:", payload + return False + + try: # check natCheckReply message + self.validNatCheckReplyMsg(recv_data) + except RuntimeError, e: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", e + return False + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: received NAT_CHECK_REPLY message: ", recv_data + + # Register peerinfo on file + self._file.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), + " REPLY", + show_permid(permid), + str(self._secure_overlay.get_dns_from_peerdb(permid)), + ":".join([str(x) for x in recv_data]), + "\n"))) + self._file.flush() + + # for Tribler versions < 5.0 : do nothing + if selversion < OLPROTO_VER_NINE: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: Tribler version too old for NATTRAVERSAL: do nothing" + return True + + if DEBUG: + 
print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: do NATTRAVERSAL" + + # Save peer in peerlist + if len(self.peerlist) == PEERLIST_LEN: + del self.peerlist[0] + self.peerlist.append([permid,recv_data[1],recv_data[2]]) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: peerlist length is: ", len(self.peerlist) + + # Try to perform hole punching + if len(self.peerlist) >= 2: + self.tryHolePunching() + + return True + + def validNatCheckReplyMsg(self, ncr_data): + + if not type(ncr_data) == ListType: + raise RuntimeError, "NatCheckMsgHandler: received data is not valid. It must be a list of parameters." + return False + + if not type(ncr_data[0]) == StringType: + raise RuntimeError, "NatCheckMsgHandler: received data is not valid. The first element in the list must be a string." + return False + + if not type(ncr_data[1]) == IntType: + raise RuntimeError, "NatCheckMsgHandler: received data is not valid. The second element in the list must be an integer." + return False + + if not type(ncr_data[2]) == IntType: + raise RuntimeError, "NatCheckMsgHandler: received data is not valid. The third element in the list must be an integer." + return False + + if not type(ncr_data[3]) == StringType: + raise RuntimeError, "NatCheckMsgHandler: received data is not valid. The forth element in the list must be a string." + return False + + if not type(ncr_data[4]) == IntType: + raise RuntimeError, "NatCheckMsgHandler: received data is not valid. The fifth element in the list must be an integer." + return False + + if not type(ncr_data[5]) == StringType: + raise RuntimeError, "NatCheckMsgHandler: received data is not valid. The sixth element in the list must be a string." + return False + + if not type(ncr_data[6]) == IntType: + raise RuntimeError, "NatCheckMsgHandler: received data is not valid. The seventh element in the list must be an integer." 
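# A minimal standalone sketch of the shape check that validNatCheckReplyMsg
# performs: the bdecoded NAT_CHECK_REPLY payload is expected to be a
# seven-element list whose entries follow the string/integer pattern below.
# The field names in EXPECTED are illustrative guesses, not taken from the
# patch; only the positions and types are what the handler actually checks.

EXPECTED = [
    ("nat_type", str),   # e.g. "Symmetric NAT" (assumed meaning)
    ("field_1", int),
    ("field_2", int),
    ("field_3", str),
    ("field_4", int),
    ("field_5", str),
    ("field_6", int),
]

def check_natcheck_reply(ncr_data):
    """Return None if ncr_data matches the expected shape, else an error string."""
    if not isinstance(ncr_data, list) or len(ncr_data) != len(EXPECTED):
        return "reply must be a list of %d elements" % len(EXPECTED)
    for i, (name, kind) in enumerate(EXPECTED):
        if not isinstance(ncr_data[i], kind):
            return "element %d (%s) must be %s" % (i, name, kind.__name__)
    return None

# Example (valid shape):
# check_natcheck_reply(["Port Restricted Cone NAT", 1, 2, "a", 3, "b", 4]) is None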
+ return False + + def tryHolePunching(self): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: first element in peerlist", self.peerlist[len(self.peerlist)-1] + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: second element in peerlist", self.peerlist[len(self.peerlist)-2] + + holePunchingPort = random.randrange(3200, 4200, 1) + holePunchingAddr = (self.holePunchingIP, holePunchingPort) + + peer1 = self.peerlist[len(self.peerlist)-1] + peer2 = self.peerlist[len(self.peerlist)-2] + + request_id = str(show_permid_short(peer1[0]) + show_permid_short(peer2[0]) + str(random.randrange(0, 1000, 1))) + + self.udpConnect(peer1[0], request_id, holePunchingAddr) + self.udpConnect(peer2[0], request_id, holePunchingAddr) + + # Register peerinfo on file + self._file2.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), + "REQUEST", + request_id, + show_permid(peer1[0]), + str(peer1[1]), + str(peer1[2]), + str(self._secure_overlay.get_dns_from_peerdb(peer1[0])), + show_permid(peer2[0]), + str(peer2[1]), + str(peer2[2]), + str(self._secure_overlay.get_dns_from_peerdb(peer2[0])), + "\n"))) + self._file2.flush() + + thread.start_new_thread(coordinateHolePunching, (peer1, peer2, holePunchingAddr)) + + def udpConnect(self, permid, request_id, holePunchingAddr): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: request UDP connection" + + mh_data = request_id + ":" + holePunchingAddr[0] + ":" + str(holePunchingAddr[1]) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: udpConnect message is", mh_data + + try: + mh_msg = bencode(mh_data) + except: + print_exc() + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: error mh_data:", mh_data + return False + + # send the message + self.crawler.send_request(permid, CRAWLER_NATTRAVERSAL, mh_msg, frequency=0, callback=self.udpConnectCallback) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: request for", show_permid_short(permid), "sent to crawler" + + def udpConnectCallback(self, exc, permid): + + if exc is not None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATTRAVERSAL_REQUEST failed to", show_permid_short(permid), exc + + # Register peerinfo on file + self._file2.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), + "REQUEST FAILED", + show_permid(permid), + str(self._secure_overlay.get_dns_from_peerdb(permid)), + "\n"))) + return False + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATTRAVERSAL_REQUEST was sent to", show_permid_short(permid), exc + return True + + def gotUdpConnectRequest(self, permid, selversion, channel_id, mh_msg, reply_callback): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: gotUdpConnectRequest from", show_permid_short(permid) + + try: + mh_data = bdecode(mh_msg) + except: + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: bad encoded data:", mh_msg + return False + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: gotUdpConnectRequest is", mh_data + + + try: + request_id, host, port = mh_data.split(":") + except: + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 
"NatCheckMsgHandler: error in received data:", mh_data + return False + + coordinator = (host, int(port)) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: coordinator address is", coordinator + + mhr_data = request_id + ":" + tryConnect(coordinator) + + # Report back to coordinator + try: + mhr_msg = bencode(mhr_data) + except: + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: error in encoding data:", mhr_data + return False + + reply_callback(mhr_msg, callback=self.udpConnectReplySendCallback) + + def udpConnectReplySendCallback(self, exc, permid): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATTRAVERSAL_REPLY was sent to", show_permid_short(permid), exc + if exc is not None: + return False + return True + + + def gotUdpConnectReply(self, permid, selversion, channel_id, error, mhr_msg, request_callback): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: gotMakeHoleReplyMessage" + + try: + mhr_data = bdecode(mhr_msg) + except: + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: bad encoded data:", mhr_msg + return False + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: message is", mhr_data + + try: + request_id, reply = mhr_data.split(":") + except: + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: error in received data:", mhr_data + return False + + # Register peerinfo on file + self._file2.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), + " REPLY", + request_id, + show_permid(permid), + str(self._secure_overlay.get_dns_from_peerdb(permid)), + reply, + "\n"))) + + self._file2.flush() + diff --git a/tribler-mod/Tribler/Core/NATFirewall/NatCheckMsgHandler.py.bak b/tribler-mod/Tribler/Core/NATFirewall/NatCheckMsgHandler.py.bak new file mode 100644 index 0000000..8fcf6dd --- /dev/null +++ b/tribler-mod/Tribler/Core/NATFirewall/NatCheckMsgHandler.py.bak @@ -0,0 +1,405 @@ +# Written by Lucia D'Acunto +# see LICENSE.txt for license information + +from time import strftime +from traceback import print_exc +import datetime +import random +import socket +import sys +import thread + +from Tribler.Core.BitTornado.BT1.MessageID import CRAWLER_NATCHECK, CRAWLER_NATTRAVERSAL +from Tribler.Core.BitTornado.bencode import bencode, bdecode +from Tribler.Core.NATFirewall.ConnectionCheck import ConnectionCheck +from Tribler.Core.NATFirewall.NatTraversal import tryConnect, coordinateHolePunching +from Tribler.Core.Overlay.SecureOverlay import OLPROTO_VER_EIGHTH, OLPROTO_VER_NINE, SecureOverlay +from Tribler.Core.Statistics.Crawler import Crawler +from Tribler.Core.Utilities.utilities import show_permid, show_permid_short +from types import IntType, StringType, ListType +from Tribler.Core.simpledefs import * + +DEBUG = False + +PEERLIST_LEN = 100 + +class NatCheckMsgHandler: + + __single = None + + def __init__(self): + if NatCheckMsgHandler.__single: + raise RuntimeError, "NatCheckMsgHandler is singleton" + NatCheckMsgHandler.__single = self + self.crawler_reply_callbacks = [] + self._secure_overlay = SecureOverlay.getInstance() + + self.crawler = Crawler.get_instance() + if self.crawler.am_crawler(): + self._file = open("natcheckcrawler.txt", "a") + self._file.write("\n".join(("# " + "*" * 80, strftime("%Y/%m/%d %H:%M:%S"), "# Crawler 
started\n"))) + self._file.flush() + self._file2 = open("nattraversalcrawler.txt", "a") + self._file2.write("\n".join(("# " + "*" * 80, strftime("%Y/%m/%d %H:%M:%S"), "# Crawler started\n"))) + self._file2.flush() + self.peerlist = [] + self.holePunchingIP = socket.gethostbyname(socket.gethostname()) + + else: + self._file = None + + @staticmethod + def getInstance(*args, **kw): + if NatCheckMsgHandler.__single is None: + NatCheckMsgHandler(*args, **kw) + return NatCheckMsgHandler.__single + + def register(self, launchmany): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: register" + + self.session = launchmany.session + self.doNatCheckSender = None + self.registered = True + + def doNatCheck(self, target_permid, selversion, request_callback): + """ + The nat-check initiator_callback + """ + + # for Tribler versions < 4.5.0 : do nothing + # TODO: change OLPROTO_VER_EIGHTH to OLPROTO_VER_SEVENTH + if selversion < OLPROTO_VER_NINE: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: Tribler version too old for NATCHECK: do nothing" + return False + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: do NATCHECK" + + # send the message + request_callback(CRAWLER_NATCHECK, "", callback=self.doNatCheckCallback) + + return True + + def doNatCheckCallback(self, exc, permid): + + if exc is not None: + return False + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCHECK_REQUEST was sent to", show_permid_short(permid), exc + + # Register peerinfo on file + self._file.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), + "REQUEST", + show_permid(permid), + str(self._secure_overlay.get_dns_from_peerdb(permid)), + "\n"))) + self._file.flush() + return True + + def gotDoNatCheckMessage(self, sender_permid, selversion, channel_id, payload, reply_callback): + """ + The handle-request callback + """ + + self.doNatCheckSender = sender_permid + self.crawler_reply_callbacks.append(reply_callback) + + try: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","NatCheckMsgHandler: start_nat_type_detect()" + conn_check = ConnectionCheck.getInstance(self.session) + conn_check.try_start(self.natthreadcb_natCheckReplyCallback) + except: + print_exc() + return False + + return True + + def natthreadcb_natCheckReplyCallback(self, ncr_data): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NAT type: ", ncr_data + + # send the message to the peer who has made the NATCHECK request, if any + if self.doNatCheckSender is not None: + try: + ncr_msg = bencode(ncr_data) + except: + print_exc() + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "error ncr_data:", ncr_data + return False + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler:", ncr_data + + # todo: make sure that natthreadcb_natCheckReplyCallback is always called for a request + # send replies to all the requests that have been received so far + for reply_callback in self.crawler_reply_callbacks: + reply_callback(ncr_msg, callback=self.natCheckReplySendCallback) + self.crawler_reply_callbacks = [] + + + def natCheckReplySendCallback(self, exc, permid): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATCHECK_REPLY was sent to", show_permid_short(permid), exc + if exc is not None: + return False + return 
True + + def gotNatCheckReplyMessage(self, permid, selversion, channel_id, error, payload, request_callback): + """ + The handle-reply callback + """ + if error: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: gotNatCheckReplyMessage" + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: error", error + + # generic error: another crawler already obtained these results + self._file.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), + " REPLY", + show_permid(permid), + str(self._secure_overlay.get_dns_from_peerdb(permid)), + "ERROR(%d)" % error, + payload, + "\n"))) + self._file.flush() + + else: + try: + recv_data = bdecode(payload) + except: + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bad encoded data:", payload + return False + + try: # check natCheckReply message + self.validNatCheckReplyMsg(recv_data) + except RuntimeError, e: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", e + return False + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: received NAT_CHECK_REPLY message: ", recv_data + + # Register peerinfo on file + self._file.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), + " REPLY", + show_permid(permid), + str(self._secure_overlay.get_dns_from_peerdb(permid)), + ":".join([str(x) for x in recv_data]), + "\n"))) + self._file.flush() + + # for Tribler versions < 5.0 : do nothing + if selversion < OLPROTO_VER_NINE: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: Tribler version too old for NATTRAVERSAL: do nothing" + return True + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: do NATTRAVERSAL" + + # Save peer in peerlist + if len(self.peerlist) == PEERLIST_LEN: + del self.peerlist[0] + self.peerlist.append([permid,recv_data[1],recv_data[2]]) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: peerlist length is: ", len(self.peerlist) + + # Try to perform hole punching + if len(self.peerlist) >= 2: + self.tryHolePunching() + + return True + + def validNatCheckReplyMsg(self, ncr_data): + + if not type(ncr_data) == ListType: + raise RuntimeError, "NatCheckMsgHandler: received data is not valid. It must be a list of parameters." + return False + + if not type(ncr_data[0]) == StringType: + raise RuntimeError, "NatCheckMsgHandler: received data is not valid. The first element in the list must be a string." + return False + + if not type(ncr_data[1]) == IntType: + raise RuntimeError, "NatCheckMsgHandler: received data is not valid. The second element in the list must be an integer." + return False + + if not type(ncr_data[2]) == IntType: + raise RuntimeError, "NatCheckMsgHandler: received data is not valid. The third element in the list must be an integer." + return False + + if not type(ncr_data[3]) == StringType: + raise RuntimeError, "NatCheckMsgHandler: received data is not valid. The forth element in the list must be a string." + return False + + if not type(ncr_data[4]) == IntType: + raise RuntimeError, "NatCheckMsgHandler: received data is not valid. The fifth element in the list must be an integer." + return False + + if not type(ncr_data[5]) == StringType: + raise RuntimeError, "NatCheckMsgHandler: received data is not valid. The sixth element in the list must be a string." 
+ return False + + if not type(ncr_data[6]) == IntType: + raise RuntimeError, "NatCheckMsgHandler: received data is not valid. The seventh element in the list must be an integer." + return False + + def tryHolePunching(self): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: first element in peerlist", self.peerlist[len(self.peerlist)-1] + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: second element in peerlist", self.peerlist[len(self.peerlist)-2] + + holePunchingPort = random.randrange(3200, 4200, 1) + holePunchingAddr = (self.holePunchingIP, holePunchingPort) + + peer1 = self.peerlist[len(self.peerlist)-1] + peer2 = self.peerlist[len(self.peerlist)-2] + + request_id = str(show_permid_short(peer1[0]) + show_permid_short(peer2[0]) + str(random.randrange(0, 1000, 1))) + + self.udpConnect(peer1[0], request_id, holePunchingAddr) + self.udpConnect(peer2[0], request_id, holePunchingAddr) + + # Register peerinfo on file + self._file2.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), + "REQUEST", + request_id, + show_permid(peer1[0]), + str(peer1[1]), + str(peer1[2]), + str(self._secure_overlay.get_dns_from_peerdb(peer1[0])), + show_permid(peer2[0]), + str(peer2[1]), + str(peer2[2]), + str(self._secure_overlay.get_dns_from_peerdb(peer2[0])), + "\n"))) + self._file2.flush() + + thread.start_new_thread(coordinateHolePunching, (peer1, peer2, holePunchingAddr)) + + def udpConnect(self, permid, request_id, holePunchingAddr): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: request UDP connection" + + mh_data = request_id + ":" + holePunchingAddr[0] + ":" + str(holePunchingAddr[1]) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: udpConnect message is", mh_data + + try: + mh_msg = bencode(mh_data) + except: + print_exc() + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: error mh_data:", mh_data + return False + + # send the message + self.crawler.send_request(permid, CRAWLER_NATTRAVERSAL, mh_msg, frequency=0, callback=self.udpConnectCallback) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: request for", show_permid_short(permid), "sent to crawler" + + def udpConnectCallback(self, exc, permid): + + if exc is not None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATTRAVERSAL_REQUEST failed to", show_permid_short(permid), exc + + # Register peerinfo on file + self._file2.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), + "REQUEST FAILED", + show_permid(permid), + str(self._secure_overlay.get_dns_from_peerdb(permid)), + "\n"))) + return False + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATTRAVERSAL_REQUEST was sent to", show_permid_short(permid), exc + return True + + def gotUdpConnectRequest(self, permid, selversion, channel_id, mh_msg, reply_callback): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: gotUdpConnectRequest from", show_permid_short(permid) + + try: + mh_data = bdecode(mh_msg) + except: + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: bad encoded data:", mh_msg + return False + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: 
gotUdpConnectRequest is", mh_data + + + try: + request_id, host, port = mh_data.split(":") + except: + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: error in received data:", mh_data + return False + + coordinator = (host, int(port)) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: coordinator address is", coordinator + + mhr_data = request_id + ":" + tryConnect(coordinator) + + # Report back to coordinator + try: + mhr_msg = bencode(mhr_data) + except: + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: error in encoding data:", mhr_data + return False + + reply_callback(mhr_msg, callback=self.udpConnectReplySendCallback) + + def udpConnectReplySendCallback(self, exc, permid): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NATTRAVERSAL_REPLY was sent to", show_permid_short(permid), exc + if exc is not None: + return False + return True + + + def gotUdpConnectReply(self, permid, selversion, channel_id, error, mhr_msg, request_callback): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: gotMakeHoleReplyMessage" + + try: + mhr_data = bdecode(mhr_msg) + except: + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: bad encoded data:", mhr_msg + return False + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: message is", mhr_data + + try: + request_id, reply = mhr_data.split(":") + except: + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: error in received data:", mhr_data + return False + + # Register peerinfo on file + self._file2.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), + " REPLY", + request_id, + show_permid(permid), + str(self._secure_overlay.get_dns_from_peerdb(permid)), + reply, + "\n"))) + + self._file2.flush() + diff --git a/tribler-mod/Tribler/Core/NATFirewall/NatTraversal.py b/tribler-mod/Tribler/Core/NATFirewall/NatTraversal.py new file mode 100644 index 0000000..6db8169 --- /dev/null +++ b/tribler-mod/Tribler/Core/NATFirewall/NatTraversal.py @@ -0,0 +1,179 @@ +from time import localtime, strftime +from time import strftime +from traceback import print_exc +import socket +import sys + +DEBUG = False + +def coordinateHolePunching(peer1, peer2, holePunchingAddr): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatTraversal: coordinateHolePunching at", holePunchingAddr + + # Set up the sockets + try : + udpsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + udpsock.bind(holePunchingAddr) + udpsock.settimeout(60) + + except socket.error, (errno, strerror) : + + if udpsock : + udpsock.close() + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatTraversal: Could not open socket: %s" % (strerror) + + return + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatTraversal: waiting for connection..." 
+ + # Receive messages + peeraddr2 = None + while True: + + try: + data, peeraddr1 = udpsock.recvfrom(1024) + if not data: + continue + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatTraversal:", strftime("%Y/%m/%d %H:%M:%S"), "...connected from: ", peeraddr1 + if peeraddr2 == None: + peeraddr2 = peeraddr1 + elif peeraddr2 != peeraddr1: + udpsock.sendto(peeraddr1[0] + ":" + str(peeraddr1[1]), peeraddr2) + udpsock.sendto(peeraddr1[0] + ":" + str(peeraddr1[1]), peeraddr2) + udpsock.sendto(peeraddr1[0] + ":" + str(peeraddr1[1]), peeraddr2) + udpsock.sendto(peeraddr2[0] + ":" + str(peeraddr2[1]), peeraddr1) + udpsock.sendto(peeraddr2[0] + ":" + str(peeraddr2[1]), peeraddr1) + udpsock.sendto(peeraddr2[0] + ":" + str(peeraddr2[1]), peeraddr1) + break + + except socket.timeout, error: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatTraversal: timeout with peers", error + udpsock.close() + break + + # Close socket + udpsock.close() + +def tryConnect(coordinator): + + # Set up the socket + udpsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + udpsock.settimeout(5) + + # Send messages + udpsock.sendto("ping",coordinator) + udpsock.sendto("ping",coordinator) + udpsock.sendto("ping",coordinator) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatTraversal: sending ping to ", coordinator + + # Wait for response from the coordinator + + while True: + data = None + addr = None + try: + data, addr = udpsock.recvfrom(1024) + except socket.timeout, (strerror): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatTraversal: timeout with coordinator" + return "ERR" + + if addr == coordinator: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatTraversal: received", data, "from coordinator" + break + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatTraversal: received", data, "from", addr + + #success = False + #try: + # host, port = data.split(":") + #except: + # print_exc() + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: error in received data:", data + # return success + # peer = (host, int(port)) + # for i in range(3): + # udpsock.sendto("hello",peer) + # udpsock.sendto("hello",peer) + # udpsock.sendto("hello",peer) + + # try: + # data, addr = udpsock.recvfrom(1024) + + # except socket.timeout, (strerror): + # if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatTraversal: first timeout", strerror + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatTraversal: resend" + + # else: + # success = True + # break + + try: + host, port = data.split(":") + except: + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: error in received data:", data + return "ERR" + + peer = (host, int(port)) + udpsock.sendto("hello",peer) + udpsock.sendto("hello",peer) + udpsock.sendto("hello",peer) + + # Wait for response + data = None + addr = None + + while True: + try: + data, addr = udpsock.recvfrom(1024) + except socket.timeout, (strerror): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatTraversal: first timeout", strerror + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatTraversal: resend" + + udpsock.sendto("hello", peer) + udpsock.sendto("hello", peer) + udpsock.sendto("hello", peer) + + 
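# Sketch of the address comparison that tryConnect applies once a datagram
# arrives from the punched peer: a reply from exactly the advertised (ip, port)
# means the hole punch worked as advertised, while a reply from the same IP but
# a different port suggests the peer's NAT rewrote the source port (symmetric
# NAT), so the observed address is used from then on. This helper is
# illustrative; the patch performs the comparison inline.

def classify_reply(expected_peer, observed_addr):
    """Return ('direct'|'symmetric'|'other', address_to_use_or_None)."""
    if observed_addr == expected_peer:
        return "direct", expected_peer
    if observed_addr[0] == expected_peer[0]:
        return "symmetric", observed_addr
    return "other", None

# e.g. classify_reply(("10.0.0.2", 4000), ("10.0.0.2", 52311))
#      == ("symmetric", ("10.0.0.2", 52311))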
try: + data, addr = udpsock.recvfrom(1024) + except socket.timeout, (strerror): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatTraversal: second timeout", strerror + + return "NO" + + # data received, check address + if addr == peer: # peer is not symmetric NAT + break + + if addr[0] == peer[0]: # peer has a symmetric NAT + peer = addr + break + + + udpsock.sendto("hello",peer) + udpsock.sendto("hello",peer) + udpsock.sendto("hello",peer) + + # Close socket + udpsock.close() + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatTraversal: message from", addr, "is", data + + return "YES" + + diff --git a/tribler-mod/Tribler/Core/NATFirewall/NatTraversal.py.bak b/tribler-mod/Tribler/Core/NATFirewall/NatTraversal.py.bak new file mode 100644 index 0000000..b77dd3d --- /dev/null +++ b/tribler-mod/Tribler/Core/NATFirewall/NatTraversal.py.bak @@ -0,0 +1,178 @@ +from time import strftime +from traceback import print_exc +import socket +import sys + +DEBUG = False + +def coordinateHolePunching(peer1, peer2, holePunchingAddr): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatTraversal: coordinateHolePunching at", holePunchingAddr + + # Set up the sockets + try : + udpsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + udpsock.bind(holePunchingAddr) + udpsock.settimeout(60) + + except socket.error, (errno, strerror) : + + if udpsock : + udpsock.close() + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatTraversal: Could not open socket: %s" % (strerror) + + return + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatTraversal: waiting for connection..." + + # Receive messages + peeraddr2 = None + while True: + + try: + data, peeraddr1 = udpsock.recvfrom(1024) + if not data: + continue + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatTraversal:", strftime("%Y/%m/%d %H:%M:%S"), "...connected from: ", peeraddr1 + if peeraddr2 == None: + peeraddr2 = peeraddr1 + elif peeraddr2 != peeraddr1: + udpsock.sendto(peeraddr1[0] + ":" + str(peeraddr1[1]), peeraddr2) + udpsock.sendto(peeraddr1[0] + ":" + str(peeraddr1[1]), peeraddr2) + udpsock.sendto(peeraddr1[0] + ":" + str(peeraddr1[1]), peeraddr2) + udpsock.sendto(peeraddr2[0] + ":" + str(peeraddr2[1]), peeraddr1) + udpsock.sendto(peeraddr2[0] + ":" + str(peeraddr2[1]), peeraddr1) + udpsock.sendto(peeraddr2[0] + ":" + str(peeraddr2[1]), peeraddr1) + break + + except socket.timeout, error: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatTraversal: timeout with peers", error + udpsock.close() + break + + # Close socket + udpsock.close() + +def tryConnect(coordinator): + + # Set up the socket + udpsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + udpsock.settimeout(5) + + # Send messages + udpsock.sendto("ping",coordinator) + udpsock.sendto("ping",coordinator) + udpsock.sendto("ping",coordinator) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatTraversal: sending ping to ", coordinator + + # Wait for response from the coordinator + + while True: + data = None + addr = None + try: + data, addr = udpsock.recvfrom(1024) + except socket.timeout, (strerror): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatTraversal: timeout with coordinator" + return "ERR" + + if addr == coordinator: + if DEBUG: + print >>sys.stderr, 
strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatTraversal: received", data, "from coordinator" + break + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatTraversal: received", data, "from", addr + + #success = False + #try: + # host, port = data.split(":") + #except: + # print_exc() + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: error in received data:", data + # return success + # peer = (host, int(port)) + # for i in range(3): + # udpsock.sendto("hello",peer) + # udpsock.sendto("hello",peer) + # udpsock.sendto("hello",peer) + + # try: + # data, addr = udpsock.recvfrom(1024) + + # except socket.timeout, (strerror): + # if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatTraversal: first timeout", strerror + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatTraversal: resend" + + # else: + # success = True + # break + + try: + host, port = data.split(":") + except: + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatCheckMsgHandler: error in received data:", data + return "ERR" + + peer = (host, int(port)) + udpsock.sendto("hello",peer) + udpsock.sendto("hello",peer) + udpsock.sendto("hello",peer) + + # Wait for response + data = None + addr = None + + while True: + try: + data, addr = udpsock.recvfrom(1024) + except socket.timeout, (strerror): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatTraversal: first timeout", strerror + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatTraversal: resend" + + udpsock.sendto("hello", peer) + udpsock.sendto("hello", peer) + udpsock.sendto("hello", peer) + + try: + data, addr = udpsock.recvfrom(1024) + except socket.timeout, (strerror): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatTraversal: second timeout", strerror + + return "NO" + + # data received, check address + if addr == peer: # peer is not symmetric NAT + break + + if addr[0] == peer[0]: # peer has a symmetric NAT + peer = addr + break + + + udpsock.sendto("hello",peer) + udpsock.sendto("hello",peer) + udpsock.sendto("hello",peer) + + # Close socket + udpsock.close() + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "NatTraversal: message from", addr, "is", data + + return "YES" + + diff --git a/tribler-mod/Tribler/Core/NATFirewall/ReturnConnHandler.py b/tribler-mod/Tribler/Core/NATFirewall/ReturnConnHandler.py new file mode 100644 index 0000000..6cf4c1b --- /dev/null +++ b/tribler-mod/Tribler/Core/NATFirewall/ReturnConnHandler.py @@ -0,0 +1,604 @@ +from time import localtime, strftime +# Written by Arno Bakker, Bram Cohen, Jie Yang +# see LICENSE.txt for license information +# +# This class receives all connections and messages destined for +# infohash = 0x00 0x00 ... 0x01 +# The peer sends a DIALBACK_REPLY message, we send no reply. 
+# + +import sys +from struct import pack,unpack +from time import time +from sets import Set +from cStringIO import StringIO +from threading import currentThread +from socket import gethostbyname +from traceback import print_exc,print_stack + +from Tribler.Core.BitTornado.__init__ import createPeerID +from Tribler.Core.BitTornado.BT1.MessageID import protocol_name,option_pattern,getMessageName +from Tribler.Core.BitTornado.BT1.convert import tobinary,toint + + +DEBUG = False + +# +# Public definitions +# +dialback_infohash = '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01' + +# +# Private definitions +# + +# States for overlay connection +STATE_INITIAL = 0 +STATE_HS_FULL_WAIT = 1 +STATE_HS_PEERID_WAIT = 2 +STATE_DATA_WAIT = 4 +STATE_CLOSED = 5 + +# Misc +EXPIRE_THRESHOLD = 30 # seconds:: keep consistent with sockethandler +EXPIRE_CHECK_INTERVAL = 60 # seconds + + +class ReturnConnHandler: + __single = None + + def __init__(self): + if ReturnConnHandler.__single: + raise RuntimeError, "ReturnConnHandler is Singleton" + ReturnConnHandler.__single = self + + # + # Interface for upper layer + # + def getInstance(*args, **kw): + if ReturnConnHandler.__single is None: + ReturnConnHandler(*args, **kw) + return ReturnConnHandler.__single + getInstance = staticmethod(getInstance) + + def register(self,rawserver,multihandler,mylistenport,max_len): + """ Called by MainThread """ + self.rawserver = rawserver # real rawserver, not overlay_bridge + self.sock_hand = self.rawserver.sockethandler + self.multihandler = multihandler + self.dialback_rawserver = multihandler.newRawServer(dialback_infohash, + self.rawserver.doneflag, + protocol_name) + self.myid = create_my_peer_id(mylistenport) + self.max_len = max_len + self.iplport2oc = {} # (IP,listen port) -> ReturnConnection + self.usermsghandler = None + self.userconnhandler = None + + def resetSingleton(self): + """ For testing purposes """ + ReturnConnHandler.__single = None + + def start_listening(self): + """ Called by MainThread """ + self.dialback_rawserver.start_listening(self) + + def connect_dns(self,dns,callback): + """ Connects to the indicated endpoint. Non-blocking. + + Pre: "dns" must be an IP address, not a hostname. + + Network thread calls "callback(exc,dns)" when the connection + is established or when an error occurs during connection + establishment. In the former case, exc is None, otherwise + it contains an Exception. + + The established connection will auto close after EXPIRE_THRESHOLD + seconds of inactivity. + """ + # Called by overlay thread + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbreturn: connect_dns",dns + # To prevent concurrency problems on sockets the calling thread + # delegates to the network thread. + task = Task(self._connect_dns,dns,callback) + self.rawserver.add_task(task.start, 0) + + + def send(self,dns,msg,callback): + """ Sends a message to the indicated dns. Non-blocking. + + Pre: connection to permid must have been established successfully. + + Network thread calls "callback(exc,dns)" when the message is sent + or when an error occurs during sending. In the former case, exc + is None, otherwise it contains an Exception. + """ + # Called by overlay thread + # To prevent concurrency problems on sockets the calling thread + # delegates to the network thread. + task = Task(self._send,dns,msg,callback) + self.rawserver.add_task(task.start, 0) + + + + def close(self,dns): + """ Closes any connection to indicated permid. Non-blocking. 
+ + Pre: connection to permid must have been established successfully. + + Network thread calls "callback(exc,permid,selver)" when the connection + is closed. + """ + # Called by overlay thread + # To prevent concurrency problems on sockets the calling thread + # delegates to the network thread. + task = Task(self._close,dns) + self.rawserver.add_task(task.start, 0) + + + def register_recv_callback(self,callback): + """ Register a callback to be called when receiving a message from + any permid. Non-blocking. + + Network thread calls "callback(exc,permid,selver,msg)" when a message + is received. The callback is not called on errors e.g. remote + connection close. + """ + self.usermsghandler = callback + + def register_conns_callback(self,callback): + """ Register a callback to be called when receiving a connection from + any permid. Non-blocking. + + Network thread calls "callback(exc,permid,selver,locally_initiated)" + when a connection is established (locally initiated or remote), or + when a connection is closed locally or remotely. In the former case, + exc is None, otherwise it contains an Exception. + + Note that this means that if a callback is registered via this method, + both this callback and the callback passed to a connect() method + will be called. + """ + self.userconnhandler = callback + + + # + # Internal methods + # + def _connect_dns(self,dns,callback): + # Called by network thread + try: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbreturn: actual connect_dns",dns + iplport = ip_and_port2str(dns[0],dns[1]) + oc = None + try: + oc = self.iplport2oc[iplport] + except KeyError: + pass + if oc is None: + oc = self.start_connection(dns) + self.iplport2oc[iplport] = oc + oc.queue_callback(dns,callback) + else: + callback(None,dns) + except Exception,exc: + if DEBUG: + print_exc(file=sys.stderr) + callback(exc,dns) + + def _send(self,dns,message,callback): + # Called by network thread + try: + iplport = ip_and_port2str(dns[0],dns[1]) + oc = None + try: + oc = self.iplport2oc[iplport] + except KeyError: + pass + if oc is None: + callback(KeyError('Not connected to dns'),dns) + else: + oc.send_message(message) + callback(None,dns) + except Exception,exc: + if DEBUG: + print_exc(file=sys.stderr) + callback(exc,dns) + + + def _close(self,dns): + # Called by network thread + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbreturn: actual close",dns + try: + iplport = ip_and_port2str(dns[0],dns[1]) + oc = None + try: + oc = self.iplport2oc[iplport] + except KeyError: + pass + if oc is None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbreturn: error - actual close, but no connection to peer in admin" + else: + oc.close() + except Exception,e: + print_exc(file=sys.stderr) + + # + # Interface for SocketHandler + # + def external_connection_made(self,singsock): + """ incoming connection (never used) """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbreturn: external_connection_made",singsock.get_ip(),singsock.get_port() + oc = ReturnConnection(self,singsock,self.rawserver) + singsock.set_handler(oc) + + def connection_flushed(self,singsock): + """ sockethandler flushes connection """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbreturn: connection_flushed",singsock.get_ip(),singsock.get_port() + pass + + # + # Interface for ServerPortHandler + # + def externally_handshaked_connection_made(self, 
singsock, options, msg_remainder): + """ incoming connection, handshake partially read to identity + as an it as overlay connection (used always) + """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbreturn: externally_handshaked_connection_made",\ + singsock.get_ip(),singsock.get_port() + oc = ReturnConnection(self,singsock,self.rawserver,ext_handshake = True, options = options) + singsock.set_handler(oc) + if msg_remainder: + oc.data_came_in(singsock,msg_remainder) + return True + + + # + # Interface for ReturnConnection + # + def got_connection(self,oc): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbreturn: Got connection from",oc.get_ip(),"listen",oc.get_listen_port() + + ret = True + iplport = ip_and_port2str(oc.get_ip(),oc.get_listen_port()) + known = iplport in self.iplport2oc + if not known: + self.iplport2oc[iplport] = oc + elif known and not oc.is_locally_initiated(): + # Locally initiated connections will already be registered, + # so if it's not a local connection and we already have one + # we have a duplicate, and we close the new one. + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbreturn: got_connection:", \ + "closing because we already have a connection to",iplport + self.cleanup_admin_and_callbacks(oc, + Exception('closing because we already have a connection to peer')) + ret = False + + if ret: + oc.dequeue_callbacks() + if self.userconnhandler is not None: + try: + self.userconnhandler(None,(oc.get_ip(),oc.get_listen_port()),oc.is_locally_initiated()) + except: + # Catchall + print_exc(file=sys.stderr) + return ret + + def local_close(self,oc): + """ our side is closing the connection """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbreturn: local_close" + self.cleanup_admin_and_callbacks(oc,Exception('local close')) + + def connection_lost(self,oc): + """ overlay connection telling us to clear admin """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbreturn: connection_lost" + self.cleanup_admin_and_callbacks(oc,Exception('connection lost')) + + def got_message(self,dns,message): + """ received message from peer, pass to upper layer """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbreturn: got_message",getMessageName(message[0]) + if self.usermsghandler is None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbreturn: User receive callback not set" + return + try: + ret = self.usermsghandler(dns,message) + if ret is None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbreturn: INTERNAL ERROR:", \ + "User receive callback returned None, not True or False" + ret = False + return ret + except: + # Catch all + print_exc(file=sys.stderr) + return False + + + def get_max_len(self): + return self.max_len + + def get_my_peer_id(self): + return self.myid + + def measurefunc(self,length): + pass + + def start_connection(self,dns): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbreturn: Attempt to connect to",dns + singsock = self.sock_hand.start_connection(dns) + oc = ReturnConnection(self,singsock,self.rawserver, + locally_initiated=True,specified_dns=dns) + singsock.set_handler(oc) + return oc + + def cleanup_admin_and_callbacks(self,oc,exc): + oc.cleanup_callbacks(exc) + self.cleanup_admin(oc) + if self.userconnhandler is not None: + 
self.userconnhandler(exc,(oc.get_ip(),oc.get_listen_port()),oc.is_locally_initiated()) + + def cleanup_admin(self,oc): + iplports = [] + d = 0 + for key in self.iplport2oc.keys(): + #print "***** iplport2oc:", key, self.iplport2oc[key] + if self.iplport2oc[key] == oc: + del self.iplport2oc[key] + #print "*****!!! del", key, oc + d += 1 + + +class Task: + def __init__(self,method,*args, **kwargs): + self.method = method + self.args = args + self.kwargs = kwargs + + def start(self): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbreturn: task: start",self.method + #print_stack(file=sys.stderr) + self.method(*self.args,**self.kwargs) + + +class ReturnConnection: + def __init__(self,handler,singsock,rawserver,locally_initiated = False, + specified_dns = None, ext_handshake = False,options = None): + # Called by network thread + self.handler = handler + self.singsock = singsock # for writing + self.rawserver = rawserver + self.buffer = StringIO() + self.cb_queue = [] + self.listen_port = None + self.options = None + self.locally_initiated = locally_initiated + self.specified_dns = specified_dns + self.last_use = time() + + self.state = STATE_INITIAL + self.write(chr(len(protocol_name)) + protocol_name + + option_pattern + dialback_infohash + self.handler.get_my_peer_id()) + if ext_handshake: + self.state = STATE_HS_PEERID_WAIT + self.next_len = 20 + self.next_func = self.read_peer_id + self.set_options(options) + else: + self.state = STATE_HS_FULL_WAIT + self.next_len = 1 + self.next_func = self.read_header_len + + # Leave autoclose here instead of ReturnConnHandler, as that doesn't record + # remotely-initiated ReturnConnections before authentication is done. + self.rawserver.add_task(self._dlbconn_auto_close, EXPIRE_CHECK_INTERVAL) + + # + # Interface for SocketHandler + # + def data_came_in(self, singsock, data): + """ sockethandler received data """ + # now we got something we can ask for the peer's real port + dummy_port = singsock.get_port(True) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbconn: data_came_in",singsock.get_ip(),singsock.get_port() + self.handler.measurefunc(len(data)) + self.last_use = time() + while 1: + if self.state == STATE_CLOSED: + return + i = self.next_len - self.buffer.tell() + if i > len(data): + self.buffer.write(data) + return + self.buffer.write(data[:i]) + data = data[i:] + m = self.buffer.getvalue() + self.buffer.reset() + self.buffer.truncate() + try: + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbconn: Trying to read",self.next_len,"using",self.next_func + x = self.next_func(m) + except: + self.next_len, self.next_func = 1, self.read_dead + if DEBUG: + print_exc(file=sys.stderr) + raise + if x is None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbconn: next_func returned None",self.next_func + self.close() + return + self.next_len, self.next_func = x + + def connection_lost(self,singsock): + """ kernel or socket handler reports connection lost """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbconn: connection_lost",singsock.get_ip(),singsock.get_port(),self.state + if self.state != STATE_CLOSED: + self.state = STATE_CLOSED + self.handler.connection_lost(self) + + def connection_flushed(self,singsock): + """ sockethandler flushes connection """ + pass + + # + # Interface for ReturnConnHandler + # + def send_message(self,message): + self.last_use = time() + s = 
tobinary(len(message))+message + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbconn: Sending message",len(message) + self.write(s) + + def is_locally_initiated(self): + return self.locally_initiated + + def get_ip(self): + return self.singsock.get_ip() + + def get_port(self): + return self.singsock.get_port() + + def get_listen_port(self): + return self.listen_port + + def queue_callback(self,dns,callback): + if callback is not None: + self.cb_queue.append(callback) + + def dequeue_callbacks(self): + try: + for callback in self.cb_queue: + callback(None,self.specified_dns) + self.cb_queue = [] + except Exception,e: + print_exc(file=sys.stderr) + + + def cleanup_callbacks(self,exc): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbconn: cleanup_callbacks: #callbacks is",len(self.cb_queue) + try: + for callback in self.cb_queue: + ## Failure connecting + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbconn: cleanup_callbacks: callback is",callback + callback(exc,self.specified_dns) + except Exception,e: + print_exc(file=sys.stderr) + + # + # Internal methods + # + def read_header_len(self, s): + if ord(s) != len(protocol_name): + return None + return len(protocol_name), self.read_header + + def read_header(self, s): + if s != protocol_name: + return None + return 8, self.read_reserved + + def read_reserved(self, s): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbconn: Reserved bits:", `s` + self.set_options(s) + return 20, self.read_download_id + + def read_download_id(self, s): + if s != dialback_infohash: + return None + return 20, self.read_peer_id + + def read_peer_id(self, s): + self.unauth_peer_id = s + self.listen_port = decode_listen_port(self.unauth_peer_id) + self.state = STATE_DATA_WAIT + if not self.got_connection(): + self.close() + return + return 4, self.read_len + + + def got_connection(self): + return self.handler.got_connection(self) + + def read_len(self, s): + l = toint(s) + if l > self.handler.get_max_len(): + return None + return l, self.read_message + + def read_message(self, s): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbconn: read_message len",len(s),self.state + + if s != '': + if self.state == STATE_DATA_WAIT: + if not self.handler.got_message((self.get_ip(),self.get_listen_port()),s): + return None + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbconn: Received message while in illegal state, internal error!" 
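# Sketch of the message framing used by send_message() and the
# read_len()/read_message() pair in ReturnConnection: after the handshake every
# message travels as a 4-byte length prefix followed by the payload, and lengths
# above the configured maximum are rejected. struct is used here in place of
# BT1.convert's tobinary()/toint(), on the assumption that the prefix is a
# big-endian unsigned 32-bit integer; the max_len default is illustrative.

from struct import pack, unpack

def frame_message(payload, max_len=2 ** 23):
    if len(payload) > max_len:
        raise ValueError("message too large")
    return pack(">I", len(payload)) + payload

def unframe_message(data, max_len=2 ** 23):
    """Return (payload, remainder), or (None, data) if not enough bytes arrived yet."""
    if len(data) < 4:
        return None, data
    (length,) = unpack(">I", data[:4])
    if length > max_len:
        raise ValueError("refusing message of %d bytes" % length)
    if len(data) < 4 + length:
        return None, data
    return data[4:4 + length], data[4 + length:]

# e.g. unframe_message(frame_message("hello")) == ("hello", "")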
+ return None + return 4, self.read_len + + def read_dead(self, s): + return None + + def write(self,s): + self.singsock.write(s) + + def set_options(self,options): + self.options = options + + def close(self): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbconn: we close()",self.get_ip(),self.get_port() + self.state_when_error = self.state + if self.state != STATE_CLOSED: + self.state = STATE_CLOSED + self.handler.local_close(self) + self.singsock.close() + return + + def _dlbconn_auto_close(self): + if (time() - self.last_use) > EXPIRE_THRESHOLD: + self.close() + else: + self.rawserver.add_task(self._dlbconn_auto_close, EXPIRE_CHECK_INTERVAL) + +def create_my_peer_id(my_listen_port): + myid = createPeerID() + myid = myid[:14] + pack(' ReturnConnection + self.usermsghandler = None + self.userconnhandler = None + + def resetSingleton(self): + """ For testing purposes """ + ReturnConnHandler.__single = None + + def start_listening(self): + """ Called by MainThread """ + self.dialback_rawserver.start_listening(self) + + def connect_dns(self,dns,callback): + """ Connects to the indicated endpoint. Non-blocking. + + Pre: "dns" must be an IP address, not a hostname. + + Network thread calls "callback(exc,dns)" when the connection + is established or when an error occurs during connection + establishment. In the former case, exc is None, otherwise + it contains an Exception. + + The established connection will auto close after EXPIRE_THRESHOLD + seconds of inactivity. + """ + # Called by overlay thread + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbreturn: connect_dns",dns + # To prevent concurrency problems on sockets the calling thread + # delegates to the network thread. + task = Task(self._connect_dns,dns,callback) + self.rawserver.add_task(task.start, 0) + + + def send(self,dns,msg,callback): + """ Sends a message to the indicated dns. Non-blocking. + + Pre: connection to permid must have been established successfully. + + Network thread calls "callback(exc,dns)" when the message is sent + or when an error occurs during sending. In the former case, exc + is None, otherwise it contains an Exception. + """ + # Called by overlay thread + # To prevent concurrency problems on sockets the calling thread + # delegates to the network thread. + task = Task(self._send,dns,msg,callback) + self.rawserver.add_task(task.start, 0) + + + + def close(self,dns): + """ Closes any connection to indicated permid. Non-blocking. + + Pre: connection to permid must have been established successfully. + + Network thread calls "callback(exc,permid,selver)" when the connection + is closed. + """ + # Called by overlay thread + # To prevent concurrency problems on sockets the calling thread + # delegates to the network thread. + task = Task(self._close,dns) + self.rawserver.add_task(task.start, 0) + + + def register_recv_callback(self,callback): + """ Register a callback to be called when receiving a message from + any permid. Non-blocking. + + Network thread calls "callback(exc,permid,selver,msg)" when a message + is received. The callback is not called on errors e.g. remote + connection close. + """ + self.usermsghandler = callback + + def register_conns_callback(self,callback): + """ Register a callback to be called when receiving a connection from + any permid. Non-blocking. 
+ + Network thread calls "callback(exc,permid,selver,locally_initiated)" + when a connection is established (locally initiated or remote), or + when a connection is closed locally or remotely. In the former case, + exc is None, otherwise it contains an Exception. + + Note that this means that if a callback is registered via this method, + both this callback and the callback passed to a connect() method + will be called. + """ + self.userconnhandler = callback + + + # + # Internal methods + # + def _connect_dns(self,dns,callback): + # Called by network thread + try: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbreturn: actual connect_dns",dns + iplport = ip_and_port2str(dns[0],dns[1]) + oc = None + try: + oc = self.iplport2oc[iplport] + except KeyError: + pass + if oc is None: + oc = self.start_connection(dns) + self.iplport2oc[iplport] = oc + oc.queue_callback(dns,callback) + else: + callback(None,dns) + except Exception,exc: + if DEBUG: + print_exc(file=sys.stderr) + callback(exc,dns) + + def _send(self,dns,message,callback): + # Called by network thread + try: + iplport = ip_and_port2str(dns[0],dns[1]) + oc = None + try: + oc = self.iplport2oc[iplport] + except KeyError: + pass + if oc is None: + callback(KeyError('Not connected to dns'),dns) + else: + oc.send_message(message) + callback(None,dns) + except Exception,exc: + if DEBUG: + print_exc(file=sys.stderr) + callback(exc,dns) + + + def _close(self,dns): + # Called by network thread + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbreturn: actual close",dns + try: + iplport = ip_and_port2str(dns[0],dns[1]) + oc = None + try: + oc = self.iplport2oc[iplport] + except KeyError: + pass + if oc is None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbreturn: error - actual close, but no connection to peer in admin" + else: + oc.close() + except Exception,e: + print_exc(file=sys.stderr) + + # + # Interface for SocketHandler + # + def external_connection_made(self,singsock): + """ incoming connection (never used) """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbreturn: external_connection_made",singsock.get_ip(),singsock.get_port() + oc = ReturnConnection(self,singsock,self.rawserver) + singsock.set_handler(oc) + + def connection_flushed(self,singsock): + """ sockethandler flushes connection """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbreturn: connection_flushed",singsock.get_ip(),singsock.get_port() + pass + + # + # Interface for ServerPortHandler + # + def externally_handshaked_connection_made(self, singsock, options, msg_remainder): + """ incoming connection, handshake partially read to identity + as an it as overlay connection (used always) + """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbreturn: externally_handshaked_connection_made",\ + singsock.get_ip(),singsock.get_port() + oc = ReturnConnection(self,singsock,self.rawserver,ext_handshake = True, options = options) + singsock.set_handler(oc) + if msg_remainder: + oc.data_came_in(singsock,msg_remainder) + return True + + + # + # Interface for ReturnConnection + # + def got_connection(self,oc): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbreturn: Got connection from",oc.get_ip(),"listen",oc.get_listen_port() + + ret = True + iplport = ip_and_port2str(oc.get_ip(),oc.get_listen_port()) + known = iplport in 
self.iplport2oc + if not known: + self.iplport2oc[iplport] = oc + elif known and not oc.is_locally_initiated(): + # Locally initiated connections will already be registered, + # so if it's not a local connection and we already have one + # we have a duplicate, and we close the new one. + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbreturn: got_connection:", \ + "closing because we already have a connection to",iplport + self.cleanup_admin_and_callbacks(oc, + Exception('closing because we already have a connection to peer')) + ret = False + + if ret: + oc.dequeue_callbacks() + if self.userconnhandler is not None: + try: + self.userconnhandler(None,(oc.get_ip(),oc.get_listen_port()),oc.is_locally_initiated()) + except: + # Catchall + print_exc(file=sys.stderr) + return ret + + def local_close(self,oc): + """ our side is closing the connection """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbreturn: local_close" + self.cleanup_admin_and_callbacks(oc,Exception('local close')) + + def connection_lost(self,oc): + """ overlay connection telling us to clear admin """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbreturn: connection_lost" + self.cleanup_admin_and_callbacks(oc,Exception('connection lost')) + + def got_message(self,dns,message): + """ received message from peer, pass to upper layer """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbreturn: got_message",getMessageName(message[0]) + if self.usermsghandler is None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbreturn: User receive callback not set" + return + try: + ret = self.usermsghandler(dns,message) + if ret is None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbreturn: INTERNAL ERROR:", \ + "User receive callback returned None, not True or False" + ret = False + return ret + except: + # Catch all + print_exc(file=sys.stderr) + return False + + + def get_max_len(self): + return self.max_len + + def get_my_peer_id(self): + return self.myid + + def measurefunc(self,length): + pass + + def start_connection(self,dns): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbreturn: Attempt to connect to",dns + singsock = self.sock_hand.start_connection(dns) + oc = ReturnConnection(self,singsock,self.rawserver, + locally_initiated=True,specified_dns=dns) + singsock.set_handler(oc) + return oc + + def cleanup_admin_and_callbacks(self,oc,exc): + oc.cleanup_callbacks(exc) + self.cleanup_admin(oc) + if self.userconnhandler is not None: + self.userconnhandler(exc,(oc.get_ip(),oc.get_listen_port()),oc.is_locally_initiated()) + + def cleanup_admin(self,oc): + iplports = [] + d = 0 + for key in self.iplport2oc.keys(): + #print "***** iplport2oc:", key, self.iplport2oc[key] + if self.iplport2oc[key] == oc: + del self.iplport2oc[key] + #print "*****!!! 
del", key, oc + d += 1 + + +class Task: + def __init__(self,method,*args, **kwargs): + self.method = method + self.args = args + self.kwargs = kwargs + + def start(self): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbreturn: task: start",self.method + #print_stack(file=sys.stderr) + self.method(*self.args,**self.kwargs) + + +class ReturnConnection: + def __init__(self,handler,singsock,rawserver,locally_initiated = False, + specified_dns = None, ext_handshake = False,options = None): + # Called by network thread + self.handler = handler + self.singsock = singsock # for writing + self.rawserver = rawserver + self.buffer = StringIO() + self.cb_queue = [] + self.listen_port = None + self.options = None + self.locally_initiated = locally_initiated + self.specified_dns = specified_dns + self.last_use = time() + + self.state = STATE_INITIAL + self.write(chr(len(protocol_name)) + protocol_name + + option_pattern + dialback_infohash + self.handler.get_my_peer_id()) + if ext_handshake: + self.state = STATE_HS_PEERID_WAIT + self.next_len = 20 + self.next_func = self.read_peer_id + self.set_options(options) + else: + self.state = STATE_HS_FULL_WAIT + self.next_len = 1 + self.next_func = self.read_header_len + + # Leave autoclose here instead of ReturnConnHandler, as that doesn't record + # remotely-initiated ReturnConnections before authentication is done. + self.rawserver.add_task(self._dlbconn_auto_close, EXPIRE_CHECK_INTERVAL) + + # + # Interface for SocketHandler + # + def data_came_in(self, singsock, data): + """ sockethandler received data """ + # now we got something we can ask for the peer's real port + dummy_port = singsock.get_port(True) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbconn: data_came_in",singsock.get_ip(),singsock.get_port() + self.handler.measurefunc(len(data)) + self.last_use = time() + while 1: + if self.state == STATE_CLOSED: + return + i = self.next_len - self.buffer.tell() + if i > len(data): + self.buffer.write(data) + return + self.buffer.write(data[:i]) + data = data[i:] + m = self.buffer.getvalue() + self.buffer.reset() + self.buffer.truncate() + try: + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbconn: Trying to read",self.next_len,"using",self.next_func + x = self.next_func(m) + except: + self.next_len, self.next_func = 1, self.read_dead + if DEBUG: + print_exc(file=sys.stderr) + raise + if x is None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbconn: next_func returned None",self.next_func + self.close() + return + self.next_len, self.next_func = x + + def connection_lost(self,singsock): + """ kernel or socket handler reports connection lost """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbconn: connection_lost",singsock.get_ip(),singsock.get_port(),self.state + if self.state != STATE_CLOSED: + self.state = STATE_CLOSED + self.handler.connection_lost(self) + + def connection_flushed(self,singsock): + """ sockethandler flushes connection """ + pass + + # + # Interface for ReturnConnHandler + # + def send_message(self,message): + self.last_use = time() + s = tobinary(len(message))+message + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbconn: Sending message",len(message) + self.write(s) + + def is_locally_initiated(self): + return self.locally_initiated + + def get_ip(self): + return self.singsock.get_ip() + + def get_port(self): + 
return self.singsock.get_port() + + def get_listen_port(self): + return self.listen_port + + def queue_callback(self,dns,callback): + if callback is not None: + self.cb_queue.append(callback) + + def dequeue_callbacks(self): + try: + for callback in self.cb_queue: + callback(None,self.specified_dns) + self.cb_queue = [] + except Exception,e: + print_exc(file=sys.stderr) + + + def cleanup_callbacks(self,exc): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbconn: cleanup_callbacks: #callbacks is",len(self.cb_queue) + try: + for callback in self.cb_queue: + ## Failure connecting + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbconn: cleanup_callbacks: callback is",callback + callback(exc,self.specified_dns) + except Exception,e: + print_exc(file=sys.stderr) + + # + # Internal methods + # + def read_header_len(self, s): + if ord(s) != len(protocol_name): + return None + return len(protocol_name), self.read_header + + def read_header(self, s): + if s != protocol_name: + return None + return 8, self.read_reserved + + def read_reserved(self, s): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbconn: Reserved bits:", `s` + self.set_options(s) + return 20, self.read_download_id + + def read_download_id(self, s): + if s != dialback_infohash: + return None + return 20, self.read_peer_id + + def read_peer_id(self, s): + self.unauth_peer_id = s + self.listen_port = decode_listen_port(self.unauth_peer_id) + self.state = STATE_DATA_WAIT + if not self.got_connection(): + self.close() + return + return 4, self.read_len + + + def got_connection(self): + return self.handler.got_connection(self) + + def read_len(self, s): + l = toint(s) + if l > self.handler.get_max_len(): + return None + return l, self.read_message + + def read_message(self, s): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbconn: read_message len",len(s),self.state + + if s != '': + if self.state == STATE_DATA_WAIT: + if not self.handler.got_message((self.get_ip(),self.get_listen_port()),s): + return None + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbconn: Received message while in illegal state, internal error!" 
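# --- Illustrative sketch (not part of this patch) -----------------------------
# What ReturnConnection actually puts on the wire.  __init__ writes a handshake
# of: 1 byte name length, the protocol name, 8 reserved option bytes, a 20-byte
# infohash and a 20-byte peer id; the read_header_len -> read_header ->
# read_reserved -> read_download_id -> read_peer_id chain parses the same
# layout on the receiving side.  After the handshake every message is a 4-byte
# big-endian length (tobinary/toint) followed by the payload (read_len ->
# read_message).  The arguments below are placeholders, not the real
# protocol_name / option_pattern / dialback_infohash constants.
from struct import pack, unpack

def frame_handshake(protocol_name, option_pattern, infohash, peer_id):
    assert len(option_pattern) == 8 and len(infohash) == 20 and len(peer_id) == 20
    return chr(len(protocol_name)) + protocol_name + option_pattern + infohash + peer_id

def frame_message(payload):
    # mirrors send_message(): 4-byte big-endian length prefix, then the bytes
    return pack('>i', len(payload)) + payload

def split_message(buf):
    # mirrors read_len()/read_message(): returns (payload, remaining bytes)
    length = unpack('>i', buf[:4])[0]
    return buf[4:4 + length], buf[4 + length:]
# -------------------------------------------------------------------------------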
+ return None + return 4, self.read_len + + def read_dead(self, s): + return None + + def write(self,s): + self.singsock.write(s) + + def set_options(self,options): + self.options = options + + def close(self): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlbconn: we close()",self.get_ip(),self.get_port() + self.state_when_error = self.state + if self.state != STATE_CLOSED: + self.state = STATE_CLOSED + self.handler.local_close(self) + self.singsock.close() + return + + def _dlbconn_auto_close(self): + if (time() - self.last_use) > EXPIRE_THRESHOLD: + self.close() + else: + self.rawserver.add_task(self._dlbconn_auto_close, EXPIRE_CHECK_INTERVAL) + +def create_my_peer_id(my_listen_port): + myid = createPeerID() + myid = myid[:14] + pack('>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "TIMEOUTCHECK:", "-> ping" + + # Send the ping to the server specifying the delay of the reply + pingMsg = (str("ping:"+str(ping))) + udpsock.send(pingMsg) + udpsock.send(pingMsg) + udpsock.send(pingMsg) + + # Wait for reply from the server + while True: + + rcvaddr = None + + try: + reply = udpsock.recv(1024) + + except timeout: # No reply from the server: timeout passed + + if udpsock: + udpsock.close() + + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "TIMEOUTCHECK:", "UDP connection to the pingback server has timed out for ping", ping + + lck.acquire() + evnt.set() + evnt.clear() + lck.release() + break + + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", pingbacksrvr + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", rcvaddr + + if reply: + data = reply.split(':') + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", data, "received from the pingback server" + + if data[0] == "pong": + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "TIMEOUTCHECK:", "<-", data[0], "after", data[1], "seconds" + to = ping + if int(data[1])==145: + lck.acquire() + evnt.set() + evnt.clear() + lck.release() + return + + return + + +# Main method of the library: launches nat-timeout discovery algorithm +def GetTimeout(pingbacksrvr): + """ + Returns the NAT timeout for UDP traffic + """ + + pings = [25, 35, 55, 85, 115, 145] + + # Send pings and wait for replies + for ping in pings: + thread.start_new_thread(pingback, (ping, pingbacksrvr)) + + global evnt + evnt.wait() + + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "TIMEOUTCHECK: timeout is", to + return to diff --git a/tribler-mod/Tribler/Core/NATFirewall/TimeoutCheck.py.bak b/tribler-mod/Tribler/Core/NATFirewall/TimeoutCheck.py.bak new file mode 100644 index 0000000..10ca032 --- /dev/null +++ b/tribler-mod/Tribler/Core/NATFirewall/TimeoutCheck.py.bak @@ -0,0 +1,93 @@ +# Written by Lucia D'Acunto +# see LICENSE.txt for license information + +from socket import * +import sys +import thread +import threading + + +DEBUG = False + + +to = -1 # timeout default value +lck = threading.Lock() +evnt = threading.Event() + + +# Sending pings to the pingback server and waiting for a reply +def pingback(ping, pingbacksrvr): + + global to, lck, evnt + + # Set up the socket + udpsock = socket(AF_INET, SOCK_DGRAM) + udpsock.connect(pingbacksrvr) + udpsock.settimeout(ping+10) + + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "TIMEOUTCHECK:", "-> ping" + + # Send the ping to the server specifying the delay of the reply + pingMsg = 
(str("ping:"+str(ping))) + udpsock.send(pingMsg) + udpsock.send(pingMsg) + udpsock.send(pingMsg) + + # Wait for reply from the server + while True: + + rcvaddr = None + + try: + reply = udpsock.recv(1024) + + except timeout: # No reply from the server: timeout passed + + if udpsock: + udpsock.close() + + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "TIMEOUTCHECK:", "UDP connection to the pingback server has timed out for ping", ping + + lck.acquire() + evnt.set() + evnt.clear() + lck.release() + break + + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", pingbacksrvr + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", rcvaddr + + if reply: + data = reply.split(':') + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", data, "received from the pingback server" + + if data[0] == "pong": + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "TIMEOUTCHECK:", "<-", data[0], "after", data[1], "seconds" + to = ping + if int(data[1])==145: + lck.acquire() + evnt.set() + evnt.clear() + lck.release() + return + + return + + +# Main method of the library: launches nat-timeout discovery algorithm +def GetTimeout(pingbacksrvr): + """ + Returns the NAT timeout for UDP traffic + """ + + pings = [25, 35, 55, 85, 115, 145] + + # Send pings and wait for replies + for ping in pings: + thread.start_new_thread(pingback, (ping, pingbacksrvr)) + + global evnt + evnt.wait() + + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "TIMEOUTCHECK: timeout is", to + return to diff --git a/tribler-mod/Tribler/Core/NATFirewall/UPnPThread.py b/tribler-mod/Tribler/Core/NATFirewall/UPnPThread.py new file mode 100644 index 0000000..6e8e22e --- /dev/null +++ b/tribler-mod/Tribler/Core/NATFirewall/UPnPThread.py @@ -0,0 +1,104 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information + +import sys +from threading import Event,Thread +from traceback import print_exc +from Tribler.Core.BitTornado.natpunch import UPnPWrapper, UPnPError + +DEBUG = False + + +class UPnPThread(Thread): + """ Thread to run the UPnP code. Moved out of main startup- + sequence for performance. As you can see this thread won't + exit until the client exits. This is due to a funky problem + with UPnP mode 2. That uses Win32/COM API calls to find and + talk to the UPnP-enabled firewall. This mechanism apparently + requires all calls to be carried out by the same thread. + This means we cannot let the final DeletePortMapping(port) + (==UPnPWrapper.close(port)) be done by a different thread, + and we have to make this one wait until client shutdown. 
+ + Arno, 2006-11-12 + """ + + def __init__(self,upnp_type,ext_ip,listen_port,error_func,got_ext_ip_func): + Thread.__init__(self) + self.setDaemon(True) + self.setName( "UPnP"+self.getName() ) + + self.upnp_type = upnp_type + self.locally_guessed_ext_ip = ext_ip + self.listen_port = listen_port + self.error_func = error_func + self.got_ext_ip_func = got_ext_ip_func + self.shutdownevent = Event() + + def run(self): + if self.upnp_type > 0: + self.upnp_wrap = UPnPWrapper.getInstance() + self.upnp_wrap.register(self.locally_guessed_ext_ip) + + if self.upnp_wrap.test(self.upnp_type): + try: + shownerror=False + # Get external IP address from firewall + if self.upnp_type != 1: # Mode 1 doesn't support getting the IP address" + ret = self.upnp_wrap.get_ext_ip() + if ret == None: + shownerror=True + self.error_func(self.upnp_type,self.listen_port,0) + else: + self.got_ext_ip_func(ret) + + # Do open_port irrespective of whether get_ext_ip() + # succeeds, UPnP mode 1 doesn't support get_ext_ip() + # get_ext_ip() must be done first to ensure we have the + # right IP ASAP. + + # Open TCP listen port on firewall + ret = self.upnp_wrap.open(self.listen_port,iproto='TCP') + if ret == False and not shownerror: + self.error_func(self.upnp_type,self.listen_port,0) + + # Open UDP listen port on firewall + ret = self.upnp_wrap.open(self.listen_port,iproto='UDP') + if ret == False and not shownerror: + self.error_func(self.upnp_type,self.listen_port,0,listenproto='UDP') + + except UPnPError,e: + self.error_func(self.upnp_type,self.listen_port,1,e) + else: + if self.upnp_type != 3: + self.error_func(self.upnp_type,self.listen_port,2) + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","upnp: thread: Initialization failed, but didn't report error because UPnP mode 3 is now enabled by default" + + # Now that the firewall is hopefully open, activate other services + # here. For Buddycast we don't have an explicit notification that it + # can go ahead. It will start 15 seconds after client startup, which + # is assumed to be sufficient for UPnP to open the firewall. + ## dmh.start_active() + + if self.upnp_type > 0: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","upnp: thread: Waiting till shutdown" + self.shutdownevent.wait() + # Don't write to sys.stderr, that sometimes doesn't seem to exist + # any more?! Python garbage collection funkiness of module sys import? + # The GUI is definitely gone, so don't use self.error_func() + if DEBUG: + print "upnp: thread: Shutting down, closing port on firewall" + try: + self.upnp_wrap.close(self.listen_port,iproto='TCP') + self.upnp_wrap.close(self.listen_port,iproto='UDP') + except Exception,e: + print "upnp: thread: close port at shutdown threw",e + print_exc() + + # End of UPnPThread + + def shutdown(self): + self.shutdownevent.set() diff --git a/tribler-mod/Tribler/Core/NATFirewall/UPnPThread.py.bak b/tribler-mod/Tribler/Core/NATFirewall/UPnPThread.py.bak new file mode 100644 index 0000000..5db3a73 --- /dev/null +++ b/tribler-mod/Tribler/Core/NATFirewall/UPnPThread.py.bak @@ -0,0 +1,103 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information + +import sys +from threading import Event,Thread +from traceback import print_exc +from Tribler.Core.BitTornado.natpunch import UPnPWrapper, UPnPError + +DEBUG = False + + +class UPnPThread(Thread): + """ Thread to run the UPnP code. Moved out of main startup- + sequence for performance. As you can see this thread won't + exit until the client exits. 
This is due to a funky problem + with UPnP mode 2. That uses Win32/COM API calls to find and + talk to the UPnP-enabled firewall. This mechanism apparently + requires all calls to be carried out by the same thread. + This means we cannot let the final DeletePortMapping(port) + (==UPnPWrapper.close(port)) be done by a different thread, + and we have to make this one wait until client shutdown. + + Arno, 2006-11-12 + """ + + def __init__(self,upnp_type,ext_ip,listen_port,error_func,got_ext_ip_func): + Thread.__init__(self) + self.setDaemon(True) + self.setName( "UPnP"+self.getName() ) + + self.upnp_type = upnp_type + self.locally_guessed_ext_ip = ext_ip + self.listen_port = listen_port + self.error_func = error_func + self.got_ext_ip_func = got_ext_ip_func + self.shutdownevent = Event() + + def run(self): + if self.upnp_type > 0: + self.upnp_wrap = UPnPWrapper.getInstance() + self.upnp_wrap.register(self.locally_guessed_ext_ip) + + if self.upnp_wrap.test(self.upnp_type): + try: + shownerror=False + # Get external IP address from firewall + if self.upnp_type != 1: # Mode 1 doesn't support getting the IP address" + ret = self.upnp_wrap.get_ext_ip() + if ret == None: + shownerror=True + self.error_func(self.upnp_type,self.listen_port,0) + else: + self.got_ext_ip_func(ret) + + # Do open_port irrespective of whether get_ext_ip() + # succeeds, UPnP mode 1 doesn't support get_ext_ip() + # get_ext_ip() must be done first to ensure we have the + # right IP ASAP. + + # Open TCP listen port on firewall + ret = self.upnp_wrap.open(self.listen_port,iproto='TCP') + if ret == False and not shownerror: + self.error_func(self.upnp_type,self.listen_port,0) + + # Open UDP listen port on firewall + ret = self.upnp_wrap.open(self.listen_port,iproto='UDP') + if ret == False and not shownerror: + self.error_func(self.upnp_type,self.listen_port,0,listenproto='UDP') + + except UPnPError,e: + self.error_func(self.upnp_type,self.listen_port,1,e) + else: + if self.upnp_type != 3: + self.error_func(self.upnp_type,self.listen_port,2) + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","upnp: thread: Initialization failed, but didn't report error because UPnP mode 3 is now enabled by default" + + # Now that the firewall is hopefully open, activate other services + # here. For Buddycast we don't have an explicit notification that it + # can go ahead. It will start 15 seconds after client startup, which + # is assumed to be sufficient for UPnP to open the firewall. + ## dmh.start_active() + + if self.upnp_type > 0: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","upnp: thread: Waiting till shutdown" + self.shutdownevent.wait() + # Don't write to sys.stderr, that sometimes doesn't seem to exist + # any more?! Python garbage collection funkiness of module sys import? 
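# --- Illustrative sketch (not part of this patch) -----------------------------
# How the rest of the client could drive UPnPThread, inferred only from the
# constructor arguments and shutdown() shown in this file; the callback bodies,
# the mode (2), the guessed IP and the port below are made-up example values.
import sys

def _upnp_error(upnp_type, listen_port, error_type, exc=None, listenproto='TCP'):
    print >>sys.stderr, 'upnp: error', upnp_type, listen_port, error_type, exc, listenproto

def _got_ext_ip(ip):
    print >>sys.stderr, 'upnp: IGD reports external IP', ip

# upnp_thread = UPnPThread(2, '192.168.1.2', 7760, _upnp_error, _got_ext_ip)
# upnp_thread.start()
# ... and at client shutdown, let run() remove the port mappings and exit:
# upnp_thread.shutdown()
# -------------------------------------------------------------------------------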
+ # The GUI is definitely gone, so don't use self.error_func() + if DEBUG: + print "upnp: thread: Shutting down, closing port on firewall" + try: + self.upnp_wrap.close(self.listen_port,iproto='TCP') + self.upnp_wrap.close(self.listen_port,iproto='UDP') + except Exception,e: + print "upnp: thread: close port at shutdown threw",e + print_exc() + + # End of UPnPThread + + def shutdown(self): + self.shutdownevent.set() diff --git a/tribler-mod/Tribler/Core/NATFirewall/__init__.py b/tribler-mod/Tribler/Core/NATFirewall/__init__.py new file mode 100644 index 0000000..b7ef832 --- /dev/null +++ b/tribler-mod/Tribler/Core/NATFirewall/__init__.py @@ -0,0 +1,3 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information diff --git a/tribler-mod/Tribler/Core/NATFirewall/__init__.py.bak b/tribler-mod/Tribler/Core/NATFirewall/__init__.py.bak new file mode 100644 index 0000000..395f8fb --- /dev/null +++ b/tribler-mod/Tribler/Core/NATFirewall/__init__.py.bak @@ -0,0 +1,2 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information diff --git a/tribler-mod/Tribler/Core/NATFirewall/guessip.py b/tribler-mod/Tribler/Core/NATFirewall/guessip.py new file mode 100644 index 0000000..5ea33bc --- /dev/null +++ b/tribler-mod/Tribler/Core/NATFirewall/guessip.py @@ -0,0 +1,162 @@ +from time import localtime, strftime +# Written by Arno Bakker, Jan David Mol +# see LICENSE.txt for license information +# +# Code to guess the IP address of a host by which it is reachable on the +# Internet, given the host is not behind a firewall or NAT. +# +# For all OSes (Linux,Windows,MacOS X) we first look at the routing table to +# see what the gateway for the default route is. We then try to establish +# our IP address that's on the same network as the gateway. That is our +# external/WAN address. +# +# This code does not support IPv6, that is, IPv6 address are ignored. +# +# Arno, Jan David, 2006-06-30 +# +import os +import sys +import socket +from traceback import print_exc + +DEBUG = False + +def get_my_wan_ip(): + try: + if sys.platform == 'win32': + return get_my_wan_ip_win32() + elif sys.platform == 'darwin': + return get_my_wan_ip_darwin() + else: + return get_my_wan_ip_linux() + except: + print_exc() + return None + +def get_my_wan_ip_win32(): + + routecmd = "netstat -nr" + ifcmd = "ipconfig /all" + + gwip = None + for line in os.popen(routecmd).readlines(): + words = line.split() + if len(words) >= 3: + if words[0] == 'Default' and words[1] == 'Gateway:': + gwip = words[-1] + if DEBUG: + print "netstat found default gateway",gwip + break + + myip = None + mywanip = None + ingw = 0 + for line in os.popen(ifcmd).readlines(): + words = line.split() + if len(words) >= 3: + if (words[0] == 'IP' and words[1] == 'Address.') or (words[1] == 'IP' and words[2] == 'Address.'): # Autoconfiguration entry + try: + socket.getaddrinfo(words[-1],None,socket.AF_INET) + myip = words[-1] + if DEBUG: + print "ipconfig found IP address",myip + except socket.gaierror: + if DEBUG: + print "ipconfig ignoring IPv6 address",words[-1] + pass + elif words[0] == 'Default' and words[1] == 'Gateway': + if words[-1] == ':': + if DEBUG: + print "ipconfig ignoring empty default gateway" + pass + else: + ingw = 1 + if ingw >= 1: + # Assumption: the "Default Gateway" list can only have 2 entries, + # one for IPv4, one for IPv6. Since we don't know the order, look + # at both. 
+ gwip2 = None + ingw = (ingw + 1) % 3 + try: + socket.getaddrinfo(words[-1],None,socket.AF_INET) + gwip2 = words[-1] + if DEBUG: + print "ipconfig found default gateway",gwip2 + except socket.gaierror: + if DEBUG: + print "ipconfig ignoring IPv6 default gateway",words[-1] + pass + if gwip == gwip2: + mywanip = myip + break + return mywanip + + +def get_my_wan_ip_linux(): + routecmd = '/bin/netstat -nr' + ifcmd = '/sbin/ifconfig -a' + + gwif = None + gwip = None + for line in os.popen(routecmd).readlines(): + words = line.split() + if len(words) >= 3: + if words[0] == '0.0.0.0': + gwif = words[-1] + gwip = words[1] + if DEBUG: + print "netstat found default gateway",gwip + break + + mywanip = None + for line in os.popen(ifcmd).readlines(): + words = line.split() + if len(words) >= 2: + if words[0] == gwif: + flag = True + elif words[0] == 'inet' and flag: + words2 = words[1].split(':') # "inet addr:130.37.192.1" line + if len(words2) == 2: + mywanip = words2[1] + break + else: + flag = False + else: + flag = False + return mywanip + + +def get_my_wan_ip_darwin(): + routecmd = '/usr/sbin/netstat -nr' + ifcmd = '/sbin/ifconfig -a' + + gwif = None + gwip = None + for line in os.popen(routecmd).readlines(): + words = line.split() + if len(words) >= 3: + if words[0] == 'default': + gwif = words[-1] + gwip = words[1] + if DEBUG: + print "netstat found default gateway",gwip + break + + mywanip = None + flag = False + for line in os.popen(ifcmd).readlines(): + words = line.split() + if len(words) >= 2: + if words[0] == "%s:" % gwif: + flag = True + elif words[0] == 'inet' and flag: + mywanip = words[1] # "inet 130.37.192.1" line + break + return mywanip + + + +if __name__ == "__main__": + DEBUG = True + ip = get_my_wan_ip() + print "External IP address is",ip diff --git a/tribler-mod/Tribler/Core/NATFirewall/guessip.py.bak b/tribler-mod/Tribler/Core/NATFirewall/guessip.py.bak new file mode 100644 index 0000000..c1eafca --- /dev/null +++ b/tribler-mod/Tribler/Core/NATFirewall/guessip.py.bak @@ -0,0 +1,161 @@ +# Written by Arno Bakker, Jan David Mol +# see LICENSE.txt for license information +# +# Code to guess the IP address of a host by which it is reachable on the +# Internet, given the host is not behind a firewall or NAT. +# +# For all OSes (Linux,Windows,MacOS X) we first look at the routing table to +# see what the gateway for the default route is. We then try to establish +# our IP address that's on the same network as the gateway. That is our +# external/WAN address. +# +# This code does not support IPv6, that is, IPv6 address are ignored. 
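# --- Illustrative sketch (not part of this patch) -----------------------------
# A different, commonly used way to guess the outward-facing local IP than the
# netstat/ipconfig/ifconfig parsing implemented here: connect() a UDP socket
# towards some public address (no packet is sent for a UDP connect) and read
# the local address the kernel picked with getsockname().  The probe address
# below is an arbitrary example, and like get_my_wan_ip() this only yields the
# WAN address if the host is not behind a firewall or NAT.
import socket

def guess_wan_ip_via_udp_connect(probe_addr=('192.0.2.1', 9)):
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        try:
            s.connect(probe_addr)        # selects the outgoing interface only
            return s.getsockname()[0]
        except socket.error:
            return None
    finally:
        s.close()
# -------------------------------------------------------------------------------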
+# +# Arno, Jan David, 2006-06-30 +# +import os +import sys +import socket +from traceback import print_exc + +DEBUG = False + +def get_my_wan_ip(): + try: + if sys.platform == 'win32': + return get_my_wan_ip_win32() + elif sys.platform == 'darwin': + return get_my_wan_ip_darwin() + else: + return get_my_wan_ip_linux() + except: + print_exc() + return None + +def get_my_wan_ip_win32(): + + routecmd = "netstat -nr" + ifcmd = "ipconfig /all" + + gwip = None + for line in os.popen(routecmd).readlines(): + words = line.split() + if len(words) >= 3: + if words[0] == 'Default' and words[1] == 'Gateway:': + gwip = words[-1] + if DEBUG: + print "netstat found default gateway",gwip + break + + myip = None + mywanip = None + ingw = 0 + for line in os.popen(ifcmd).readlines(): + words = line.split() + if len(words) >= 3: + if (words[0] == 'IP' and words[1] == 'Address.') or (words[1] == 'IP' and words[2] == 'Address.'): # Autoconfiguration entry + try: + socket.getaddrinfo(words[-1],None,socket.AF_INET) + myip = words[-1] + if DEBUG: + print "ipconfig found IP address",myip + except socket.gaierror: + if DEBUG: + print "ipconfig ignoring IPv6 address",words[-1] + pass + elif words[0] == 'Default' and words[1] == 'Gateway': + if words[-1] == ':': + if DEBUG: + print "ipconfig ignoring empty default gateway" + pass + else: + ingw = 1 + if ingw >= 1: + # Assumption: the "Default Gateway" list can only have 2 entries, + # one for IPv4, one for IPv6. Since we don't know the order, look + # at both. + gwip2 = None + ingw = (ingw + 1) % 3 + try: + socket.getaddrinfo(words[-1],None,socket.AF_INET) + gwip2 = words[-1] + if DEBUG: + print "ipconfig found default gateway",gwip2 + except socket.gaierror: + if DEBUG: + print "ipconfig ignoring IPv6 default gateway",words[-1] + pass + if gwip == gwip2: + mywanip = myip + break + return mywanip + + +def get_my_wan_ip_linux(): + routecmd = '/bin/netstat -nr' + ifcmd = '/sbin/ifconfig -a' + + gwif = None + gwip = None + for line in os.popen(routecmd).readlines(): + words = line.split() + if len(words) >= 3: + if words[0] == '0.0.0.0': + gwif = words[-1] + gwip = words[1] + if DEBUG: + print "netstat found default gateway",gwip + break + + mywanip = None + for line in os.popen(ifcmd).readlines(): + words = line.split() + if len(words) >= 2: + if words[0] == gwif: + flag = True + elif words[0] == 'inet' and flag: + words2 = words[1].split(':') # "inet addr:130.37.192.1" line + if len(words2) == 2: + mywanip = words2[1] + break + else: + flag = False + else: + flag = False + return mywanip + + +def get_my_wan_ip_darwin(): + routecmd = '/usr/sbin/netstat -nr' + ifcmd = '/sbin/ifconfig -a' + + gwif = None + gwip = None + for line in os.popen(routecmd).readlines(): + words = line.split() + if len(words) >= 3: + if words[0] == 'default': + gwif = words[-1] + gwip = words[1] + if DEBUG: + print "netstat found default gateway",gwip + break + + mywanip = None + flag = False + for line in os.popen(ifcmd).readlines(): + words = line.split() + if len(words) >= 2: + if words[0] == "%s:" % gwif: + flag = True + elif words[0] == 'inet' and flag: + mywanip = words[1] # "inet 130.37.192.1" line + break + return mywanip + + + +if __name__ == "__main__": + DEBUG = True + ip = get_my_wan_ip() + print "External IP address is",ip diff --git a/tribler-mod/Tribler/Core/NATFirewall/upnp.py b/tribler-mod/Tribler/Core/NATFirewall/upnp.py new file mode 100644 index 0000000..e1cbe00 --- /dev/null +++ b/tribler-mod/Tribler/Core/NATFirewall/upnp.py @@ -0,0 +1,301 @@ +from time import localtime, 
strftime +# Written by Arno Bakker +# see LICENSE.txt for license information +# +# Platform independent UPnP client +# +# References: +# - UPnP Device Architecture 1.0, www.upnp.org +# - From Internet Gateway Device IGD V1.0: +# * WANIPConnection:1 Service Template Version 1.01 +# + +import sys +import socket +from cStringIO import StringIO +import urllib +import urllib2 +from urlparse import urlparse +import xml.sax as sax +from xml.sax.handler import ContentHandler +from traceback import print_exc + +UPNP_WANTED_SERVICETYPES = ['urn:schemas-upnp-org:service:WANIPConnection:1','urn:schemas-upnp-org:service:WANPPPConnection:1'] + +DEBUG = False + +class UPnPPlatformIndependent: + + def __init__(self): + # Maps location URL to a dict containing servicetype and control URL + self.services = {} + self.lastdiscovertime = 0 + + def discover(self): + """ Attempts to discover any UPnP services for X seconds + If any are found, they are stored in self.services + """ + #if self.lastdiscovertime != 0 and self.lastdiscovertime + DISCOVER_WAIT < time.time(): + # if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","upnp: discover: Already did a discovery recently" + # return + + maxwait = 4 + req = 'M-SEARCH * HTTP/1.1\r\n' + req += 'HOST: 239.255.255.250:1900\r\n' + req += 'MAN: "ssdp:discover"\r\n' # double quotes obligatory + req += 'MX: '+str(maxwait)+'\r\n' + req += 'ST: ssdp:all\r\n' # no double quotes + req += '\r\n\r\n' + + try: + self.s = socket.socket(socket.AF_INET,socket.SOCK_DGRAM) + self.s.settimeout(maxwait+2.0) + self.s.sendto(req,('239.255.255.250',1900)) + while True: # exited by socket.timeout exception only + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","upnp: discover: Wait 4 reply" + (rep,sender) = self.s.recvfrom(1024) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","upnp: discover: Got reply from",sender + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","upnp: discover: Saying:",rep + repio = StringIO(rep) + while True: + line = repio.readline() + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","LINE",line + if line == '': + break + if line[-2:] == '\r\n': + line = line[:-2] + idx = line.find(':') + if idx == -1: + continue + key = line[:idx] + key = key.lower() + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","key",key + if key.startswith('location'): + # Careful: MS Internet Connection Sharing returns "Location:http://bla", so no space + location = line[idx+1:].strip() + desc = self.get_description(location) + self.services[location] = self.parse_services(desc) + + except: + if DEBUG: + print_exc() + + def found_wanted_services(self): + """ Return True if WANIPConnection or WANPPPConnection were found by discover() """ + for location in self.services: + for servicetype in UPNP_WANTED_SERVICETYPES: + if self.services[location]['servicetype'] == servicetype: + return True + return False + + + def add_port_map(self,internalip,port,iproto='TCP'): + """ Sends an AddPortMapping request to all relevant IGDs found by discover() + + Raises UPnPError in case the IGD returned an error reply, + Raises Exception in case of any other error + """ + srch = self.do_soap_request('AddPortMapping',port,iproto=iproto,internalip=internalip) + if srch is not None: + se = srch.get_error() + if se is not None: + raise se + + def del_port_map(self,port,iproto='TCP'): + """ Sends a DeletePortMapping request to all relevant IGDs found by 
discover() + + Raises UPnPError in case the IGD returned an error reply, + Raises Exception in case of any other error + """ + srch = self.do_soap_request('DeletePortMapping',port,iproto=iproto) + if srch is not None: + se = srch.get_error() + if se is not None: + raise se + + def get_ext_ip(self): + """ Sends a GetExternalIPAddress request to all relevant IGDs found by discover() + + Raises UPnPError in case the IGD returned an error reply, + Raises Exception in case of any other error + """ + srch = self.do_soap_request('GetExternalIPAddress') + if srch is not None: + se = srch.get_error() + if se is not None: + raise se + else: + return srch.get_ext_ip() + + # + # Internal methods + # + def do_soap_request(self,methodname,port=-1,iproto='TCP',internalip=None): + for location in self.services: + for servicetype in UPNP_WANTED_SERVICETYPES: + if self.services[location]['servicetype'] == servicetype: + o = urlparse(location) + endpoint = o[0]+'://'+o[1]+self.services[location]['controlurl'] + # test: provoke error + #endpoint = o[0]+'://'+o[1]+'/bla'+self.services[location]['controlurl'] + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","upnp: "+methodname+": Talking to endpoint ",endpoint + (headers,body) = self.create_soap_request(methodname,port,iproto=iproto,internalip=internalip) + #print body + try: + req = urllib2.Request(url=endpoint,data=body,headers=headers) + f = urllib2.urlopen(req) + resp = f.read() + except urllib2.HTTPError,e: + resp = e.fp.read() + if DEBUG: + print_exc() + srch = SOAPResponseContentHandler(methodname) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","upnp: "+methodname+": response is",resp + try: + srch.parse(resp) + except sax.SAXParseException,e: + # Our test linux-IGD appears to return an incompete + # SOAP error reply. Handle this. 
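# --- Illustrative sketch (not part of this patch) -----------------------------
# Approximate shape of the SOAP body that create_soap_request() assembles and
# do_soap_request() POSTs to the IGD's control URL for AddPortMapping.  The
# element names follow the UPnP WANIPConnection:1 service template; treat the
# exact markup as a reconstruction from that spec rather than this code's
# literal output.  The IGD's reply is fed to SOAPResponseContentHandler, which
# picks up NewExternalIPAddress for GetExternalIPAddress and errorCode /
# errorDescription for faults (raised as UPnPError).
ADD_PORT_MAPPING_BODY = (
    '<?xml version="1.0"?>'
    '<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/"'
    ' s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">'
    '<s:Body>'
    '<u:AddPortMapping xmlns:u="urn:schemas-upnp-org:service:WANIPConnection:1">'
    '<NewRemoteHost></NewRemoteHost>'
    '<NewExternalPort>%(port)d</NewExternalPort>'
    '<NewProtocol>%(proto)s</NewProtocol>'
    '<NewInternalPort>%(port)d</NewInternalPort>'
    '<NewInternalClient>%(internalip)s</NewInternalClient>'
    '<NewEnabled>1</NewEnabled>'
    '<NewPortMappingDescription>Insert description here</NewPortMappingDescription>'
    '<NewLeaseDuration>0</NewLeaseDuration>'
    '</u:AddPortMapping>'
    '</s:Body>'
    '</s:Envelope>'
)

def example_add_port_mapping_body(internalip, port, iproto='TCP'):
    return ADD_PORT_MAPPING_BODY % {'port': port, 'proto': iproto, 'internalip': internalip}
# -------------------------------------------------------------------------------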
+ se = srch.get_error() + if se is None: + raise e + # otherwise we were able to parse the error reply + return srch + + def get_description(self,url): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","upnp: discover: Reading description from",url + f = urllib.urlopen(url) + data = f.read() + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","upnp: description: Got",data + return data + + def parse_services(self,desc): + dch = DescriptionContentHandler() + dch.parse(desc) + return dch.services + + def create_soap_request(self,methodname,port=-1,iproto="TCP",internalip=None): + headers = {} + #headers['Host'] = endpoint + #headers['Accept-Encoding'] = 'identity' + headers['Content-type'] = 'text/xml; charset="utf-8"' + headers['SOAPAction'] = '"urn:schemas-upnp-org:service:WANIPConnection:1#'+methodname+'"' + headers['User-Agent'] = 'Mozilla/4.0 (compatible; UPnP/1.0; Windows 9x)' + + body = '' + body += '' + body += '' + if methodname == 'AddPortMapping': + externalport = port + internalport = port + internalclient = internalip + body += '' + body += ''+str(externalport)+'' + body += ''+iproto+'' + body += ''+str(internalport)+'' + body += ''+internalclient+'' + body += '1' + body += 'Insert description here' + body += '0' + elif methodname == 'DeletePortMapping': + externalport = port + body += '' + body += ''+str(externalport)+'' + body += ''+iproto+'' + body += '' + body += '' + return (headers,body) + + +class UPnPError(Exception): + def __init__(self,errorcode,errordesc): + Exception.__init__(self) + self.errorcode = errorcode + self.errordesc = errordesc + + def __str__(self): + return 'UPnP Error %d: %s' % (self.errorcode, self.errordesc) + + +# +# Internal classes +# + +class DescriptionContentHandler(ContentHandler): + + def __init__(self): + ContentHandler.__init__(self) + self.services = {} + + def parse(self,desc): + sax.parseString(desc,self) + + def endDocument(self): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","upnp: discover: Services found",self.services + + def endElement(self, name): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","endElement",name + n = name.lower() + if n == 'servicetype': + self.services['servicetype'] = self.content + elif n == 'controlurl': + self.services['controlurl'] = self.content + + def characters(self, content): + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","content",content + self.content = content + + +class SOAPResponseContentHandler(ContentHandler): + + def __init__(self,methodname): + ContentHandler.__init__(self) + self.methodname = methodname + self.ip = None + self.errorset = False + self.errorcode = 0 + self.errordesc = 'No error' + self.content = None + + def parse(self,resp): + sax.parseString(resp,self) + + def get_ext_ip(self): + return self.ip + + def get_error(self): + if self.errorset: + return UPnPError(self.errorcode,self.methodname+": "+self.errordesc) + else: + return None + + def endElement(self, name): + n = name.lower() + if self.methodname == 'GetExternalIPAddress' and n.endswith('newexternalipaddress'): + self.ip = self.content + elif n== 'errorcode': + self.errorset = True + self.errorcode = int(self.content) + elif n == 'errordescription': + self.errorset = True + self.errordesc = self.content + + def characters(self, content): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","upnp: GOT CHARACTERS",content + self.content = content + +if __name__ == '__main__': + u 
= UPnPPlatformIndependent() + u.discover() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","IGD say my external IP address is",u.get_ext_ip() + #u.add_port_map('130.37.193.64',6881) diff --git a/tribler-mod/Tribler/Core/NATFirewall/upnp.py.bak b/tribler-mod/Tribler/Core/NATFirewall/upnp.py.bak new file mode 100644 index 0000000..64e431b --- /dev/null +++ b/tribler-mod/Tribler/Core/NATFirewall/upnp.py.bak @@ -0,0 +1,300 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information +# +# Platform independent UPnP client +# +# References: +# - UPnP Device Architecture 1.0, www.upnp.org +# - From Internet Gateway Device IGD V1.0: +# * WANIPConnection:1 Service Template Version 1.01 +# + +import sys +import socket +from cStringIO import StringIO +import urllib +import urllib2 +from urlparse import urlparse +import xml.sax as sax +from xml.sax.handler import ContentHandler +from traceback import print_exc + +UPNP_WANTED_SERVICETYPES = ['urn:schemas-upnp-org:service:WANIPConnection:1','urn:schemas-upnp-org:service:WANPPPConnection:1'] + +DEBUG = False + +class UPnPPlatformIndependent: + + def __init__(self): + # Maps location URL to a dict containing servicetype and control URL + self.services = {} + self.lastdiscovertime = 0 + + def discover(self): + """ Attempts to discover any UPnP services for X seconds + If any are found, they are stored in self.services + """ + #if self.lastdiscovertime != 0 and self.lastdiscovertime + DISCOVER_WAIT < time.time(): + # if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","upnp: discover: Already did a discovery recently" + # return + + maxwait = 4 + req = 'M-SEARCH * HTTP/1.1\r\n' + req += 'HOST: 239.255.255.250:1900\r\n' + req += 'MAN: "ssdp:discover"\r\n' # double quotes obligatory + req += 'MX: '+str(maxwait)+'\r\n' + req += 'ST: ssdp:all\r\n' # no double quotes + req += '\r\n\r\n' + + try: + self.s = socket.socket(socket.AF_INET,socket.SOCK_DGRAM) + self.s.settimeout(maxwait+2.0) + self.s.sendto(req,('239.255.255.250',1900)) + while True: # exited by socket.timeout exception only + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","upnp: discover: Wait 4 reply" + (rep,sender) = self.s.recvfrom(1024) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","upnp: discover: Got reply from",sender + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","upnp: discover: Saying:",rep + repio = StringIO(rep) + while True: + line = repio.readline() + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","LINE",line + if line == '': + break + if line[-2:] == '\r\n': + line = line[:-2] + idx = line.find(':') + if idx == -1: + continue + key = line[:idx] + key = key.lower() + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","key",key + if key.startswith('location'): + # Careful: MS Internet Connection Sharing returns "Location:http://bla", so no space + location = line[idx+1:].strip() + desc = self.get_description(location) + self.services[location] = self.parse_services(desc) + + except: + if DEBUG: + print_exc() + + def found_wanted_services(self): + """ Return True if WANIPConnection or WANPPPConnection were found by discover() """ + for location in self.services: + for servicetype in UPNP_WANTED_SERVICETYPES: + if self.services[location]['servicetype'] == servicetype: + return True + return False + + + def add_port_map(self,internalip,port,iproto='TCP'): + """ Sends an AddPortMapping request to all 
relevant IGDs found by discover() + + Raises UPnPError in case the IGD returned an error reply, + Raises Exception in case of any other error + """ + srch = self.do_soap_request('AddPortMapping',port,iproto=iproto,internalip=internalip) + if srch is not None: + se = srch.get_error() + if se is not None: + raise se + + def del_port_map(self,port,iproto='TCP'): + """ Sends a DeletePortMapping request to all relevant IGDs found by discover() + + Raises UPnPError in case the IGD returned an error reply, + Raises Exception in case of any other error + """ + srch = self.do_soap_request('DeletePortMapping',port,iproto=iproto) + if srch is not None: + se = srch.get_error() + if se is not None: + raise se + + def get_ext_ip(self): + """ Sends a GetExternalIPAddress request to all relevant IGDs found by discover() + + Raises UPnPError in case the IGD returned an error reply, + Raises Exception in case of any other error + """ + srch = self.do_soap_request('GetExternalIPAddress') + if srch is not None: + se = srch.get_error() + if se is not None: + raise se + else: + return srch.get_ext_ip() + + # + # Internal methods + # + def do_soap_request(self,methodname,port=-1,iproto='TCP',internalip=None): + for location in self.services: + for servicetype in UPNP_WANTED_SERVICETYPES: + if self.services[location]['servicetype'] == servicetype: + o = urlparse(location) + endpoint = o[0]+'://'+o[1]+self.services[location]['controlurl'] + # test: provoke error + #endpoint = o[0]+'://'+o[1]+'/bla'+self.services[location]['controlurl'] + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","upnp: "+methodname+": Talking to endpoint ",endpoint + (headers,body) = self.create_soap_request(methodname,port,iproto=iproto,internalip=internalip) + #print body + try: + req = urllib2.Request(url=endpoint,data=body,headers=headers) + f = urllib2.urlopen(req) + resp = f.read() + except urllib2.HTTPError,e: + resp = e.fp.read() + if DEBUG: + print_exc() + srch = SOAPResponseContentHandler(methodname) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","upnp: "+methodname+": response is",resp + try: + srch.parse(resp) + except sax.SAXParseException,e: + # Our test linux-IGD appears to return an incompete + # SOAP error reply. Handle this. 
+ se = srch.get_error() + if se is None: + raise e + # otherwise we were able to parse the error reply + return srch + + def get_description(self,url): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","upnp: discover: Reading description from",url + f = urllib.urlopen(url) + data = f.read() + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","upnp: description: Got",data + return data + + def parse_services(self,desc): + dch = DescriptionContentHandler() + dch.parse(desc) + return dch.services + + def create_soap_request(self,methodname,port=-1,iproto="TCP",internalip=None): + headers = {} + #headers['Host'] = endpoint + #headers['Accept-Encoding'] = 'identity' + headers['Content-type'] = 'text/xml; charset="utf-8"' + headers['SOAPAction'] = '"urn:schemas-upnp-org:service:WANIPConnection:1#'+methodname+'"' + headers['User-Agent'] = 'Mozilla/4.0 (compatible; UPnP/1.0; Windows 9x)' + + body = '' + body += '' + body += '' + if methodname == 'AddPortMapping': + externalport = port + internalport = port + internalclient = internalip + body += '' + body += ''+str(externalport)+'' + body += ''+iproto+'' + body += ''+str(internalport)+'' + body += ''+internalclient+'' + body += '1' + body += 'Insert description here' + body += '0' + elif methodname == 'DeletePortMapping': + externalport = port + body += '' + body += ''+str(externalport)+'' + body += ''+iproto+'' + body += '' + body += '' + return (headers,body) + + +class UPnPError(Exception): + def __init__(self,errorcode,errordesc): + Exception.__init__(self) + self.errorcode = errorcode + self.errordesc = errordesc + + def __str__(self): + return 'UPnP Error %d: %s' % (self.errorcode, self.errordesc) + + +# +# Internal classes +# + +class DescriptionContentHandler(ContentHandler): + + def __init__(self): + ContentHandler.__init__(self) + self.services = {} + + def parse(self,desc): + sax.parseString(desc,self) + + def endDocument(self): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","upnp: discover: Services found",self.services + + def endElement(self, name): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","endElement",name + n = name.lower() + if n == 'servicetype': + self.services['servicetype'] = self.content + elif n == 'controlurl': + self.services['controlurl'] = self.content + + def characters(self, content): + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","content",content + self.content = content + + +class SOAPResponseContentHandler(ContentHandler): + + def __init__(self,methodname): + ContentHandler.__init__(self) + self.methodname = methodname + self.ip = None + self.errorset = False + self.errorcode = 0 + self.errordesc = 'No error' + self.content = None + + def parse(self,resp): + sax.parseString(resp,self) + + def get_ext_ip(self): + return self.ip + + def get_error(self): + if self.errorset: + return UPnPError(self.errorcode,self.methodname+": "+self.errordesc) + else: + return None + + def endElement(self, name): + n = name.lower() + if self.methodname == 'GetExternalIPAddress' and n.endswith('newexternalipaddress'): + self.ip = self.content + elif n== 'errorcode': + self.errorset = True + self.errorcode = int(self.content) + elif n == 'errordescription': + self.errorset = True + self.errordesc = self.content + + def characters(self, content): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","upnp: GOT CHARACTERS",content + self.content = content + +if __name__ == '__main__': + u 
= UPnPPlatformIndependent() + u.discover() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","IGD say my external IP address is",u.get_ext_ip() + #u.add_port_map('130.37.193.64',6881) diff --git a/tribler-mod/Tribler/Core/Overlay/MetadataHandler.py b/tribler-mod/Tribler/Core/Overlay/MetadataHandler.py new file mode 100644 index 0000000..9da61b9 --- /dev/null +++ b/tribler-mod/Tribler/Core/Overlay/MetadataHandler.py @@ -0,0 +1,565 @@ +from time import localtime, strftime +# Written by Jie Yang, Arno Bakker +# see LICENSE.txt for license information +import sys +import os +from sha import sha +from time import time, ctime +from traceback import print_exc, print_stack +from sets import Set + +from Tribler.Core.BitTornado.bencode import bencode, bdecode +from Tribler.Core.BitTornado.BT1.MessageID import * +from Tribler.Core.Utilities.utilities import isValidInfohash, show_permid_short, sort_dictlist, bin2str +from Tribler.Core.Overlay.SecureOverlay import OLPROTO_VER_FOURTH +from Tribler.Core.simpledefs import * +from Tribler.TrackerChecking.TorrentChecking import TorrentChecking +from Tribler.Core.osutils import getfreespace,get_readable_torrent_name +from Tribler.Core.CacheDB.CacheDBHandler import BarterCastDBHandler +from threading import currentThread + +DEBUG = False + +BARTERCAST_TORRENTS = False + +# Python no recursive imports? +# from overlayswarm import overlay_infohash +overlay_infohash = '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + +Max_Torrent_Size = 2*1024*1024 # 2MB torrent = 6GB ~ 250GB content + + +def get_filename(infohash, metadata=None, humanreadable=False): + # Arno: Better would have been the infohash in hex. + if humanreadable: + torrent = bdecode(metadata) + raw_name = torrent['info'].get('name','') + file_name = get_readable_torrent_name(infohash, raw_name) + else: + file_name = sha(infohash).hexdigest()+'.torrent' # notice: it's sha1-hash of infohash + #_path = os.path.join(self.torrent_dir, file_name) + #if os.path.exists(_path): + # assign a name for the torrent. add a timestamp if it exists. 
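# --- Illustrative sketch (not part of this patch) -----------------------------
# Default on-disk name of a collected torrent: the non human-readable branch of
# get_filename() stores it under the SHA1 hexdigest *of the infohash* plus
# ".torrent", so the 20-byte binary infohash itself never appears in the path.
# The infohash below is a made-up example value.
from sha import sha                      # Python 2 sha module, as used above

example_infohash = '\xab' * 20           # hypothetical 20-byte infohash
collected_name = sha(example_infohash).hexdigest() + '.torrent'
# -> a 40-character hex name that torrent_exists()/save_torrent() join with
#    self.torrent_dir
# -------------------------------------------------------------------------------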
+ #file_name = str(time()) + '_' + file_name + return file_name + # exceptions will be handled by got_metadata() + +class MetadataHandler: + + __single = None + + def __init__(self): + if MetadataHandler.__single: + raise RuntimeError, "MetadataHandler is singleton" + MetadataHandler.__single = self + self.num_torrents = -100 + self.avg_torrent_size = 25*(2**10) + self.initialized = False + self.registered = False + + + def getInstance(*args, **kw): + if MetadataHandler.__single is None: + MetadataHandler(*args, **kw) + return MetadataHandler.__single + getInstance = staticmethod(getInstance) + + def register(self, overlay_bridge, dlhelper, launchmany, config): + self.registered = True + self.overlay_bridge = overlay_bridge + self.dlhelper = dlhelper + self.launchmany = launchmany + self.torrent_db = launchmany.torrent_db + self.config = config + self.min_free_space = self.config['stop_collecting_threshold']*(2**20) + #if self.min_free_space <= 0: + # self.min_free_space = 200*(2**20) # at least 200 MB left on disk + self.config_dir = os.path.abspath(self.config['state_dir']) + self.torrent_dir = os.path.abspath(self.config['torrent_collecting_dir']) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: collect dir is",self.torrent_dir + assert os.path.isdir(self.torrent_dir) + self.free_space = self.get_free_space() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Available space for database and collecting torrents: %d MB," % (self.free_space/(2**20)), "Min free space", self.min_free_space/(2**20), "MB" + self.max_num_torrents = self.init_max_num_torrents = int(self.config['torrent_collecting_max_torrents']) + self.upload_rate = 1024 * int(self.config['torrent_collecting_rate']) # 5KB/s + self.num_collected_torrents = 0 + self.recently_collected_torrents = [] # list of infohashes + self.upload_queue = [] + self.requested_torrents = Set() + self.next_upload_time = 0 + self.initialized = True + self.rquerytorrenthandler = None + self.delayed_check_overflow(5) + + def register2(self,rquerytorrenthandler): + self.rquerytorrenthandler = rquerytorrenthandler + + + def handleMessage(self,permid,selversion,message): + + t = message[0] + + if t == GET_METADATA: # the other peer requests a torrent + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: Got GET_METADATA",len(message),show_permid_short(permid) + return self.send_metadata(permid, message, selversion) + elif t == METADATA: # the other peer sends me a torrent + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: Got METADATA",len(message),show_permid_short(permid),selversion, currentThread().getName() + return self.got_metadata(permid, message, selversion) + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: UNKNOWN OVERLAY MESSAGE", ord(t) + return False + + def send_metadata_request(self, permid, infohash, selversion=-1, caller="BC"): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: Connect to send GET_METADATA to",show_permid_short(permid) + if not isValidInfohash(infohash): + return False + + filename,metadata = self.torrent_exists(infohash) + if filename is not None: # torrent already exists on disk + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: send_meta_req: Already on disk??!" 
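# --- Illustrative sketch (not part of this patch) -----------------------------
# Shape of the two overlay messages MetadataHandler exchanges.  GET_METADATA
# and METADATA are single-byte message IDs imported from
# Tribler.Core.BitTornado.BT1.MessageID; their byte values are not shown here,
# so this only illustrates the payload layout around them.
from Tribler.Core.BitTornado.bencode import bencode, bdecode

def build_get_metadata(message_id, infohash):
    # what get_metadata_connect_callback() sends: ID byte + bencoded infohash
    return message_id + bencode(infohash)

def build_metadata_reply(message_id, infohash, torrent_data, swarm_info=None):
    # what do_send_metadata() sends; swarm_info (leecher/seeder/last_check_time/
    # status) is only included for overlay protocol >= OLPROTO_VER_FOURTH
    torrent = {'torrent_hash': infohash, 'metadata': torrent_data}
    if swarm_info:
        torrent.update(swarm_info)
    return message_id + bencode(torrent)

def parse_metadata_reply(message):
    # mirrors got_metadata(): strip the ID byte, bdecode, pick out the fields
    d = bdecode(message[1:])
    return d['torrent_hash'], d['metadata']
# -------------------------------------------------------------------------------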
+ self.notify_torrent_is_in(infohash, metadata, filename) + return True + + if caller == "dlhelp": + self.requested_torrents.add(infohash) + + if self.min_free_space != 0 and (self.free_space - self.avg_torrent_size < self.min_free_space): # no space to collect + self.free_space = self.get_free_space() + if self.free_space - self.avg_torrent_size < self.min_free_space: + self.warn_disk_full() + return True + + try: + # Optimization: don't connect if we're connected, although it won't + # do any harm. + if selversion == -1: # not currently connected + self.overlay_bridge.connect(permid,lambda e,d,p,s:self.get_metadata_connect_callback(e,d,p,s,infohash)) + else: + self.get_metadata_connect_callback(None,None,permid,selversion,infohash) + + except: + print_exc() + return False + return True + + def torrent_exists(self, infohash): + # if the torrent is already on disk, put it in db + + file_name = get_filename(infohash) + torrent_path = os.path.join(self.torrent_dir, file_name) + if not os.path.exists(torrent_path): + return None,None + else: + metadata = self.read_torrent(torrent_path) + if not self.valid_metadata(infohash, metadata): + return None + self.addTorrentToDB(torrent_path, infohash, metadata, source="BC", extra_info={}) + return file_name, metadata + + def get_metadata_connect_callback(self,exc,dns,permid,selversion,infohash): + if exc is None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: Sending GET_METADATA to",show_permid_short(permid) + ## Create metadata_request according to protocol version + try: + metadata_request = bencode(infohash) + self.overlay_bridge.send(permid, GET_METADATA + metadata_request,self.get_metadata_send_callback) + self.requested_torrents.add(infohash) + except: + print_exc() + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: GET_METADATA: error connecting to",show_permid_short(permid) + + def get_metadata_send_callback(self,exc,permid): + if exc is not None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: GET_METADATA: error sending to",show_permid_short(permid),exc + pass + else: + pass + + def send_metadata(self, permid, message, selversion): + try: + infohash = bdecode(message[1:]) + except: + print_exc() + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: GET_METADATA: error becoding" + return False + if not isValidInfohash(infohash): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: GET_METADATA: invalid hash" + return False + + # TODO: + res = self.torrent_db.getOne(('torrent_file_name', 'status_id'), infohash=bin2str(infohash)) + if not res: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: GET_METADATA: not in database" + return True # don't close connection because I don't have the torrent + torrent_file_name, status_id = res + if status_id == self.torrent_db._getStatusID('dead'): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: GET_METADATA: Torrent was dead" + return True + if not torrent_file_name: + return True + torrent_path = os.path.join(self.torrent_dir, torrent_file_name) + if not os.path.isfile(torrent_path): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: GET_METADATA: not existing", res, torrent_path + return True + + task = {'permid':permid, 'infohash':infohash, 'torrent_path':torrent_path, 
'selversion':selversion} + self.upload_queue.append(task) + if int(time()) >= self.next_upload_time: + self.checking_upload_queue() + + return True + + def read_and_send_metadata(self, permid, infohash, torrent_path, selversion): + torrent_data = self.read_torrent(torrent_path) + if torrent_data: + # Arno: Don't send private torrents + try: + metainfo = bdecode(torrent_data) + if 'info' in metainfo and 'private' in metainfo['info'] and metainfo['info']['private']: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: Not sending torrent", `torrent_path`,"because it is private" + return 0 + except: + print_exc() + return 0 + + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: sending torrent", `torrent_path`, len(torrent_data) + torrent = {'torrent_hash':infohash, + 'metadata':torrent_data} + if selversion >= OLPROTO_VER_FOURTH: + data = self.torrent_db.getTorrent(infohash) + if data is None: + # DB inconsistency + return 0 + nleechers = data.get('leecher', -1) + nseeders = data.get('seeder', -1) + last_check_ago = int(time()) - data.get('last_check_time', 0) # relative time + if last_check_ago < 0: + last_check_ago = 0 + status = data.get('status', 'unknown') + + torrent.update({'leecher':nleechers, + 'seeder':nseeders, + 'last_check_time':last_check_ago, + 'status':status}) + + return self.do_send_metadata(permid, torrent, selversion) + else: # deleted before sending it + self.torrent_db.deleteTorrent(infohash, delete_file=True, updateFlag=True) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: GET_METADATA: no torrent data to send" + return 0 + + def do_send_metadata(self, permid, torrent, selversion): + metadata_request = bencode(torrent) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: send metadata", len(metadata_request) + ## Optimization: we know we're currently connected + self.overlay_bridge.send(permid,METADATA + metadata_request,self.metadata_send_callback) + + # BarterCast: add bytes of torrent to BarterCastDB + # Save exchanged KBs in BarterCastDB + if permid != None and BARTERCAST_TORRENTS: + self.overlay_bridge.add_task(lambda:self.olthread_bartercast_torrentexchange(permid, 'uploaded'), 0) + + return len(metadata_request) + + def olthread_bartercast_torrentexchange(self, permid, up_or_down): + + if up_or_down != 'uploaded' and up_or_down != 'downloaded': + return + + bartercastdb = BarterCastDBHandler.getInstance() + + torrent_kb = float(self.avg_torrent_size) / 1024 + name = bartercastdb.getName(permid) + my_permid = bartercastdb.my_permid + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bartercast: Torrent (%d KB) %s to/from peer %s" % (torrent_kb, up_or_down, `name`) + + if torrent_kb > 0: + bartercastdb.incrementItem((my_permid, permid), up_or_down, torrent_kb) + + + def metadata_send_callback(self,exc,permid): + if exc is not None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: METADATA: error sending to",show_permid_short(permid),exc + pass + + def read_torrent(self, torrent_path): + try: + f = open(torrent_path, "rb") + torrent_data = f.read() + f.close() + torrent_size = len(torrent_data) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: read torrent", `torrent_path`, torrent_size + if torrent_size > Max_Torrent_Size: + return None + return torrent_data + except: + print_exc() + 
return None + + + def addTorrentToDB(self, filename, torrent_hash, metadata, source='BC', extra_info={}, hack=False): + """ Arno: no need to delegate to olbridge, this is already run by OverlayThread """ + torrent = self.torrent_db.addExternalTorrent(filename, source, extra_info) + if torrent is None: + return + + # Arno, 2008-10-20: XXX torrents are filtered out in the final display stage + self.launchmany.set_activity(NTFY_ACT_GOT_METADATA,unicode('"'+torrent['name']+'"'),torrent['category']) + + if self.initialized: + self.num_torrents += 1 # for free disk limitation + + if not extra_info: + self.refreshTrackerStatus(torrent) + + if len(self.recently_collected_torrents) < 50: # Queue of 50 + self.recently_collected_torrents.append(torrent_hash) + else: + self.recently_collected_torrents.pop(0) + self.recently_collected_torrents.append(torrent_hash) + + + def set_overflow(self, max_num_torrent): + self.max_num_torrents = self.init_max_num_torrents = max_num_torrent + + def delayed_check_overflow(self, delay=2): + if not self.initialized: + return + self.overlay_bridge.add_task(self.check_overflow, delay) + + def delayed_check_free_space(self, delay=2): + self.free_space = self.get_free_space() + + def check_overflow(self): # check if there are too many torrents relative to the free disk space + if self.num_torrents < 0: + self.num_torrents = self.torrent_db.getNumberCollectedTorrents() + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "**** torrent collectin self.num_torrents=", self.num_torrents + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: check overflow: current", self.num_torrents, "max", self.max_num_torrents + + if self.num_torrents > self.max_num_torrents: + num_delete = int(self.num_torrents - self.max_num_torrents*0.95) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "** limit space::", self.num_torrents, self.max_num_torrents, num_delete + self.limit_space(num_delete) + + def limit_space(self, num_delete): + deleted = self.torrent_db.freeSpace(num_delete) + if deleted: + self.num_torrents = self.torrent_db.getNumberCollectedTorrents() + self.free_space = self.get_free_space() + + + def save_torrent(self, infohash, metadata, source='BC', extra_info={}): + # check if disk is full before save it to disk and database + if not self.initialized: + return None + + self.check_overflow() + + if self.min_free_space != 0 and (self.free_space - len(metadata) < self.min_free_space or self.num_collected_torrents % 10 == 0): + self.free_space = self.get_free_space() + if self.free_space - len(metadata) < self.min_free_space: + self.warn_disk_full() + return None + + file_name = get_filename(infohash, metadata) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: Storing torrent", sha(infohash).hexdigest(),"in",file_name + + save_path = self.write_torrent(metadata, self.torrent_dir, file_name) + if save_path: + self.num_collected_torrents += 1 + self.free_space -= len(metadata) + self.addTorrentToDB(save_path, infohash, metadata, source=source, extra_info=extra_info) + # check if space is enough and remove old torrents + + return file_name + + + def refreshTrackerStatus(self, torrent): + "Upon the reception of a new discovered torrent, directly check its tracker" + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "metadata: checking tracker status of new torrent" + check = TorrentChecking(torrent['infohash']) + check.start() + + def 
write_torrent(self, metadata, dir, name): + try: + if not os.access(dir,os.F_OK): + os.mkdir(dir) + save_path = os.path.join(dir, name) + file = open(save_path, 'wb') + file.write(metadata) + file.close() + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: write torrent", `save_path`, len(metadata), hash(metadata) + return save_path + except: + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "metadata: write torrent failed" + return None + + def valid_metadata(self, infohash, metadata): + try: + metainfo = bdecode(metadata) + got_infohash = sha(bencode(metainfo['info'])).digest() + if infohash != got_infohash: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "metadata: infohash doesn't match the torrent " + \ + "hash. Required: " + `infohash` + ", but got: " + `got_infohash` + return False + return True + except: + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "problem metadata:", repr(metadata) + return False + + def got_metadata(self, permid, message, selversion): + """ receive torrent file from others """ + + # Arno, 2007-06-20: Disabled the following code. What's this? Somebody sends + # us something and we refuse? Also doesn't take into account download help + #and remote-query extension. + + #if self.upload_rate <= 0: # if no upload, no download, that's the game + # return True # don't close connection + + try: + message = bdecode(message[1:]) + except: + print_exc() + return False + if not isinstance(message, dict): + return False + try: + infohash = message['torrent_hash'] + if not isValidInfohash(infohash): + return False + + if not infohash in self.requested_torrents: # got a torrent which was not requested + return True + if self.torrent_db.hasMetaData(infohash): + return True + + metadata = message['metadata'] + if not self.valid_metadata(infohash, metadata): + return False + if DEBUG: + torrent_size = len(metadata) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: Recvd torrent", `infohash`, sha(infohash).hexdigest(), torrent_size + + extra_info = {} + if selversion >= OLPROTO_VER_FOURTH: + try: + extra_info = {'leecher': message.get('leecher', -1), + 'seeder': message.get('seeder', -1), + 'last_check_time': message.get('last_check_time', -1), + 'status':message.get('status', 'unknown')} + except Exception, msg: + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "metadata: wrong extra info in msg - ", message + extra_info = {} + + filename = self.save_torrent(infohash, metadata, extra_info=extra_info) + self.requested_torrents.remove(infohash) + + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: Was I asked to dlhelp someone",self.dlhelper + + if filename is not None: + self.notify_torrent_is_in(infohash,metadata,filename) + + + # BarterCast: add bytes of torrent to BarterCastDB + # Save exchanged KBs in BarterCastDB + if permid is not None and BARTERCAST_TORRENTS: + self.overlay_bridge.add_task(lambda:self.olthread_bartercast_torrentexchange(permid, 'downloaded'), 0) + + + except Exception, e: + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: Received metadata is broken",e, message.keys() + return False + + return True + + def notify_torrent_is_in(self,infohash,metadata,filename): + if self.dlhelper is not None: + self.dlhelper.metadatahandler_received_torrent(infohash, metadata) + if 
self.rquerytorrenthandler is not None: + self.rquerytorrenthandler.metadatahandler_got_torrent(infohash,metadata,filename) + + def get_num_torrents(self): + return self.num_torrents + + def warn_disk_full(self): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: send_meta_req: Disk full!" + drive,dir = os.path.splitdrive(os.path.abspath(self.torrent_dir)) + if not drive: + drive = dir + self.launchmany.set_activity(NTFY_ACT_DISK_FULL, drive) + + def get_free_space(self): + if not self.registered: + return 0 + try: + freespace = getfreespace(self.torrent_dir) + return freespace + except: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "meta: cannot get free space of", self.torrent_dir + print_exc() + return 0 + + def set_rate(self, rate): + self.upload_rate = rate * 1024 + + def set_min_free_space(self, min_free_space): + self.min_free_space = min_free_space*(2**20) + + def checking_upload_queue(self): + """ check the upload queue every 5 seconds, and send torrent out if the queue + is not empty and the max upload rate is not reached. + It is used for rate control + """ + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "metadata: checking_upload_queue, length:", len(self.upload_queue), "now:", ctime(time()), "next check:", ctime(self.next_upload_time) + if self.upload_rate > 0 and int(time()) >= self.next_upload_time and len(self.upload_queue) > 0: + task = self.upload_queue.pop(0) + permid = task['permid'] + infohash = task['infohash'] + torrent_path = task['torrent_path'] + selversion = task['selversion'] + sent_size = self.read_and_send_metadata(permid, infohash, torrent_path, selversion) + idel = sent_size / self.upload_rate + 1 + self.next_upload_time = int(time()) + idel + self.overlay_bridge.add_task(self.checking_upload_queue, idel) + + def getRecentlyCollectedTorrents(self, num): + if not self.initialized: + return [] + return self.recently_collected_torrents[-1*num:] # get the last ones + diff --git a/tribler-mod/Tribler/Core/Overlay/MetadataHandler.py.bak b/tribler-mod/Tribler/Core/Overlay/MetadataHandler.py.bak new file mode 100644 index 0000000..0a1f722 --- /dev/null +++ b/tribler-mod/Tribler/Core/Overlay/MetadataHandler.py.bak @@ -0,0 +1,564 @@ +# Written by Jie Yang, Arno Bakker +# see LICENSE.txt for license information +import sys +import os +from sha import sha +from time import time, ctime +from traceback import print_exc, print_stack +from sets import Set + +from Tribler.Core.BitTornado.bencode import bencode, bdecode +from Tribler.Core.BitTornado.BT1.MessageID import * +from Tribler.Core.Utilities.utilities import isValidInfohash, show_permid_short, sort_dictlist, bin2str +from Tribler.Core.Overlay.SecureOverlay import OLPROTO_VER_FOURTH +from Tribler.Core.simpledefs import * +from Tribler.TrackerChecking.TorrentChecking import TorrentChecking +from Tribler.Core.osutils import getfreespace,get_readable_torrent_name +from Tribler.Core.CacheDB.CacheDBHandler import BarterCastDBHandler +from threading import currentThread + +DEBUG = False + +BARTERCAST_TORRENTS = False + +# Python no recursive imports? +# from overlayswarm import overlay_infohash +overlay_infohash = '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + +Max_Torrent_Size = 2*1024*1024 # 2MB torrent = 6GB ~ 250GB content + + +def get_filename(infohash, metadata=None, humanreadable=False): + # Arno: Better would have been the infohash in hex. 
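+ # Two naming schemes are used below: with humanreadable=True the name is
+ # derived from the torrent's own 'name' field via get_readable_torrent_name(),
+ # otherwise it is sha(infohash).hexdigest() + '.torrent', i.e. the hex SHA-1
+ # digest of the binary infohash rather than the hex infohash itself.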
+ if humanreadable: + torrent = bdecode(metadata) + raw_name = torrent['info'].get('name','') + file_name = get_readable_torrent_name(infohash, raw_name) + else: + file_name = sha(infohash).hexdigest()+'.torrent' # notice: it's sha1-hash of infohash + #_path = os.path.join(self.torrent_dir, file_name) + #if os.path.exists(_path): + # assign a name for the torrent. add a timestamp if it exists. + #file_name = str(time()) + '_' + file_name + return file_name + # exceptions will be handled by got_metadata() + +class MetadataHandler: + + __single = None + + def __init__(self): + if MetadataHandler.__single: + raise RuntimeError, "MetadataHandler is singleton" + MetadataHandler.__single = self + self.num_torrents = -100 + self.avg_torrent_size = 25*(2**10) + self.initialized = False + self.registered = False + + + def getInstance(*args, **kw): + if MetadataHandler.__single is None: + MetadataHandler(*args, **kw) + return MetadataHandler.__single + getInstance = staticmethod(getInstance) + + def register(self, overlay_bridge, dlhelper, launchmany, config): + self.registered = True + self.overlay_bridge = overlay_bridge + self.dlhelper = dlhelper + self.launchmany = launchmany + self.torrent_db = launchmany.torrent_db + self.config = config + self.min_free_space = self.config['stop_collecting_threshold']*(2**20) + #if self.min_free_space <= 0: + # self.min_free_space = 200*(2**20) # at least 200 MB left on disk + self.config_dir = os.path.abspath(self.config['state_dir']) + self.torrent_dir = os.path.abspath(self.config['torrent_collecting_dir']) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: collect dir is",self.torrent_dir + assert os.path.isdir(self.torrent_dir) + self.free_space = self.get_free_space() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Available space for database and collecting torrents: %d MB," % (self.free_space/(2**20)), "Min free space", self.min_free_space/(2**20), "MB" + self.max_num_torrents = self.init_max_num_torrents = int(self.config['torrent_collecting_max_torrents']) + self.upload_rate = 1024 * int(self.config['torrent_collecting_rate']) # 5KB/s + self.num_collected_torrents = 0 + self.recently_collected_torrents = [] # list of infohashes + self.upload_queue = [] + self.requested_torrents = Set() + self.next_upload_time = 0 + self.initialized = True + self.rquerytorrenthandler = None + self.delayed_check_overflow(5) + + def register2(self,rquerytorrenthandler): + self.rquerytorrenthandler = rquerytorrenthandler + + + def handleMessage(self,permid,selversion,message): + + t = message[0] + + if t == GET_METADATA: # the other peer requests a torrent + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: Got GET_METADATA",len(message),show_permid_short(permid) + return self.send_metadata(permid, message, selversion) + elif t == METADATA: # the other peer sends me a torrent + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: Got METADATA",len(message),show_permid_short(permid),selversion, currentThread().getName() + return self.got_metadata(permid, message, selversion) + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: UNKNOWN OVERLAY MESSAGE", ord(t) + return False + + def send_metadata_request(self, permid, infohash, selversion=-1, caller="BC"): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: Connect to send GET_METADATA 
to",show_permid_short(permid) + if not isValidInfohash(infohash): + return False + + filename,metadata = self.torrent_exists(infohash) + if filename is not None: # torrent already exists on disk + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: send_meta_req: Already on disk??!" + self.notify_torrent_is_in(infohash, metadata, filename) + return True + + if caller == "dlhelp": + self.requested_torrents.add(infohash) + + if self.min_free_space != 0 and (self.free_space - self.avg_torrent_size < self.min_free_space): # no space to collect + self.free_space = self.get_free_space() + if self.free_space - self.avg_torrent_size < self.min_free_space: + self.warn_disk_full() + return True + + try: + # Optimization: don't connect if we're connected, although it won't + # do any harm. + if selversion == -1: # not currently connected + self.overlay_bridge.connect(permid,lambda e,d,p,s:self.get_metadata_connect_callback(e,d,p,s,infohash)) + else: + self.get_metadata_connect_callback(None,None,permid,selversion,infohash) + + except: + print_exc() + return False + return True + + def torrent_exists(self, infohash): + # if the torrent is already on disk, put it in db + + file_name = get_filename(infohash) + torrent_path = os.path.join(self.torrent_dir, file_name) + if not os.path.exists(torrent_path): + return None,None + else: + metadata = self.read_torrent(torrent_path) + if not self.valid_metadata(infohash, metadata): + return None + self.addTorrentToDB(torrent_path, infohash, metadata, source="BC", extra_info={}) + return file_name, metadata + + def get_metadata_connect_callback(self,exc,dns,permid,selversion,infohash): + if exc is None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: Sending GET_METADATA to",show_permid_short(permid) + ## Create metadata_request according to protocol version + try: + metadata_request = bencode(infohash) + self.overlay_bridge.send(permid, GET_METADATA + metadata_request,self.get_metadata_send_callback) + self.requested_torrents.add(infohash) + except: + print_exc() + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: GET_METADATA: error connecting to",show_permid_short(permid) + + def get_metadata_send_callback(self,exc,permid): + if exc is not None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: GET_METADATA: error sending to",show_permid_short(permid),exc + pass + else: + pass + + def send_metadata(self, permid, message, selversion): + try: + infohash = bdecode(message[1:]) + except: + print_exc() + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: GET_METADATA: error becoding" + return False + if not isValidInfohash(infohash): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: GET_METADATA: invalid hash" + return False + + # TODO: + res = self.torrent_db.getOne(('torrent_file_name', 'status_id'), infohash=bin2str(infohash)) + if not res: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: GET_METADATA: not in database" + return True # don't close connection because I don't have the torrent + torrent_file_name, status_id = res + if status_id == self.torrent_db._getStatusID('dead'): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: GET_METADATA: Torrent was dead" + return True + if not torrent_file_name: + return True + torrent_path = 
os.path.join(self.torrent_dir, torrent_file_name) + if not os.path.isfile(torrent_path): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: GET_METADATA: not existing", res, torrent_path + return True + + task = {'permid':permid, 'infohash':infohash, 'torrent_path':torrent_path, 'selversion':selversion} + self.upload_queue.append(task) + if int(time()) >= self.next_upload_time: + self.checking_upload_queue() + + return True + + def read_and_send_metadata(self, permid, infohash, torrent_path, selversion): + torrent_data = self.read_torrent(torrent_path) + if torrent_data: + # Arno: Don't send private torrents + try: + metainfo = bdecode(torrent_data) + if 'info' in metainfo and 'private' in metainfo['info'] and metainfo['info']['private']: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: Not sending torrent", `torrent_path`,"because it is private" + return 0 + except: + print_exc() + return 0 + + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: sending torrent", `torrent_path`, len(torrent_data) + torrent = {'torrent_hash':infohash, + 'metadata':torrent_data} + if selversion >= OLPROTO_VER_FOURTH: + data = self.torrent_db.getTorrent(infohash) + if data is None: + # DB inconsistency + return 0 + nleechers = data.get('leecher', -1) + nseeders = data.get('seeder', -1) + last_check_ago = int(time()) - data.get('last_check_time', 0) # relative time + if last_check_ago < 0: + last_check_ago = 0 + status = data.get('status', 'unknown') + + torrent.update({'leecher':nleechers, + 'seeder':nseeders, + 'last_check_time':last_check_ago, + 'status':status}) + + return self.do_send_metadata(permid, torrent, selversion) + else: # deleted before sending it + self.torrent_db.deleteTorrent(infohash, delete_file=True, updateFlag=True) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: GET_METADATA: no torrent data to send" + return 0 + + def do_send_metadata(self, permid, torrent, selversion): + metadata_request = bencode(torrent) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: send metadata", len(metadata_request) + ## Optimization: we know we're currently connected + self.overlay_bridge.send(permid,METADATA + metadata_request,self.metadata_send_callback) + + # BarterCast: add bytes of torrent to BarterCastDB + # Save exchanged KBs in BarterCastDB + if permid != None and BARTERCAST_TORRENTS: + self.overlay_bridge.add_task(lambda:self.olthread_bartercast_torrentexchange(permid, 'uploaded'), 0) + + return len(metadata_request) + + def olthread_bartercast_torrentexchange(self, permid, up_or_down): + + if up_or_down != 'uploaded' and up_or_down != 'downloaded': + return + + bartercastdb = BarterCastDBHandler.getInstance() + + torrent_kb = float(self.avg_torrent_size) / 1024 + name = bartercastdb.getName(permid) + my_permid = bartercastdb.my_permid + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bartercast: Torrent (%d KB) %s to/from peer %s" % (torrent_kb, up_or_down, `name`) + + if torrent_kb > 0: + bartercastdb.incrementItem((my_permid, permid), up_or_down, torrent_kb) + + + def metadata_send_callback(self,exc,permid): + if exc is not None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: METADATA: error sending to",show_permid_short(permid),exc + pass + + def read_torrent(self, torrent_path): + try: + f = 
open(torrent_path, "rb") + torrent_data = f.read() + f.close() + torrent_size = len(torrent_data) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: read torrent", `torrent_path`, torrent_size + if torrent_size > Max_Torrent_Size: + return None + return torrent_data + except: + print_exc() + return None + + + def addTorrentToDB(self, filename, torrent_hash, metadata, source='BC', extra_info={}, hack=False): + """ Arno: no need to delegate to olbridge, this is already run by OverlayThread """ + torrent = self.torrent_db.addExternalTorrent(filename, source, extra_info) + if torrent is None: + return + + # Arno, 2008-10-20: XXX torrents are filtered out in the final display stage + self.launchmany.set_activity(NTFY_ACT_GOT_METADATA,unicode('"'+torrent['name']+'"'),torrent['category']) + + if self.initialized: + self.num_torrents += 1 # for free disk limitation + + if not extra_info: + self.refreshTrackerStatus(torrent) + + if len(self.recently_collected_torrents) < 50: # Queue of 50 + self.recently_collected_torrents.append(torrent_hash) + else: + self.recently_collected_torrents.pop(0) + self.recently_collected_torrents.append(torrent_hash) + + + def set_overflow(self, max_num_torrent): + self.max_num_torrents = self.init_max_num_torrents = max_num_torrent + + def delayed_check_overflow(self, delay=2): + if not self.initialized: + return + self.overlay_bridge.add_task(self.check_overflow, delay) + + def delayed_check_free_space(self, delay=2): + self.free_space = self.get_free_space() + + def check_overflow(self): # check if there are too many torrents relative to the free disk space + if self.num_torrents < 0: + self.num_torrents = self.torrent_db.getNumberCollectedTorrents() + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "**** torrent collectin self.num_torrents=", self.num_torrents + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: check overflow: current", self.num_torrents, "max", self.max_num_torrents + + if self.num_torrents > self.max_num_torrents: + num_delete = int(self.num_torrents - self.max_num_torrents*0.95) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "** limit space::", self.num_torrents, self.max_num_torrents, num_delete + self.limit_space(num_delete) + + def limit_space(self, num_delete): + deleted = self.torrent_db.freeSpace(num_delete) + if deleted: + self.num_torrents = self.torrent_db.getNumberCollectedTorrents() + self.free_space = self.get_free_space() + + + def save_torrent(self, infohash, metadata, source='BC', extra_info={}): + # check if disk is full before save it to disk and database + if not self.initialized: + return None + + self.check_overflow() + + if self.min_free_space != 0 and (self.free_space - len(metadata) < self.min_free_space or self.num_collected_torrents % 10 == 0): + self.free_space = self.get_free_space() + if self.free_space - len(metadata) < self.min_free_space: + self.warn_disk_full() + return None + + file_name = get_filename(infohash, metadata) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: Storing torrent", sha(infohash).hexdigest(),"in",file_name + + save_path = self.write_torrent(metadata, self.torrent_dir, file_name) + if save_path: + self.num_collected_torrents += 1 + self.free_space -= len(metadata) + self.addTorrentToDB(save_path, infohash, metadata, source=source, extra_info=extra_info) + # check if space is enough and remove old torrents + + return 
file_name + + + def refreshTrackerStatus(self, torrent): + "Upon the reception of a new discovered torrent, directly check its tracker" + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "metadata: checking tracker status of new torrent" + check = TorrentChecking(torrent['infohash']) + check.start() + + def write_torrent(self, metadata, dir, name): + try: + if not os.access(dir,os.F_OK): + os.mkdir(dir) + save_path = os.path.join(dir, name) + file = open(save_path, 'wb') + file.write(metadata) + file.close() + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: write torrent", `save_path`, len(metadata), hash(metadata) + return save_path + except: + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "metadata: write torrent failed" + return None + + def valid_metadata(self, infohash, metadata): + try: + metainfo = bdecode(metadata) + got_infohash = sha(bencode(metainfo['info'])).digest() + if infohash != got_infohash: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "metadata: infohash doesn't match the torrent " + \ + "hash. Required: " + `infohash` + ", but got: " + `got_infohash` + return False + return True + except: + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "problem metadata:", repr(metadata) + return False + + def got_metadata(self, permid, message, selversion): + """ receive torrent file from others """ + + # Arno, 2007-06-20: Disabled the following code. What's this? Somebody sends + # us something and we refuse? Also doesn't take into account download help + #and remote-query extension. + + #if self.upload_rate <= 0: # if no upload, no download, that's the game + # return True # don't close connection + + try: + message = bdecode(message[1:]) + except: + print_exc() + return False + if not isinstance(message, dict): + return False + try: + infohash = message['torrent_hash'] + if not isValidInfohash(infohash): + return False + + if not infohash in self.requested_torrents: # got a torrent which was not requested + return True + if self.torrent_db.hasMetaData(infohash): + return True + + metadata = message['metadata'] + if not self.valid_metadata(infohash, metadata): + return False + if DEBUG: + torrent_size = len(metadata) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: Recvd torrent", `infohash`, sha(infohash).hexdigest(), torrent_size + + extra_info = {} + if selversion >= OLPROTO_VER_FOURTH: + try: + extra_info = {'leecher': message.get('leecher', -1), + 'seeder': message.get('seeder', -1), + 'last_check_time': message.get('last_check_time', -1), + 'status':message.get('status', 'unknown')} + except Exception, msg: + print_exc() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "metadata: wrong extra info in msg - ", message + extra_info = {} + + filename = self.save_torrent(infohash, metadata, extra_info=extra_info) + self.requested_torrents.remove(infohash) + + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: Was I asked to dlhelp someone",self.dlhelper + + if filename is not None: + self.notify_torrent_is_in(infohash,metadata,filename) + + + # BarterCast: add bytes of torrent to BarterCastDB + # Save exchanged KBs in BarterCastDB + if permid is not None and BARTERCAST_TORRENTS: + self.overlay_bridge.add_task(lambda:self.olthread_bartercast_torrentexchange(permid, 'downloaded'), 0) + + + except Exception, e: + print_exc() + 
print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: Received metadata is broken",e, message.keys() + return False + + return True + + def notify_torrent_is_in(self,infohash,metadata,filename): + if self.dlhelper is not None: + self.dlhelper.metadatahandler_received_torrent(infohash, metadata) + if self.rquerytorrenthandler is not None: + self.rquerytorrenthandler.metadatahandler_got_torrent(infohash,metadata,filename) + + def get_num_torrents(self): + return self.num_torrents + + def warn_disk_full(self): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","metadata: send_meta_req: Disk full!" + drive,dir = os.path.splitdrive(os.path.abspath(self.torrent_dir)) + if not drive: + drive = dir + self.launchmany.set_activity(NTFY_ACT_DISK_FULL, drive) + + def get_free_space(self): + if not self.registered: + return 0 + try: + freespace = getfreespace(self.torrent_dir) + return freespace + except: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "meta: cannot get free space of", self.torrent_dir + print_exc() + return 0 + + def set_rate(self, rate): + self.upload_rate = rate * 1024 + + def set_min_free_space(self, min_free_space): + self.min_free_space = min_free_space*(2**20) + + def checking_upload_queue(self): + """ check the upload queue every 5 seconds, and send torrent out if the queue + is not empty and the max upload rate is not reached. + It is used for rate control + """ + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "metadata: checking_upload_queue, length:", len(self.upload_queue), "now:", ctime(time()), "next check:", ctime(self.next_upload_time) + if self.upload_rate > 0 and int(time()) >= self.next_upload_time and len(self.upload_queue) > 0: + task = self.upload_queue.pop(0) + permid = task['permid'] + infohash = task['infohash'] + torrent_path = task['torrent_path'] + selversion = task['selversion'] + sent_size = self.read_and_send_metadata(permid, infohash, torrent_path, selversion) + idel = sent_size / self.upload_rate + 1 + self.next_upload_time = int(time()) + idel + self.overlay_bridge.add_task(self.checking_upload_queue, idel) + + def getRecentlyCollectedTorrents(self, num): + if not self.initialized: + return [] + return self.recently_collected_torrents[-1*num:] # get the last ones + diff --git a/tribler-mod/Tribler/Core/Overlay/OverlayApps.py b/tribler-mod/Tribler/Core/Overlay/OverlayApps.py new file mode 100644 index 0000000..794bab6 --- /dev/null +++ b/tribler-mod/Tribler/Core/Overlay/OverlayApps.py @@ -0,0 +1,306 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information +# +# All applications on top of the SecureOverlay should be started here. 
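+# Roughly, each application registers one handler per overlay message ID via
+# register_msg_handler() and OverlayApps.handleMessage() demultiplexes incoming
+# overlay messages to it. A minimal sketch (illustrative only; 'overlay_apps'
+# stands for the OverlayApps singleton):
+#
+#   overlay_apps = OverlayApps.getInstance()
+#   metadata_handler = MetadataHandler.getInstance()
+#   overlay_apps.register_msg_handler(MetadataMessages, metadata_handler.handleMessage)
+#   # an incoming GET_METADATA message is then routed by handleMessage() to
+#   # metadata_handler.handleMessage(permid, selversion, message)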
+# +from MetadataHandler import MetadataHandler +from threading import Lock +from threading import currentThread +from time import time +from traceback import print_exc +import sys + +from Tribler.Core.BitTornado.BT1.MessageID import * +from Tribler.Core.BuddyCast.buddycast import BuddyCastFactory +from Tribler.Core.CoopDownload.CoordinatorMessageHandler import CoordinatorMessageHandler +from Tribler.Core.CoopDownload.HelperMessageHandler import HelperMessageHandler +from Tribler.Core.NATFirewall.DialbackMsgHandler import DialbackMsgHandler +from Tribler.Core.NATFirewall.NatCheckMsgHandler import NatCheckMsgHandler +from Tribler.Core.SocialNetwork.FriendshipMsgHandler import FriendshipMsgHandler +from Tribler.Core.SocialNetwork.RemoteQueryMsgHandler import RemoteQueryMsgHandler +from Tribler.Core.SocialNetwork.RemoteTorrentHandler import RemoteTorrentHandler +from Tribler.Core.SocialNetwork.SocialNetworkMsgHandler import SocialNetworkMsgHandler +from Tribler.Core.Statistics.Crawler import Crawler +from Tribler.Core.Statistics.DatabaseCrawler import DatabaseCrawler +from Tribler.Core.Statistics.FriendshipCrawler import FriendshipCrawler +from Tribler.Core.Statistics.SeedingStatsCrawler import SeedingStatsCrawler +from Tribler.Core.Statistics.VideoPlaybackCrawler import VideoPlaybackCrawler +from Tribler.Core.Utilities.utilities import show_permid_short +from Tribler.Core.simpledefs import * + +DEBUG = False + +class OverlayApps: + # Code to make this a singleton + __single = None + + def __init__(self): + if OverlayApps.__single: + raise RuntimeError, "OverlayApps is Singleton" + OverlayApps.__single = self + self.coord_handler = None + self.help_handler = None + self.metadata_handler = None + self.buddycast = None + self.collect = None + self.dialback_handler = None + self.socnet_handler = None + self.rquery_handler = None + self.friendship_handler = None + self.msg_handlers = {} + self.connection_handlers = [] + self.text_mode = None + self.requestPolicyLock = Lock() + + def getInstance(*args, **kw): + if OverlayApps.__single is None: + OverlayApps(*args, **kw) + return OverlayApps.__single + getInstance = staticmethod(getInstance) + + def register(self, overlay_bridge, session, launchmany, config, requestPolicy): + self.overlay_bridge = overlay_bridge + self.launchmany = launchmany + self.requestPolicy = requestPolicy + self.text_mode = config.has_key('text_mode') + + # OverlayApps gets all messages, and demultiplexes + overlay_bridge.register_recv_callback(self.handleMessage) + overlay_bridge.register_conns_callback(self.handleConnection) + + # Create handler for metadata messages in two parts, as + # download help needs to know the metadata_handler and we need + # to know the download helper handler. 
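+ # (Part 1 below only obtains the MetadataHandler singleton so that the
+ # HelperMessageHandler can be given it; Part 2 then calls
+ # metadata_handler.register() once the (possibly None) help_handler is known.)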
+ # Part 1: + self.metadata_handler = MetadataHandler.getInstance() + + if config['download_help']: + # Create handler for messages to dlhelp coordinator + self.coord_handler = CoordinatorMessageHandler(launchmany) + self.register_msg_handler(HelpHelperMessages, self.coord_handler.handleMessage) + + # Create handler for messages to dlhelp helper + self.help_handler = HelperMessageHandler() + self.help_handler.register(session,self.metadata_handler,config['download_help_dir'],config.get('coopdlconfig', False)) + self.register_msg_handler(HelpCoordinatorMessages, self.help_handler.handleMessage) + + # Part 2: + self.metadata_handler.register(overlay_bridge, self.help_handler, launchmany, config) + self.register_msg_handler(MetadataMessages, self.metadata_handler.handleMessage) + + if not config['torrent_collecting']: + self.torrent_collecting_solution = 0 + else: + self.torrent_collecting_solution = config['buddycast_collecting_solution'] + + if config['buddycast']: + # Create handler for Buddycast messages + + self.buddycast = BuddyCastFactory.getInstance(superpeer=config['superpeer'], log=config['overlay_log']) + # Using buddycast to handle torrent collecting since they are dependent + self.buddycast.register(overlay_bridge, launchmany, + launchmany.rawserver_fatalerrorfunc, + self.metadata_handler, + self.torrent_collecting_solution, + config['start_recommender'],config['buddycast_max_peers']) + self.register_msg_handler(BuddyCastMessages, self.buddycast.handleMessage) + self.register_connection_handler(self.buddycast.handleConnection) + + if config['dialback']: + self.dialback_handler = DialbackMsgHandler.getInstance() + # The Dialback mechanism needs the real rawserver, not the overlay_bridge + self.dialback_handler.register(overlay_bridge, launchmany, launchmany.rawserver, config) + self.register_msg_handler([DIALBACK_REQUEST], + self.dialback_handler.olthread_handleSecOverlayMessage) + self.register_connection_handler(self.dialback_handler.olthread_handleSecOverlayConnection) + else: + self.register_msg_handler([DIALBACK_REQUEST], self.handleDisabledMessage) + + if config['socnet']: + self.socnet_handler = SocialNetworkMsgHandler.getInstance() + self.socnet_handler.register(overlay_bridge, launchmany, config) + self.register_msg_handler(SocialNetworkMessages,self.socnet_handler.handleMessage) + self.register_connection_handler(self.socnet_handler.handleConnection) + + self.friendship_handler = FriendshipMsgHandler.getInstance() + self.friendship_handler.register(overlay_bridge, launchmany.session) + self.register_msg_handler(FriendshipMessages,self.friendship_handler.handleMessage) + self.register_connection_handler(self.friendship_handler.handleConnection) + + if config['rquery']: + self.rquery_handler = RemoteQueryMsgHandler.getInstance() + self.rquery_handler.register(overlay_bridge,launchmany,config,self.buddycast,log=config['overlay_log']) + self.register_msg_handler(RemoteQueryMessages,self.rquery_handler.handleMessage) + self.register_connection_handler(self.rquery_handler.handleConnection) + + if config['crawler']: + crawler = Crawler.get_instance(session) + self.register_msg_handler([CRAWLER_REQUEST], crawler.handle_request) + + database_crawler = DatabaseCrawler.get_instance() + crawler.register_message_handler(CRAWLER_DATABASE_QUERY, database_crawler.handle_crawler_request, database_crawler.handle_crawler_reply) + seeding_stats_crawler = SeedingStatsCrawler.get_instance() + crawler.register_message_handler(CRAWLER_SEEDINGSTATS_QUERY, 
seeding_stats_crawler.handle_crawler_request, seeding_stats_crawler.handle_crawler_reply) + friendship_crawler = FriendshipCrawler.get_instance(session) + crawler.register_message_handler(CRAWLER_FRIENDSHIP_STATS, friendship_crawler.handle_crawler_request, friendship_crawler.handle_crawler_reply) + natcheck_handler = NatCheckMsgHandler.getInstance() + natcheck_handler.register(launchmany) + crawler.register_message_handler(CRAWLER_NATCHECK, natcheck_handler.gotDoNatCheckMessage, natcheck_handler.gotNatCheckReplyMessage) + crawler.register_message_handler(CRAWLER_NATTRAVERSAL, natcheck_handler.gotUdpConnectRequest, natcheck_handler.gotUdpConnectReply) + videoplayback_crawler = VideoPlaybackCrawler.get_instance() + crawler.register_message_handler(CRAWLER_VIDEOPLAYBACK_INFO_QUERY, videoplayback_crawler.handle_info_crawler_request, videoplayback_crawler.handle_info_crawler_reply) + crawler.register_message_handler(CRAWLER_VIDEOPLAYBACK_EVENT_QUERY, videoplayback_crawler.handle_event_crawler_request, videoplayback_crawler.handle_event_crawler_reply) + + if crawler.am_crawler(): + + # we will only accept CRAWLER_REPLY messages when we are actully a crawler + self.register_msg_handler([CRAWLER_REPLY], crawler.handle_reply) + self.register_connection_handler(crawler.handle_connection) + + if "database" in sys.argv: + # allows access to tribler database (boudewijn) + crawler.register_crawl_initiator(database_crawler.query_initiator) + + if "videoplayback" in sys.argv: + # allows access to video-playback statistics (boudewijn) + crawler.register_crawl_initiator(videoplayback_crawler.query_info_initiator) + + if "seedingstats" in sys.argv: + # allows access to seeding statistics (Boxun) + crawler.register_crawl_initiator(seeding_stats_crawler.query_initiator, frequency=60*30) + + if "friendship" in sys.argv: + # allows access to friendship statistics (Ali) + crawler.register_crawl_initiator(friendship_crawler.query_initiator) + + if "natcheck" in sys.argv: + # allows access to nat-check statistics (Lucia) + crawler.register_crawl_initiator(natcheck_handler.doNatCheck, 1200) + + else: + self.register_msg_handler([CRAWLER_REQUEST, CRAWLER_REPLY], self.handleDisabledMessage) + + self.rtorrent_handler = RemoteTorrentHandler.getInstance() + self.rtorrent_handler.register(overlay_bridge,self.metadata_handler,session) + self.metadata_handler.register2(self.rtorrent_handler) + + # Add notifier as connection handler + self.register_connection_handler(self.notifier_handles_connection) + + if config['buddycast']: + # Arno: to prevent concurrency between mainthread and overlay + # thread where BuddyCast schedules tasks + self.buddycast.register2() + + def early_shutdown(self): + """ Called as soon as Session shutdown is initiated. Used to start + shutdown tasks that takes some time and that can run in parallel + to checkpointing, etc. + """ + # Called by OverlayThread + if self.friendship_handler is not None: + self.friendship_handler.shutdown() + + + def register_msg_handler(self, ids, handler): + """ + ids is the [ID1, ID2, ..] where IDn is a sort of message ID in overlay + swarm. 
Each ID can only be handled by one handler, but a handler can + handle multiple IDs + """ + for id in ids: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olapps: Message handler registered for",getMessageName(id) + self.msg_handlers[id] = handler + + def register_connection_handler(self, handler): + """ + Register a handler for if a connection is established + handler-function is called like: + handler(exc,permid,selversion,locally_initiated) + """ + assert handler not in self.connection_handlers, 'This connection_handler is already registered' + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "olapps: Connection handler registered for", handler + self.connection_handlers.append(handler) + + def handleMessage(self,permid,selversion,message): + """ demultiplex message stream to handlers """ + + # Check auth + if not self.requestAllowed(permid, message[0]): + return False + + if message[0] in self.msg_handlers: + # This is a one byte id. (For instance a regular + # BitTorrent message) + id_ = message[0] + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "olapps: No handler found for", getMessageName(message[0:2]) + return False + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "olapps: handleMessage", getMessageName(id_), "v" + str(selversion) + + try: + if DEBUG: + st = time() + ret = self.msg_handlers[id_](permid, selversion, message) + et = time() + diff = et - st + if diff > 0: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olapps: ",getMessageName(id_),"TOOK %.5f" % diff + return ret + else: + return self.msg_handlers[id_](permid, selversion, message) + except: + # Catch all + print_exc() + return False + + def handleDisabledMessage(*args): + return True + + def handleConnection(self,exc,permid,selversion,locally_initiated): + """ An overlay-connection was established. Notify interested parties. 
""" + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olapps: handleConnection",exc,selversion,locally_initiated,currentThread().getName() + + for handler in self.connection_handlers: + try: + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olapps: calling connection handler:",'%s.%s' % (handler.__module__, handler.__name__) + handler(exc,permid,selversion,locally_initiated) + except: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'olapps: Exception during connection handler calling' + print_exc() + + def requestAllowed(self, permid, messageType): + self.requestPolicyLock.acquire() + try: + rp = self.requestPolicy + finally: + self.requestPolicyLock.release() + allowed = rp.allowed(permid, messageType) + if DEBUG: + if allowed: + word = 'allowed' + else: + word = 'denied' + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'olapps: Request type %s from %s was %s' % (getMessageName(messageType), show_permid_short(permid), word) + return allowed + + def setRequestPolicy(self, requestPolicy): + self.requestPolicyLock.acquire() + try: + self.requestPolicy = requestPolicy + finally: + self.requestPolicyLock.release() + + + def notifier_handles_connection(self, exc,permid,selversion,locally_initiated): + # Notify interested parties (that use the notifier/observer structure) about a connection + self.launchmany.session.uch.notify(NTFY_PEERS, NTFY_CONNECTION, permid, True) diff --git a/tribler-mod/Tribler/Core/Overlay/OverlayApps.py.bak b/tribler-mod/Tribler/Core/Overlay/OverlayApps.py.bak new file mode 100644 index 0000000..24948d3 --- /dev/null +++ b/tribler-mod/Tribler/Core/Overlay/OverlayApps.py.bak @@ -0,0 +1,305 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information +# +# All applications on top of the SecureOverlay should be started here. 
+# +from MetadataHandler import MetadataHandler +from threading import Lock +from threading import currentThread +from time import time +from traceback import print_exc +import sys + +from Tribler.Core.BitTornado.BT1.MessageID import * +from Tribler.Core.BuddyCast.buddycast import BuddyCastFactory +from Tribler.Core.CoopDownload.CoordinatorMessageHandler import CoordinatorMessageHandler +from Tribler.Core.CoopDownload.HelperMessageHandler import HelperMessageHandler +from Tribler.Core.NATFirewall.DialbackMsgHandler import DialbackMsgHandler +from Tribler.Core.NATFirewall.NatCheckMsgHandler import NatCheckMsgHandler +from Tribler.Core.SocialNetwork.FriendshipMsgHandler import FriendshipMsgHandler +from Tribler.Core.SocialNetwork.RemoteQueryMsgHandler import RemoteQueryMsgHandler +from Tribler.Core.SocialNetwork.RemoteTorrentHandler import RemoteTorrentHandler +from Tribler.Core.SocialNetwork.SocialNetworkMsgHandler import SocialNetworkMsgHandler +from Tribler.Core.Statistics.Crawler import Crawler +from Tribler.Core.Statistics.DatabaseCrawler import DatabaseCrawler +from Tribler.Core.Statistics.FriendshipCrawler import FriendshipCrawler +from Tribler.Core.Statistics.SeedingStatsCrawler import SeedingStatsCrawler +from Tribler.Core.Statistics.VideoPlaybackCrawler import VideoPlaybackCrawler +from Tribler.Core.Utilities.utilities import show_permid_short +from Tribler.Core.simpledefs import * + +DEBUG = False + +class OverlayApps: + # Code to make this a singleton + __single = None + + def __init__(self): + if OverlayApps.__single: + raise RuntimeError, "OverlayApps is Singleton" + OverlayApps.__single = self + self.coord_handler = None + self.help_handler = None + self.metadata_handler = None + self.buddycast = None + self.collect = None + self.dialback_handler = None + self.socnet_handler = None + self.rquery_handler = None + self.friendship_handler = None + self.msg_handlers = {} + self.connection_handlers = [] + self.text_mode = None + self.requestPolicyLock = Lock() + + def getInstance(*args, **kw): + if OverlayApps.__single is None: + OverlayApps(*args, **kw) + return OverlayApps.__single + getInstance = staticmethod(getInstance) + + def register(self, overlay_bridge, session, launchmany, config, requestPolicy): + self.overlay_bridge = overlay_bridge + self.launchmany = launchmany + self.requestPolicy = requestPolicy + self.text_mode = config.has_key('text_mode') + + # OverlayApps gets all messages, and demultiplexes + overlay_bridge.register_recv_callback(self.handleMessage) + overlay_bridge.register_conns_callback(self.handleConnection) + + # Create handler for metadata messages in two parts, as + # download help needs to know the metadata_handler and we need + # to know the download helper handler. 
+ # Part 1: + self.metadata_handler = MetadataHandler.getInstance() + + if config['download_help']: + # Create handler for messages to dlhelp coordinator + self.coord_handler = CoordinatorMessageHandler(launchmany) + self.register_msg_handler(HelpHelperMessages, self.coord_handler.handleMessage) + + # Create handler for messages to dlhelp helper + self.help_handler = HelperMessageHandler() + self.help_handler.register(session,self.metadata_handler,config['download_help_dir'],config.get('coopdlconfig', False)) + self.register_msg_handler(HelpCoordinatorMessages, self.help_handler.handleMessage) + + # Part 2: + self.metadata_handler.register(overlay_bridge, self.help_handler, launchmany, config) + self.register_msg_handler(MetadataMessages, self.metadata_handler.handleMessage) + + if not config['torrent_collecting']: + self.torrent_collecting_solution = 0 + else: + self.torrent_collecting_solution = config['buddycast_collecting_solution'] + + if config['buddycast']: + # Create handler for Buddycast messages + + self.buddycast = BuddyCastFactory.getInstance(superpeer=config['superpeer'], log=config['overlay_log']) + # Using buddycast to handle torrent collecting since they are dependent + self.buddycast.register(overlay_bridge, launchmany, + launchmany.rawserver_fatalerrorfunc, + self.metadata_handler, + self.torrent_collecting_solution, + config['start_recommender'],config['buddycast_max_peers']) + self.register_msg_handler(BuddyCastMessages, self.buddycast.handleMessage) + self.register_connection_handler(self.buddycast.handleConnection) + + if config['dialback']: + self.dialback_handler = DialbackMsgHandler.getInstance() + # The Dialback mechanism needs the real rawserver, not the overlay_bridge + self.dialback_handler.register(overlay_bridge, launchmany, launchmany.rawserver, config) + self.register_msg_handler([DIALBACK_REQUEST], + self.dialback_handler.olthread_handleSecOverlayMessage) + self.register_connection_handler(self.dialback_handler.olthread_handleSecOverlayConnection) + else: + self.register_msg_handler([DIALBACK_REQUEST], self.handleDisabledMessage) + + if config['socnet']: + self.socnet_handler = SocialNetworkMsgHandler.getInstance() + self.socnet_handler.register(overlay_bridge, launchmany, config) + self.register_msg_handler(SocialNetworkMessages,self.socnet_handler.handleMessage) + self.register_connection_handler(self.socnet_handler.handleConnection) + + self.friendship_handler = FriendshipMsgHandler.getInstance() + self.friendship_handler.register(overlay_bridge, launchmany.session) + self.register_msg_handler(FriendshipMessages,self.friendship_handler.handleMessage) + self.register_connection_handler(self.friendship_handler.handleConnection) + + if config['rquery']: + self.rquery_handler = RemoteQueryMsgHandler.getInstance() + self.rquery_handler.register(overlay_bridge,launchmany,config,self.buddycast,log=config['overlay_log']) + self.register_msg_handler(RemoteQueryMessages,self.rquery_handler.handleMessage) + self.register_connection_handler(self.rquery_handler.handleConnection) + + if config['crawler']: + crawler = Crawler.get_instance(session) + self.register_msg_handler([CRAWLER_REQUEST], crawler.handle_request) + + database_crawler = DatabaseCrawler.get_instance() + crawler.register_message_handler(CRAWLER_DATABASE_QUERY, database_crawler.handle_crawler_request, database_crawler.handle_crawler_reply) + seeding_stats_crawler = SeedingStatsCrawler.get_instance() + crawler.register_message_handler(CRAWLER_SEEDINGSTATS_QUERY, 
seeding_stats_crawler.handle_crawler_request, seeding_stats_crawler.handle_crawler_reply) + friendship_crawler = FriendshipCrawler.get_instance(session) + crawler.register_message_handler(CRAWLER_FRIENDSHIP_STATS, friendship_crawler.handle_crawler_request, friendship_crawler.handle_crawler_reply) + natcheck_handler = NatCheckMsgHandler.getInstance() + natcheck_handler.register(launchmany) + crawler.register_message_handler(CRAWLER_NATCHECK, natcheck_handler.gotDoNatCheckMessage, natcheck_handler.gotNatCheckReplyMessage) + crawler.register_message_handler(CRAWLER_NATTRAVERSAL, natcheck_handler.gotUdpConnectRequest, natcheck_handler.gotUdpConnectReply) + videoplayback_crawler = VideoPlaybackCrawler.get_instance() + crawler.register_message_handler(CRAWLER_VIDEOPLAYBACK_INFO_QUERY, videoplayback_crawler.handle_info_crawler_request, videoplayback_crawler.handle_info_crawler_reply) + crawler.register_message_handler(CRAWLER_VIDEOPLAYBACK_EVENT_QUERY, videoplayback_crawler.handle_event_crawler_request, videoplayback_crawler.handle_event_crawler_reply) + + if crawler.am_crawler(): + + # we will only accept CRAWLER_REPLY messages when we are actully a crawler + self.register_msg_handler([CRAWLER_REPLY], crawler.handle_reply) + self.register_connection_handler(crawler.handle_connection) + + if "database" in sys.argv: + # allows access to tribler database (boudewijn) + crawler.register_crawl_initiator(database_crawler.query_initiator) + + if "videoplayback" in sys.argv: + # allows access to video-playback statistics (boudewijn) + crawler.register_crawl_initiator(videoplayback_crawler.query_info_initiator) + + if "seedingstats" in sys.argv: + # allows access to seeding statistics (Boxun) + crawler.register_crawl_initiator(seeding_stats_crawler.query_initiator, frequency=60*30) + + if "friendship" in sys.argv: + # allows access to friendship statistics (Ali) + crawler.register_crawl_initiator(friendship_crawler.query_initiator) + + if "natcheck" in sys.argv: + # allows access to nat-check statistics (Lucia) + crawler.register_crawl_initiator(natcheck_handler.doNatCheck, 1200) + + else: + self.register_msg_handler([CRAWLER_REQUEST, CRAWLER_REPLY], self.handleDisabledMessage) + + self.rtorrent_handler = RemoteTorrentHandler.getInstance() + self.rtorrent_handler.register(overlay_bridge,self.metadata_handler,session) + self.metadata_handler.register2(self.rtorrent_handler) + + # Add notifier as connection handler + self.register_connection_handler(self.notifier_handles_connection) + + if config['buddycast']: + # Arno: to prevent concurrency between mainthread and overlay + # thread where BuddyCast schedules tasks + self.buddycast.register2() + + def early_shutdown(self): + """ Called as soon as Session shutdown is initiated. Used to start + shutdown tasks that takes some time and that can run in parallel + to checkpointing, etc. + """ + # Called by OverlayThread + if self.friendship_handler is not None: + self.friendship_handler.shutdown() + + + def register_msg_handler(self, ids, handler): + """ + ids is the [ID1, ID2, ..] where IDn is a sort of message ID in overlay + swarm. 
Each ID can only be handled by one handler, but a handler can + handle multiple IDs + """ + for id in ids: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olapps: Message handler registered for",getMessageName(id) + self.msg_handlers[id] = handler + + def register_connection_handler(self, handler): + """ + Register a handler for if a connection is established + handler-function is called like: + handler(exc,permid,selversion,locally_initiated) + """ + assert handler not in self.connection_handlers, 'This connection_handler is already registered' + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "olapps: Connection handler registered for", handler + self.connection_handlers.append(handler) + + def handleMessage(self,permid,selversion,message): + """ demultiplex message stream to handlers """ + + # Check auth + if not self.requestAllowed(permid, message[0]): + return False + + if message[0] in self.msg_handlers: + # This is a one byte id. (For instance a regular + # BitTorrent message) + id_ = message[0] + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "olapps: No handler found for", getMessageName(message[0:2]) + return False + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "olapps: handleMessage", getMessageName(id_), "v" + str(selversion) + + try: + if DEBUG: + st = time() + ret = self.msg_handlers[id_](permid, selversion, message) + et = time() + diff = et - st + if diff > 0: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olapps: ",getMessageName(id_),"TOOK %.5f" % diff + return ret + else: + return self.msg_handlers[id_](permid, selversion, message) + except: + # Catch all + print_exc() + return False + + def handleDisabledMessage(*args): + return True + + def handleConnection(self,exc,permid,selversion,locally_initiated): + """ An overlay-connection was established. Notify interested parties. 
""" + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olapps: handleConnection",exc,selversion,locally_initiated,currentThread().getName() + + for handler in self.connection_handlers: + try: + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olapps: calling connection handler:",'%s.%s' % (handler.__module__, handler.__name__) + handler(exc,permid,selversion,locally_initiated) + except: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'olapps: Exception during connection handler calling' + print_exc() + + def requestAllowed(self, permid, messageType): + self.requestPolicyLock.acquire() + try: + rp = self.requestPolicy + finally: + self.requestPolicyLock.release() + allowed = rp.allowed(permid, messageType) + if DEBUG: + if allowed: + word = 'allowed' + else: + word = 'denied' + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'olapps: Request type %s from %s was %s' % (getMessageName(messageType), show_permid_short(permid), word) + return allowed + + def setRequestPolicy(self, requestPolicy): + self.requestPolicyLock.acquire() + try: + self.requestPolicy = requestPolicy + finally: + self.requestPolicyLock.release() + + + def notifier_handles_connection(self, exc,permid,selversion,locally_initiated): + # Notify interested parties (that use the notifier/observer structure) about a connection + self.launchmany.session.uch.notify(NTFY_PEERS, NTFY_CONNECTION, permid, True) diff --git a/tribler-mod/Tribler/Core/Overlay/OverlayThreadingBridge.py b/tribler-mod/Tribler/Core/Overlay/OverlayThreadingBridge.py new file mode 100644 index 0000000..79300e3 --- /dev/null +++ b/tribler-mod/Tribler/Core/Overlay/OverlayThreadingBridge.py @@ -0,0 +1,217 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information +# +# This class bridges between the OverlayApps class and the SecureOverlay +# and ensures that all upcalls made by the NetworkThread via the SecureOverlay +# are handed over to a different thread, the OverlayThread that propagates the +# upcall to the OverlayApps. 
+# + +import sys +from threading import currentThread +from traceback import print_exc + +from Tribler.Core.Overlay.SecureOverlay import CloseException +from Tribler.Core.BitTornado.BT1.MessageID import getMessageName +from Tribler.Core.Utilities.utilities import show_permid_short +from Tribler.Utilities.TimedTaskQueue import TimedTaskQueue +import threading + +DEBUG = False + +class OverlayThreadingBridge: + + __single = None + lock = threading.Lock() + + def __init__(self): + if OverlayThreadingBridge.__single: + raise RuntimeError, "OverlayThreadingBridge is Singleton" + OverlayThreadingBridge.__single = self + + self.secover = None + self.olapps = None + self.olappsmsghandler = None + self.olappsconnhandler = None + + # Current impl of wrapper: single thread + self.tqueue = TimedTaskQueue(nameprefix="Overlay") + + def getInstance(*args, **kw): + # Singleton pattern with double-checking + if OverlayThreadingBridge.__single is None: + OverlayThreadingBridge.lock.acquire() + try: + if OverlayThreadingBridge.__single is None: + OverlayThreadingBridge(*args, **kw) + finally: + OverlayThreadingBridge.lock.release() + return OverlayThreadingBridge.__single + getInstance = staticmethod(getInstance) + + def register_bridge(self,secover,olapps): + """ Called by MainThread """ + self.secover = secover + self.olapps = olapps + + secover.register_recv_callback(self.handleMessage) + secover.register_conns_callback(self.handleConnection) + + # + # SecOverlay interface + # + def register(self,launchmanycore,max_len): + """ Called by MainThread """ + self.secover.register(launchmanycore,max_len) + + # FOR TESTING ONLY + self.iplport2oc = self.secover.iplport2oc + + def get_handler(self): + return self.secover + + def start_listening(self): + """ Called by MainThread """ + self.secover.start_listening() + + def register_recv_callback(self,callback): + """ Called by MainThread """ + self.olappsmsghandler = callback + + def register_conns_callback(self,callback): + """ Called by MainThread """ + self.olappsconnhandler = callback + + def handleConnection(self,exc,permid,selversion,locally_initiated,hisdns): + """ Called by NetworkThread """ + # called by SecureOverlay.got_auth_connection() or cleanup_admin_and_callbacks() + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olbridge: handleConnection",exc,show_permid_short(permid),selversion,locally_initiated,hisdns,currentThread().getName() + + def olbridge_handle_conn_func(): + # Called by OverlayThread + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olbridge: handle_conn_func",exc,show_permid_short(permid),selversion,locally_initiated,hisdns,currentThread().getName() + + try: + if hisdns: + self.secover.add_peer_to_db(permid,hisdns,selversion) + + if self.olappsconnhandler is not None: # self.olappsconnhandler = OverlayApps.handleConnection + self.olappsconnhandler(exc,permid,selversion,locally_initiated) + except: + print_exc() + + if isinstance(exc,CloseException): + self.secover.update_peer_status(permid,exc.was_auth_done()) + + self.tqueue.add_task(olbridge_handle_conn_func,0) + + def handleMessage(self,permid,selversion,message): + """ Called by NetworkThread """ + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olbridge: handleMessage",show_permid_short(permid),selversion,getMessageName(message[0]),currentThread().getName() + + def olbridge_handle_msg_func(): + # Called by OverlayThread + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y 
%H:%M:%S", localtime())," ","olbridge: handle_msg_func",show_permid_short(permid),selversion,getMessageName(message[0]),currentThread().getName() + + try: + if self.olappsmsghandler is None: + ret = True + else: + ret = self.olappsmsghandler(permid,selversion,message) + except: + print_exc() + ret = False + if ret == False: + self.close(permid) + + self.tqueue.add_task(olbridge_handle_msg_func,0) + return True + + + def connect_dns(self,dns,callback): + """ Called by OverlayThread/NetworkThread """ + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olbridge: connect_dns",dns + + def olbridge_connect_dns_callback(cexc,cdns,cpermid,cselver): + # Called by network thread + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olbridge: connect_dns_callback",cexc,cdns,show_permid_short(cpermid),cselver + + olbridge_connect_dns_callback_lambda = lambda:callback(cexc,cdns,cpermid,cselver) + self.add_task(olbridge_connect_dns_callback_lambda,0) + + self.secover.connect_dns(dns,olbridge_connect_dns_callback) + + + def connect(self,permid,callback): + """ Called by OverlayThread """ + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olbridge: connect",show_permid_short(permid), currentThread().getName() + + def olbridge_connect_callback(cexc,cdns,cpermid,cselver): + # Called by network thread + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olbridge: connect_callback",cexc,cdns,show_permid_short(cpermid),cselver, callback, currentThread().getName() + + + olbridge_connect_callback_lambda = lambda:callback(cexc,cdns,cpermid,cselver) + # Jie: postpone to call this callback to schedule it after the peer has been added to buddycast connection list + # Arno, 2008-09-15: No-no-no + self.add_task(olbridge_connect_callback_lambda,0) + + self.secover.connect(permid,olbridge_connect_callback) + + + def send(self,permid,msg,callback): + """ Called by OverlayThread """ + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olbridge: send",show_permid_short(permid),len(msg) + + def olbridge_send_callback(cexc,cpermid): + # Called by network thread + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olbridge: send_callback",cexc,show_permid_short(cpermid) + + + olbridge_send_callback_lambda = lambda:callback(cexc,cpermid) + self.add_task(olbridge_send_callback_lambda,0) + + self.secover.send(permid,msg,olbridge_send_callback) + + def close(self,permid): + """ Called by OverlayThread """ + self.secover.close(permid) + + def add_task(self,task,t=0,ident=None): + """ Called by OverlayThread """ + self.tqueue.add_task(task,t,ident) + +#=============================================================================== +# # Jie: according to Arno's suggestion, commit on demand instead of periodically +# def periodic_commit(self): +# period = 5*60 # commit every 5 min +# try: +# db = SQLiteCacheDB.getInstance() +# db.commit() +# except: +# period = period*2 +# self.add_task(self.periodic_commit, period) +# +#=============================================================================== + + \ No newline at end of file diff --git a/tribler-mod/Tribler/Core/Overlay/OverlayThreadingBridge.py.bak b/tribler-mod/Tribler/Core/Overlay/OverlayThreadingBridge.py.bak new file mode 100644 index 0000000..5b6ec4e --- /dev/null +++ b/tribler-mod/Tribler/Core/Overlay/OverlayThreadingBridge.py.bak @@ -0,0 +1,216 @@ +# Written by Arno Bakker +# see 
LICENSE.txt for license information +# +# This class bridges between the OverlayApps class and the SecureOverlay +# and ensures that all upcalls made by the NetworkThread via the SecureOverlay +# are handed over to a different thread, the OverlayThread that propagates the +# upcall to the OverlayApps. +# + +import sys +from threading import currentThread +from traceback import print_exc + +from Tribler.Core.Overlay.SecureOverlay import CloseException +from Tribler.Core.BitTornado.BT1.MessageID import getMessageName +from Tribler.Core.Utilities.utilities import show_permid_short +from Tribler.Utilities.TimedTaskQueue import TimedTaskQueue +import threading + +DEBUG = False + +class OverlayThreadingBridge: + + __single = None + lock = threading.Lock() + + def __init__(self): + if OverlayThreadingBridge.__single: + raise RuntimeError, "OverlayThreadingBridge is Singleton" + OverlayThreadingBridge.__single = self + + self.secover = None + self.olapps = None + self.olappsmsghandler = None + self.olappsconnhandler = None + + # Current impl of wrapper: single thread + self.tqueue = TimedTaskQueue(nameprefix="Overlay") + + def getInstance(*args, **kw): + # Singleton pattern with double-checking + if OverlayThreadingBridge.__single is None: + OverlayThreadingBridge.lock.acquire() + try: + if OverlayThreadingBridge.__single is None: + OverlayThreadingBridge(*args, **kw) + finally: + OverlayThreadingBridge.lock.release() + return OverlayThreadingBridge.__single + getInstance = staticmethod(getInstance) + + def register_bridge(self,secover,olapps): + """ Called by MainThread """ + self.secover = secover + self.olapps = olapps + + secover.register_recv_callback(self.handleMessage) + secover.register_conns_callback(self.handleConnection) + + # + # SecOverlay interface + # + def register(self,launchmanycore,max_len): + """ Called by MainThread """ + self.secover.register(launchmanycore,max_len) + + # FOR TESTING ONLY + self.iplport2oc = self.secover.iplport2oc + + def get_handler(self): + return self.secover + + def start_listening(self): + """ Called by MainThread """ + self.secover.start_listening() + + def register_recv_callback(self,callback): + """ Called by MainThread """ + self.olappsmsghandler = callback + + def register_conns_callback(self,callback): + """ Called by MainThread """ + self.olappsconnhandler = callback + + def handleConnection(self,exc,permid,selversion,locally_initiated,hisdns): + """ Called by NetworkThread """ + # called by SecureOverlay.got_auth_connection() or cleanup_admin_and_callbacks() + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olbridge: handleConnection",exc,show_permid_short(permid),selversion,locally_initiated,hisdns,currentThread().getName() + + def olbridge_handle_conn_func(): + # Called by OverlayThread + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olbridge: handle_conn_func",exc,show_permid_short(permid),selversion,locally_initiated,hisdns,currentThread().getName() + + try: + if hisdns: + self.secover.add_peer_to_db(permid,hisdns,selversion) + + if self.olappsconnhandler is not None: # self.olappsconnhandler = OverlayApps.handleConnection + self.olappsconnhandler(exc,permid,selversion,locally_initiated) + except: + print_exc() + + if isinstance(exc,CloseException): + self.secover.update_peer_status(permid,exc.was_auth_done()) + + self.tqueue.add_task(olbridge_handle_conn_func,0) + + def handleMessage(self,permid,selversion,message): + """ Called by NetworkThread """ + + if DEBUG: + print 
>>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olbridge: handleMessage",show_permid_short(permid),selversion,getMessageName(message[0]),currentThread().getName() + + def olbridge_handle_msg_func(): + # Called by OverlayThread + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olbridge: handle_msg_func",show_permid_short(permid),selversion,getMessageName(message[0]),currentThread().getName() + + try: + if self.olappsmsghandler is None: + ret = True + else: + ret = self.olappsmsghandler(permid,selversion,message) + except: + print_exc() + ret = False + if ret == False: + self.close(permid) + + self.tqueue.add_task(olbridge_handle_msg_func,0) + return True + + + def connect_dns(self,dns,callback): + """ Called by OverlayThread/NetworkThread """ + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olbridge: connect_dns",dns + + def olbridge_connect_dns_callback(cexc,cdns,cpermid,cselver): + # Called by network thread + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olbridge: connect_dns_callback",cexc,cdns,show_permid_short(cpermid),cselver + + olbridge_connect_dns_callback_lambda = lambda:callback(cexc,cdns,cpermid,cselver) + self.add_task(olbridge_connect_dns_callback_lambda,0) + + self.secover.connect_dns(dns,olbridge_connect_dns_callback) + + + def connect(self,permid,callback): + """ Called by OverlayThread """ + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olbridge: connect",show_permid_short(permid), currentThread().getName() + + def olbridge_connect_callback(cexc,cdns,cpermid,cselver): + # Called by network thread + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olbridge: connect_callback",cexc,cdns,show_permid_short(cpermid),cselver, callback, currentThread().getName() + + + olbridge_connect_callback_lambda = lambda:callback(cexc,cdns,cpermid,cselver) + # Jie: postpone to call this callback to schedule it after the peer has been added to buddycast connection list + # Arno, 2008-09-15: No-no-no + self.add_task(olbridge_connect_callback_lambda,0) + + self.secover.connect(permid,olbridge_connect_callback) + + + def send(self,permid,msg,callback): + """ Called by OverlayThread """ + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olbridge: send",show_permid_short(permid),len(msg) + + def olbridge_send_callback(cexc,cpermid): + # Called by network thread + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olbridge: send_callback",cexc,show_permid_short(cpermid) + + + olbridge_send_callback_lambda = lambda:callback(cexc,cpermid) + self.add_task(olbridge_send_callback_lambda,0) + + self.secover.send(permid,msg,olbridge_send_callback) + + def close(self,permid): + """ Called by OverlayThread """ + self.secover.close(permid) + + def add_task(self,task,t=0,ident=None): + """ Called by OverlayThread """ + self.tqueue.add_task(task,t,ident) + +#=============================================================================== +# # Jie: according to Arno's suggestion, commit on demand instead of periodically +# def periodic_commit(self): +# period = 5*60 # commit every 5 min +# try: +# db = SQLiteCacheDB.getInstance() +# db.commit() +# except: +# period = period*2 +# self.add_task(self.periodic_commit, period) +# +#=============================================================================== + + \ No newline at end of file diff --git 
a/tribler-mod/Tribler/Core/Overlay/SecureOverlay.py b/tribler-mod/Tribler/Core/Overlay/SecureOverlay.py new file mode 100644 index 0000000..2cf43c5 --- /dev/null +++ b/tribler-mod/Tribler/Core/Overlay/SecureOverlay.py @@ -0,0 +1,929 @@ +from time import localtime, strftime +# Written by Arno Bakker, Bram Cohen, Jie Yang +# see LICENSE.txt for license information +# +# Please apply networking code fixes also to DialbackConnHandler.py + +from cStringIO import StringIO +from struct import pack,unpack +from threading import currentThread +from time import time +from traceback import print_exc,print_stack +import sys + +from Tribler.Core.BitTornado.BT1.MessageID import protocol_name,option_pattern,getMessageName +from Tribler.Core.BitTornado.BT1.convert import tobinary,toint +from Tribler.Core.BitTornado.__init__ import createPeerID +from Tribler.Core.CacheDB.sqlitecachedb import safe_dict,bin2str +from Tribler.Core.Overlay.permid import ChallengeResponse +from Tribler.Core.Utilities.utilities import show_permid_short,hostname_or_ip2ip +from Tribler.Core.simpledefs import * + +DEBUG = False + +# +# Public definitions +# +overlay_infohash = '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + +# Overlay-protocol version numbers in use in the wild +OLPROTO_VER_FIRST = 1 # Internally used only. +OLPROTO_VER_SECOND = 2 # First public release, >= 3.3.4 +OLPROTO_VER_THIRD = 3 # Second public release, >= 3.6.0, Dialback, BuddyCast2 +OLPROTO_VER_FOURTH = 4 # Third public release, >= 3.7.0, BuddyCast3 +OLPROTO_VER_FIFTH = 5 # Fourth public release, >= 4.0.0, SOCIAL_OVERLAP +OLPROTO_VER_SIXTH = 6 # Fifth public release, >= 4.1.0, extra BC fields, remote query +OLPROTO_VER_SEVENTH = 7 # Sixth public release, >= 4.5.0, supports CRAWLER_REQUEST and CRAWLER_REPLY messages +OLPROTO_VER_EIGHTH = 8 # Seventh public release, >= 5.0, supporting BuddyCast with clicklog info. +OLPROTO_VER_NINE = 9 # Eighth public release, >= 5.1, additional torrent_size in remote search query reply (this code is submitted to the branches/release5.0-p1 repository and will be merges in due time) +OLPROTO_VER_TEN = 10 # Nineth public release, >= 5.X, simplified the VOD statistics (this code is submitted to the branches/trial-m18 repository and will be merged in due time) + + +# Overlay-swarm protocol version numbers +OLPROTO_VER_CURRENT = OLPROTO_VER_NINE + +OLPROTO_VER_LOWEST = OLPROTO_VER_SECOND +SupportedVersions = range(OLPROTO_VER_LOWEST, OLPROTO_VER_CURRENT+1) + +# +# Private definitions +# + +# States for overlay connection +STATE_INITIAL = 0 +STATE_HS_FULL_WAIT = 1 +STATE_HS_PEERID_WAIT = 2 +STATE_AUTH_WAIT = 3 +STATE_DATA_WAIT = 4 +STATE_CLOSED = 5 + +# Misc +EXPIRE_THRESHOLD = 300 # seconds:: keep consistent with sockethandler +EXPIRE_CHECK_INTERVAL = 60 # seconds +NO_REMOTE_LISTEN_PORT_KNOWN = -481 + + +class SecureOverlay: + __single = None + + def __init__(self): + if SecureOverlay.__single: + raise RuntimeError, "SecureOverlay is Singleton" + SecureOverlay.__single = self + self.olproto_ver_current = OLPROTO_VER_CURRENT + self.usermsghandler = None + self.userconnhandler = None + # ARNOCOMMENT: Remove this, DB should be fast enough. 
Don't want caches allover + self.dns = safe_dict() + + # + # Interface for upper layer + # + def getInstance(*args, **kw): + if SecureOverlay.__single is None: + SecureOverlay(*args, **kw) + return SecureOverlay.__single + getInstance = staticmethod(getInstance) + + def register(self,launchmanycore, max_len): + self.lm = launchmanycore + self.rawserver = self.lm.rawserver + self.sock_hand = self.rawserver.sockethandler + self.multihandler = self.lm.multihandler + self.overlay_rawserver = self.multihandler.newRawServer(overlay_infohash, + self.rawserver.doneflag, + protocol_name) + self.max_len = max_len + self.iplport2oc = {} # (IP,listen port) -> OverlayConnection + self.peer_db = self.lm.peer_db + self.mykeypair = self.lm.session.keypair + self.permid = self.lm.session.get_permid() + self.myip = self.lm.get_ext_ip() + self.myport = self.lm.session.get_listen_port() + self.myid = create_my_peer_id(self.myport) + self.last_activity = time() + + def resetSingleton(self): + """ For testing purposes """ + SecureOverlay.__single = None + + def start_listening(self): + self.overlay_rawserver.start_listening(self) + self.overlay_rawserver.add_task(self.monitor_activity, 2) + + def monitor_activity(self): + """ + periodically notify the network status + """ + diff = time() - self.last_activity + if diff > 120 + 1: + # 120 is set as the check_period for buddycast until a + # KEEP_ALIVE message is send + msg = "no network" + else: + msg = "network active" + self.lm.set_activity(NTFY_ACT_ACTIVE, msg, diff) + self.overlay_rawserver.add_task(self.monitor_activity, 2) + + def connect_dns(self,dns,callback): + """ Connects to the indicated endpoint and determines the permid + at that endpoint. Non-blocking. + + Pre: "dns" must be an IP address, not a hostname. + + Network thread calls "callback(exc,dns,permid,selver)" when the connection + is established or when an error occurs during connection + establishment. In the former case, exc is None, otherwise + it contains an Exception. + + The established connection will auto close after EXPIRE_THRESHOLD + seconds of inactivity. + """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: connect_dns",dns + # To prevent concurrency problems on sockets the calling thread + # delegates to the network thread. + task = Task(self._connect_dns,dns,callback) + self.rawserver.add_task(task.start, 0) + + + def connect(self,permid,callback): + """ Connects to the indicated permid. Non-blocking. + + Network thread calls "callback(exc,dns,permid,selver)" when the connection + is established or when an error occurs during connection + establishment. In the former case, exc is None, otherwise + it contains an Exception. + + The established connection will auto close after EXPIRE_THRESHOLD + seconds of inactivity. + """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: connect",show_permid_short(permid), currentThread().getName() + # To prevent concurrency problems on sockets the calling thread + # delegates to the network thread. + + dns = self.get_dns_from_peerdb(permid) + task = Task(self._connect,permid,dns,callback) + self.rawserver.add_task(task.start, 0) + + + def send(self,permid,msg,callback): + """ Sends a message to the indicated permid. Non-blocking. + + Pre: connection to permid must have been established successfully. + + Network thread calls "callback(exc,permid)" when the message is sent + or when an error occurs during sending. 
In the former case, exc + is None, otherwise it contains an Exception. + """ + # To prevent concurrency problems on sockets the calling thread + # delegates to the network thread. + dns = self.get_dns_from_peerdb(permid) + task = Task(self._send,permid,dns,msg,callback) + self.rawserver.add_task(task.start, 0) + + + + def close(self,permid): + """ Closes any connection to indicated permid. Non-blocking. + + Pre: connection to permid must have been established successfully. + + Network thread calls "callback(exc,permid,selver)" when the connection + is closed. + """ + # To prevent concurrency problems on sockets the calling thread + # delegates to the network thread. + task = Task(self._close,permid) + self.rawserver.add_task(task.start, 0) + + + def register_recv_callback(self,callback): + """ Register a callback to be called when receiving a message from + any permid. Non-blocking. + + Network thread calls "callback(exc,permid,selver,msg)" when a message + is received. The callback is not called on errors e.g. remote + connection close. + + The callback must return True to keep the connection open. + """ + self.usermsghandler = callback + + def register_conns_callback(self,callback): + """ Register a callback to be called when receiving a connection from + any permid. Non-blocking. + + Network thread calls "callback(exc,permid,selver,locally_initiated)" + when a connection is established (locally initiated or remote), or + when a connection is closed locally or remotely. In the former case, + exc is None, otherwise it contains an Exception. + + Note that this means that if a callback is registered via this method, + both this callback and the callback passed to a connect() method + will be called. + """ + self.userconnhandler = callback + + + # + # Internal methods + # + def _connect_dns(self,dns,callback): + try: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: actual connect_dns",dns + if dns[0] == self.myip and int(dns[1]) == self.myport: + callback(KeyError('IP and port of the target is the same as myself'),dns,None,0) + iplport = ip_and_port2str(dns[0],dns[1]) + oc = None + try: + oc = self.iplport2oc[iplport] + except KeyError: + pass + if oc is None: + oc = self.start_connection(dns) + self.iplport2oc[iplport] = oc + if not oc.is_auth_done(): + oc.queue_callback(dns,callback) + else: + callback(None,dns,oc.get_auth_permid(),oc.get_sel_proto_ver()) + except Exception,exc: + if DEBUG: + print_exc() + callback(exc,dns,None,0) + + def _connect(self,expectedpermid,dns,callback): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: actual connect",show_permid_short(expectedpermid), currentThread().getName() + if expectedpermid == self.permid: + callback(KeyError('The target permid is the same as my permid'),None,expectedpermid,0) + try: + oc = self.get_oc_by_permid(expectedpermid) + if oc is None: + if dns is None: + callback(KeyError('IP address + port for permid unknown'),dns,expectedpermid,0) + else: + self._connect_dns(dns,lambda exc,dns2,peerpermid,selver:\ + self._whoishe_callback(exc,dns2,peerpermid,selver,expectedpermid,callback)) + else: + # We already have a connection to this permid + self._whoishe_callback(None,(oc.get_ip(),oc.get_auth_listen_port()),expectedpermid,oc.get_sel_proto_ver(),expectedpermid,callback) + except Exception,exc: + if DEBUG: + print_exc() + callback(exc,None,expectedpermid,0) + + def _whoishe_callback(self,exc,dns,peerpermid,selver,expectedpermid,callback): + """ Called by 
network thread after the permid on the other side is known + or an error occured + """ + try: + if exc is None: + # Connect went OK + if peerpermid == expectedpermid: + callback(None,dns,expectedpermid,selver) + else: + # Someone else answered the phone + callback(KeyError('Recorded IP address + port now of other permid'), + dns,expectedpermid,0) + else: + callback(exc,dns,expectedpermid,0) + except Exception,exc: + if DEBUG: + print_exc() + callback(exc,dns,expectedpermid,0) + + def _send(self,permid,dns,message,callback): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: actual send",getMessageName(message[0]),\ + "to",show_permid_short(permid), currentThread().getName() + try: + if dns is None: + callback(KeyError('IP address + port for permid unknown'),permid) + else: + iplport = ip_and_port2str(dns[0],dns[1]) + oc = None + try: + oc = self.iplport2oc[iplport] + except KeyError: + pass + if oc is None: + callback(KeyError('Not connected to permid'),permid) + elif oc.is_auth_done(): + if oc.get_auth_permid() == permid: + oc.send_message(message) + callback(None,permid) + else: + callback(KeyError('Recorded IP address + port now of other permid'),permid) + else: + callback(KeyError('Connection not yet established'),permid) + except Exception,exc: + if DEBUG: + print_exc() + callback(exc,permid) + + + def _close(self,permid): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: actual close",show_permid_short(permid) + try: + oc = self.get_oc_by_permid(permid) + if not oc: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: error - actual close, but no connection to peer in admin" + else: + oc.close() + except Exception,e: + print_exc() + + # + # Interface for SocketHandler + # + def get_handler(self): + return self + + def external_connection_made(self,singsock): + """ incoming connection (never used) """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: external_connection_made",singsock.get_ip(),singsock.get_port() + self.last_activity = time() + oc = OverlayConnection(self,singsock,self.rawserver) + singsock.set_handler(oc) + + def connection_flushed(self,singsock): + """ sockethandler flushes connection """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: connection_flushed",singsock.get_ip(),singsock.get_port() + + # + # Interface for ServerPortHandler + # + def externally_handshaked_connection_made(self, singsock, options, msg_remainder): + """ incoming connection, handshake partially read to identity + as an it as overlay connection (used always) + """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: externally_handshaked_connection_made",\ + singsock.get_ip(),singsock.get_port() + oc = OverlayConnection(self,singsock,self.rawserver,ext_handshake = True, options = options) + singsock.set_handler(oc) + if msg_remainder: + oc.data_came_in(singsock,msg_remainder) + return True + + + # + # Interface for OverlayConnection + # + def got_auth_connection(self,oc): + """ authentication of peer via identity protocol succesful """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: got_auth_connection", \ + show_permid_short(oc.get_auth_permid()),oc.get_ip(),oc.get_auth_listen_port(), currentThread().getName() + + if oc.is_locally_initiated() and oc.get_port() != oc.get_auth_listen_port(): + if DEBUG: + print 
>>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: got_auth_connection: closing because auth", \ + "listen port not as expected",oc.get_port(),oc.get_auth_listen_port() + self.cleanup_admin_and_callbacks(oc,Exception('closing because auth listen port not as expected')) + return False + + self.last_activity = time() + + ret = True + iplport = ip_and_port2str(oc.get_ip(),oc.get_auth_listen_port()) + known = iplport in self.iplport2oc + if not known: + self.iplport2oc[iplport] = oc + elif known and not oc.is_locally_initiated(): + # Locally initiated connections will already be registered, + # so if it's not a local connection and we already have one + # we have a duplicate, and we close the new one. + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: got_auth_connection:", \ + "closing because we already have a connection to",iplport + self.cleanup_admin_and_callbacks(oc, + Exception('closing because we already have a connection to peer')) + ret = False + + if ret: + if oc.is_auth_done(): + hisdns = (oc.get_ip(),oc.get_auth_listen_port()) + else: + hisdns = None + + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: userconnhandler is",self.userconnhandler + + if self.userconnhandler is not None: + try: + self.userconnhandler(None,oc.get_auth_permid(),oc.get_sel_proto_ver(),oc.is_locally_initiated(),hisdns) + except: + # Catch all + print_exc() + oc.dequeue_callbacks() + return ret + + def local_close(self,oc): + """ our side is closing the connection """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: local_close" + self.cleanup_admin_and_callbacks(oc,CloseException('local close',oc.is_auth_done())) + + def connection_lost(self,oc): + """ overlay connection telling us to clear admin """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: connection_lost" + self.cleanup_admin_and_callbacks(oc,CloseException('connection lost',oc.is_auth_done())) + + + def got_message(self,permid,message,selversion): + """ received message from authenticated peer, pass to upper layer """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: got_message",getMessageName(message[0]),\ + "v"+str(selversion) + self.last_activity = time() + if self.usermsghandler is None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: User receive callback not set" + return + try: + + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: usermsghandler is",self.usermsghandler + + ret = self.usermsghandler(permid,selversion,message) + if ret is None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: INTERNAL ERROR:", \ + "User receive callback returned None, not True or False" + ret = False + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: message handler returned",ret + return ret + except: + # Catch all + print_exc() + return False + + + def get_max_len(self): + return self.max_len + + def get_my_peer_id(self): + return self.myid + + def get_my_keypair(self): + return self.mykeypair + + def measurefunc(self,length): + pass + + # + # Interface for OverlayThreadingBridge + # + def get_dns_from_peerdb(self,permid,use_cache=True): + # Called by any thread, except NetworkThread + + if currentThread().getName().startswith("NetworkThread"): + print 
>>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: get_dns_from_peerdb: called by NetworkThread!" + print_stack() + + dns = self.dns.get(permid, None) + + if not dns: + values = ('ip', 'port') + peer = self.peer_db.getOne(values, permid=bin2str(permid)) + if peer and peer[0] and peer[1]: + ip = hostname_or_ip2ip(peer[0]) + dns = (ip, int(peer[1])) + return dns + + def add_peer_to_db(self,permid,dns,selversion): + """ add a connected peer to database """ + # Called by OverlayThread + + if currentThread().getName().startswith("NetworkThread"): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: add_peer_to_peerdb: called by NetworkThread!" + print_stack() + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: add_peer_to_peerdb: called by",currentThread().getName() + + self.dns[permid] = dns # cache it to avoid querying db later + now = int(time()) + peer_data = {'permid':permid, 'ip':dns[0], 'port':dns[1], 'oversion':selversion, 'last_seen':now, 'last_connected':now} + self.peer_db.addPeer(permid, peer_data, update_dns=True, update_connected=True, commit=True) + #self.peer_db.updateTimes(permid, 'connected_times', 1, commit=True) + + + def update_peer_status(self,permid,authwasdone): + """ update last_seen and last_connected in peer db when close """ + # Called by OverlayThread + + if currentThread().getName().startswith("NetworkThread"): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: update_peer_status: called by NetworkThread!" + print_stack() + + now = int(time()) + if authwasdone: + self.peer_db.updatePeer(permid, last_seen=now, last_connected=now) + self.lm.session.uch.notify(NTFY_PEERS, NTFY_CONNECTION, permid, False) + # + # Interface for debugging + # + def debug_get_live_connections(self): + """ return a list of (permid,dns) tuples of the peers with which we + are connected. Like all methods here it must be called by the network thread + """ + live_conn = [] + for iplport in self.iplport2oc: + oc = self.iplport2oc[iplport] + if oc: + peer_permid = oc.get_auth_permid() + if peer_permid: + live_conn.append((peer_permid,(oc.get_ip(),oc.get_port()))) + return live_conn + + + # + # Internal methods + # + def start_connection(self,dns): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: Attempt to connect to",dns + singsock = self.sock_hand.start_connection(dns) + oc = OverlayConnection(self,singsock,self.rawserver, + locally_initiated=True,specified_dns=dns) + singsock.set_handler(oc) + return oc + + def cleanup_admin_and_callbacks(self,oc,exc): + oc.cleanup_callbacks(exc) + self.cleanup_admin(oc) + if oc.is_auth_done() and self.userconnhandler is not None: + self.userconnhandler(exc,oc.get_auth_permid(),oc.get_sel_proto_ver(), + oc.is_locally_initiated(),None) + + def cleanup_admin(self,oc): + iplports = [] + d = 0 + for key in self.iplport2oc.keys(): + #print "***** iplport2oc:", key, self.iplport2oc[key] + if self.iplport2oc[key] == oc: + del self.iplport2oc[key] + #print "*****!!! 
del", key, oc + d += 1 + + def get_oc_by_permid(self, permid): + """ return the OverlayConnection instance given a permid """ + + for iplport in self.iplport2oc: + oc = self.iplport2oc[iplport] + if oc.get_auth_permid() == permid: + return oc + return None + + + +class Task: + def __init__(self,method,*args, **kwargs): + self.method = method + self.args = args + self.kwargs = kwargs + + def start(self): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: task: start",self.method + #print_stack() + self.method(*self.args,**self.kwargs) + + +class CloseException(Exception): + def __init__(self,msg=None,authdone=False): + Exception.__init__(self,msg) + self.authdone= authdone + + def __str__(self): + return str(self.__class__)+': '+Exception.__str__(self) + + def was_auth_done(self): + return self.authdone + + +class OverlayConnection: + def __init__(self,handler,singsock,rawserver,locally_initiated = False, + specified_dns = None, ext_handshake = False,options = None): + self.handler = handler + self.singsock = singsock # for writing + self.rawserver = rawserver + self.buffer = StringIO() + self.cb_queue = [] + self.auth_permid = None + self.unauth_peer_id = None + self.auth_peer_id = None + self.auth_listen_port = None + self.low_proto_ver = 0 + self.cur_proto_ver = 0 + self.sel_proto_ver = 0 + self.options = None + self.locally_initiated = locally_initiated + self.specified_dns = specified_dns + self.last_use = time() + + self.state = STATE_INITIAL + self.write(chr(len(protocol_name)) + protocol_name + + option_pattern + overlay_infohash + self.handler.get_my_peer_id()) + if ext_handshake: + self.state = STATE_HS_PEERID_WAIT + self.next_len = 20 + self.next_func = self.read_peer_id + self.set_options(options) + else: + self.state = STATE_HS_FULL_WAIT + self.next_len = 1 + self.next_func = self.read_header_len + + # Leave autoclose here instead of SecureOverlay, as that doesn't record + # remotely-initiated OverlayConnections before authentication is done. 
+ self.rawserver.add_task(self._olconn_auto_close, EXPIRE_CHECK_INTERVAL) + + # + # Interface for SocketHandler + # + def data_came_in(self, singsock, data): + """ sockethandler received data """ + # now we got something we can ask for the peer's real port + dummy_port = singsock.get_port(True) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olconn: data_came_in",singsock.get_ip(),singsock.get_port() + self.handler.measurefunc(len(data)) + self.last_use = time() + while 1: + if self.state == STATE_CLOSED: + return + i = self.next_len - self.buffer.tell() + if i > len(data): + self.buffer.write(data) + return + self.buffer.write(data[:i]) + data = data[i:] + m = self.buffer.getvalue() + self.buffer.reset() + self.buffer.truncate() + try: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olconn: Trying to read",self.next_len #,"using",self.next_func + x = self.next_func(m) + except: + self.next_len, self.next_func = 1, self.read_dead + if DEBUG: + print_exc() + raise + if x is None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olconn: next_func returned None",self.next_func + self.close() + return + self.next_len, self.next_func = x + + def connection_lost(self,singsock): + """ kernel or socket handler reports connection lost """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olconn: connection_lost",singsock.get_ip(),singsock.get_port(),self.state + if self.state != STATE_CLOSED: + self.state = STATE_CLOSED + self.handler.connection_lost(self) + + def connection_flushed(self,singsock): + """ sockethandler flushes connection """ + pass + + # + # Interface for SecureOverlay + # + def send_message(self,message): + self.last_use = time() + s = tobinary(len(message))+message + self.write(s) + + def is_locally_initiated(self): + return self.locally_initiated + + def get_ip(self): + return self.singsock.get_ip() + + def get_port(self): + return self.singsock.get_port() + + def is_auth_done(self): + return self.auth_permid is not None + + def get_auth_permid(self): + return self.auth_permid + + def get_auth_listen_port(self): + return self.auth_listen_port + + def get_remote_listen_port(self): + if self.is_auth_done(): + return self.auth_listen_port + elif self.is_locally_initiated(): + return self.specified_dns[1] + else: + return NO_REMOTE_LISTEN_PORT_KNOWN + + def get_low_proto_ver(self): + return self.low_proto_ver + + def get_cur_proto_ver(self): + return self.cur_proto_ver + + def get_sel_proto_ver(self): + return self.sel_proto_ver + + def queue_callback(self,dns,callback): + if callback is not None: + self.cb_queue.append(callback) + + def dequeue_callbacks(self): + try: + permid = self.get_auth_permid() + for callback in self.cb_queue: + callback(None,self.specified_dns,permid,self.get_sel_proto_ver()) + self.cb_queue = [] + except Exception,e: + print_exc() + + + def cleanup_callbacks(self,exc): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olconn: cleanup_callbacks: #callbacks is",len(self.cb_queue) + try: + for callback in self.cb_queue: + ## Failure connecting + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olconn: cleanup_callbacks: callback is",callback + callback(exc,self.specified_dns,self.get_auth_permid(),0) + except Exception,e: + print_exc() + + # + # Interface for ChallengeResponse + # + def get_unauth_peer_id(self): + return self.unauth_peer_id + + def 
got_auth_connection(self,singsock,permid,peer_id): + """ authentication of peer via identity protocol succesful """ + self.auth_permid = str(permid) + self.auth_peer_id = peer_id + self.auth_listen_port = decode_auth_listen_port(peer_id) + + self.state = STATE_DATA_WAIT + + if not self.handler.got_auth_connection(self): + self.close() + return + + # + # Internal methods + # + def read_header_len(self, s): + if ord(s) != len(protocol_name): + return None + return len(protocol_name), self.read_header + + def read_header(self, s): + if s != protocol_name: + return None + return 8, self.read_reserved + + def read_reserved(self, s): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olconn: Reserved bits:", `s` + self.set_options(s) + return 20, self.read_download_id + + def read_download_id(self, s): + if s != overlay_infohash: + return None + return 20, self.read_peer_id + + def read_peer_id(self, s): + self.unauth_peer_id = s + [self.low_proto_ver,self.cur_proto_ver] = get_proto_version_from_peer_id(self.unauth_peer_id) + self.sel_proto_ver = select_supported_protoversion(self.low_proto_ver,self.cur_proto_ver) + if not self.sel_proto_ver: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olconn: We don't support peer's version of the protocol" + return None + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olconn: Selected protocol version",self.sel_proto_ver + + self.state = STATE_AUTH_WAIT + self.cr = ChallengeResponse(self.handler.get_my_keypair(),self.handler.get_my_peer_id(),self) + if self.locally_initiated: + self.cr.start_cr(self) + return 4, self.read_len + + + def read_len(self, s): + l = toint(s) + if l > self.handler.get_max_len(): + return None + return l, self.read_message + + def read_message(self, s): + if s != '': + if self.state == STATE_AUTH_WAIT: + if not self.cr.got_message(self,s): + return None + elif self.state == STATE_DATA_WAIT: + if not self.handler.got_message(self.auth_permid,s,self.sel_proto_ver): + return None + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olconn: Received message while in illegal state, internal error!" 
+ return None + return 4, self.read_len + + def read_dead(self, s): + return None + + def write(self,s): + self.singsock.write(s) + + def set_options(self,options): + self.options = options + + def close(self): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olconn: we close()",self.get_ip(),self.get_port() + #print_stack() + self.state_when_error = self.state + if self.state != STATE_CLOSED: + self.state = STATE_CLOSED + self.handler.local_close(self) + self.singsock.close() + return + + def _olconn_auto_close(self): + if (time() - self.last_use) > EXPIRE_THRESHOLD: + self.close() + else: + self.rawserver.add_task(self._olconn_auto_close, EXPIRE_CHECK_INTERVAL) + + +# +# Internal functions +# +def create_my_peer_id(my_listen_port): + myid = createPeerID() + myid = myid[:16] + pack(' OLPROTO_VER_CURRENT: # the other's version is too high + return False + if cur_ver < OLPROTO_VER_LOWEST: # the other's version is too low + return False + if cur_ver < OLPROTO_VER_CURRENT and \ + cur_ver not in SupportedVersions: # the other's version is not supported + return False + return True + +def select_supported_protoversion(his_low_ver,his_cur_ver): + selected = None + if his_cur_ver != OLPROTO_VER_CURRENT: + if his_low_ver > OLPROTO_VER_CURRENT: # the other's low version is too high + return selected + if his_cur_ver < OLPROTO_VER_LOWEST: # the other's current version is too low + return selected + if his_cur_ver < OLPROTO_VER_CURRENT and \ + his_cur_ver not in SupportedVersions: # the other's current version is not supported (peer of this version is abondoned) + return selected + + selected = min(his_cur_ver,OLPROTO_VER_CURRENT) + return selected + +def decode_auth_listen_port(peerid): + bin = peerid[14:16] + tup = unpack('= 3.3.4 +OLPROTO_VER_THIRD = 3 # Second public release, >= 3.6.0, Dialback, BuddyCast2 +OLPROTO_VER_FOURTH = 4 # Third public release, >= 3.7.0, BuddyCast3 +OLPROTO_VER_FIFTH = 5 # Fourth public release, >= 4.0.0, SOCIAL_OVERLAP +OLPROTO_VER_SIXTH = 6 # Fifth public release, >= 4.1.0, extra BC fields, remote query +OLPROTO_VER_SEVENTH = 7 # Sixth public release, >= 4.5.0, supports CRAWLER_REQUEST and CRAWLER_REPLY messages +OLPROTO_VER_EIGHTH = 8 # Seventh public release, >= 5.0, supporting BuddyCast with clicklog info. 
+OLPROTO_VER_NINE = 9 # Eighth public release, >= 5.1, additional torrent_size in remote search query reply (this code is submitted to the branches/release5.0-p1 repository and will be merges in due time) +OLPROTO_VER_TEN = 10 # Nineth public release, >= 5.X, simplified the VOD statistics (this code is submitted to the branches/trial-m18 repository and will be merged in due time) + + +# Overlay-swarm protocol version numbers +OLPROTO_VER_CURRENT = OLPROTO_VER_NINE + +OLPROTO_VER_LOWEST = OLPROTO_VER_SECOND +SupportedVersions = range(OLPROTO_VER_LOWEST, OLPROTO_VER_CURRENT+1) + +# +# Private definitions +# + +# States for overlay connection +STATE_INITIAL = 0 +STATE_HS_FULL_WAIT = 1 +STATE_HS_PEERID_WAIT = 2 +STATE_AUTH_WAIT = 3 +STATE_DATA_WAIT = 4 +STATE_CLOSED = 5 + +# Misc +EXPIRE_THRESHOLD = 300 # seconds:: keep consistent with sockethandler +EXPIRE_CHECK_INTERVAL = 60 # seconds +NO_REMOTE_LISTEN_PORT_KNOWN = -481 + + +class SecureOverlay: + __single = None + + def __init__(self): + if SecureOverlay.__single: + raise RuntimeError, "SecureOverlay is Singleton" + SecureOverlay.__single = self + self.olproto_ver_current = OLPROTO_VER_CURRENT + self.usermsghandler = None + self.userconnhandler = None + # ARNOCOMMENT: Remove this, DB should be fast enough. Don't want caches allover + self.dns = safe_dict() + + # + # Interface for upper layer + # + def getInstance(*args, **kw): + if SecureOverlay.__single is None: + SecureOverlay(*args, **kw) + return SecureOverlay.__single + getInstance = staticmethod(getInstance) + + def register(self,launchmanycore, max_len): + self.lm = launchmanycore + self.rawserver = self.lm.rawserver + self.sock_hand = self.rawserver.sockethandler + self.multihandler = self.lm.multihandler + self.overlay_rawserver = self.multihandler.newRawServer(overlay_infohash, + self.rawserver.doneflag, + protocol_name) + self.max_len = max_len + self.iplport2oc = {} # (IP,listen port) -> OverlayConnection + self.peer_db = self.lm.peer_db + self.mykeypair = self.lm.session.keypair + self.permid = self.lm.session.get_permid() + self.myip = self.lm.get_ext_ip() + self.myport = self.lm.session.get_listen_port() + self.myid = create_my_peer_id(self.myport) + self.last_activity = time() + + def resetSingleton(self): + """ For testing purposes """ + SecureOverlay.__single = None + + def start_listening(self): + self.overlay_rawserver.start_listening(self) + self.overlay_rawserver.add_task(self.monitor_activity, 2) + + def monitor_activity(self): + """ + periodically notify the network status + """ + diff = time() - self.last_activity + if diff > 120 + 1: + # 120 is set as the check_period for buddycast until a + # KEEP_ALIVE message is send + msg = "no network" + else: + msg = "network active" + self.lm.set_activity(NTFY_ACT_ACTIVE, msg, diff) + self.overlay_rawserver.add_task(self.monitor_activity, 2) + + def connect_dns(self,dns,callback): + """ Connects to the indicated endpoint and determines the permid + at that endpoint. Non-blocking. + + Pre: "dns" must be an IP address, not a hostname. + + Network thread calls "callback(exc,dns,permid,selver)" when the connection + is established or when an error occurs during connection + establishment. In the former case, exc is None, otherwise + it contains an Exception. + + The established connection will auto close after EXPIRE_THRESHOLD + seconds of inactivity. 
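Taken together, the contract above means a caller hands in a callback and inspects exc to tell success from failure. A small usage sketch against a SecureOverlay-like object; connect_and_send and its arguments are illustrative, and only connect_dns and send come from the interface described here:

# Sketch: driving the callback-style connect/send API described above.
def connect_and_send(overlay, dns, message):
    # 'overlay' stands in for a SecureOverlay-like object; 'dns' is an (ip, port) pair.

    def on_send(exc, permid):
        if exc is not None:
            pass                    # send failed; exc holds the Exception

    def on_connect(exc, dns, permid, selversion):
        # Runs on the network thread once the connection attempt finishes.
        if exc is not None:
            return                  # could not connect or authenticate
        # Authenticated: permid and the negotiated overlay version are now known.
        overlay.send(permid, message, on_send)

    overlay.connect_dns(dns, on_connect)    # non-blocking; results arrive via on_connect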
+ """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: connect_dns",dns + # To prevent concurrency problems on sockets the calling thread + # delegates to the network thread. + task = Task(self._connect_dns,dns,callback) + self.rawserver.add_task(task.start, 0) + + + def connect(self,permid,callback): + """ Connects to the indicated permid. Non-blocking. + + Network thread calls "callback(exc,dns,permid,selver)" when the connection + is established or when an error occurs during connection + establishment. In the former case, exc is None, otherwise + it contains an Exception. + + The established connection will auto close after EXPIRE_THRESHOLD + seconds of inactivity. + """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: connect",show_permid_short(permid), currentThread().getName() + # To prevent concurrency problems on sockets the calling thread + # delegates to the network thread. + + dns = self.get_dns_from_peerdb(permid) + task = Task(self._connect,permid,dns,callback) + self.rawserver.add_task(task.start, 0) + + + def send(self,permid,msg,callback): + """ Sends a message to the indicated permid. Non-blocking. + + Pre: connection to permid must have been established successfully. + + Network thread calls "callback(exc,permid)" when the message is sent + or when an error occurs during sending. In the former case, exc + is None, otherwise it contains an Exception. + """ + # To prevent concurrency problems on sockets the calling thread + # delegates to the network thread. + dns = self.get_dns_from_peerdb(permid) + task = Task(self._send,permid,dns,msg,callback) + self.rawserver.add_task(task.start, 0) + + + + def close(self,permid): + """ Closes any connection to indicated permid. Non-blocking. + + Pre: connection to permid must have been established successfully. + + Network thread calls "callback(exc,permid,selver)" when the connection + is closed. + """ + # To prevent concurrency problems on sockets the calling thread + # delegates to the network thread. + task = Task(self._close,permid) + self.rawserver.add_task(task.start, 0) + + + def register_recv_callback(self,callback): + """ Register a callback to be called when receiving a message from + any permid. Non-blocking. + + Network thread calls "callback(exc,permid,selver,msg)" when a message + is received. The callback is not called on errors e.g. remote + connection close. + + The callback must return True to keep the connection open. + """ + self.usermsghandler = callback + + def register_conns_callback(self,callback): + """ Register a callback to be called when receiving a connection from + any permid. Non-blocking. + + Network thread calls "callback(exc,permid,selver,locally_initiated)" + when a connection is established (locally initiated or remote), or + when a connection is closed locally or remotely. In the former case, + exc is None, otherwise it contains an Exception. + + Note that this means that if a callback is registered via this method, + both this callback and the callback passed to a connect() method + will be called. 
+ """ + self.userconnhandler = callback + + + # + # Internal methods + # + def _connect_dns(self,dns,callback): + try: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: actual connect_dns",dns + if dns[0] == self.myip and int(dns[1]) == self.myport: + callback(KeyError('IP and port of the target is the same as myself'),dns,None,0) + iplport = ip_and_port2str(dns[0],dns[1]) + oc = None + try: + oc = self.iplport2oc[iplport] + except KeyError: + pass + if oc is None: + oc = self.start_connection(dns) + self.iplport2oc[iplport] = oc + if not oc.is_auth_done(): + oc.queue_callback(dns,callback) + else: + callback(None,dns,oc.get_auth_permid(),oc.get_sel_proto_ver()) + except Exception,exc: + if DEBUG: + print_exc() + callback(exc,dns,None,0) + + def _connect(self,expectedpermid,dns,callback): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: actual connect",show_permid_short(expectedpermid), currentThread().getName() + if expectedpermid == self.permid: + callback(KeyError('The target permid is the same as my permid'),None,expectedpermid,0) + try: + oc = self.get_oc_by_permid(expectedpermid) + if oc is None: + if dns is None: + callback(KeyError('IP address + port for permid unknown'),dns,expectedpermid,0) + else: + self._connect_dns(dns,lambda exc,dns2,peerpermid,selver:\ + self._whoishe_callback(exc,dns2,peerpermid,selver,expectedpermid,callback)) + else: + # We already have a connection to this permid + self._whoishe_callback(None,(oc.get_ip(),oc.get_auth_listen_port()),expectedpermid,oc.get_sel_proto_ver(),expectedpermid,callback) + except Exception,exc: + if DEBUG: + print_exc() + callback(exc,None,expectedpermid,0) + + def _whoishe_callback(self,exc,dns,peerpermid,selver,expectedpermid,callback): + """ Called by network thread after the permid on the other side is known + or an error occured + """ + try: + if exc is None: + # Connect went OK + if peerpermid == expectedpermid: + callback(None,dns,expectedpermid,selver) + else: + # Someone else answered the phone + callback(KeyError('Recorded IP address + port now of other permid'), + dns,expectedpermid,0) + else: + callback(exc,dns,expectedpermid,0) + except Exception,exc: + if DEBUG: + print_exc() + callback(exc,dns,expectedpermid,0) + + def _send(self,permid,dns,message,callback): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: actual send",getMessageName(message[0]),\ + "to",show_permid_short(permid), currentThread().getName() + try: + if dns is None: + callback(KeyError('IP address + port for permid unknown'),permid) + else: + iplport = ip_and_port2str(dns[0],dns[1]) + oc = None + try: + oc = self.iplport2oc[iplport] + except KeyError: + pass + if oc is None: + callback(KeyError('Not connected to permid'),permid) + elif oc.is_auth_done(): + if oc.get_auth_permid() == permid: + oc.send_message(message) + callback(None,permid) + else: + callback(KeyError('Recorded IP address + port now of other permid'),permid) + else: + callback(KeyError('Connection not yet established'),permid) + except Exception,exc: + if DEBUG: + print_exc() + callback(exc,permid) + + + def _close(self,permid): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: actual close",show_permid_short(permid) + try: + oc = self.get_oc_by_permid(permid) + if not oc: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: error - actual close, but no connection to peer in admin" + 
else: + oc.close() + except Exception,e: + print_exc() + + # + # Interface for SocketHandler + # + def get_handler(self): + return self + + def external_connection_made(self,singsock): + """ incoming connection (never used) """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: external_connection_made",singsock.get_ip(),singsock.get_port() + self.last_activity = time() + oc = OverlayConnection(self,singsock,self.rawserver) + singsock.set_handler(oc) + + def connection_flushed(self,singsock): + """ sockethandler flushes connection """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: connection_flushed",singsock.get_ip(),singsock.get_port() + + # + # Interface for ServerPortHandler + # + def externally_handshaked_connection_made(self, singsock, options, msg_remainder): + """ incoming connection, handshake partially read to identity + as an it as overlay connection (used always) + """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: externally_handshaked_connection_made",\ + singsock.get_ip(),singsock.get_port() + oc = OverlayConnection(self,singsock,self.rawserver,ext_handshake = True, options = options) + singsock.set_handler(oc) + if msg_remainder: + oc.data_came_in(singsock,msg_remainder) + return True + + + # + # Interface for OverlayConnection + # + def got_auth_connection(self,oc): + """ authentication of peer via identity protocol succesful """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: got_auth_connection", \ + show_permid_short(oc.get_auth_permid()),oc.get_ip(),oc.get_auth_listen_port(), currentThread().getName() + + if oc.is_locally_initiated() and oc.get_port() != oc.get_auth_listen_port(): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: got_auth_connection: closing because auth", \ + "listen port not as expected",oc.get_port(),oc.get_auth_listen_port() + self.cleanup_admin_and_callbacks(oc,Exception('closing because auth listen port not as expected')) + return False + + self.last_activity = time() + + ret = True + iplport = ip_and_port2str(oc.get_ip(),oc.get_auth_listen_port()) + known = iplport in self.iplport2oc + if not known: + self.iplport2oc[iplport] = oc + elif known and not oc.is_locally_initiated(): + # Locally initiated connections will already be registered, + # so if it's not a local connection and we already have one + # we have a duplicate, and we close the new one. 
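The duplicate handling described in the comment above keeps exactly one connection per authenticated (IP, listen port) pair: a first connection is registered, a locally initiated one is already in the registry, and a second remotely initiated one is closed. A simplified sketch of that bookkeeping; the registry dictionary and close call are stand-ins:

# Sketch: keep one overlay connection per (ip, listen_port) key.
def admit(registry, key, conn, locally_initiated):
    if key not in registry:
        registry[key] = conn        # first connection to this peer: keep and register it
        return True
    if locally_initiated:
        return True                 # our outgoing connection was registered at connect time
    conn.close()                    # duplicate remote connection: drop the new one
    return False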
+ if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: got_auth_connection:", \ + "closing because we already have a connection to",iplport + self.cleanup_admin_and_callbacks(oc, + Exception('closing because we already have a connection to peer')) + ret = False + + if ret: + if oc.is_auth_done(): + hisdns = (oc.get_ip(),oc.get_auth_listen_port()) + else: + hisdns = None + + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: userconnhandler is",self.userconnhandler + + if self.userconnhandler is not None: + try: + self.userconnhandler(None,oc.get_auth_permid(),oc.get_sel_proto_ver(),oc.is_locally_initiated(),hisdns) + except: + # Catch all + print_exc() + oc.dequeue_callbacks() + return ret + + def local_close(self,oc): + """ our side is closing the connection """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: local_close" + self.cleanup_admin_and_callbacks(oc,CloseException('local close',oc.is_auth_done())) + + def connection_lost(self,oc): + """ overlay connection telling us to clear admin """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: connection_lost" + self.cleanup_admin_and_callbacks(oc,CloseException('connection lost',oc.is_auth_done())) + + + def got_message(self,permid,message,selversion): + """ received message from authenticated peer, pass to upper layer """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: got_message",getMessageName(message[0]),\ + "v"+str(selversion) + self.last_activity = time() + if self.usermsghandler is None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: User receive callback not set" + return + try: + + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: usermsghandler is",self.usermsghandler + + ret = self.usermsghandler(permid,selversion,message) + if ret is None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: INTERNAL ERROR:", \ + "User receive callback returned None, not True or False" + ret = False + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: message handler returned",ret + return ret + except: + # Catch all + print_exc() + return False + + + def get_max_len(self): + return self.max_len + + def get_my_peer_id(self): + return self.myid + + def get_my_keypair(self): + return self.mykeypair + + def measurefunc(self,length): + pass + + # + # Interface for OverlayThreadingBridge + # + def get_dns_from_peerdb(self,permid,use_cache=True): + # Called by any thread, except NetworkThread + + if currentThread().getName().startswith("NetworkThread"): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: get_dns_from_peerdb: called by NetworkThread!" + print_stack() + + dns = self.dns.get(permid, None) + + if not dns: + values = ('ip', 'port') + peer = self.peer_db.getOne(values, permid=bin2str(permid)) + if peer and peer[0] and peer[1]: + ip = hostname_or_ip2ip(peer[0]) + dns = (ip, int(peer[1])) + return dns + + def add_peer_to_db(self,permid,dns,selversion): + """ add a connected peer to database """ + # Called by OverlayThread + + if currentThread().getName().startswith("NetworkThread"): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: add_peer_to_peerdb: called by NetworkThread!" 
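get_dns_from_peerdb() above checks the in-memory self.dns cache first and only falls back to the peer database on a miss; add_peer_to_db(), continued just below, primes that cache. A standalone sketch of the lookup side (Python 3, with a plain dict standing in for the database handler; the real code additionally resolves hostnames to IP addresses):

def dns_for_permid(permid, dns_cache, peer_db):
    dns = dns_cache.get(permid)
    if dns is None:
        rec = peer_db.get(permid)              # e.g. {'ip': ..., 'port': ...}
        if rec and rec.get('ip') and rec.get('port'):
            dns = (rec['ip'], int(rec['port']))
    return dns

cache = {}
db = {b'peer-1': {'ip': '10.0.0.5', 'port': '7762'}}
print(dns_for_permid(b'peer-1', cache, db))    # ('10.0.0.5', 7762)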
+ print_stack() + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: add_peer_to_peerdb: called by",currentThread().getName() + + self.dns[permid] = dns # cache it to avoid querying db later + now = int(time()) + peer_data = {'permid':permid, 'ip':dns[0], 'port':dns[1], 'oversion':selversion, 'last_seen':now, 'last_connected':now} + self.peer_db.addPeer(permid, peer_data, update_dns=True, update_connected=True, commit=True) + #self.peer_db.updateTimes(permid, 'connected_times', 1, commit=True) + + + def update_peer_status(self,permid,authwasdone): + """ update last_seen and last_connected in peer db when close """ + # Called by OverlayThread + + if currentThread().getName().startswith("NetworkThread"): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: update_peer_status: called by NetworkThread!" + print_stack() + + now = int(time()) + if authwasdone: + self.peer_db.updatePeer(permid, last_seen=now, last_connected=now) + self.lm.session.uch.notify(NTFY_PEERS, NTFY_CONNECTION, permid, False) + # + # Interface for debugging + # + def debug_get_live_connections(self): + """ return a list of (permid,dns) tuples of the peers with which we + are connected. Like all methods here it must be called by the network thread + """ + live_conn = [] + for iplport in self.iplport2oc: + oc = self.iplport2oc[iplport] + if oc: + peer_permid = oc.get_auth_permid() + if peer_permid: + live_conn.append((peer_permid,(oc.get_ip(),oc.get_port()))) + return live_conn + + + # + # Internal methods + # + def start_connection(self,dns): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: Attempt to connect to",dns + singsock = self.sock_hand.start_connection(dns) + oc = OverlayConnection(self,singsock,self.rawserver, + locally_initiated=True,specified_dns=dns) + singsock.set_handler(oc) + return oc + + def cleanup_admin_and_callbacks(self,oc,exc): + oc.cleanup_callbacks(exc) + self.cleanup_admin(oc) + if oc.is_auth_done() and self.userconnhandler is not None: + self.userconnhandler(exc,oc.get_auth_permid(),oc.get_sel_proto_ver(), + oc.is_locally_initiated(),None) + + def cleanup_admin(self,oc): + iplports = [] + d = 0 + for key in self.iplport2oc.keys(): + #print "***** iplport2oc:", key, self.iplport2oc[key] + if self.iplport2oc[key] == oc: + del self.iplport2oc[key] + #print "*****!!! 
del", key, oc + d += 1 + + def get_oc_by_permid(self, permid): + """ return the OverlayConnection instance given a permid """ + + for iplport in self.iplport2oc: + oc = self.iplport2oc[iplport] + if oc.get_auth_permid() == permid: + return oc + return None + + + +class Task: + def __init__(self,method,*args, **kwargs): + self.method = method + self.args = args + self.kwargs = kwargs + + def start(self): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","secover: task: start",self.method + #print_stack() + self.method(*self.args,**self.kwargs) + + +class CloseException(Exception): + def __init__(self,msg=None,authdone=False): + Exception.__init__(self,msg) + self.authdone= authdone + + def __str__(self): + return str(self.__class__)+': '+Exception.__str__(self) + + def was_auth_done(self): + return self.authdone + + +class OverlayConnection: + def __init__(self,handler,singsock,rawserver,locally_initiated = False, + specified_dns = None, ext_handshake = False,options = None): + self.handler = handler + self.singsock = singsock # for writing + self.rawserver = rawserver + self.buffer = StringIO() + self.cb_queue = [] + self.auth_permid = None + self.unauth_peer_id = None + self.auth_peer_id = None + self.auth_listen_port = None + self.low_proto_ver = 0 + self.cur_proto_ver = 0 + self.sel_proto_ver = 0 + self.options = None + self.locally_initiated = locally_initiated + self.specified_dns = specified_dns + self.last_use = time() + + self.state = STATE_INITIAL + self.write(chr(len(protocol_name)) + protocol_name + + option_pattern + overlay_infohash + self.handler.get_my_peer_id()) + if ext_handshake: + self.state = STATE_HS_PEERID_WAIT + self.next_len = 20 + self.next_func = self.read_peer_id + self.set_options(options) + else: + self.state = STATE_HS_FULL_WAIT + self.next_len = 1 + self.next_func = self.read_header_len + + # Leave autoclose here instead of SecureOverlay, as that doesn't record + # remotely-initiated OverlayConnections before authentication is done. 
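OverlayConnection.__init__() above immediately writes a BitTorrent-style handshake: one length byte, the protocol name, eight option bytes, the 20-byte overlay infohash and a 20-byte peer id whose tail bytes carry the listen port and the supported protocol-version range (see create_my_peer_id() and decode_auth_listen_port() further down). The standalone sketch below builds and parses such a handshake; the protocol name, the zeroed option and infohash fields and the exact pack format of the peer-id tail are placeholders, not necessarily Tribler's real values.

from struct import pack, unpack

PROTOCOL_NAME = b"BitTorrent protocol"          # placeholder protocol name

def make_peer_id(prefix, listen_port):
    # Hypothetical layout: listen port packed into bytes 14:16 of a 20-byte id.
    return prefix[:14] + pack("<H", listen_port) + prefix[16:20]

def build_handshake(options, infohash, peer_id):
    assert len(options) == 8 and len(infohash) == 20 and len(peer_id) == 20
    return bytes([len(PROTOCOL_NAME)]) + PROTOCOL_NAME + options + infohash + peer_id

def parse_handshake(data):
    name_len = data[0]
    rest = data[1 + name_len:]
    return data[1:1 + name_len], rest[:8], rest[8:28], rest[28:48]

peer_id = make_peer_id(b"R" * 20, 7762)
hs = build_handshake(b"\x00" * 8, b"\x00" * 20, peer_id)
name, options, infohash, pid = parse_handshake(hs)
print(len(hs), name, unpack("<H", pid[14:16])[0])   # 68 b'BitTorrent protocol' 7762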
+ self.rawserver.add_task(self._olconn_auto_close, EXPIRE_CHECK_INTERVAL) + + # + # Interface for SocketHandler + # + def data_came_in(self, singsock, data): + """ sockethandler received data """ + # now we got something we can ask for the peer's real port + dummy_port = singsock.get_port(True) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olconn: data_came_in",singsock.get_ip(),singsock.get_port() + self.handler.measurefunc(len(data)) + self.last_use = time() + while 1: + if self.state == STATE_CLOSED: + return + i = self.next_len - self.buffer.tell() + if i > len(data): + self.buffer.write(data) + return + self.buffer.write(data[:i]) + data = data[i:] + m = self.buffer.getvalue() + self.buffer.reset() + self.buffer.truncate() + try: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olconn: Trying to read",self.next_len #,"using",self.next_func + x = self.next_func(m) + except: + self.next_len, self.next_func = 1, self.read_dead + if DEBUG: + print_exc() + raise + if x is None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olconn: next_func returned None",self.next_func + self.close() + return + self.next_len, self.next_func = x + + def connection_lost(self,singsock): + """ kernel or socket handler reports connection lost """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olconn: connection_lost",singsock.get_ip(),singsock.get_port(),self.state + if self.state != STATE_CLOSED: + self.state = STATE_CLOSED + self.handler.connection_lost(self) + + def connection_flushed(self,singsock): + """ sockethandler flushes connection """ + pass + + # + # Interface for SecureOverlay + # + def send_message(self,message): + self.last_use = time() + s = tobinary(len(message))+message + self.write(s) + + def is_locally_initiated(self): + return self.locally_initiated + + def get_ip(self): + return self.singsock.get_ip() + + def get_port(self): + return self.singsock.get_port() + + def is_auth_done(self): + return self.auth_permid is not None + + def get_auth_permid(self): + return self.auth_permid + + def get_auth_listen_port(self): + return self.auth_listen_port + + def get_remote_listen_port(self): + if self.is_auth_done(): + return self.auth_listen_port + elif self.is_locally_initiated(): + return self.specified_dns[1] + else: + return NO_REMOTE_LISTEN_PORT_KNOWN + + def get_low_proto_ver(self): + return self.low_proto_ver + + def get_cur_proto_ver(self): + return self.cur_proto_ver + + def get_sel_proto_ver(self): + return self.sel_proto_ver + + def queue_callback(self,dns,callback): + if callback is not None: + self.cb_queue.append(callback) + + def dequeue_callbacks(self): + try: + permid = self.get_auth_permid() + for callback in self.cb_queue: + callback(None,self.specified_dns,permid,self.get_sel_proto_ver()) + self.cb_queue = [] + except Exception,e: + print_exc() + + + def cleanup_callbacks(self,exc): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olconn: cleanup_callbacks: #callbacks is",len(self.cb_queue) + try: + for callback in self.cb_queue: + ## Failure connecting + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olconn: cleanup_callbacks: callback is",callback + callback(exc,self.specified_dns,self.get_auth_permid(),0) + except Exception,e: + print_exc() + + # + # Interface for ChallengeResponse + # + def get_unauth_peer_id(self): + return self.unauth_peer_id + + def 
got_auth_connection(self,singsock,permid,peer_id): + """ authentication of peer via identity protocol succesful """ + self.auth_permid = str(permid) + self.auth_peer_id = peer_id + self.auth_listen_port = decode_auth_listen_port(peer_id) + + self.state = STATE_DATA_WAIT + + if not self.handler.got_auth_connection(self): + self.close() + return + + # + # Internal methods + # + def read_header_len(self, s): + if ord(s) != len(protocol_name): + return None + return len(protocol_name), self.read_header + + def read_header(self, s): + if s != protocol_name: + return None + return 8, self.read_reserved + + def read_reserved(self, s): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olconn: Reserved bits:", `s` + self.set_options(s) + return 20, self.read_download_id + + def read_download_id(self, s): + if s != overlay_infohash: + return None + return 20, self.read_peer_id + + def read_peer_id(self, s): + self.unauth_peer_id = s + [self.low_proto_ver,self.cur_proto_ver] = get_proto_version_from_peer_id(self.unauth_peer_id) + self.sel_proto_ver = select_supported_protoversion(self.low_proto_ver,self.cur_proto_ver) + if not self.sel_proto_ver: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olconn: We don't support peer's version of the protocol" + return None + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olconn: Selected protocol version",self.sel_proto_ver + + self.state = STATE_AUTH_WAIT + self.cr = ChallengeResponse(self.handler.get_my_keypair(),self.handler.get_my_peer_id(),self) + if self.locally_initiated: + self.cr.start_cr(self) + return 4, self.read_len + + + def read_len(self, s): + l = toint(s) + if l > self.handler.get_max_len(): + return None + return l, self.read_message + + def read_message(self, s): + if s != '': + if self.state == STATE_AUTH_WAIT: + if not self.cr.got_message(self,s): + return None + elif self.state == STATE_DATA_WAIT: + if not self.handler.got_message(self.auth_permid,s,self.sel_proto_ver): + return None + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olconn: Received message while in illegal state, internal error!" 
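data_came_in() above drives a small incremental parser: it buffers bytes until next_len are available, hands them to next_func, and that handler returns the (length, handler) pair for the next read; read_len() and read_message() then implement 4-byte length-prefixed framing on top of it. Reduced to just that framing, the pattern looks like this (standalone Python 3 sketch, not the Tribler class):

import struct
from io import BytesIO

class LengthPrefixedParser:
    def __init__(self, on_message):
        self.buffer = BytesIO()
        self.next_len, self.next_func = 4, self._read_len
        self.on_message = on_message

    def feed(self, data):
        while data:
            need = self.next_len - self.buffer.tell()
            self.buffer.write(data[:need])
            data = data[need:]
            if self.buffer.tell() < self.next_len:
                return                               # wait for more bytes
            chunk = self.buffer.getvalue()
            self.buffer = BytesIO()
            self.next_len, self.next_func = self.next_func(chunk)

    def _read_len(self, chunk):
        return struct.unpack(">I", chunk)[0], self._read_message

    def _read_message(self, chunk):
        self.on_message(chunk)
        return 4, self._read_len

p = LengthPrefixedParser(print)
p.feed(struct.pack(">I", 5) + b"he")
p.feed(b"llo")                                       # prints b'hello'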
+ return None + return 4, self.read_len + + def read_dead(self, s): + return None + + def write(self,s): + self.singsock.write(s) + + def set_options(self,options): + self.options = options + + def close(self): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","olconn: we close()",self.get_ip(),self.get_port() + #print_stack() + self.state_when_error = self.state + if self.state != STATE_CLOSED: + self.state = STATE_CLOSED + self.handler.local_close(self) + self.singsock.close() + return + + def _olconn_auto_close(self): + if (time() - self.last_use) > EXPIRE_THRESHOLD: + self.close() + else: + self.rawserver.add_task(self._olconn_auto_close, EXPIRE_CHECK_INTERVAL) + + +# +# Internal functions +# +def create_my_peer_id(my_listen_port): + myid = createPeerID() + myid = myid[:16] + pack(' OLPROTO_VER_CURRENT: # the other's version is too high + return False + if cur_ver < OLPROTO_VER_LOWEST: # the other's version is too low + return False + if cur_ver < OLPROTO_VER_CURRENT and \ + cur_ver not in SupportedVersions: # the other's version is not supported + return False + return True + +def select_supported_protoversion(his_low_ver,his_cur_ver): + selected = None + if his_cur_ver != OLPROTO_VER_CURRENT: + if his_low_ver > OLPROTO_VER_CURRENT: # the other's low version is too high + return selected + if his_cur_ver < OLPROTO_VER_LOWEST: # the other's current version is too low + return selected + if his_cur_ver < OLPROTO_VER_CURRENT and \ + his_cur_ver not in SupportedVersions: # the other's current version is not supported (peer of this version is abondoned) + return selected + + selected = min(his_cur_ver,OLPROTO_VER_CURRENT) + return selected + +def decode_auth_listen_port(peerid): + bin = peerid[14:16] + tup = unpack('>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","permid: Exception in verify_torrent_signature:",str(e) + return False + + +# Exported classes +class PermIDException(Exception): pass + +class ChallengeResponse: + """ Exchange Challenge/Response via Overlay Swarm """ + + def __init__(self, my_keypair, my_id, secure_overlay): + self.my_keypair = my_keypair + self.permid = str(my_keypair.pub().get_der()) + self.my_id = my_id + self.secure_overlay = secure_overlay + + self.my_random = None + self.peer_id = None + self.peer_random = None + self.peer_pub = None + self.state = STATE_INITIAL + # Calculate message limits: + [dummy_random,cdata] = generate_challenge() + [dummy_random1,rdata1] = generate_response1(dummy_random,my_id,self.my_keypair) + rdata2 = generate_response2(dummy_random,my_id,dummy_random,self.my_keypair) + self.minchal = 1+len(cdata) # 1+ = message type + self.minr1 = 1+len(rdata1) - 1 # Arno: hack, also here, just to be on the safe side + self.minr2 = 1+len(rdata2) - 1 # Arno: hack, sometimes the official minimum is too big + + def starting_party(self,locally_initiated): + if self.state == STATE_INITIAL and locally_initiated: + self.state = STATE_AWAIT_R1 + return True + else: + return False + + def create_challenge(self): + [self.my_random,cdata] = generate_challenge() + return cdata + + def got_challenge_event(self,cdata,peer_id): + if self.state != STATE_INITIAL: + self.state = STATE_FAILED + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Got unexpected CHALLENGE message" + raise PermIDException + self.peer_random = check_challenge(cdata) + if self.peer_random is None: + self.state = STATE_FAILED + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Got bad 
CHALLENGE message" + raise PermIDException + self.peer_id = peer_id + [self.my_random,rdata1] = generate_response1(self.peer_random,peer_id,self.my_keypair) + self.state = STATE_AWAIT_R2 + return rdata1 + + def got_response1_event(self,rdata1,peer_id): + if self.state != STATE_AWAIT_R1: + self.state = STATE_FAILED + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Got unexpected RESPONSE1 message" + raise PermIDException + [randomA,peer_pub] = check_response1(rdata1,self.my_random,self.my_id) + + if randomA is None or peer_pub is None: + self.state = STATE_FAILED + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Got bad RESPONSE1 message" + raise PermIDException + + # avoid being connected by myself + peer_permid = str(peer_pub.get_der()) + if self.permid == peer_permid: + self.state = STATE_FAILED + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Got the same Permid as myself" + raise PermIDException + + self.peer_id = peer_id + self.peer_random = randomA + self.peer_pub = peer_pub + self.set_peer_authenticated() + rdata2 = generate_response2(self.peer_random,self.peer_id,self.my_random,self.my_keypair) + return rdata2 + + def got_response2_event(self,rdata2): + if self.state != STATE_AWAIT_R2: + self.state = STATE_FAILED + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Got unexpected RESPONSE2 message" + raise PermIDException + self.peer_pub = check_response2(rdata2,self.my_random,self.my_id,self.peer_random,self.peer_id) + if self.peer_pub is None: + self.state = STATE_FAILED + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Got bad RESPONSE2 message, authentication failed." + raise PermIDException + else: + # avoid being connected by myself + peer_permid = str(self.peer_pub.get_der()) + if self.permid == peer_permid: + self.state = STATE_FAILED + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Got the same Permid as myself" + raise PermIDException + else: + self.set_peer_authenticated() + + def set_peer_authenticated(self): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","permid: Challenge response succesful!" 
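Each got_*_event() above first checks that the message arrives in the single state where it is legal and otherwise drops to STATE_FAILED and raises; the initiating side additionally moves from STATE_INITIAL to STATE_AWAIT_R1 when it sends its challenge (starting_party()). A table-driven sketch of those guards (standalone Python 3, message names as strings for brevity):

STATE_INITIAL, STATE_AWAIT_R1, STATE_AWAIT_R2, STATE_AUTHENTICATED, STATE_FAILED = range(5)

EXPECTED_STATE = {                     # message type -> the only state in which it may arrive
    "CHALLENGE": STATE_INITIAL,
    "RESPONSE1": STATE_AWAIT_R1,
    "RESPONSE2": STATE_AWAIT_R2,
}

NEXT_STATE = {
    "CHALLENGE": STATE_AWAIT_R2,       # responder now waits for RESPONSE2
    "RESPONSE1": STATE_AUTHENTICATED,  # initiator has authenticated the peer
    "RESPONSE2": STATE_AUTHENTICATED,  # responder has authenticated the peer
}

def transition(state, message_type):
    if EXPECTED_STATE.get(message_type) != state:
        return STATE_FAILED
    return NEXT_STATE[message_type]

print(transition(STATE_INITIAL, "CHALLENGE"))     # 2 (STATE_AWAIT_R2)
print(transition(STATE_INITIAL, "RESPONSE2"))     # 4 (STATE_FAILED)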
+ self.state = STATE_AUTHENTICATED + + def get_peer_authenticated(self): + return self.state == STATE_AUTHENTICATED + + def get_peer_permid(self): + if self.state != STATE_AUTHENTICATED: + raise PermIDException + return self.peer_pub.get_der() + + def get_auth_peer_id(self): + if self.state != STATE_AUTHENTICATED: + raise PermIDException + return self.peer_id + + def get_challenge_minlen(self): + return self.minchal + + def get_response1_minlen(self): + return self.minr1 + + def get_response2_minlen(self): + return self.minr2 + +#--------------------------------------- + + def start_cr(self, conn): + if not self.get_peer_authenticated() and self.starting_party(conn.is_locally_initiated()): + self.send_challenge(conn) + + def send_challenge(self, conn): + cdata = self.create_challenge() + conn.send_message(CHALLENGE + str(cdata) ) + + def got_challenge(self, cdata, conn): + rdata1 = self.got_challenge_event(cdata, conn.get_unauth_peer_id()) + conn.send_message(RESPONSE1 + rdata1) + + def got_response1(self, rdata1, conn): + rdata2 = self.got_response1_event(rdata1, conn.get_unauth_peer_id()) + conn.send_message(RESPONSE2 + rdata2) + # get_peer_permid() throws exception if auth has failed + self.secure_overlay.got_auth_connection(conn,self.get_peer_permid(),self.get_auth_peer_id()) + + def got_response2(self, rdata2, conn): + self.got_response2_event(rdata2) + if self.get_peer_authenticated(): + #conn.send_message('') # Send KeepAlive message as reply + self.secure_overlay.got_auth_connection(conn,self.get_peer_permid(),self.get_auth_peer_id()) + + + def got_message(self, conn, message): + """ Handle message for PermID exchange and return if the message is valid """ + + if not conn: + return False + t = message[0] + if message[1:]: + msg = message[1:] + + if t == CHALLENGE: + if len(message) < self.get_challenge_minlen(): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","permid: Close on bad CHALLENGE: msg len",len(message) + self.state = STATE_FAILED + return False + try: + self.got_challenge(msg, conn) + except Exception,e: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","permid: Close on bad CHALLENGE: exception",str(e) + traceback.print_exc() + return False + elif t == RESPONSE1: + if len(message) < self.get_response1_minlen(): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","permid: Close on bad RESPONSE1: msg len",len(message) + self.state = STATE_FAILED + return False + try: + self.got_response1(msg, conn) + except Exception,e: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","permid: Close on bad RESPONSE1: exception",str(e) + traceback.print_exc() + return False + elif t == RESPONSE2: + if len(message) < self.get_response2_minlen(): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","permid: Close on bad RESPONSE2: msg len",len(message) + self.state = STATE_FAILED + return False + try: + self.got_response2(msg, conn) + except Exception,e: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","permid: Close on bad RESPONSE2: exception",str(e) + traceback.print_exc() + return False + else: + return False + return True + +if __name__ == '__main__': + init() +# ChallengeResponse(None, None) diff --git a/tribler-mod/Tribler/Core/Overlay/permid.py.bak b/tribler-mod/Tribler/Core/Overlay/permid.py.bak new file mode 100644 index 0000000..87fbcbd --- /dev/null +++ 
b/tribler-mod/Tribler/Core/Overlay/permid.py.bak @@ -0,0 +1,407 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information + +import sys +from sha import sha +from base64 import encodestring +from copy import deepcopy +import traceback,os + +from M2Crypto import Rand,EC +from Tribler.Core.BitTornado.bencode import bencode, bdecode +from Tribler.Core.BitTornado.BT1.MessageID import * + +DEBUG = False + +# Internal constants +keypair_ecc_curve = EC.NID_sect233k1 +num_random_bits = 1024*8 # bits + +# Protocol states +STATE_INITIAL = 0 +STATE_AWAIT_R1 = 1 +STATE_AWAIT_R2 = 2 +STATE_AUTHENTICATED = 3 +STATE_FAILED = 4 + +# Exported functions +def init(): + Rand.rand_seed(os.urandom(num_random_bits/8)) + +def exit(): + pass + +def generate_keypair(): + ec_keypair=EC.gen_params(keypair_ecc_curve) + ec_keypair.gen_key() + return ec_keypair + +def read_keypair(keypairfilename): + return EC.load_key(keypairfilename) + +def save_keypair(keypair,keypairfilename): + keypair.save_key(keypairfilename, None) + +def save_pub_key(keypair,pubkeyfilename): + keypair.save_pub_key(pubkeyfilename) + + +# def show_permid(permid): +# See Tribler/utilities.py + +def permid_for_user(permid): + # Full BASE64-encoded + return encodestring(permid).replace("\n","") + +# For convenience +def sign_data(plaintext,ec_keypair): + digest = sha(plaintext).digest() + return ec_keypair.sign_dsa_asn1(digest) + +def verify_data(plaintext,permid,blob): + pubkey = EC.pub_key_from_der(permid) + digest = sha(plaintext).digest() + return pubkey.verify_dsa_asn1(digest,blob) + +def verify_data_pubkeyobj(plaintext,pubkey,blob): + digest = sha(plaintext).digest() + return pubkey.verify_dsa_asn1(digest,blob) + + +# Internal functions + +# +# The following methods and ChallengeResponse class implement a +# Challenge/Response identification protocol, notably the +# ISO/IEC 9798-3 protocol, as described in $10.3.3 (ii) (2) of the +# ``Handbook of Applied Cryptography''by Alfred J. Menezes et al. 
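The comment above names the protocol; the functions that follow implement it as three bencoded messages signed with each side's EC keypair. The standalone sketch below shows only the message contents and which fields each party signs, with a placeholder sign() instead of the real EC-DSA-over-bencode signatures; following the variable names used below, the responder is "A" and the initiator is "B".

import os

def sign(key, *fields):                  # placeholder, not the real crypto
    return b"SIG[" + key + b":" + b"|".join(fields) + b"]"

id_a, id_b = b"peer-id-A", b"peer-id-B"

# 1. CHALLENGE   B -> A : random_b (a fresh nonce)
random_b = os.urandom(16)

# 2. RESPONSE1   A -> B : A's certificate, a fresh nonce random_a, B's id,
#                         and A's signature over (random_a, random_b, id_b)
random_a = os.urandom(16)
response1 = {"certA": b"pubkeyA", "rA": random_a, "B": id_b,
             "SA": sign(b"privA", random_a, random_b, id_b)}

# 3. RESPONSE2   B -> A : B's certificate, A's id,
#                         and B's signature over (random_b, random_a, id_a)
response2 = {"certB": b"pubkeyB", "A": id_a,
             "SB": sign(b"privB", random_b, random_a, id_a)}

# Each side verifies the peer's signature against the certificate it received
# and checks that its own nonce and peer id appear in the signed data.
print(sorted(response1), sorted(response2))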
+# + +def generate_challenge(): + randomB = Rand.rand_bytes(num_random_bits/8) + return [randomB,bencode(randomB)] + +def check_challenge(cdata): + try: + randomB = bdecode(cdata) + except: + return None + if len(randomB) != num_random_bits/8: + return None + else: + return randomB + +def generate_response1(randomB,peeridB,keypairA): + randomA = Rand.rand_bytes(num_random_bits/8) + response1 = {} + response1['certA'] = str(keypairA.pub().get_der()) + response1['rA'] = randomA + response1['B'] = peeridB + response1['SA'] = sign_response(randomA,randomB,peeridB,keypairA) + return [randomA,bencode(response1)] + +def check_response1(rdata1,randomB,peeridB): + try: + response1 = bdecode(rdata1) + except: + return [None,None] + if response1['B'] != peeridB: + return [None,None] + pubA_der = response1['certA'] + pubA = EC.pub_key_from_der(pubA_der) + sigA = response1['SA'] + randomA = response1['rA'] + if verify_response(randomA,randomB,peeridB,pubA,sigA): + return [randomA,pubA] + else: + return [None,None] + +def generate_response2(randomA,peeridA,randomB,keypairB): + response2 = {} + response2['certB'] = str(keypairB.pub().get_der()) + response2['A'] = peeridA + response2['SB'] = sign_response(randomB,randomA,peeridA,keypairB) + return bencode(response2) + +def check_response2(rdata2,randomA,peeridA,randomB,peeridB): + try: + response2 = bdecode(rdata2) + except: + return None + if response2['A'] != peeridA: + return None + pubB_der = response2['certB'] + pubB = EC.pub_key_from_der(pubB_der) + sigB = response2['SB'] + if verify_response(randomB,randomA,peeridA,pubB,sigB): + return pubB + else: + return None + +def sign_response(randomA,randomB,peeridB,keypairA): + list = [ randomA, randomB, peeridB ] + blist = bencode(list) + digest = sha(blist).digest() + blob = keypairA.sign_dsa_asn1(digest) + return blob + +def verify_response(randomA,randomB,peeridB,pubA,sigA): + list = [ randomA, randomB, peeridB ] + blist = bencode(list) + digest = sha(blist).digest() + return pubA.verify_dsa_asn1(digest,sigA) + + +# External functions + +def create_torrent_signature(metainfo,keypairfilename): + keypair = EC.load_key(keypairfilename) + bmetainfo = bencode(metainfo) + digester = sha(bmetainfo[:]) + digest = digester.digest() + sigstr = keypair.sign_dsa_asn1(digest) + metainfo['signature'] = sigstr + metainfo['signer'] = str(keypair.pub().get_der()) + +def verify_torrent_signature(metainfo): + r = deepcopy(metainfo) + signature = r['signature'] + signer = r['signer'] + del r['signature'] + del r['signer'] + bmetainfo = bencode(r) + digester = sha(bmetainfo[:]) + digest = digester.digest() + return do_verify_torrent_signature(digest,signature,signer) + + +# Internal + +def do_verify_torrent_signature(digest,sigstr,permid): + if permid is None: + return False + try: + ecpub = EC.pub_key_from_der(permid) + if ecpub is None: + return False + intret = ecpub.verify_dsa_asn1(digest,sigstr) + return intret == 1 + except Exception, e: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","permid: Exception in verify_torrent_signature:",str(e) + return False + + +# Exported classes +class PermIDException(Exception): pass + +class ChallengeResponse: + """ Exchange Challenge/Response via Overlay Swarm """ + + def __init__(self, my_keypair, my_id, secure_overlay): + self.my_keypair = my_keypair + self.permid = str(my_keypair.pub().get_der()) + self.my_id = my_id + self.secure_overlay = secure_overlay + + self.my_random = None + self.peer_id = None + self.peer_random = None + self.peer_pub = None + 
self.state = STATE_INITIAL + # Calculate message limits: + [dummy_random,cdata] = generate_challenge() + [dummy_random1,rdata1] = generate_response1(dummy_random,my_id,self.my_keypair) + rdata2 = generate_response2(dummy_random,my_id,dummy_random,self.my_keypair) + self.minchal = 1+len(cdata) # 1+ = message type + self.minr1 = 1+len(rdata1) - 1 # Arno: hack, also here, just to be on the safe side + self.minr2 = 1+len(rdata2) - 1 # Arno: hack, sometimes the official minimum is too big + + def starting_party(self,locally_initiated): + if self.state == STATE_INITIAL and locally_initiated: + self.state = STATE_AWAIT_R1 + return True + else: + return False + + def create_challenge(self): + [self.my_random,cdata] = generate_challenge() + return cdata + + def got_challenge_event(self,cdata,peer_id): + if self.state != STATE_INITIAL: + self.state = STATE_FAILED + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Got unexpected CHALLENGE message" + raise PermIDException + self.peer_random = check_challenge(cdata) + if self.peer_random is None: + self.state = STATE_FAILED + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Got bad CHALLENGE message" + raise PermIDException + self.peer_id = peer_id + [self.my_random,rdata1] = generate_response1(self.peer_random,peer_id,self.my_keypair) + self.state = STATE_AWAIT_R2 + return rdata1 + + def got_response1_event(self,rdata1,peer_id): + if self.state != STATE_AWAIT_R1: + self.state = STATE_FAILED + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Got unexpected RESPONSE1 message" + raise PermIDException + [randomA,peer_pub] = check_response1(rdata1,self.my_random,self.my_id) + + if randomA is None or peer_pub is None: + self.state = STATE_FAILED + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Got bad RESPONSE1 message" + raise PermIDException + + # avoid being connected by myself + peer_permid = str(peer_pub.get_der()) + if self.permid == peer_permid: + self.state = STATE_FAILED + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Got the same Permid as myself" + raise PermIDException + + self.peer_id = peer_id + self.peer_random = randomA + self.peer_pub = peer_pub + self.set_peer_authenticated() + rdata2 = generate_response2(self.peer_random,self.peer_id,self.my_random,self.my_keypair) + return rdata2 + + def got_response2_event(self,rdata2): + if self.state != STATE_AWAIT_R2: + self.state = STATE_FAILED + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Got unexpected RESPONSE2 message" + raise PermIDException + self.peer_pub = check_response2(rdata2,self.my_random,self.my_id,self.peer_random,self.peer_id) + if self.peer_pub is None: + self.state = STATE_FAILED + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Got bad RESPONSE2 message, authentication failed." + raise PermIDException + else: + # avoid being connected by myself + peer_permid = str(self.peer_pub.get_der()) + if self.permid == peer_permid: + self.state = STATE_FAILED + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Got the same Permid as myself" + raise PermIDException + else: + self.set_peer_authenticated() + + def set_peer_authenticated(self): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","permid: Challenge response succesful!" 
+ self.state = STATE_AUTHENTICATED + + def get_peer_authenticated(self): + return self.state == STATE_AUTHENTICATED + + def get_peer_permid(self): + if self.state != STATE_AUTHENTICATED: + raise PermIDException + return self.peer_pub.get_der() + + def get_auth_peer_id(self): + if self.state != STATE_AUTHENTICATED: + raise PermIDException + return self.peer_id + + def get_challenge_minlen(self): + return self.minchal + + def get_response1_minlen(self): + return self.minr1 + + def get_response2_minlen(self): + return self.minr2 + +#--------------------------------------- + + def start_cr(self, conn): + if not self.get_peer_authenticated() and self.starting_party(conn.is_locally_initiated()): + self.send_challenge(conn) + + def send_challenge(self, conn): + cdata = self.create_challenge() + conn.send_message(CHALLENGE + str(cdata) ) + + def got_challenge(self, cdata, conn): + rdata1 = self.got_challenge_event(cdata, conn.get_unauth_peer_id()) + conn.send_message(RESPONSE1 + rdata1) + + def got_response1(self, rdata1, conn): + rdata2 = self.got_response1_event(rdata1, conn.get_unauth_peer_id()) + conn.send_message(RESPONSE2 + rdata2) + # get_peer_permid() throws exception if auth has failed + self.secure_overlay.got_auth_connection(conn,self.get_peer_permid(),self.get_auth_peer_id()) + + def got_response2(self, rdata2, conn): + self.got_response2_event(rdata2) + if self.get_peer_authenticated(): + #conn.send_message('') # Send KeepAlive message as reply + self.secure_overlay.got_auth_connection(conn,self.get_peer_permid(),self.get_auth_peer_id()) + + + def got_message(self, conn, message): + """ Handle message for PermID exchange and return if the message is valid """ + + if not conn: + return False + t = message[0] + if message[1:]: + msg = message[1:] + + if t == CHALLENGE: + if len(message) < self.get_challenge_minlen(): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","permid: Close on bad CHALLENGE: msg len",len(message) + self.state = STATE_FAILED + return False + try: + self.got_challenge(msg, conn) + except Exception,e: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","permid: Close on bad CHALLENGE: exception",str(e) + traceback.print_exc() + return False + elif t == RESPONSE1: + if len(message) < self.get_response1_minlen(): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","permid: Close on bad RESPONSE1: msg len",len(message) + self.state = STATE_FAILED + return False + try: + self.got_response1(msg, conn) + except Exception,e: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","permid: Close on bad RESPONSE1: exception",str(e) + traceback.print_exc() + return False + elif t == RESPONSE2: + if len(message) < self.get_response2_minlen(): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","permid: Close on bad RESPONSE2: msg len",len(message) + self.state = STATE_FAILED + return False + try: + self.got_response2(msg, conn) + except Exception,e: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","permid: Close on bad RESPONSE2: exception",str(e) + traceback.print_exc() + return False + else: + return False + return True + +if __name__ == '__main__': + init() +# ChallengeResponse(None, None) diff --git a/tribler-mod/Tribler/Core/RequestPolicy.py b/tribler-mod/Tribler/Core/RequestPolicy.py new file mode 100644 index 0000000..ae1e0f4 --- /dev/null +++ b/tribler-mod/Tribler/Core/RequestPolicy.py @@ -0,0 
+1,139 @@ +from time import localtime, strftime +# Written by Jelle Roozenburg +# see LICENSE.txt for license information +""" Controls the authorization of messages received via the Tribler Overlay """ + +from Tribler.Core.simpledefs import * +from Tribler.Core.exceptions import * +from Tribler.Core.BitTornado.BT1.MessageID import * + +DEBUG = False + +MAX_QUERIES_FROM_RANDOM_PEER = 1000 + + +class AbstractRequestPolicy: + """ Superclass for all Tribler RequestPolicies. A RequestPolicy controls + the authorization of messages received via the Tribler Overlay, such + as distributed recommendations, remote queries, etc. + """ + def __init__(self): + """ Constructor """ + + def allowed(self, permid, messageID): + """ Returns whether or not the peer identified by permid is allowed to + send us a message of type messageID. + @param permid The permid of the sending peer. + @param messageID A integer messageID, see Tribler.Core.BitTornado.BT1.MessageID + @returns A boolean indicating whether the message is authorized. + """ + raise NotYetImplementedException() + + +class AllowAllRequestPolicy(AbstractRequestPolicy): + """ A RequestPolicy that allows all messages to be sent by all peers. """ + + def allowed(self, permid, messageID): + return self.allowAllRequestsAllPeers(permid, messageID) + + def allowAllRequestsAllPeers(self, permid, messageID): + return True + + +class CommonRequestPolicy(AbstractRequestPolicy): + """ A base class implementing some methods that can be used as building + blocks for RequestPolicies. + """ + def __init__(self,session): + """ Constructor """ + self.session = session + self.friendsdb = session.open_dbhandler(NTFY_FRIENDS) + self.peerdb = session.open_dbhandler(NTFY_PEERS) + AbstractRequestPolicy.__init__(self) + + def isFriend(self, permid): + """ + @param permid The permid of the sending peer. + @return Whether or not the specified permid is a friend. + """ + fs = self.friendsdb.getFriendState(permid) + return (fs == FS_MUTUAL or fs == FS_I_INVITED) + + def isSuperPeer(self, permid): + """ + @param permid The permid of the sending peer. + @return Whether of not the specified permid is a superpeer. + """ + return permid in self.session.lm.superpeer_db.getSuperPeers() + + def isCrawler(self, permid): + """ + @param permid The permid of the sending peer. + @return Whether of not the specified permid is a superpeer. + """ + return permid in self.session.lm.crawler_db.getCrawlers() + + def benign_random_peer(self,permid): + """ + @param permid The permid of the sending peer. + @return Whether or not the specified permid has exceeded his + quota of remote query messages. + """ + if MAX_QUERIES_FROM_RANDOM_PEER > 0: + nqueries = self.get_peer_nqueries(permid) + return nqueries < MAX_QUERIES_FROM_RANDOM_PEER + else: + return True + + def get_peer_nqueries(self, permid): + """ + @param permid The permid of the sending peer. + @return The number of remote query messages already received from + this peer. + """ + peer = self.peerdb.getPeer(permid) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","CommonRequestPolicy: get_peer_nqueries: getPeer",`permid`,peer + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","CommonRequestPolicy: get_peer_nqueries: called by",currentThread().getName() + if peer is None: + return 0 + else: + return peer['num_queries'] + +class AllowFriendsRequestPolicy(CommonRequestPolicy): + """ + A RequestPolicy that allows all non-crawler messages to be sent by + friends only. 
Crawler messages are allowed from Crawlers only. + """ + + def allowed(self, permid, messageID): + if messageID in (CRAWLER_REQUEST, CRAWLER_REPLY): + return self.isCrawler(permid) + else: + return self.allowAllRequestsFromFriends(permid, messageID) + + def allowAllRequestsFromFriends(self, permid, messageID): + # Access control + return self.isFriend(permid) + + +class FriendsCoopDLOtherRQueryQuotumCrawlerAllowAllRequestPolicy(CommonRequestPolicy): + """ + Allows friends to send all messages related to cooperative + downloads, subjects all other peers to a remote query quotum of + 100, and allows all peers to send all other non-crawler + messages. Crawler messages are allowed from Crawlers only. + """ + + def allowed(self, permid, messageID): + """ Returns whether or not the peer identified by permid is allowed to + send us a message of type messageID. + @return Boolean. """ + if messageID == CRAWLER_REQUEST: + return self.isCrawler(permid) + elif (messageID in HelpCoordinatorMessages or messageID in HelpHelperMessages) and not self.isFriend(permid): + return False + elif messageID == QUERY and not (self.isFriend(permid) or self.benign_random_peer(permid)): + return False + else: + return True + diff --git a/tribler-mod/Tribler/Core/RequestPolicy.py.bak b/tribler-mod/Tribler/Core/RequestPolicy.py.bak new file mode 100644 index 0000000..ba43128 --- /dev/null +++ b/tribler-mod/Tribler/Core/RequestPolicy.py.bak @@ -0,0 +1,138 @@ +# Written by Jelle Roozenburg +# see LICENSE.txt for license information +""" Controls the authorization of messages received via the Tribler Overlay """ + +from Tribler.Core.simpledefs import * +from Tribler.Core.exceptions import * +from Tribler.Core.BitTornado.BT1.MessageID import * + +DEBUG = False + +MAX_QUERIES_FROM_RANDOM_PEER = 1000 + + +class AbstractRequestPolicy: + """ Superclass for all Tribler RequestPolicies. A RequestPolicy controls + the authorization of messages received via the Tribler Overlay, such + as distributed recommendations, remote queries, etc. + """ + def __init__(self): + """ Constructor """ + + def allowed(self, permid, messageID): + """ Returns whether or not the peer identified by permid is allowed to + send us a message of type messageID. + @param permid The permid of the sending peer. + @param messageID A integer messageID, see Tribler.Core.BitTornado.BT1.MessageID + @returns A boolean indicating whether the message is authorized. + """ + raise NotYetImplementedException() + + +class AllowAllRequestPolicy(AbstractRequestPolicy): + """ A RequestPolicy that allows all messages to be sent by all peers. """ + + def allowed(self, permid, messageID): + return self.allowAllRequestsAllPeers(permid, messageID) + + def allowAllRequestsAllPeers(self, permid, messageID): + return True + + +class CommonRequestPolicy(AbstractRequestPolicy): + """ A base class implementing some methods that can be used as building + blocks for RequestPolicies. + """ + def __init__(self,session): + """ Constructor """ + self.session = session + self.friendsdb = session.open_dbhandler(NTFY_FRIENDS) + self.peerdb = session.open_dbhandler(NTFY_PEERS) + AbstractRequestPolicy.__init__(self) + + def isFriend(self, permid): + """ + @param permid The permid of the sending peer. + @return Whether or not the specified permid is a friend. + """ + fs = self.friendsdb.getFriendState(permid) + return (fs == FS_MUTUAL or fs == FS_I_INVITED) + + def isSuperPeer(self, permid): + """ + @param permid The permid of the sending peer. 
+ @return Whether of not the specified permid is a superpeer. + """ + return permid in self.session.lm.superpeer_db.getSuperPeers() + + def isCrawler(self, permid): + """ + @param permid The permid of the sending peer. + @return Whether of not the specified permid is a superpeer. + """ + return permid in self.session.lm.crawler_db.getCrawlers() + + def benign_random_peer(self,permid): + """ + @param permid The permid of the sending peer. + @return Whether or not the specified permid has exceeded his + quota of remote query messages. + """ + if MAX_QUERIES_FROM_RANDOM_PEER > 0: + nqueries = self.get_peer_nqueries(permid) + return nqueries < MAX_QUERIES_FROM_RANDOM_PEER + else: + return True + + def get_peer_nqueries(self, permid): + """ + @param permid The permid of the sending peer. + @return The number of remote query messages already received from + this peer. + """ + peer = self.peerdb.getPeer(permid) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","CommonRequestPolicy: get_peer_nqueries: getPeer",`permid`,peer + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","CommonRequestPolicy: get_peer_nqueries: called by",currentThread().getName() + if peer is None: + return 0 + else: + return peer['num_queries'] + +class AllowFriendsRequestPolicy(CommonRequestPolicy): + """ + A RequestPolicy that allows all non-crawler messages to be sent by + friends only. Crawler messages are allowed from Crawlers only. + """ + + def allowed(self, permid, messageID): + if messageID in (CRAWLER_REQUEST, CRAWLER_REPLY): + return self.isCrawler(permid) + else: + return self.allowAllRequestsFromFriends(permid, messageID) + + def allowAllRequestsFromFriends(self, permid, messageID): + # Access control + return self.isFriend(permid) + + +class FriendsCoopDLOtherRQueryQuotumCrawlerAllowAllRequestPolicy(CommonRequestPolicy): + """ + Allows friends to send all messages related to cooperative + downloads, subjects all other peers to a remote query quotum of + 100, and allows all peers to send all other non-crawler + messages. Crawler messages are allowed from Crawlers only. + """ + + def allowed(self, permid, messageID): + """ Returns whether or not the peer identified by permid is allowed to + send us a message of type messageID. + @return Boolean. """ + if messageID == CRAWLER_REQUEST: + return self.isCrawler(permid) + elif (messageID in HelpCoordinatorMessages or messageID in HelpHelperMessages) and not self.isFriend(permid): + return False + elif messageID == QUERY and not (self.isFriend(permid) or self.benign_random_peer(permid)): + return False + else: + return True + diff --git a/tribler-mod/Tribler/Core/Search/KeywordSearch.py b/tribler-mod/Tribler/Core/Search/KeywordSearch.py new file mode 100644 index 0000000..c911c01 --- /dev/null +++ b/tribler-mod/Tribler/Core/Search/KeywordSearch.py @@ -0,0 +1,108 @@ +from time import localtime, strftime +# written by Jelle Roozenburg +# see LICENSE.txt for license information + +import re +import sys + +DEBUG = False + +class KeywordSearch: + """ + Tribler keywordsearch now has the following features: + 1. All items with one of the keywords in the 'name' field are returned (self.simpleSearch() ) + 2. The sorting of the results is based on: + a) The number of matching keywords + b) The length of the matching keywords + c) If the keywords matched a whole word (search for 'cat' find 'category') + (done in self.search() ) + 3. 
Searching is case insensitive + """ + def search(self, haystack, needles, haystackismatching=False): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'kws: unprocessed keywords: %s' % needles + needles = self.unRegExpifySearchwords(needles) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'kws: Searching for %s in %d items' % (repr(needles), len(haystack)) + + if not haystackismatching: + searchspace = self.simpleSearch(haystack, needles) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'kws: Found %s items using simple search' % len(searchspace) + else: + searchspace = haystack + results = [] + wbsearch = [] + + for needle in needles: + wbsearch.append(re.compile(r'\b%s\b' % needle)) + + for item in searchspace: + title = item['name'].lower() + score = 0 + for i in xrange(len(needles)): + wb = wbsearch[i].findall(title) + score += len(wb) * 2 * len(needles[i]) + if len(wb) == 0: + if title.find(needles[i].lower()) != -1: + score += len(needles[i]) + + results.append((score, item)) + + results.sort(reverse=True) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'kws: Found %d items eventually' % len(results) + #for r in results: + # print r + return [r[1] for r in results] + + + def unRegExpifySearchwords(self, needles): + replaceRegExpChars = re.compile(r'(\\|\*|\.|\+|\?|\||\(|\)|\[|\]|\{|\})') + new_needles = [] + for needle in needles: + needle = needle.strip() + if len(needle)== 0: + continue + new_needle = re.sub(replaceRegExpChars, r'\\\1', needle.lower()) + new_needles.append(new_needle) + return new_needles + + def simpleSearch(self, haystack, needles, searchtype='AND'): + "Can do both OR or AND search" + hits = [] + if searchtype == 'OR': + searchRegexp = r'' + for needle in needles: + searchRegexp+= needle+'|' + searchRegexp = re.compile(searchRegexp[:-1]) + for item in haystack: + title = item['name'].lower() + if len(searchRegexp.findall(title)) > 0: + hits.append(item) + elif searchtype == 'AND': + for item in haystack: + title = item['name'].lower() + foundAll = True + for needle in needles: + if title.find(needle) == -1: + foundAll = False + break + if foundAll: + hits.append(item) + return hits + + +def test(): + data = [{'name':'Fedoras 3.10'}, + {'name':'Fedora 2.10'}, + {'name':'Movie 3.10'}, + {'name':'fedora_2'}, + {'name':'movie_theater.avi'} + ] + words = ['fedora', '1'] + #print KeywordSearch().simpleSearch(data, words) + print KeywordSearch().search(data, words) +if __name__ == '__main__': + test() + diff --git a/tribler-mod/Tribler/Core/Search/KeywordSearch.py.bak b/tribler-mod/Tribler/Core/Search/KeywordSearch.py.bak new file mode 100644 index 0000000..c1f99fc --- /dev/null +++ b/tribler-mod/Tribler/Core/Search/KeywordSearch.py.bak @@ -0,0 +1,107 @@ +# written by Jelle Roozenburg +# see LICENSE.txt for license information + +import re +import sys + +DEBUG = False + +class KeywordSearch: + """ + Tribler keywordsearch now has the following features: + 1. All items with one of the keywords in the 'name' field are returned (self.simpleSearch() ) + 2. The sorting of the results is based on: + a) The number of matching keywords + b) The length of the matching keywords + c) If the keywords matched a whole word (search for 'cat' find 'category') + (done in self.search() ) + 3. 
Searching is case insensitive + """ + def search(self, haystack, needles, haystackismatching=False): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'kws: unprocessed keywords: %s' % needles + needles = self.unRegExpifySearchwords(needles) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'kws: Searching for %s in %d items' % (repr(needles), len(haystack)) + + if not haystackismatching: + searchspace = self.simpleSearch(haystack, needles) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'kws: Found %s items using simple search' % len(searchspace) + else: + searchspace = haystack + results = [] + wbsearch = [] + + for needle in needles: + wbsearch.append(re.compile(r'\b%s\b' % needle)) + + for item in searchspace: + title = item['name'].lower() + score = 0 + for i in xrange(len(needles)): + wb = wbsearch[i].findall(title) + score += len(wb) * 2 * len(needles[i]) + if len(wb) == 0: + if title.find(needles[i].lower()) != -1: + score += len(needles[i]) + + results.append((score, item)) + + results.sort(reverse=True) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'kws: Found %d items eventually' % len(results) + #for r in results: + # print r + return [r[1] for r in results] + + + def unRegExpifySearchwords(self, needles): + replaceRegExpChars = re.compile(r'(\\|\*|\.|\+|\?|\||\(|\)|\[|\]|\{|\})') + new_needles = [] + for needle in needles: + needle = needle.strip() + if len(needle)== 0: + continue + new_needle = re.sub(replaceRegExpChars, r'\\\1', needle.lower()) + new_needles.append(new_needle) + return new_needles + + def simpleSearch(self, haystack, needles, searchtype='AND'): + "Can do both OR or AND search" + hits = [] + if searchtype == 'OR': + searchRegexp = r'' + for needle in needles: + searchRegexp+= needle+'|' + searchRegexp = re.compile(searchRegexp[:-1]) + for item in haystack: + title = item['name'].lower() + if len(searchRegexp.findall(title)) > 0: + hits.append(item) + elif searchtype == 'AND': + for item in haystack: + title = item['name'].lower() + foundAll = True + for needle in needles: + if title.find(needle) == -1: + foundAll = False + break + if foundAll: + hits.append(item) + return hits + + +def test(): + data = [{'name':'Fedoras 3.10'}, + {'name':'Fedora 2.10'}, + {'name':'Movie 3.10'}, + {'name':'fedora_2'}, + {'name':'movie_theater.avi'} + ] + words = ['fedora', '1'] + #print KeywordSearch().simpleSearch(data, words) + print KeywordSearch().search(data, words) +if __name__ == '__main__': + test() + diff --git a/tribler-mod/Tribler/Core/Search/Reranking.py b/tribler-mod/Tribler/Core/Search/Reranking.py new file mode 100644 index 0000000..33b0e32 --- /dev/null +++ b/tribler-mod/Tribler/Core/Search/Reranking.py @@ -0,0 +1,98 @@ +from time import localtime, strftime +# written by Nicolas Neubauer +# see LICENSE.txt for license information + +import sys, time + +DEBUG = False + +class Reranker: + def getID(self): + """the ID that is stored in the clicklog 'reranking_strategy' field for later comparison""" + return 0 + + def rerank(self, hits, keywords, torrent_db, pref_db, mypref_db, search_db): + """takes hits and reorders them given the current keywords""" + return hits + +class DefaultTorrentReranker(Reranker): + """ just leave the hits alone """ + def getID(self): + return 1 + def rerank(self, hits, keywords, torrent_db, pref_db, mypref_db, search_db): + return hits + +class TestReranker(Reranker): + """ for testing purposes only """ + def 
getID(self): + return 2 + def rerank(self, hits, keywords, torrent_db, pref_db, mypref_db, search_db): + if len(hits)>1: + h = hits[0] + hits[0] = hits[1] + hits[1] = h + return hits + +class SwapFirstTwoReranker(Reranker): + """ swaps first and second place if second place has been frequently selected from bad position """ + + def __init__(self): + self.MAX_SEEN_BEFORE_RERANK = 5 + self.MAX_POPULAR_RATIO = 5 + + def getID(self): + return 2 + + def rerank(self, hits, keywords, torrent_db, pref_db, mypref_db, search_db): + if len(hits)<2: + return hits + + torrent_id_0 = hits[0].get('torrent_id',0) + torrent_id_1 = hits[1].get('torrent_id',0) + if torrent_id_0 == 0 or torrent_id_1 == 0: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "reranking: torrent_id=0 in hits, exiting" + # we got some problems elsewhere, don't add to it + return hits + + (num_hits_0, position_score_0) = pref_db.getPositionScore(torrent_id_0, keywords) + (num_hits_1, position_score_1) = pref_db.getPositionScore(torrent_id_1, keywords) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "reranking: first torrent (%d): (num, score)= (%s, %s)" % (torrent_id_0, num_hits_0, position_score_0) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "reranking: second torrent (%d): (num, score)= (%s, %s)" % (torrent_id_1, num_hits_1, position_score_1) + + if (num_hits_0 < self.MAX_SEEN_BEFORE_RERANK or num_hits_1 < self.MAX_SEEN_BEFORE_RERANK): + # only start thinking about reranking if we have seen enough samples + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "reranking: not enough samples, not reranking" + return hits + + if (num_hits_0/num_hits_1 > self.MAX_POPULAR_RATIO): + # if number one is much more popular, keep everything as it is + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "reranking: first torrent is too popular, not reranking" + return hits + + # if all these tests are successful, we may swap first and second if second + # has gotten hits from worse positions than first + + if position_score_0 < position_score_1: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "reranking: second torrent has better position score, reranking!" + h = hits[0] + hits[0] = hits[1] + hits[1] = h + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "reranking: second torrent does not have better position score, reranking!"
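SwapFirstTwoReranker.rerank() above applies three guards before touching the ranking: both of the top two torrents must have been seen at least MAX_SEEN_BEFORE_RERANK times, the first must not be more than MAX_POPULAR_RATIO times as popular, and the second must have the higher position score (it kept being selected even from worse list positions). A standalone sketch of that decision (Python 3, with the click statistics passed in as plain (num_hits, position_score) tuples; the comparison direction follows the debug messages above):

MAX_SEEN_BEFORE_RERANK = 5
MAX_POPULAR_RATIO = 5

def should_swap(first, second):
    """first and second are (num_hits, position_score) for the top two hits."""
    (num_0, score_0), (num_1, score_1) = first, second
    if num_0 < MAX_SEEN_BEFORE_RERANK or num_1 < MAX_SEEN_BEFORE_RERANK:
        return False                  # not enough samples yet
    if num_0 / num_1 > MAX_POPULAR_RATIO:
        return False                  # first hit is far more popular, keep it
    return score_0 < score_1          # second hit scored better despite its position

hits = ["torrent-A", "torrent-B"]
if should_swap((20, 0.3), (12, 0.8)):
    hits[0], hits[1] = hits[1], hits[0]
print(hits)                           # ['torrent-B', 'torrent-A']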
+ + return hits + +_rerankers = [DefaultTorrentReranker(), SwapFirstTwoReranker()] + + +def getTorrentReranker(): + global _rerankers + index = int(time.strftime("%H")) % (len(_rerankers)) + return _rerankers[index] + diff --git a/tribler-mod/Tribler/Core/Search/Reranking.py.bak b/tribler-mod/Tribler/Core/Search/Reranking.py.bak new file mode 100644 index 0000000..cefc9ff --- /dev/null +++ b/tribler-mod/Tribler/Core/Search/Reranking.py.bak @@ -0,0 +1,97 @@ +# written by Nicolas Neubauer +# see LICENSE.txt for license information + +import sys, time + +DEBUG = False + +class Reranker: + def getID(self): + """the ID that is stored in the clicklog 'reranking_strategy' field for later comparison""" + return 0 + + def rerank(self, hits, keywords, torrent_db, pref_db, mypref_db, search_db): + """takes hits and reorders them given the current keywords""" + return hits + +class DefaultTorrentReranker(Reranker): + """ just leave the hits alone """ + def getID(self): + return 1 + def rerank(self, hits, keywords, torrent_db, pref_db, mypref_db, search_db): + return hits + +class TestReranker(Reranker): + """ for testing purposes only """ + def getID(self): + return 2 + def rerank(self, hits, keywords, torrent_db, pref_db, mypref_db, search_db): + if len(hits)>1: + h = hits[0] + hits[0] = hits[1] + hits[1] = h + return hits + +class SwapFirstTwoReranker(Reranker): + """ swaps first and second place if second place has been frequently selected from bad position """ + + def __init__(self): + self.MAX_SEEN_BEFORE_RERANK = 5 + self.MAX_POPULAR_RATIO = 5 + + def getID(self): + return 2 + + def rerank(self, hits, keywords, torrent_db, pref_db, mypref_db, search_db): + if len(hits)<2: + return hits + + torrent_id_0 = hits[0].get('torrent_id',0) + torrent_id_1 = hits[1].get('torrent_id',0) + if torrent_id_0 == 0 or torrent_id_1 == 0: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "reranking: torrent_id=0 in hits, exiting" + # we got some problems elsewhere, don't add to it + return hits + + (num_hits_0, position_score_0) = pref_db.getPositionScore(torrent_id_0, keywords) + (num_hits_1, position_score_1) = pref_db.getPositionScore(torrent_id_1, keywords) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "reranking: first torrent (%d): (num, score)= (%s, %s)" % (torrent_id_0, num_hits_0, position_score_0) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "reranking: second torrent (%d): (num, score)= (%s, %s)" % (torrent_id_1, num_hits_1, position_score_1) + + if (num_hits_0 < self.MAX_SEEN_BEFORE_RERANK or num_hits_1 < self.MAX_SEEN_BEFORE_RERANK): + # only start thinking about reranking if we have seen enough samples + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "reranking: not enough samples, not reranking" + return hits + + if (num_hits_0/num_hits_1 > self.MAX_POPULAR_RATIO): + # if number one is much more popular, keep everything as it is + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "reranking: first torrent is too popular, not reranking" + return hits + + # if all these tests are successful, we may swap first and second if second + # has gotten hits from worse positions than first + + if position_score_0>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "reranking: second torrent has better position score, reranking!" 
+ h = hits[0] + hits[0] = hits[1] + hits[1] = h + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "reranking: second torrent does not have better position score, reranking!" + + return hits + +_rerankers = [DefaultTorrentReranker(), SwapFirstTwoReranker()] + + +def getTorrentReranker(): + global _rerankers + index = int(time.strftime("%H")) % (len(_rerankers)) + return _rerankers[index] + diff --git a/tribler-mod/Tribler/Core/Search/SearchManager.py b/tribler-mod/Tribler/Core/Search/SearchManager.py new file mode 100644 index 0000000..c3918b5 --- /dev/null +++ b/tribler-mod/Tribler/Core/Search/SearchManager.py @@ -0,0 +1,39 @@ +from time import localtime, strftime +# Written by Jelle Roozenburg, Arno Bakker +# see LICENSE.txt for license information +import sys + +from Tribler.Core.Search.KeywordSearch import KeywordSearch + +DEBUG = False + +class SearchManager: + """ Arno: This is DB neutral. All it assumes is a DBHandler with + a searchNames() method that returns records with at least a 'name' field + in them. + """ + + def __init__(self,dbhandler): + self.dbhandler = dbhandler + self.keywordsearch = KeywordSearch() + + def search(self,kws,maxhits=None): + """ Called by any thread """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SearchManager: search",kws + + namerecs = self.dbhandler.searchNames(kws) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SearchManager: search: Got namerecs",len(namerecs),`namerecs` + + hits = self.keywordsearch.search(namerecs,kws) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SearchManager: search: Filtered namerecs",len(hits) + + if maxhits is None: + return hits + else: + return hits[:maxhits] + diff --git a/tribler-mod/Tribler/Core/Search/SearchManager.py.bak b/tribler-mod/Tribler/Core/Search/SearchManager.py.bak new file mode 100644 index 0000000..ce54c08 --- /dev/null +++ b/tribler-mod/Tribler/Core/Search/SearchManager.py.bak @@ -0,0 +1,38 @@ +# Written by Jelle Roozenburg, Arno Bakker +# see LICENSE.txt for license information +import sys + +from Tribler.Core.Search.KeywordSearch import KeywordSearch + +DEBUG = False + +class SearchManager: + """ Arno: This is DB neutral. All it assumes is a DBHandler with + a searchNames() method that returns records with at least a 'name' field + in them. 
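A minimal usage sketch (editorial aside, not part of the patch) of how the two modules above fit together: SearchManager performs the keyword match, and the hour-rotated reranker from Reranking.py reorders the resulting hits. The *_db handler arguments are assumed to be the database handlers the rest of the Core normally supplies; they are not constructed here.

    from Tribler.Core.Search.SearchManager import SearchManager
    from Tribler.Core.Search.Reranking import getTorrentReranker

    def keyword_search(name_db, torrent_db, pref_db, mypref_db, search_db, keywords):
        # name_db: any handler exposing searchNames(), per the SearchManager docstring
        # keywords: list of search terms
        mgr = SearchManager(name_db)
        hits = mgr.search(keywords, maxhits=50)
        # strategy rotates with the wall-clock hour: int(strftime("%H")) % len(_rerankers),
        # so DefaultTorrentReranker and SwapFirstTwoReranker alternate hour by hour
        reranker = getTorrentReranker()
        return reranker.rerank(hits, keywords, torrent_db, pref_db, mypref_db, search_db)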
+ """ + + def __init__(self,dbhandler): + self.dbhandler = dbhandler + self.keywordsearch = KeywordSearch() + + def search(self,kws,maxhits=None): + """ Called by any thread """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SearchManager: search",kws + + namerecs = self.dbhandler.searchNames(kws) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SearchManager: search: Got namerecs",len(namerecs),`namerecs` + + hits = self.keywordsearch.search(namerecs,kws) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SearchManager: search: Filtered namerecs",len(hits) + + if maxhits is None: + return hits + else: + return hits[:maxhits] + diff --git a/tribler-mod/Tribler/Core/Search/__init__.py b/tribler-mod/Tribler/Core/Search/__init__.py new file mode 100644 index 0000000..b7ef832 --- /dev/null +++ b/tribler-mod/Tribler/Core/Search/__init__.py @@ -0,0 +1,3 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information diff --git a/tribler-mod/Tribler/Core/Search/__init__.py.bak b/tribler-mod/Tribler/Core/Search/__init__.py.bak new file mode 100644 index 0000000..395f8fb --- /dev/null +++ b/tribler-mod/Tribler/Core/Search/__init__.py.bak @@ -0,0 +1,2 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information diff --git a/tribler-mod/Tribler/Core/Session.py b/tribler-mod/Tribler/Core/Session.py new file mode 100644 index 0000000..e90e018 --- /dev/null +++ b/tribler-mod/Tribler/Core/Session.py @@ -0,0 +1,894 @@ +from time import localtime, strftime + +# Written by Arno Bakker +# see LICENSE.txt for license information +""" A Session is a running instance of the Tribler Core and the Core's central class. 
""" + +import os +import sys +import copy +import binascii +from traceback import print_exc +from threading import RLock + +from Tribler.__init__ import LIBRARYNAME +from Tribler.Core.simpledefs import * +from Tribler.Core.defaults import sessdefaults +from Tribler.Core.Base import * +from Tribler.Core.SessionConfig import * +import Tribler.Core.Overlay.permid +from Tribler.Core.DownloadConfig import get_default_dest_dir +from Tribler.Core.Utilities.utilities import find_prog_in_PATH +from Tribler.Core.APIImplementation.SessionRuntimeConfig import SessionRuntimeConfig +from Tribler.Core.APIImplementation.LaunchManyCore import TriblerLaunchMany +from Tribler.Core.APIImplementation.UserCallbackHandler import UserCallbackHandler +from Tribler.Core.SocialNetwork.RemoteQueryMsgHandler import RemoteQueryMsgHandler +from Tribler.Core.SocialNetwork.RemoteTorrentHandler import RemoteTorrentHandler +from Tribler.Core.SocialNetwork.FriendshipMsgHandler import FriendshipMsgHandler +from Tribler.Core.NATFirewall.ConnectionCheck import ConnectionCheck +from Tribler.Core.NATFirewall.DialbackMsgHandler import DialbackMsgHandler + +import Tribler.Core.Overlay.permid as permidmod + +DEBUG = True #False + +def get_home_dir(): + try: + # when there are special unicode characters in the username, + # the following will fail on python 2.4, 2.5, 2.x this will + # always succeed on python 3.x + return os.path.expanduser(u"~") + except Exception, e: + unicode_error = e + + # non-unicode home + home = os.path.expanduser("~") + head, tail = os.path.split(home) + + dirs = os.listdir(head) + udirs = os.listdir(unicode(head)) + + # the character set may be different, but the string length is + # still the same + islen = lambda dir: len(dir) == len(tail) + dirs = filter(islen, dirs) + udirs = filter(islen, udirs) + if len(dirs) == 1 and len(udirs) == 1: + return os.path.join(head, udirs[0]) + + # remove all dirs that are equal in unicode and non-unicode. we + # know that we don't need these dirs because the initial + # expandusers would not have failed on them + for dir in dirs[:]: + if dir in udirs: + dirs.remove(dir) + udirs.remove(dir) + if len(dirs) == 1 and len(udirs) == 1: + return os.path.join(head, udirs[0]) + + # assume that the user has write access in her own + # directory. therefore we can filter out any non-writable + # directories + writable_udir = [udir for udir in udirs if os.access(udir, os.W_OK)] + if len(writable_udir) == 1: + return os.path.join(head, writable_udir[0]) + + # fallback: assume that the order of entries in dirs is the same + # as in udirs + for dir, udir in zip(dirs, udirs): + if dir == tail: + return os.path.join(head, udir) + + # failure + raise unicode_exception + +class Session(SessionRuntimeConfig): + """ + + A Session is a running instance of the Tribler Core and the Core's central + class. It implements the SessionConfigInterface which can be used to change + session parameters at runtime (for selected parameters). + + cf. libtorrent session + """ + __single = None + + + def __init__(self,scfg=None,ignore_singleton=False): + """ + A Session object is created which is configured following a copy of the + SessionStartupConfig scfg. (copy constructor used internally) + + @param scfg SessionStartupConfig object or None, in which case we + look for a saved session in the default location (state dir). If + we can't find it, we create a new SessionStartupConfig() object to + serve as startup config. Next, the config is saved in the directory + indicated by its 'state_dir' attribute. 
+ + In the current implementation only a single session instance can exist + at a time in a process. The ignore_singleton flag is used for testing. + """ + if not ignore_singleton: + if Session.__single: + raise RuntimeError, "Session is singleton" + Session.__single = self + + self.sesslock = RLock() + + # Determine startup config to use + if scfg is None: # If no override + try: + # Then try to read from default location + state_dir = Session.get_default_state_dir() + cfgfilename = Session.get_default_config_filename(state_dir) + scfg = SessionStartupConfig.load(cfgfilename) + except: + # If that fails, create a fresh config with factory defaults + print_exc() + scfg = SessionStartupConfig() + self.sessconfig = scfg.sessconfig + else: # overrides any saved config + # Work from copy + self.sessconfig = copy.copy(scfg.sessconfig) + + # Create dir for session state, if not exist + state_dir = self.sessconfig['state_dir'] + if state_dir is None: + state_dir = Session.get_default_state_dir() + self.sessconfig['state_dir'] = state_dir + + if not os.path.isdir(state_dir): + os.makedirs(state_dir) + + collected_torrent_dir = self.sessconfig['torrent_collecting_dir'] + if not collected_torrent_dir: + collected_torrent_dir = os.path.join(self.sessconfig['state_dir'], STATEDIR_TORRENTCOLL_DIR) + self.sessconfig['torrent_collecting_dir'] = collected_torrent_dir + + if not os.path.exists(collected_torrent_dir): + os.makedirs(collected_torrent_dir) + + if not self.sessconfig['peer_icon_path']: + self.sessconfig['peer_icon_path'] = os.path.join(self.sessconfig['state_dir'], STATEDIR_PEERICON_DIR) + + # PERHAPS: load default TorrentDef and DownloadStartupConfig from state dir + # Let user handle that, he's got default_state_dir, etc. + + # Core init + permidmod.init() + + #print 'Session: __init__ config is', self.sessconfig + + # + # Set params that depend on state_dir + # + # 1. keypair + # + pairfilename = os.path.join(self.sessconfig['state_dir'],'ec.pem') + if self.sessconfig['eckeypairfilename'] is None: + self.sessconfig['eckeypairfilename'] = pairfilename + + if os.access(self.sessconfig['eckeypairfilename'],os.F_OK): + # May throw exceptions + self.keypair = permidmod.read_keypair(self.sessconfig['eckeypairfilename']) + else: + self.keypair = permidmod.generate_keypair() + + # Save keypair + pubfilename = os.path.join(self.sessconfig['state_dir'],'ecpub.pem') + permidmod.save_keypair(self.keypair,pairfilename) + permidmod.save_pub_key(self.keypair,pubfilename) + + + # 2. Downloads persistent state dir + dlpstatedir = os.path.join(self.sessconfig['state_dir'],STATEDIR_DLPSTATE_DIR) + if not os.path.isdir(dlpstatedir): + os.mkdir(dlpstatedir) + + # 3. tracker + trackerdir = self.get_internal_tracker_dir() + if not os.path.isdir(trackerdir): + os.mkdir(trackerdir) + + if self.sessconfig['tracker_dfile'] is None: + self.sessconfig['tracker_dfile'] = os.path.join(trackerdir,'tracker.db') + + if self.sessconfig['tracker_allowed_dir'] is None: + self.sessconfig['tracker_allowed_dir'] = trackerdir + + if self.sessconfig['tracker_logfile'] is None: + if sys.platform == "win32": + # Not "Nul:" but "nul" is /dev/null on Win32 + sink = 'nul' + else: + sink = '/dev/null' + self.sessconfig['tracker_logfile'] = sink + + # 4. 
superpeer.txt and crawler.txt + if self.sessconfig['superpeer_file'] is None: + self.sessconfig['superpeer_file'] = os.path.join(self.sessconfig['install_dir'],LIBRARYNAME,'Core','superpeer.txt') + if 'crawler_file' not in self.sessconfig or self.sessconfig['crawler_file'] is None: + self.sessconfig['crawler_file'] = os.path.join(self.sessconfig['install_dir'], LIBRARYNAME,'Core','Statistics','crawler.txt') + + # 5. download_help_dir + if self.sessconfig['overlay'] and self.sessconfig['download_help']: + if self.sessconfig['download_help_dir'] is None: + self.sessconfig['download_help_dir'] = os.path.join(get_default_dest_dir(),DESTDIR_COOPDOWNLOAD) + # Jelle: under linux, default_dest_dir can be /tmp. Then download_help_dir can be deleted inbetween + # sessions. + if not os.path.isdir(self.sessconfig['download_help_dir']): + os.makedirs(self.sessconfig['download_help_dir']) + + # 6. peer_icon_path + if self.sessconfig['peer_icon_path'] is None: + self.sessconfig['peer_icon_path'] = os.path.join(self.sessconfig['state_dir'],STATEDIR_PEERICON_DIR) + if not os.path.isdir(self.sessconfig['peer_icon_path']): + os.mkdir(self.sessconfig['peer_icon_path']) + + # 7. Poor man's versioning of SessionConfig, add missing + # default values. Really should use PERSISTENTSTATE_CURRENTVERSION + # and do conversions. + for key,defvalue in sessdefaults.iteritems(): + if key not in self.sessconfig: + self.sessconfig[key] = defvalue + + if not 'live_aux_seeders' in self.sessconfig: + # Poor man's versioning, really should update PERSISTENTSTATE_CURRENTVERSION + self.sessconfig['live_aux_seeders'] = sessdefaults['live_aux_seeders'] + + if not 'nat_detect' in self.sessconfig: + self.sessconfig['nat_detect'] = sessdefaults['nat_detect'] + if not 'puncturing_internal_port' in self.sessconfig: + self.sessconfig['puncturing_internal_port'] = sessdefaults['puncturing_internal_port'] + if not 'stun_servers' in self.sessconfig: + self.sessconfig['stun_servers'] = sessdefaults['stun_servers'] + if not 'pingback_servers' in self.sessconfig: + self.sessconfig['pingback_servers'] = sessdefaults['pingback_servers'] + if not 'mainline_dht' in self.sessconfig: + self.sessconfig['mainline_dht'] = sessdefaults['mainline_dht'] + + # Checkpoint startup config + self.save_pstate_sessconfig() + + # Create handler for calling back the user via separate threads + self.uch = UserCallbackHandler(self) + + # Create engine with network thread + self.lm = TriblerLaunchMany() + self.lm.register(self,self.sesslock) + self.lm.start() + + + # + # Class methods + # + def get_instance(*args, **kw): + """ Returns the Session singleton if it exists or otherwise + creates it first, in which case you need to pass the constructor + params. + @return Session.""" + if Session.__single is None: + Session(*args, **kw) + return Session.__single + get_instance = staticmethod(get_instance) + + def get_default_state_dir(homedirpostfix='.Tribler'): + """ Returns the factory default directory for storing session state + on the current platform (Win32,Mac,Unix). + @return An absolute path name. """ + + # Allow override + statedirvar = '${TSTATEDIR}' + statedir = os.path.expandvars(statedirvar) + if statedir and statedir != statedirvar: + return statedir + + # Boudewijn: retrieving the homedir fails with python 2.x on + # windows when the username contains specific unicode + # characters. using the get_home_dir() function patches this + # problem. 
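A small sketch (editorial aside, not part of the patch) of the ${TSTATEDIR} override that get_default_state_dir() checks before falling back to the per-platform home-directory logic above; the path is made up.

    import os
    from Tribler.Core.Session import Session

    os.environ['TSTATEDIR'] = '/srv/tribler-state'   # hypothetical location
    print Session.get_default_state_dir()            # -> '/srv/tribler-state'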
+ # + homedir = get_home_dir() + + if sys.platform == "win32": + # 5 = XP, 6 = Vista + if sys.getwindowsversion()[0] == 6: + appdir = os.path.join(homedir,u"AppData",u"Roaming") + else: + appdir = os.path.join(homedir,u"Application Data") + else: + appdir = homedir + + statedir = os.path.join(appdir, homedirpostfix) + return statedir + + get_default_state_dir = staticmethod(get_default_state_dir) + + + # + # Public methods + # + def start_download(self,tdef,dcfg=None): + """ + Creates a Download object and adds it to the session. The passed + TorrentDef and DownloadStartupConfig are copied into the new Download + object. The Download is then started and checkpointed. + + If a checkpointed version of the Download is found, that is restarted + overriding the saved DownloadStartupConfig is "dcfg" is not None. + + @param tdef A finalized TorrentDef + @param dcfg DownloadStartupConfig or None, in which case + a new DownloadStartupConfig() is created with its default settings + and the result becomes the runtime config of this Download. + @return Download + """ + # locking by lm + return self.lm.add(tdef,dcfg) + + def resume_download_from_file(self,filename): + """ + Recreates Download from resume file + + @return a Download object. + + Note: this cannot be made into a method of Download, as the Download + needs to be bound to a session, it cannot exist independently. + """ + raise NotYetImplementedException() + + def get_downloads(self): + """ + Returns a copy of the list of Downloads. + @return A list of Download objects. + """ + # locking by lm + return self.lm.get_downloads() + + + def remove_download(self,d,removecontent=False): + """ + Stops the download and removes it from the session. + @param d The Download to remove + @param removecontent Whether to delete the already downloaded content + from disk. + """ + # locking by lm + self.lm.remove(d,removecontent=removecontent) + + + def set_download_states_callback(self,usercallback,getpeerlist=False): + """ + See Download.set_state_callback. Calls usercallback with a list of + DownloadStates, one for each Download in the Session as first argument. + The usercallback must return a tuple (when,getpeerlist) that indicates + when to reinvoke the callback again (as a number of seconds from now, + or < 0.0 if not at all) and whether to also include the details of + the connected peers in the DownloadStates on that next call. + + The callback will be called by a popup thread which can be used + indefinitely (within reason) by the higher level code. + + @param usercallback A function adhering to the above spec. + """ + self.lm.set_download_states_callback(usercallback,getpeerlist) + + + # + # Config parameters that only exist at runtime + # + def get_permid(self): + """ Returns the PermID of the Session, as determined by the + SessionConfig.set_permid() parameter. A PermID is a public key + @return The PermID encoded in a string in DER format. """ + self.sesslock.acquire() + try: + return str(self.keypair.pub().get_der()) + finally: + self.sesslock.release() + + def get_external_ip(self): + """ Returns the external IP address of this Session, i.e., by which + it is reachable from the Internet. This address is determined via + various mechanisms such as the UPnP protocol, our dialback mechanism, + and an inspection of the local network configuration. + @return A string. 
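A brief sketch (editorial aside, not part of the patch) of the download and runtime-only calls documented above, reusing the hypothetical `session` object from the construction sketch earlier; `tdef` is assumed to be a finalized TorrentDef obtained elsewhere.

    import binascii

    # tdef = TorrentDef.load('example.torrent')   # hypothetical; TorrentDef lives elsewhere in the Core
    d = session.start_download(tdef)               # copies tdef/dcfg, starts and checkpoints the Download

    def states_callback(dslist):
        print "tracking", len(dslist), "downloads"   # one DownloadState per Download
        return (1.0, False)                          # poll again in 1 s, without per-peer details

    session.set_download_states_callback(states_callback)

    print "permid:", binascii.hexlify(session.get_permid())[:16]
    print "external IP:", session.get_external_ip()
    print "reachable:", session.get_externally_reachable()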
""" + # locking done by lm + return self.lm.get_ext_ip() + + + def get_externally_reachable(self): + """ Returns whether the Session is externally reachable, i.e., its + listen port is not firewalled. Use add_observer() with NTFY_REACHABLE + to register to the event of detecting reachablility. Note that due to + the use of UPnP a Session may become reachable some time after + startup and due to the Dialback mechanism, this method may return + False while the Session is actually already reachable. Note that True + doesn't mean the Session is reachable from the open Internet, could just + be from the local (otherwise firewalled) LAN. + @return A boolean. """ + return DialbackMsgHandler.getInstance().isConnectable() + + + def get_current_startup_config_copy(self): + """ Returns a SessionStartupConfig that is a copy of the current runtime + SessionConfig. + @return SessionStartupConfig + """ + # Called by any thread + self.sesslock.acquire() + try: + sessconfig = copy.copy(self.sessconfig) + return SessionStartupConfig(sessconfig=sessconfig) + finally: + self.sesslock.release() + + # + # Internal tracker + # + def get_internal_tracker_url(self): + """ Returns the announce URL for the internal tracker. + @return URL """ + # Called by any thread + self.sesslock.acquire() + try: + url = None + if 'tracker_url' in self.sessconfig: + url = self.sessconfig['tracker_url'] # user defined override, e.g. specific hostname + if url is None: + ip = self.lm.get_ext_ip() + port = self.get_listen_port() + url = 'http://'+ip+':'+str(port)+'/announce/' + return url + finally: + self.sesslock.release() + + + def get_internal_tracker_dir(self): + """ Returns the directory containing the torrents tracked by the internal + tracker (and associated databases). + @return An absolute path. """ + # Called by any thread + self.sesslock.acquire() + try: + if self.sessconfig['state_dir'] is None: + return None + else: + return os.path.join(self.sessconfig['state_dir'],STATEDIR_ITRACKER_DIR) + finally: + self.sesslock.release() + + + def add_to_internal_tracker(self,tdef): + """ Add a torrent def to the list of torrents tracked by the internal + tracker. Use this method to use the Session as a standalone tracker. + @param tdef A finalized TorrentDef. + """ + # Called by any thread + self.sesslock.acquire() + try: + infohash = tdef.get_infohash() + filename = self.get_internal_tracker_torrentfilename(infohash) + tdef.save(filename) + # Bring to attention of Tracker thread + self.lm.tracker_rescan_dir() + finally: + self.sesslock.release() + + def remove_from_internal_tracker(self,tdef): + """ Remove a torrent def from the list of torrents tracked by the + internal tracker. Use this method to use the Session as a standalone + tracker. + @param tdef A finalized TorrentDef. + """ + infohash = tdef.get_infohash() + self.remove_from_internal_tracker_by_infohash(infohash) + + def remove_from_internal_tracker_by_infohash(self,infohash): + """ Remove a torrent def from the list of torrents tracked by the + internal tracker. Use this method to use the Session as a standalone + tracker. + @param infohash Identifier of the torrent def to remove. 
+ """ + # Called by any thread + self.sesslock.acquire() + try: + filename = self.get_internal_tracker_torrentfilename(infohash) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Session: removing itracker entry",filename + if os.access(filename,os.F_OK): + os.remove(filename) + # Bring to attention of Tracker thread + self.lm.tracker_rescan_dir() + finally: + self.sesslock.release() + + # + # Notification of events in the Session + # + def add_observer(self, func, subject, changeTypes = [NTFY_UPDATE, NTFY_INSERT, NTFY_DELETE], objectID = None): + """ Add an observer function function to the Session. The observer + function will be called when one of the specified events (changeTypes) + occurs on the specified subject. + + The function will be called by a popup thread which can be used + indefinitely (within reason) by the higher level code. + + @param func The observer function. It should accept as its first argument + the subject, as second argument the changeType, as third argument an + objectID (e.g. the primary key in the observed database) and an + optional list of arguments. + @param subject The subject to observe, one of NTFY_* subjects (see + simpledefs). + @param changeTypes The list of events to be notified of one of NTFY_* + events. + @param objectID The specific object in the subject to monitor (e.g. a + specific primary key in a database to monitor for updates.) + + + TODO: Jelle will add per-subject/event description here ;o) + + """ + #Called by any thread + self.uch.notifier.add_observer(func, subject, changeTypes, objectID) # already threadsafe + + def remove_observer(self, func): + """ Remove observer function. No more callbacks will be made. + @param func The observer function to remove. """ + #Called by any thread + self.uch.notifier.remove_observer(func) # already threadsafe + + def open_dbhandler(self,subject): + """ Opens a connection to the specified database. Only the thread + calling this method may use this connection. The connection must be + closed with close_dbhandler() when this thread exits. + + @param subject The database to open. Must be one of the subjects + specified here. + @return A reference to a DBHandler class for the specified subject or + None when the Session was not started with megacaches enabled. +
+        NTFY_PEERS -> PeerDBHandler
+        NTFY_TORRENTS -> TorrentDBHandler
+        NTFY_PREFERENCES -> PreferenceDBHandler
+        NTFY_SUPERPEERS -> SuperpeerDBHandler
+        NTFY_FRIENDS -> FriendsDBHandler
+        NTFY_MYPREFERENCES -> MyPreferenceDBHandler
+        NTFY_BARTERCAST -> BartercastDBHandler
+        NTFY_SEARCH -> SearchDBHandler
+        NTFY_TERM -> TermDBHandler
+        NTFY_MODERATIONCAST -> ModerationCastDBHandler
+        NTFY_VOTECAST -> VotecastDBHandler
+        
+ """ + # Called by any thread + self.sesslock.acquire() + try: + if subject == NTFY_PEERS: + return self.lm.peer_db + elif subject == NTFY_TORRENTS: + return self.lm.torrent_db + elif subject == NTFY_PREFERENCES: + return self.lm.pref_db + elif subject == NTFY_SUPERPEERS: + return self.lm.superpeer_db + elif subject == NTFY_FRIENDS: + return self.lm.friend_db + elif subject == NTFY_MYPREFERENCES: + return self.lm.mypref_db + elif subject == NTFY_BARTERCAST: + return self.lm.bartercast_db + elif subject == NTFY_SEEDINGSTATS: + return self.lm.seedingstats_db + elif subject == NTFY_SEEDINGSTATSSETTINGS: + return self.lm.seedingstatssettings_db + elif subject == NTFY_MODERATIONCAST: + return self.lm.modcast_db + elif subject == NTFY_VOTECAST: + return self.lm.votecast_db + elif subject == NTFY_SEARCH: + return self.lm.search_db + elif subject == NTFY_TERM: + return self.lm.term_db + else: + raise ValueError('Cannot open DB subject: '+subject) + finally: + self.sesslock.release() + + + def close_dbhandler(self,dbhandler): + """ Closes the given database connection """ + dbhandler.close() + + + # + # Access control + # + def set_overlay_request_policy(self, reqpol): + """ + Set a function which defines which overlay requests (e.g. dl_helper, rquery msg) + will be answered or will be denied. + + The function will be called by a network thread and must return + as soon as possible to prevent performance problems. + + @param reqpol is a Tribler.Core.RequestPolicy.AbstractRequestPolicy + object. + """ + # Called by any thread + # to protect self.sessconfig + self.sesslock.acquire() + try: + overlay_loaded = self.sessconfig['overlay'] + finally: + self.sesslock.release() + if overlay_loaded: + self.lm.overlay_apps.setRequestPolicy(reqpol) # already threadsafe + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Session: overlay is disabled, so no overlay request policy needed" + + + # + # Persistence and shutdown + # + def load_checkpoint(self,initialdlstatus=None): + """ Restart Downloads from checkpoint, if any. + + This method allows the API user to manage restoring downloads. + E.g. a video player that wants to start the torrent the user clicked + on first, and only then restart any sleeping torrents (e.g. seeding). + The optional initialdlstatus parameter can be set to DLSTATUS_STOPPED + to restore all the Downloads in DLSTATUS_STOPPED state. + """ + self.lm.load_checkpoint(initialdlstatus) + + + def checkpoint(self): + """ Saves the internal session state to the Session's state dir. """ + #Called by any thread + self.checkpoint_shutdown(stop=False,checkpoint=True,gracetime=None,hacksessconfcheckpoint=False) + + def shutdown(self,checkpoint=True,gracetime=2.0,hacksessconfcheckpoint=True): + """ Checkpoints the session and closes it, stopping the download engine. + @param checkpoint Whether to checkpoint the Session state on shutdown. + @param gracetime Time to allow for graceful shutdown + signoff (seconds). + """ + # Called by any thread + self.lm.early_shutdown() + self.checkpoint_shutdown(stop=True,checkpoint=checkpoint,gracetime=gracetime,hacksessconfcheckpoint=hacksessconfcheckpoint) + self.uch.shutdown() + + def has_shutdown(self): + """ Whether the Session has completely shutdown, i.e., its internal + threads are finished and it is safe to quit the process the Session + is running in. + @return A Boolean. 
+ """ + return self.lm.sessdoneflag.isSet() + + def get_downloads_pstate_dir(self): + """ Returns the directory in which to checkpoint the Downloads in this + Session. """ + # Called by network thread + self.sesslock.acquire() + try: + return os.path.join(self.sessconfig['state_dir'],STATEDIR_DLPSTATE_DIR) + finally: + self.sesslock.release() + + # + # Tribler Core special features + # + def query_connected_peers(self,query,usercallback,max_peers_to_query=None): + """ Ask all Tribler peers we're currently connected to resolve the + specified query and return the hits. For each peer that returns + hits the usercallback method is called with first parameter the + permid of the peer, as second parameter the query string and + as third parameter a dictionary of hits. The number of times the + usercallback method will be called is undefined. + + The callback will be called by a popup thread which can be used + indefinitely (within reason) by the higher level code. + + At the moment we support one type of query, which is a query for + torrent files that match a set of keywords. The format of the + query string is "SIMPLE kw1 kw2 kw3". In the future we plan + to support full SQL queries. + + For SIMPLE queries the dictionary of hits consists of + (infohash,torrentrecord) pairs. The torrentrecord is a + dictionary that contains the following keys: +
+        * 'content_name': The 'name' field of the torrent.
+        * 'length': The total size of the content in the torrent.
+        * 'leecher': The currently known number of downloaders.
+        * 'seeder': The currently known number of seeders.
+        * 'category': A list of category strings the torrent was classified into
+          by the remote peer.
+        
+ + From Session API version 1.0.2 the following keys were added + to the torrentrecord: +
+        * 'torrent_size': The size of the .torrent file.
+        
+ + @param query A Unicode query string adhering to the above spec. + @param usercallback A function adhering to the above spec. + """ + self.sesslock.acquire() + try: + if self.sessconfig['overlay']: + if not query.startswith('SIMPLE '): + raise ValueError('Query does not start with SIMPLE') + + rqmh = RemoteQueryMsgHandler.getInstance() + rqmh.send_query(query,usercallback,max_peers_to_query=max_peers_to_query) + else: + raise OperationNotEnabledByConfigurationException("Overlay not enabled") + finally: + self.sesslock.release() + + + def download_torrentfile_from_peer(self,permid,infohash,usercallback): + """ Ask the designated peer to send us the torrentfile for the torrent + identified by the passed infohash. If the torrent is succesfully + received, the usercallback method is called with the infohash as first + and the contents of the torrentfile (bencoded dict) as second parameter. + If the torrent could not be obtained, the callback is not called. + The torrent will have been added to the TorrentDBHandler (if enabled) + at the time of the call. + + @param permid The PermID of the peer to query. + @param infohash The infohash of the torrent. + @param usercallback A function adhering to the above spec. + """ + self.sesslock.acquire() + try: + if self.sessconfig['overlay']: + rtorrent_handler = RemoteTorrentHandler.getInstance() + rtorrent_handler.download_torrent(permid,infohash,usercallback) + else: + raise OperationNotEnabledByConfigurationException("Overlay not enabled") + finally: + self.sesslock.release() + + + # + # Internal persistence methods + # + def checkpoint_shutdown(self,stop,checkpoint,gracetime,hacksessconfcheckpoint): + """ Checkpoints the Session and optionally shuts down the Session. + @param stop Whether to shutdown the Session as well. + @param checkpoint Whether to checkpoint at all, or just to stop. + @param gracetime Time to allow for graceful shutdown + signoff (seconds). + """ + # Called by any thread + self.sesslock.acquire() + try: + # Arno: Make checkpoint optional on shutdown. At the moment setting + # the config at runtime is not possible (see SessionRuntimeConfig) + # so this has little use, and interferes with our way of + # changing the startup config, which is to write a new + # config to disk that will be read at start up. + if hacksessconfcheckpoint: + try: + self.save_pstate_sessconfig() + except Exception,e: + self.lm.rawserver_nonfatalerrorfunc(e) + + # Checkpoint all Downloads and stop NetworkThread + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Session: checkpoint_shutdown" + self.lm.checkpoint(stop=stop,checkpoint=checkpoint,gracetime=gracetime) + finally: + self.sesslock.release() + + def save_pstate_sessconfig(self): + """ Save the runtime SessionConfig to disk """ + # Called by any thread + sscfg = self.get_current_startup_config_copy() + cfgfilename = Session.get_default_config_filename(sscfg.get_state_dir()) + sscfg.save(cfgfilename) + + + def get_default_config_filename(state_dir): + """ Return the name of the file where a session config is saved by default. + @return A filename + """ + return os.path.join(state_dir,STATEDIR_SESSCONFIG) + get_default_config_filename = staticmethod(get_default_config_filename) + + + def get_internal_tracker_torrentfilename(self,infohash): + """ Return the absolute pathname of the torrent file used by the + internal tracker. 
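A minimal sketch (editorial aside, not part of the patch) of a SIMPLE remote keyword query as specified above; the callback signature and hit-record keys follow the docstring, while the keywords and peer limit are arbitrary.

    import binascii

    def on_remote_hits(permid, query, hits):
        for infohash, rec in hits.iteritems():
            print binascii.hexlify(infohash), rec['content_name'], rec['seeder'], rec['leecher']

    session.query_connected_peers(u"SIMPLE vodo documentary", on_remote_hits, max_peers_to_query=20)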
+ @return A filename + """ + trackerdir = self.get_internal_tracker_dir() + basename = binascii.hexlify(infohash)+'.torrent' # ignore .tribe stuff, not vital + return os.path.join(trackerdir,basename) + + def get_nat_type(self, callback=None): + """ Return the type of Network Address Translator (NAT) detected. + + When a callback parameter is supplied it will always be + called. When the NAT-type is already known the callback will + be made instantly. Otherwise, the callback will be made when + the NAT discovery has finished. + + The callback will be called by a popup thread which can be used + indefinitely (within reason) by the higher level code. + + Return values: + "Blocked" + "Open Internet" + "Restricted Cone Firewall" + "Port Restricted Cone Firewall" + "Full Cone NAT" + "Restricted Cone NAT" + "Port Restricted Cone NAT" + "Symmetric NAT" + "Unknown NAT/Firewall" + + @param callback Optional callback used to notify the NAT type + @return String + """ + # TODO: define constants in simpledefs for these + # Called by any thread + self.sesslock.acquire() + try: + return ConnectionCheck.getInstance(self).get_nat_type(callback=callback) + finally: + self.sesslock.release() + + # + # Friendship functions + # + def send_friendship_message(self,permid,mtype,approved=None): + """ Send friendship msg to the specified peer + + F_REQUEST_MSG: + + F_RESPONSE_MSG: + @param approved Whether you want him as friend or not. + + """ + self.sesslock.acquire() + try: + if self.sessconfig['overlay']: + if mtype == F_FORWARD_MSG: + raise ValueError("User cannot send FORWARD messages directly") + + fmh = FriendshipMsgHandler.getInstance() + params = {} + if approved is not None: + params['response'] = int(approved) + fmh.anythread_send_friendship_msg(permid,mtype,params) + else: + raise OperationNotEnabledByConfigurationException("Overlay not enabled") + finally: + self.sesslock.release() + + + def set_friendship_callback(self,usercallback): + """ When a new friendship request is received the given + callback function is called with as first parameter the + requester's permid and as second parameter a dictionary of + request arguments: + callback(requester_permid,params) + + The callback is called by a popup thread which can be used + indefinitely (within reason) by the higher level code. + + @param usercallback A callback function adhering to the above spec. + """ + self.sesslock.acquire() + try: + if self.sessconfig['overlay']: + fmh = FriendshipMsgHandler.getInstance() + fmh.register_usercallback(usercallback) + else: + raise OperationNotEnabledByConfigurationException("Overlay not enabled") + finally: + self.sesslock.release() + diff --git a/tribler-mod/Tribler/Core/Session.py.bak b/tribler-mod/Tribler/Core/Session.py.bak new file mode 100644 index 0000000..c99e6de --- /dev/null +++ b/tribler-mod/Tribler/Core/Session.py.bak @@ -0,0 +1,893 @@ + +# Written by Arno Bakker +# see LICENSE.txt for license information +""" A Session is a running instance of the Tribler Core and the Core's central class. 
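A last sketch (editorial aside, not part of the patch; the .bak copy of Session.py continues below) of the NAT-detection and friendship calls documented above, reusing the hypothetical `session`. F_REQUEST_MSG is assumed to come from the same simpledefs star-import, and `friend_permid` is a made-up PermID.

    from Tribler.Core.simpledefs import F_REQUEST_MSG    # assumed location of the F_* constants

    def on_nat_type(nat_type):
        print "NAT type:", nat_type                      # one of the strings listed in the docstring

    print session.get_nat_type(callback=on_nat_type)     # callback fires once discovery completes

    friend_permid = '...'   # hypothetical PermID (DER-encoded public key) of a known peer
    session.send_friendship_message(friend_permid, F_REQUEST_MSG)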
""" + +import os +import sys +import copy +import binascii +from traceback import print_exc +from threading import RLock + +from Tribler.__init__ import LIBRARYNAME +from Tribler.Core.simpledefs import * +from Tribler.Core.defaults import sessdefaults +from Tribler.Core.Base import * +from Tribler.Core.SessionConfig import * +import Tribler.Core.Overlay.permid +from Tribler.Core.DownloadConfig import get_default_dest_dir +from Tribler.Core.Utilities.utilities import find_prog_in_PATH +from Tribler.Core.APIImplementation.SessionRuntimeConfig import SessionRuntimeConfig +from Tribler.Core.APIImplementation.LaunchManyCore import TriblerLaunchMany +from Tribler.Core.APIImplementation.UserCallbackHandler import UserCallbackHandler +from Tribler.Core.SocialNetwork.RemoteQueryMsgHandler import RemoteQueryMsgHandler +from Tribler.Core.SocialNetwork.RemoteTorrentHandler import RemoteTorrentHandler +from Tribler.Core.SocialNetwork.FriendshipMsgHandler import FriendshipMsgHandler +from Tribler.Core.NATFirewall.ConnectionCheck import ConnectionCheck +from Tribler.Core.NATFirewall.DialbackMsgHandler import DialbackMsgHandler + +import Tribler.Core.Overlay.permid as permidmod + +DEBUG = True #False + +def get_home_dir(): + try: + # when there are special unicode characters in the username, + # the following will fail on python 2.4, 2.5, 2.x this will + # always succeed on python 3.x + return os.path.expanduser(u"~") + except Exception, e: + unicode_error = e + + # non-unicode home + home = os.path.expanduser("~") + head, tail = os.path.split(home) + + dirs = os.listdir(head) + udirs = os.listdir(unicode(head)) + + # the character set may be different, but the string length is + # still the same + islen = lambda dir: len(dir) == len(tail) + dirs = filter(islen, dirs) + udirs = filter(islen, udirs) + if len(dirs) == 1 and len(udirs) == 1: + return os.path.join(head, udirs[0]) + + # remove all dirs that are equal in unicode and non-unicode. we + # know that we don't need these dirs because the initial + # expandusers would not have failed on them + for dir in dirs[:]: + if dir in udirs: + dirs.remove(dir) + udirs.remove(dir) + if len(dirs) == 1 and len(udirs) == 1: + return os.path.join(head, udirs[0]) + + # assume that the user has write access in her own + # directory. therefore we can filter out any non-writable + # directories + writable_udir = [udir for udir in udirs if os.access(udir, os.W_OK)] + if len(writable_udir) == 1: + return os.path.join(head, writable_udir[0]) + + # fallback: assume that the order of entries in dirs is the same + # as in udirs + for dir, udir in zip(dirs, udirs): + if dir == tail: + return os.path.join(head, udir) + + # failure + raise unicode_exception + +class Session(SessionRuntimeConfig): + """ + + A Session is a running instance of the Tribler Core and the Core's central + class. It implements the SessionConfigInterface which can be used to change + session parameters at runtime (for selected parameters). + + cf. libtorrent session + """ + __single = None + + + def __init__(self,scfg=None,ignore_singleton=False): + """ + A Session object is created which is configured following a copy of the + SessionStartupConfig scfg. (copy constructor used internally) + + @param scfg SessionStartupConfig object or None, in which case we + look for a saved session in the default location (state dir). If + we can't find it, we create a new SessionStartupConfig() object to + serve as startup config. Next, the config is saved in the directory + indicated by its 'state_dir' attribute. 
+ + In the current implementation only a single session instance can exist + at a time in a process. The ignore_singleton flag is used for testing. + """ + if not ignore_singleton: + if Session.__single: + raise RuntimeError, "Session is singleton" + Session.__single = self + + self.sesslock = RLock() + + # Determine startup config to use + if scfg is None: # If no override + try: + # Then try to read from default location + state_dir = Session.get_default_state_dir() + cfgfilename = Session.get_default_config_filename(state_dir) + scfg = SessionStartupConfig.load(cfgfilename) + except: + # If that fails, create a fresh config with factory defaults + print_exc() + scfg = SessionStartupConfig() + self.sessconfig = scfg.sessconfig + else: # overrides any saved config + # Work from copy + self.sessconfig = copy.copy(scfg.sessconfig) + + # Create dir for session state, if not exist + state_dir = self.sessconfig['state_dir'] + if state_dir is None: + state_dir = Session.get_default_state_dir() + self.sessconfig['state_dir'] = state_dir + + if not os.path.isdir(state_dir): + os.makedirs(state_dir) + + collected_torrent_dir = self.sessconfig['torrent_collecting_dir'] + if not collected_torrent_dir: + collected_torrent_dir = os.path.join(self.sessconfig['state_dir'], STATEDIR_TORRENTCOLL_DIR) + self.sessconfig['torrent_collecting_dir'] = collected_torrent_dir + + if not os.path.exists(collected_torrent_dir): + os.makedirs(collected_torrent_dir) + + if not self.sessconfig['peer_icon_path']: + self.sessconfig['peer_icon_path'] = os.path.join(self.sessconfig['state_dir'], STATEDIR_PEERICON_DIR) + + # PERHAPS: load default TorrentDef and DownloadStartupConfig from state dir + # Let user handle that, he's got default_state_dir, etc. + + # Core init + permidmod.init() + + #print 'Session: __init__ config is', self.sessconfig + + # + # Set params that depend on state_dir + # + # 1. keypair + # + pairfilename = os.path.join(self.sessconfig['state_dir'],'ec.pem') + if self.sessconfig['eckeypairfilename'] is None: + self.sessconfig['eckeypairfilename'] = pairfilename + + if os.access(self.sessconfig['eckeypairfilename'],os.F_OK): + # May throw exceptions + self.keypair = permidmod.read_keypair(self.sessconfig['eckeypairfilename']) + else: + self.keypair = permidmod.generate_keypair() + + # Save keypair + pubfilename = os.path.join(self.sessconfig['state_dir'],'ecpub.pem') + permidmod.save_keypair(self.keypair,pairfilename) + permidmod.save_pub_key(self.keypair,pubfilename) + + + # 2. Downloads persistent state dir + dlpstatedir = os.path.join(self.sessconfig['state_dir'],STATEDIR_DLPSTATE_DIR) + if not os.path.isdir(dlpstatedir): + os.mkdir(dlpstatedir) + + # 3. tracker + trackerdir = self.get_internal_tracker_dir() + if not os.path.isdir(trackerdir): + os.mkdir(trackerdir) + + if self.sessconfig['tracker_dfile'] is None: + self.sessconfig['tracker_dfile'] = os.path.join(trackerdir,'tracker.db') + + if self.sessconfig['tracker_allowed_dir'] is None: + self.sessconfig['tracker_allowed_dir'] = trackerdir + + if self.sessconfig['tracker_logfile'] is None: + if sys.platform == "win32": + # Not "Nul:" but "nul" is /dev/null on Win32 + sink = 'nul' + else: + sink = '/dev/null' + self.sessconfig['tracker_logfile'] = sink + + # 4. 
superpeer.txt and crawler.txt + if self.sessconfig['superpeer_file'] is None: + self.sessconfig['superpeer_file'] = os.path.join(self.sessconfig['install_dir'],LIBRARYNAME,'Core','superpeer.txt') + if 'crawler_file' not in self.sessconfig or self.sessconfig['crawler_file'] is None: + self.sessconfig['crawler_file'] = os.path.join(self.sessconfig['install_dir'], LIBRARYNAME,'Core','Statistics','crawler.txt') + + # 5. download_help_dir + if self.sessconfig['overlay'] and self.sessconfig['download_help']: + if self.sessconfig['download_help_dir'] is None: + self.sessconfig['download_help_dir'] = os.path.join(get_default_dest_dir(),DESTDIR_COOPDOWNLOAD) + # Jelle: under linux, default_dest_dir can be /tmp. Then download_help_dir can be deleted inbetween + # sessions. + if not os.path.isdir(self.sessconfig['download_help_dir']): + os.makedirs(self.sessconfig['download_help_dir']) + + # 6. peer_icon_path + if self.sessconfig['peer_icon_path'] is None: + self.sessconfig['peer_icon_path'] = os.path.join(self.sessconfig['state_dir'],STATEDIR_PEERICON_DIR) + if not os.path.isdir(self.sessconfig['peer_icon_path']): + os.mkdir(self.sessconfig['peer_icon_path']) + + # 7. Poor man's versioning of SessionConfig, add missing + # default values. Really should use PERSISTENTSTATE_CURRENTVERSION + # and do conversions. + for key,defvalue in sessdefaults.iteritems(): + if key not in self.sessconfig: + self.sessconfig[key] = defvalue + + if not 'live_aux_seeders' in self.sessconfig: + # Poor man's versioning, really should update PERSISTENTSTATE_CURRENTVERSION + self.sessconfig['live_aux_seeders'] = sessdefaults['live_aux_seeders'] + + if not 'nat_detect' in self.sessconfig: + self.sessconfig['nat_detect'] = sessdefaults['nat_detect'] + if not 'puncturing_internal_port' in self.sessconfig: + self.sessconfig['puncturing_internal_port'] = sessdefaults['puncturing_internal_port'] + if not 'stun_servers' in self.sessconfig: + self.sessconfig['stun_servers'] = sessdefaults['stun_servers'] + if not 'pingback_servers' in self.sessconfig: + self.sessconfig['pingback_servers'] = sessdefaults['pingback_servers'] + if not 'mainline_dht' in self.sessconfig: + self.sessconfig['mainline_dht'] = sessdefaults['mainline_dht'] + + # Checkpoint startup config + self.save_pstate_sessconfig() + + # Create handler for calling back the user via separate threads + self.uch = UserCallbackHandler(self) + + # Create engine with network thread + self.lm = TriblerLaunchMany() + self.lm.register(self,self.sesslock) + self.lm.start() + + + # + # Class methods + # + def get_instance(*args, **kw): + """ Returns the Session singleton if it exists or otherwise + creates it first, in which case you need to pass the constructor + params. + @return Session.""" + if Session.__single is None: + Session(*args, **kw) + return Session.__single + get_instance = staticmethod(get_instance) + + def get_default_state_dir(homedirpostfix='.Tribler'): + """ Returns the factory default directory for storing session state + on the current platform (Win32,Mac,Unix). + @return An absolute path name. """ + + # Allow override + statedirvar = '${TSTATEDIR}' + statedir = os.path.expandvars(statedirvar) + if statedir and statedir != statedirvar: + return statedir + + # Boudewijn: retrieving the homedir fails with python 2.x on + # windows when the username contains specific unicode + # characters. using the get_home_dir() function patches this + # problem. 
+ # + homedir = get_home_dir() + + if sys.platform == "win32": + # 5 = XP, 6 = Vista + if sys.getwindowsversion()[0] == 6: + appdir = os.path.join(homedir,u"AppData",u"Roaming") + else: + appdir = os.path.join(homedir,u"Application Data") + else: + appdir = homedir + + statedir = os.path.join(appdir, homedirpostfix) + return statedir + + get_default_state_dir = staticmethod(get_default_state_dir) + + + # + # Public methods + # + def start_download(self,tdef,dcfg=None): + """ + Creates a Download object and adds it to the session. The passed + TorrentDef and DownloadStartupConfig are copied into the new Download + object. The Download is then started and checkpointed. + + If a checkpointed version of the Download is found, that is restarted + overriding the saved DownloadStartupConfig is "dcfg" is not None. + + @param tdef A finalized TorrentDef + @param dcfg DownloadStartupConfig or None, in which case + a new DownloadStartupConfig() is created with its default settings + and the result becomes the runtime config of this Download. + @return Download + """ + # locking by lm + return self.lm.add(tdef,dcfg) + + def resume_download_from_file(self,filename): + """ + Recreates Download from resume file + + @return a Download object. + + Note: this cannot be made into a method of Download, as the Download + needs to be bound to a session, it cannot exist independently. + """ + raise NotYetImplementedException() + + def get_downloads(self): + """ + Returns a copy of the list of Downloads. + @return A list of Download objects. + """ + # locking by lm + return self.lm.get_downloads() + + + def remove_download(self,d,removecontent=False): + """ + Stops the download and removes it from the session. + @param d The Download to remove + @param removecontent Whether to delete the already downloaded content + from disk. + """ + # locking by lm + self.lm.remove(d,removecontent=removecontent) + + + def set_download_states_callback(self,usercallback,getpeerlist=False): + """ + See Download.set_state_callback. Calls usercallback with a list of + DownloadStates, one for each Download in the Session as first argument. + The usercallback must return a tuple (when,getpeerlist) that indicates + when to reinvoke the callback again (as a number of seconds from now, + or < 0.0 if not at all) and whether to also include the details of + the connected peers in the DownloadStates on that next call. + + The callback will be called by a popup thread which can be used + indefinitely (within reason) by the higher level code. + + @param usercallback A function adhering to the above spec. + """ + self.lm.set_download_states_callback(usercallback,getpeerlist) + + + # + # Config parameters that only exist at runtime + # + def get_permid(self): + """ Returns the PermID of the Session, as determined by the + SessionConfig.set_permid() parameter. A PermID is a public key + @return The PermID encoded in a string in DER format. """ + self.sesslock.acquire() + try: + return str(self.keypair.pub().get_der()) + finally: + self.sesslock.release() + + def get_external_ip(self): + """ Returns the external IP address of this Session, i.e., by which + it is reachable from the Internet. This address is determined via + various mechanisms such as the UPnP protocol, our dialback mechanism, + and an inspection of the local network configuration. + @return A string. 
""" + # locking done by lm + return self.lm.get_ext_ip() + + + def get_externally_reachable(self): + """ Returns whether the Session is externally reachable, i.e., its + listen port is not firewalled. Use add_observer() with NTFY_REACHABLE + to register to the event of detecting reachablility. Note that due to + the use of UPnP a Session may become reachable some time after + startup and due to the Dialback mechanism, this method may return + False while the Session is actually already reachable. Note that True + doesn't mean the Session is reachable from the open Internet, could just + be from the local (otherwise firewalled) LAN. + @return A boolean. """ + return DialbackMsgHandler.getInstance().isConnectable() + + + def get_current_startup_config_copy(self): + """ Returns a SessionStartupConfig that is a copy of the current runtime + SessionConfig. + @return SessionStartupConfig + """ + # Called by any thread + self.sesslock.acquire() + try: + sessconfig = copy.copy(self.sessconfig) + return SessionStartupConfig(sessconfig=sessconfig) + finally: + self.sesslock.release() + + # + # Internal tracker + # + def get_internal_tracker_url(self): + """ Returns the announce URL for the internal tracker. + @return URL """ + # Called by any thread + self.sesslock.acquire() + try: + url = None + if 'tracker_url' in self.sessconfig: + url = self.sessconfig['tracker_url'] # user defined override, e.g. specific hostname + if url is None: + ip = self.lm.get_ext_ip() + port = self.get_listen_port() + url = 'http://'+ip+':'+str(port)+'/announce/' + return url + finally: + self.sesslock.release() + + + def get_internal_tracker_dir(self): + """ Returns the directory containing the torrents tracked by the internal + tracker (and associated databases). + @return An absolute path. """ + # Called by any thread + self.sesslock.acquire() + try: + if self.sessconfig['state_dir'] is None: + return None + else: + return os.path.join(self.sessconfig['state_dir'],STATEDIR_ITRACKER_DIR) + finally: + self.sesslock.release() + + + def add_to_internal_tracker(self,tdef): + """ Add a torrent def to the list of torrents tracked by the internal + tracker. Use this method to use the Session as a standalone tracker. + @param tdef A finalized TorrentDef. + """ + # Called by any thread + self.sesslock.acquire() + try: + infohash = tdef.get_infohash() + filename = self.get_internal_tracker_torrentfilename(infohash) + tdef.save(filename) + # Bring to attention of Tracker thread + self.lm.tracker_rescan_dir() + finally: + self.sesslock.release() + + def remove_from_internal_tracker(self,tdef): + """ Remove a torrent def from the list of torrents tracked by the + internal tracker. Use this method to use the Session as a standalone + tracker. + @param tdef A finalized TorrentDef. + """ + infohash = tdef.get_infohash() + self.remove_from_internal_tracker_by_infohash(infohash) + + def remove_from_internal_tracker_by_infohash(self,infohash): + """ Remove a torrent def from the list of torrents tracked by the + internal tracker. Use this method to use the Session as a standalone + tracker. + @param infohash Identifier of the torrent def to remove. 
+ """ + # Called by any thread + self.sesslock.acquire() + try: + filename = self.get_internal_tracker_torrentfilename(infohash) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Session: removing itracker entry",filename + if os.access(filename,os.F_OK): + os.remove(filename) + # Bring to attention of Tracker thread + self.lm.tracker_rescan_dir() + finally: + self.sesslock.release() + + # + # Notification of events in the Session + # + def add_observer(self, func, subject, changeTypes = [NTFY_UPDATE, NTFY_INSERT, NTFY_DELETE], objectID = None): + """ Add an observer function function to the Session. The observer + function will be called when one of the specified events (changeTypes) + occurs on the specified subject. + + The function will be called by a popup thread which can be used + indefinitely (within reason) by the higher level code. + + @param func The observer function. It should accept as its first argument + the subject, as second argument the changeType, as third argument an + objectID (e.g. the primary key in the observed database) and an + optional list of arguments. + @param subject The subject to observe, one of NTFY_* subjects (see + simpledefs). + @param changeTypes The list of events to be notified of one of NTFY_* + events. + @param objectID The specific object in the subject to monitor (e.g. a + specific primary key in a database to monitor for updates.) + + + TODO: Jelle will add per-subject/event description here ;o) + + """ + #Called by any thread + self.uch.notifier.add_observer(func, subject, changeTypes, objectID) # already threadsafe + + def remove_observer(self, func): + """ Remove observer function. No more callbacks will be made. + @param func The observer function to remove. """ + #Called by any thread + self.uch.notifier.remove_observer(func) # already threadsafe + + def open_dbhandler(self,subject): + """ Opens a connection to the specified database. Only the thread + calling this method may use this connection. The connection must be + closed with close_dbhandler() when this thread exits. + + @param subject The database to open. Must be one of the subjects + specified here. + @return A reference to a DBHandler class for the specified subject or + None when the Session was not started with megacaches enabled. +
+        NTFY_PEERS -> PeerDBHandler
+        NTFY_TORRENTS -> TorrentDBHandler
+        NTFY_PREFERENCES -> PreferenceDBHandler
+        NTFY_SUPERPEERS -> SuperpeerDBHandler
+        NTFY_FRIENDS -> FriendsDBHandler
+        NTFY_MYPREFERENCES -> MyPreferenceDBHandler
+        NTFY_BARTERCAST -> BartercastDBHandler
+        NTFY_SEARCH -> SearchDBHandler
+        NTFY_TERM -> TermDBHandler
+        NTFY_MODERATIONCAST -> ModerationCastDBHandler
+        NTFY_VOTECAST -> VotecastDBHandler
+        
+ """ + # Called by any thread + self.sesslock.acquire() + try: + if subject == NTFY_PEERS: + return self.lm.peer_db + elif subject == NTFY_TORRENTS: + return self.lm.torrent_db + elif subject == NTFY_PREFERENCES: + return self.lm.pref_db + elif subject == NTFY_SUPERPEERS: + return self.lm.superpeer_db + elif subject == NTFY_FRIENDS: + return self.lm.friend_db + elif subject == NTFY_MYPREFERENCES: + return self.lm.mypref_db + elif subject == NTFY_BARTERCAST: + return self.lm.bartercast_db + elif subject == NTFY_SEEDINGSTATS: + return self.lm.seedingstats_db + elif subject == NTFY_SEEDINGSTATSSETTINGS: + return self.lm.seedingstatssettings_db + elif subject == NTFY_MODERATIONCAST: + return self.lm.modcast_db + elif subject == NTFY_VOTECAST: + return self.lm.votecast_db + elif subject == NTFY_SEARCH: + return self.lm.search_db + elif subject == NTFY_TERM: + return self.lm.term_db + else: + raise ValueError('Cannot open DB subject: '+subject) + finally: + self.sesslock.release() + + + def close_dbhandler(self,dbhandler): + """ Closes the given database connection """ + dbhandler.close() + + + # + # Access control + # + def set_overlay_request_policy(self, reqpol): + """ + Set a function which defines which overlay requests (e.g. dl_helper, rquery msg) + will be answered or will be denied. + + The function will be called by a network thread and must return + as soon as possible to prevent performance problems. + + @param reqpol is a Tribler.Core.RequestPolicy.AbstractRequestPolicy + object. + """ + # Called by any thread + # to protect self.sessconfig + self.sesslock.acquire() + try: + overlay_loaded = self.sessconfig['overlay'] + finally: + self.sesslock.release() + if overlay_loaded: + self.lm.overlay_apps.setRequestPolicy(reqpol) # already threadsafe + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Session: overlay is disabled, so no overlay request policy needed" + + + # + # Persistence and shutdown + # + def load_checkpoint(self,initialdlstatus=None): + """ Restart Downloads from checkpoint, if any. + + This method allows the API user to manage restoring downloads. + E.g. a video player that wants to start the torrent the user clicked + on first, and only then restart any sleeping torrents (e.g. seeding). + The optional initialdlstatus parameter can be set to DLSTATUS_STOPPED + to restore all the Downloads in DLSTATUS_STOPPED state. + """ + self.lm.load_checkpoint(initialdlstatus) + + + def checkpoint(self): + """ Saves the internal session state to the Session's state dir. """ + #Called by any thread + self.checkpoint_shutdown(stop=False,checkpoint=True,gracetime=None,hacksessconfcheckpoint=False) + + def shutdown(self,checkpoint=True,gracetime=2.0,hacksessconfcheckpoint=True): + """ Checkpoints the session and closes it, stopping the download engine. + @param checkpoint Whether to checkpoint the Session state on shutdown. + @param gracetime Time to allow for graceful shutdown + signoff (seconds). + """ + # Called by any thread + self.lm.early_shutdown() + self.checkpoint_shutdown(stop=True,checkpoint=checkpoint,gracetime=gracetime,hacksessconfcheckpoint=hacksessconfcheckpoint) + self.uch.shutdown() + + def has_shutdown(self): + """ Whether the Session has completely shutdown, i.e., its internal + threads are finished and it is safe to quit the process the Session + is running in. + @return A Boolean. 
+ """ + return self.lm.sessdoneflag.isSet() + + def get_downloads_pstate_dir(self): + """ Returns the directory in which to checkpoint the Downloads in this + Session. """ + # Called by network thread + self.sesslock.acquire() + try: + return os.path.join(self.sessconfig['state_dir'],STATEDIR_DLPSTATE_DIR) + finally: + self.sesslock.release() + + # + # Tribler Core special features + # + def query_connected_peers(self,query,usercallback,max_peers_to_query=None): + """ Ask all Tribler peers we're currently connected to resolve the + specified query and return the hits. For each peer that returns + hits the usercallback method is called with first parameter the + permid of the peer, as second parameter the query string and + as third parameter a dictionary of hits. The number of times the + usercallback method will be called is undefined. + + The callback will be called by a popup thread which can be used + indefinitely (within reason) by the higher level code. + + At the moment we support one type of query, which is a query for + torrent files that match a set of keywords. The format of the + query string is "SIMPLE kw1 kw2 kw3". In the future we plan + to support full SQL queries. + + For SIMPLE queries the dictionary of hits consists of + (infohash,torrentrecord) pairs. The torrentrecord is a + dictionary that contains the following keys: +
+        * 'content_name': The 'name' field of the torrent.
+        * 'length': The total size of the content in the torrent.
+        * 'leecher': The currently known number of downloaders.
+        * 'seeder': The currently known number of seeders.
+        * 'category': A list of category strings the torrent was classified into
+          by the remote peer.
+        
+
+        From Session API version 1.0.2 the following keys were added
+        to the torrentrecord:
+
+        * 'torrent_size': The size of the .torrent file.
+        
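+        A sketch of a callback consuming SIMPLE-query hits (illustrative; the
+        record keys are the ones listed above, the query string is made up):
+
+            def query_usercallback(permid, query, hits):
+                for infohash, torrentrecord in hits.items():
+                    print torrentrecord['content_name'], torrentrecord['length']
+
+            session.query_connected_peers(u'SIMPLE ubuntu linux', query_usercallback)
+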
+ + @param query A Unicode query string adhering to the above spec. + @param usercallback A function adhering to the above spec. + """ + self.sesslock.acquire() + try: + if self.sessconfig['overlay']: + if not query.startswith('SIMPLE '): + raise ValueError('Query does not start with SIMPLE') + + rqmh = RemoteQueryMsgHandler.getInstance() + rqmh.send_query(query,usercallback,max_peers_to_query=max_peers_to_query) + else: + raise OperationNotEnabledByConfigurationException("Overlay not enabled") + finally: + self.sesslock.release() + + + def download_torrentfile_from_peer(self,permid,infohash,usercallback): + """ Ask the designated peer to send us the torrentfile for the torrent + identified by the passed infohash. If the torrent is succesfully + received, the usercallback method is called with the infohash as first + and the contents of the torrentfile (bencoded dict) as second parameter. + If the torrent could not be obtained, the callback is not called. + The torrent will have been added to the TorrentDBHandler (if enabled) + at the time of the call. + + @param permid The PermID of the peer to query. + @param infohash The infohash of the torrent. + @param usercallback A function adhering to the above spec. + """ + self.sesslock.acquire() + try: + if self.sessconfig['overlay']: + rtorrent_handler = RemoteTorrentHandler.getInstance() + rtorrent_handler.download_torrent(permid,infohash,usercallback) + else: + raise OperationNotEnabledByConfigurationException("Overlay not enabled") + finally: + self.sesslock.release() + + + # + # Internal persistence methods + # + def checkpoint_shutdown(self,stop,checkpoint,gracetime,hacksessconfcheckpoint): + """ Checkpoints the Session and optionally shuts down the Session. + @param stop Whether to shutdown the Session as well. + @param checkpoint Whether to checkpoint at all, or just to stop. + @param gracetime Time to allow for graceful shutdown + signoff (seconds). + """ + # Called by any thread + self.sesslock.acquire() + try: + # Arno: Make checkpoint optional on shutdown. At the moment setting + # the config at runtime is not possible (see SessionRuntimeConfig) + # so this has little use, and interferes with our way of + # changing the startup config, which is to write a new + # config to disk that will be read at start up. + if hacksessconfcheckpoint: + try: + self.save_pstate_sessconfig() + except Exception,e: + self.lm.rawserver_nonfatalerrorfunc(e) + + # Checkpoint all Downloads and stop NetworkThread + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Session: checkpoint_shutdown" + self.lm.checkpoint(stop=stop,checkpoint=checkpoint,gracetime=gracetime) + finally: + self.sesslock.release() + + def save_pstate_sessconfig(self): + """ Save the runtime SessionConfig to disk """ + # Called by any thread + sscfg = self.get_current_startup_config_copy() + cfgfilename = Session.get_default_config_filename(sscfg.get_state_dir()) + sscfg.save(cfgfilename) + + + def get_default_config_filename(state_dir): + """ Return the name of the file where a session config is saved by default. + @return A filename + """ + return os.path.join(state_dir,STATEDIR_SESSCONFIG) + get_default_config_filename = staticmethod(get_default_config_filename) + + + def get_internal_tracker_torrentfilename(self,infohash): + """ Return the absolute pathname of the torrent file used by the + internal tracker. 
+ @return A filename + """ + trackerdir = self.get_internal_tracker_dir() + basename = binascii.hexlify(infohash)+'.torrent' # ignore .tribe stuff, not vital + return os.path.join(trackerdir,basename) + + def get_nat_type(self, callback=None): + """ Return the type of Network Address Translator (NAT) detected. + + When a callback parameter is supplied it will always be + called. When the NAT-type is already known the callback will + be made instantly. Otherwise, the callback will be made when + the NAT discovery has finished. + + The callback will be called by a popup thread which can be used + indefinitely (within reason) by the higher level code. + + Return values: + "Blocked" + "Open Internet" + "Restricted Cone Firewall" + "Port Restricted Cone Firewall" + "Full Cone NAT" + "Restricted Cone NAT" + "Port Restricted Cone NAT" + "Symmetric NAT" + "Unknown NAT/Firewall" + + @param callback Optional callback used to notify the NAT type + @return String + """ + # TODO: define constants in simpledefs for these + # Called by any thread + self.sesslock.acquire() + try: + return ConnectionCheck.getInstance(self).get_nat_type(callback=callback) + finally: + self.sesslock.release() + + # + # Friendship functions + # + def send_friendship_message(self,permid,mtype,approved=None): + """ Send friendship msg to the specified peer + + F_REQUEST_MSG: + + F_RESPONSE_MSG: + @param approved Whether you want him as friend or not. + + """ + self.sesslock.acquire() + try: + if self.sessconfig['overlay']: + if mtype == F_FORWARD_MSG: + raise ValueError("User cannot send FORWARD messages directly") + + fmh = FriendshipMsgHandler.getInstance() + params = {} + if approved is not None: + params['response'] = int(approved) + fmh.anythread_send_friendship_msg(permid,mtype,params) + else: + raise OperationNotEnabledByConfigurationException("Overlay not enabled") + finally: + self.sesslock.release() + + + def set_friendship_callback(self,usercallback): + """ When a new friendship request is received the given + callback function is called with as first parameter the + requester's permid and as second parameter a dictionary of + request arguments: + callback(requester_permid,params) + + The callback is called by a popup thread which can be used + indefinitely (within reason) by the higher level code. + + @param usercallback A callback function adhering to the above spec. + """ + self.sesslock.acquire() + try: + if self.sessconfig['overlay']: + fmh = FriendshipMsgHandler.getInstance() + fmh.register_usercallback(usercallback) + else: + raise OperationNotEnabledByConfigurationException("Overlay not enabled") + finally: + self.sesslock.release() + diff --git a/tribler-mod/Tribler/Core/SessionConfig.py b/tribler-mod/Tribler/Core/SessionConfig.py new file mode 100644 index 0000000..7a69cb4 --- /dev/null +++ b/tribler-mod/Tribler/Core/SessionConfig.py @@ -0,0 +1,1240 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information +""" Controls the operation of a Session """ + +import sys +import copy +import pickle + +from Tribler.Core.simpledefs import * +from Tribler.Core.defaults import sessdefaults +from Tribler.Core.Base import * +from Tribler.Core.BitTornado.RawServer import autodetect_socket_style +from Tribler.Core.Utilities.utilities import find_prog_in_PATH + + +class SessionConfigInterface: + """ + (key,value) pair config of global parameters, + e.g. PermID keypair, listen port, max upload speed, etc. 
+ + Use SessionStartupConfig from creating and manipulation configurations + before session startup time. This is just a parent class. + """ + def __init__(self,sessconfig=None): + """ Constructor. + @param sessconfig Optional dictionary used internally + to make this a copy constructor. + """ + + if sessconfig is not None: # copy constructor + self.sessconfig = sessconfig + return + + self.sessconfig = {} + + # Define the built-in default here + self.sessconfig.update(sessdefaults) + + # Set video_analyser_path + if sys.platform == 'win32': + ffmpegname = "ffmpeg.exe" + else: + ffmpegname = "ffmpeg" + + ffmpegpath = find_prog_in_PATH(ffmpegname) + if ffmpegpath is None: + if sys.platform == 'win32': + self.sessconfig['videoanalyserpath'] = ffmpegname + elif sys.platform == 'darwin': + self.sessconfig['videoanalyserpath'] = "macbinaries/ffmpeg" + else: + self.sessconfig['videoanalyserpath'] = ffmpegname + else: + self.sessconfig['videoanalyserpath'] = ffmpegpath + + self.sessconfig['ipv6_binds_v4'] = autodetect_socket_style() + + + + def set_state_dir(self,statedir): + """ Set the directory to store the Session's state in. + @param statedir A preferably absolute path name. If the directory + does not yet exist it will be created at Session create time. + """ + self.sessconfig['state_dir'] = statedir + + def get_state_dir(self): + """ Returns the directory the Session stores its state in. + @return An absolute path name. """ + return self.sessconfig['state_dir'] + + def set_install_dir(self,installdir): + """ Set the directory in which the Tribler Core software is installed. + @param installdir An absolute path name + """ + self.sessconfig['install_dir'] = installdir + + def get_install_dir(self): + """ Returns the directory the Tribler Core software is installed in. + @return An absolute path name. """ + return self.sessconfig['install_dir'] + + + def set_permid_keypair_filename(self,keypairfilename): + """ Set the filename containing the Elliptic Curve keypair to use for + PermID-based authentication in this Session. + + Note: if a Session is started with a SessionStartupConfig that + points to an existing state dir and that state dir contains a saved + keypair, that keypair will be used unless a different keypair is + explicitly configured via this method. + """ + self.sessconfig['eckeypairfilename'] = keypairfilename + + def get_permid_keypair_filename(self): + """ Returns the filename of the Session's keypair. + @return An absolute path name. """ + return self.sessconfig['eckeypairfilename'] + + + def set_listen_port(self,port): + """ Set the UDP and TCP listen port for this Session. + @param port A port number. + """ + self.sessconfig['minport'] = port + self.sessconfig['maxport'] = port + + def get_listen_port(self): + """ Returns the current UDP/TCP listen port. + @return Port number. """ + return self.sessconfig['minport'] + + # + # Advanced network settings + # + def set_ip_for_tracker(self,value): + """ IP address to report to the tracker (default = set automatically). + @param value An IP address as string. """ + self.sessconfig['ip'] = value + + def get_ip_for_tracker(self): + """ Returns the IP address being reported to the tracker. + @return String """ + return self.sessconfig['ip'] + + def set_bind_to_addresses(self,value): + """ Set the list of IP addresses/hostnames to bind to locally. + @param value A list of IP addresses as strings. """ + self.sessconfig['bind'] = value + + def get_bind_to_addresses(self): + """ Returns the list of IP addresses bound to. 
+ @return list """ + return self.sessconfig['bind'] + + def set_upnp_mode(self,value): + """ Use to autoconfigure a UPnP router to forward the UDP/TCP listen + port to this host: +
+         * UPNPMODE_DISABLED: Autoconfigure turned off.
+         * UPNPMODE_WIN32_HNetCfg_NATUPnP: Use Windows COM interface (slow)
+         * UPNPMODE_WIN32_UPnP_UPnPDeviceFinder: Use Windows COM interface (faster)
+         * UPNPMODE_UNIVERSAL_DIRECT: Talk UPnP directly to the network (best)
+        
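+        Example (sketch; UPNPMODE_UNIVERSAL_DIRECT is assumed to be exported
+        by Tribler.Core.simpledefs):
+
+            sscfg = SessionStartupConfig()
+            sscfg.set_upnp_mode(UPNPMODE_UNIVERSAL_DIRECT)
+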
+ @param value UPNPMODE_* + """ + self.sessconfig['upnp_nat_access'] = value + + def get_upnp_mode(self): + """ Returns the UPnP mode set. + @return UPNPMODE_* """ + return self.sessconfig['upnp_nat_access'] + + def set_autoclose_timeout(self,value): + """ Time to wait between closing sockets which nothing has been received + on. + @param value A number of seconds. + """ + self.sessconfig['timeout'] = value + + def get_autoclose_timeout(self): + """ Returns the autoclose timeout. + @return A number of seconds. """ + return self.sessconfig['timeout'] + + def set_autoclose_check_interval(self,value): + """ Time to wait between checking if any connections have timed out. + @param value A number of seconds. + """ + self.sessconfig['timeout_check_interval'] = value + + def get_autoclose_check_interval(self): + """ Returns the autoclose check interval. + @return A number of seconds. """ + return self.sessconfig['timeout_check_interval'] + + # + # Enable/disable Tribler features + # + def set_megacache(self,value): + """ Enable megacache databases to cache peers, torrent files and + preferences (default = True). + @param value Boolean. """ + self.sessconfig['megacache'] = value + + def get_megacache(self): + """ Returns whether Megacache is enabled. + @return Boolean. """ + return self.sessconfig['megacache'] + + # + # Secure Overlay + # + def set_overlay(self,value): + """ Enable overlay swarm to enable Tribler's special features + (default = True). + @param value Boolean. + """ + self.sessconfig['overlay'] = value + + def get_overlay(self): + """ Returns whether overlay-swarm extension is enabled. The overlay + swarm allows strong authentication of peers and is used for all + Tribler-specific messages. + @return Boolean. """ + return self.sessconfig['overlay'] + + def set_overlay_max_message_length(self,value): + """ Maximal message length for messages sent over the secure overlay. + @param value A number of bytes. + """ + self.sessconfig['overlay_max_message_length'] = value + + def get_overlay_max_message_length(self): + """ Returns the maximum overlay-message length. + @return A number of bytes. """ + return self.sessconfig['overlay_max_message_length'] + + + # + # Buddycast + # + def set_buddycast(self,value): + """ Enable buddycast recommendation system at startup (default = True) + @param value Boolean. + """ + self.sessconfig['buddycast'] = value + + def get_buddycast(self): + """ Returns whether buddycast is enabled at startup. + @return Boolean.""" + return self.sessconfig['buddycast'] + + def set_start_recommender(self,value): + """ Buddycast can be temporarily disabled via this parameter + (default = True). Must have been enabled at startup, see + set_buddycast(). + @param value Boolean. + """ + self.sessconfig['start_recommender'] = value + + def get_start_recommender(self): + """ Returns whether Buddycast is temporarily enabled. + @return Boolean.""" + return self.sessconfig['start_recommender'] + + def set_buddycast_interval(self,value): + """ Number of seconds to pause between exchanging preference with a + peer in Buddycast. + @param value A number of seconds. + """ + self.sessconfig['buddycast_interval'] = value + + def get_buddycast_interval(self): + """ Returns the number of seconds between Buddycast pref. exchanges. + @return A number of seconds. """ + return self.sessconfig['buddycast_interval'] + + def set_buddycast_collecting_solution(self,value): + """ Set the Buddycast collecting solution. Only one policy implemented + at the moment: +
+         * BCCOLPOLICY_SIMPLE: Simplest solution (per torrent / buddycasted peer / 4 hours).
+         
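+        Example (sketch; BCCOLPOLICY_SIMPLE is assumed to be exported by
+        Tribler.Core.simpledefs):
+
+            sscfg = SessionStartupConfig()
+            sscfg.set_buddycast_collecting_solution(BCCOLPOLICY_SIMPLE)
+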
+ @param value BCCOLPOLICY_* + """ + self.sessconfig['buddycast_collecting_solution'] = value + + def get_buddycast_collecting_solution(self): + """ Returns the Buddycast collecting solution. + @return BCOLPOLICY_* """ + return self.sessconfig['buddycast_collecting_solution'] + + def set_buddycast_max_peers(self,value): + """ Set max number of peers to use for Buddycast recommendations """ + self.sessconfig['buddycast_max_peers'] = value + + def get_buddycast_max_peers(self): + """ Return the max number of peers to use for Buddycast recommendations. + @return A number of peers. + """ + return self.sessconfig['buddycast_max_peers'] + + # + # Download helper / cooperative download + # + def set_download_help(self,value): + """ Enable download helping/cooperative download (default = True). + @param value Boolean. """ + self.sessconfig['download_help'] = value + + def get_download_help(self): + """ Returns whether download help is enabled. + @return Boolean. """ + return self.sessconfig['download_help'] + + def set_download_help_dir(self,value): + """ Set the directory for storing state and content for download + helping (default = Default destination dir (see get_default_dest_dir() + +'downloadhelp'. + @param value An absolute path. """ + self.sessconfig['download_help_dir'] = value + + def get_download_help_dir(self): + """ Returns the directory for download helping storage. + @return An absolute path name. """ + return self.sessconfig['download_help_dir'] + + # + # Torrent file collecting + # + def set_torrent_collecting(self,value): + """ Automatically collect torrents from peers in the network (default = + True). + @param value Boolean. + """ + self.sessconfig['torrent_collecting'] = value + + def get_torrent_collecting(self): + """ Returns whether to automatically collect torrents. + @return Boolean. """ + return self.sessconfig['torrent_collecting'] + + def set_torrent_collecting_max_torrents(self,value): + """ Set the maximum number of torrents to collect from other peers. + @param value A number of torrents. + """ + self.sessconfig['torrent_collecting_max_torrents'] = value + + def get_torrent_collecting_max_torrents(self): + """ Returns the maximum number of torrents to collect. + @return A number of torrents. """ + return self.sessconfig['torrent_collecting_max_torrents'] + + def set_torrent_collecting_dir(self,value): + """ Where to place collected torrents? (default is state_dir + 'collected_torrent_files') + @param value An absolute path. + """ + self.sessconfig['torrent_collecting_dir'] = value + + def get_torrent_collecting_dir(self): + """ Returns the directory to save collected torrents. + @return An absolute path name. """ + return self.sessconfig['torrent_collecting_dir'] + + def set_torrent_collecting_rate(self,value): + """ Maximum download rate to use for torrent collecting. + @param value A rate in KB/s. """ + self.sessconfig['torrent_collecting_rate'] = value + + def get_torrent_collecting_rate(self): + """ Returns the download rate to use for torrent collecting. + @return A rate in KB/s. """ + return self.sessconfig['torrent_collecting_rate'] + + def set_torrent_checking(self,value): + """ Whether to automatically check the health of collected torrents by + contacting their trackers (default = True). + @param value Boolean + """ + self.sessconfig['torrent_checking'] = value + + def get_torrent_checking(self): + """ Returns whether to check health of collected torrents. + @return Boolean. 
""" + return self.sessconfig['torrent_checking'] + + def set_torrent_checking_period(self,value): + """ Interval between automatic torrent health checks. + @param value An interval in seconds. + """ + self.sessconfig['torrent_checking_period'] = value + + def get_torrent_checking_period(self): + """ Returns the check interval. + @return A number of seconds. """ + return self.sessconfig['torrent_checking_period'] + + def set_stop_collecting_threshold(self,value): + """ Stop collecting more torrents if the disk has less than this limit + @param value A limit in MB. + """ + self.sessconfig['stop_collecting_threshold'] = value + + def get_stop_collecting_threshold(self): + """ Returns the disk-space limit when to stop collecting torrents. + @return A number of megabytes. """ + return self.sessconfig['stop_collecting_threshold'] + + + # + # The Tribler dialback mechanism is used to test whether a Session is + # reachable from the outside and what its external IP address is. + # + def set_dialback(self,value): + """ Use other peers to determine external IP address (default = True) + @param value Boolean + """ + self.sessconfig['dialback'] = value + + def get_dialback(self): + """ Returns whether to use the dialback mechanism. + @return Boolean. """ + return self.sessconfig['dialback'] + + # + # Tribler's social networking feature transmits a nickname and picture + # to all Tribler peers it meets. + # + def set_social_networking(self,value): + """ Enable social networking. If enabled, a message containing the + user's nickname and icon is sent to each Tribler peer met + (default = True). + @param value Boolean + """ + self.sessconfig['socnet'] = value + + def get_social_networking(self): + """ Returns whether social network is enabled. + @return Boolean. """ + return self.sessconfig['socnet'] + + def set_nickname(self,value): + """ The nickname you want to show to others. + @param value A string. + """ + self.sessconfig['nickname'] = value + + def get_nickname(self): + """ Returns the set nickname. + @return String. """ + return self.sessconfig['nickname'] + + def set_mugshot(self,value, mime = 'image/jpeg'): + """ The picture of yourself you want to show to others. + @param value A string of binary data of your image. + @param mime A string of the mimetype of the data + """ + self.sessconfig['mugshot'] = (mime, value) + + def get_mugshot(self): + """ Returns binary image data and mime-type of your picture. + @return (String, String) value and mimetype. """ + if self.sessconfig['mugshot'] is None: + return None, None + else: + return self.sessconfig['mugshot'] + + def set_peer_icon_path(self,value): + """ Directory to store received peer icons (Default is statedir + + STATEDIR_PEERICON_DIR). + @param value An absolute path. """ + self.sessconfig['peer_icon_path'] = value + + def get_peer_icon_path(self): + """ Returns the directory to store peer icons. + @return An absolute path name. """ + return self.sessconfig['peer_icon_path'] + + # + # Tribler remote query: ask other peers when looking for a torrent file + # or peer + # + def set_remote_query(self,value): + """ Enable queries from other peers. At the moment peers can ask + whether this Session has collected or opened a torrent that matches + a specified keyword query. (default = True) + @param value Boolean""" + self.sessconfig['rquery'] = value + + def get_remote_query(self): + """ Returns whether remote query is enabled. + @return Boolean. 
""" + return self.sessconfig['rquery'] + + # + # BarterCast + # + def set_bartercast(self,value): + """ Exchange upload/download statistics with peers (default = True) + @param value Boolean + """ + self.sessconfig['bartercast'] = value + + def get_bartercast(self): + """ Returns to exchange statistics with peers. + @return Boolean. """ + return self.sessconfig['bartercast'] + + + # + # For Tribler Video-On-Demand + # + def set_video_analyser_path(self,value): + """ Path to video analyser FFMPEG. The analyser is used to guess the + bitrate of a video if that information is not present in the torrent + definition. (default = look for it in $PATH) + @param value An absolute path name. + """ + self.sessconfig['videoanalyserpath'] = value + + def get_video_analyser_path(self): + """ Returns the path of the FFMPEG video analyser. + @return An absolute path name. """ + return self.sessconfig['videoanalyserpath'] # strings immutable + + + # + # Tribler's internal tracker + # + def set_internal_tracker(self,value): + """ Enable internal tracker (default = True) + @param value Boolean. + """ + self.sessconfig['internaltracker'] = value + + def get_internal_tracker(self): + """ Returns whether the internal tracker is enabled. + @return Boolean. """ + return self.sessconfig['internaltracker'] + + def set_internal_tracker_url(self,value): + """ Set the internal tracker URL (default = determined dynamically + from Session's IP+port) + @param value URL. + """ + self.sessconfig['tracker_url'] = value + + def get_internal_tracker_url(self): + """ Returns the URL of the tracker as set by set_internal_tracker_url(). + Overridden at runtime by Session class. + @return URL. """ + return self.sessconfig['tracker_url'] + + + def set_mainline_dht(self,value): + """ Enable mainline DHT support (default = True) + @param value Boolean. + """ + self.sessconfig['mainline_dht'] = value + + def get_mainline_dht(self): + """ Returns whether mainline DHT support is enabled. + @return Boolean. """ + return self.sessconfig['mainline_dht'] + + + # + # Internal tracker access control settings + # + def set_tracker_allowed_dir(self,value): + """ Only accept tracking requests for torrent in this dir (default is + Session state-dir + STATEDIR_ITRACKER_DIR + @param value An absolute path name. + """ + self.sessconfig['tracker_allowed_dir'] = value + + def get_tracker_allowed_dir(self): + """ Returns the internal tracker's directory of allowed torrents. + @return An absolute path name. """ + return self.sessconfig['tracker_allowed_dir'] + + def set_tracker_allowed_list(self,value): + """ Only allow peers to register for torrents that appear in the + specified file. Cannot be used in combination with set_tracker_allowed_dir() + @param value An absolute filename containing a list of torrent infohashes in hex format, one per + line. """ + self.sessconfig['tracker_allowed_list'] = value + + def get_tracker_allowed_list(self): + """ Returns the filename of the list of allowed torrents. + @return An absolute path name. """ + return self.sessconfig['tracker_allowed_list'] + + def set_tracker_allowed_controls(self,value): + """ Allow special keys in torrents in the allowed_dir to affect tracker + access. + @param value Boolean + """ + self.sessconfig['tracker_allowed_controls'] = value + + def get_tracker_allowed_controls(self): + """ Returns whether to allow allowed torrents to control tracker access. + @return Boolean. 
""" + return self.sessconfig['tracker_allowed_controls'] + + def set_tracker_allowed_ips(self,value): + """ Only allow connections from IPs specified in the given file; file + contains subnet data in the format: aa.bb.cc.dd/len. + @param value An absolute path name. + """ + self.sessconfig['tracker_allowed_ips'] = value + + def get_tracker_allowed_ips(self): + """ Returns the filename containing allowed IP addresses. + @return An absolute path name.""" + return self.sessconfig['tracker_allowed_ips'] + + def set_tracker_banned_ips(self,value): + """ Don't allow connections from IPs specified in the given file; file + contains IP range data in the format: xxx:xxx:ip1-ip2 + @param value An absolute path name. + """ + self.sessconfig['tracker_banned_ips'] = value + + def get_tracker_banned_ips(self): + """ Returns the filename containing banned IP addresses. + @return An absolute path name. """ + return self.sessconfig['tracker_banned_ips'] + + def set_tracker_only_local_override_ip(self,value): + """ Ignore the 'ip' parameter in the GET announce from machines which + aren't on local network IPs. +
+         * ITRACK_IGNORE_ANNOUNCEIP_NEVER
+         * ITRACK_IGNORE_ANNOUNCEIP_ALWAYS
+         * ITRACK_IGNORE_ANNOUNCEIP_IFNONATCHECK
+        
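+        Example (sketch; the constant is assumed to be exported by
+        Tribler.Core.simpledefs):
+
+            sscfg = SessionStartupConfig()
+            sscfg.set_tracker_only_local_override_ip(ITRACK_IGNORE_ANNOUNCEIP_IFNONATCHECK)
+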
+        @param value ITRACK_IGNORE_ANNOUNCEIP_*
+        """
+        self.sessconfig['tracker_only_local_override_ip'] = value
+
+    def get_tracker_only_local_override_ip(self):
+        """ Returns the ignore policy for 'ip' parameters in announces.
+        @return ITRACK_IGNORE_ANNOUNCEIP_* """
+        return self.sessconfig['tracker_only_local_override_ip']
+
+    def set_tracker_parse_dir_interval(self,value):
+        """ Seconds between reloading of allowed_dir or allowed_file and
+        allowed_ips and banned_ips lists.
+        @param value A number of seconds.
+        """
+        self.sessconfig['tracker_parse_dir_interval'] = value
+
+    def get_tracker_parse_dir_interval(self):
+        """ Returns the number of seconds between refreshes of access control
+        info.
+        @return A number of seconds. """
+        return self.sessconfig['tracker_parse_dir_interval']
+
+    def set_tracker_scrape_allowed(self,value):
+        """ Allow scrape access on the internal tracker (with a scrape request
+        a BitTorrent client can retrieve information about how many peers are
+        downloading the content).
+
+        * ITRACKSCRAPE_ALLOW_NONE: Don't allow scrape requests.
+        * ITRACKSCRAPE_ALLOW_SPECIFIC: Allow scrape requests for a specific torrent.
+        * ITRACKSCRAPE_ALLOW_FULL: Allow scrape of all torrents at once.
+        
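+        For instance, to allow full scrapes (sketch; the constant is assumed
+        to be exported by Tribler.Core.simpledefs):
+
+            sscfg = SessionStartupConfig()
+            sscfg.set_tracker_scrape_allowed(ITRACKSCRAPE_ALLOW_FULL)
+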
+ @param value ITRACKSCRAPE_* + """ + self.sessconfig['tracker_scrape_allowed'] = value + + def get_tracker_scrape_allowed(self): + """ Returns the scrape access policy. + @return ITRACKSCRAPE_ALLOW_* """ + return self.sessconfig['tracker_scrape_allowed'] + + def set_tracker_allow_get(self,value): + """ Setting this parameter adds a /file?hash={hash} links to the + overview page that the internal tracker makes available via HTTP + at hostname:listenport. These links allow users to download the + torrent file from the internal tracker. Use with 'allowed_dir' parameter. + @param value Boolean. + """ + self.sessconfig['tracker_allow_get'] = value + + def get_tracker_allow_get(self): + """ Returns whether to allow HTTP torrent-file downloads from the + internal tracker. + @return Boolean. """ + return self.sessconfig['tracker_allow_get'] + + + # + # Controls for internal tracker's output as Web server + # + def set_tracker_favicon(self,value): + """ File containing image/x-icon data to return when browser requests + favicon.ico from the internal tracker. (Default = Tribler/Images/tribler.ico) + @param value An absolute filename. + """ + self.sessconfig['tracker_favicon'] = value + + def get_tracker_favicon(self): + """ Returns the filename of the internal tracker favicon. + @return An absolute path name. """ + return self.sessconfig['tracker_favicon'] + + def set_tracker_show_infopage(self,value): + """ Whether to display an info page when the tracker's root dir is + requested via HTTP. + @param value Boolean + """ + self.sessconfig['tracker_show_infopage'] = value + + def get_tracker_show_infopage(self): + """ Returns whether to show an info page on the internal tracker. + @return Boolean. """ + return self.sessconfig['tracker_show_infopage'] + + def set_tracker_infopage_redirect(self,value): + """ A URL to redirect the request for an info page to. + @param value URL. + """ + self.sessconfig['tracker_infopage_redirect'] = value + + def get_tracker_infopage_redirect(self): + """ Returns the URL to redirect request for info pages to. + @return URL """ + return self.sessconfig['tracker_infopage_redirect'] + + def set_tracker_show_names(self,value): + """ Whether to display names from the 'allowed dir'. + @param value Boolean. + """ + self.sessconfig['tracker_show_names'] = value + + def get_tracker_show_names(self): + """ Returns whether the tracker displays names from the 'allowed dir'. + @return Boolean. """ + return self.sessconfig['tracker_show_names'] + + def set_tracker_keep_dead(self,value): + """ Keep dead torrents after they expire (so they still show up on your + /scrape and web page) + @param value Boolean. + """ + self.sessconfig['tracker_keep_dead'] = value + + def get_tracker_keep_dead(self): + """ Returns whether to keep dead torrents for statistics. + @return Boolean. """ + return self.sessconfig['tracker_keep_dead'] + + # + # Controls for internal tracker replies + # + def set_tracker_reannounce_interval(self,value): + """ Seconds downloaders should wait between reannouncing themselves + to the internal tracker. + @param value A number of seconds. + """ + self.sessconfig['tracker_reannounce_interval'] = value + + def get_tracker_reannounce_interval(self): + """ Returns the reannounce interval for the internal tracker. + @return A number of seconds. 
""" + return self.sessconfig['tracker_reannounce_interval'] + + def set_tracker_response_size(self,value): + """ Number of peers to send to a peer in a reply to its announce + at the internal tracker (i.e., in the info message) + @param value A number of peers. + """ + self.sessconfig['tracker_response_size'] = value + + def get_tracker_response_size(self): + """ Returns the number of peers to send in a tracker reply. + @return A number of peers. """ + return self.sessconfig['tracker_response_size'] + + def set_tracker_nat_check(self,value): + """ How many times the internal tracker should attempt to check if a + downloader is behind a Network Address Translator (NAT) or firewall. + If it is, the downloader won't be registered at the tracker, as other + peers can probably not contact it. + @param value A number of times, 0 = don't check. + """ + self.sessconfig['tracker_nat_check'] = value + + def get_tracker_nat_check(self): + """ Returns the number of times to check for a firewall. + @return A number of times. """ + return self.sessconfig['tracker_nat_check'] + + + # + # Internal tracker persistence + # + def set_tracker_dfile(self,value): + """ File to store recent downloader info in (default = Session state + dir + STATEDIR_ITRACKER_DIR + tracker.db + @param value An absolute path name. + """ + self.sessconfig['tracker_dfile'] = value + + def get_tracker_dfile(self): + """ Returns the tracker database file. + @return An absolute path name. """ + return self.sessconfig['tracker_dfile'] + + def set_tracker_dfile_format(self,value): + """ Format of the tracker database file. *_PICKLE is needed when Unicode + filenames may appear in the tracker's state (=default). +
+         * ITRACKDBFORMAT_BENCODE: Use BitTorrent bencoding to store records.
+         * ITRACKDBFORMAT_PICKLE: Use Python pickling to store records.
+        
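+        For instance, to select the Unicode-safe pickle format explicitly
+        (sketch; the constant is assumed to be exported by
+        Tribler.Core.simpledefs):
+
+            sscfg = SessionStartupConfig()
+            sscfg.set_tracker_dfile_format(ITRACKDBFORMAT_PICKLE)
+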
+ @param value ITRACKDBFORFMAT_* + """ + self.sessconfig['tracker_dfile_format'] = value + + def get_tracker_dfile_format(self): + """ Returns the format of the tracker database file. + @return ITRACKDBFORMAT_* """ + return self.sessconfig['tracker_dfile_format'] + + def set_tracker_save_dfile_interval(self,value): + """ The interval between saving the internal tracker's state to + the tracker database (see set_tracker_dfile()). + @param value A number of seconds. + """ + self.sessconfig['tracker_save_dfile_interval'] = value + + def get_tracker_save_dfile_interval(self): + """ Returns the tracker-database save interval. + @return A number of seconds. """ + return self.sessconfig['tracker_save_dfile_interval'] + + def set_tracker_logfile(self,value): + """ File to write the tracker logs to (default is NIL: or /dev/null). + @param value A device name. + """ + self.sessconfig['tracker_logfile'] = value + + def get_tracker_logfile(self): + """ Returns the device name to write log messages to. + @return A device name. """ + return self.sessconfig['tracker_logfile'] + + def set_tracker_min_time_between_log_flushes(self,value): + """ Minimum time between flushes of the tracker log. + @param value A number of seconds. + """ + self.sessconfig['tracker_min_time_between_log_flushes'] = value + + def get_tracker_min_time_between_log_flushes(self): + """ Returns time between tracker log flushes. + @return A number of seconds. """ + return self.sessconfig['tracker_min_time_between_log_flushes'] + + def set_tracker_log_nat_checks(self,value): + """ Whether to add entries to the tracker log for NAT-check results. + @param value Boolean + """ + self.sessconfig['tracker_log_nat_checks'] = value + + def get_tracker_log_nat_checks(self): + """ Returns whether to log NAT-check attempts to the tracker log. + @return Boolean. """ + return self.sessconfig['tracker_log_nat_checks'] + + def set_tracker_hupmonitor(self,value): + """ Whether to reopen the tracker log file upon receipt of a SIGHUP + signal (Mac/UNIX only). + @param value Boolean. + """ + self.sessconfig['tracker_hupmonitor'] = value + + def get_tracker_hupmonitor(self): + """ Returns whether to reopen the tracker log file upon receipt of a + SIGHUP signal. + @return Boolean. """ + return self.sessconfig['tracker_hupmonitor'] + + + # + # Esoteric tracker config parameters + # + def set_tracker_socket_timeout(self,value): + """ Set timeout for closing connections to trackers. + @param value A number of seconds. + """ + self.sessconfig['tracker_socket_timeout'] = value + + def get_tracker_socket_timeout(self): + """ Returns the tracker socket timeout. + @return A number of seconds. """ + return self.sessconfig['tracker_socket_timeout'] + + def set_tracker_timeout_downloaders_interval(self,value): + """ Interval between checks for expired downloaders, i.e., peers + no longer in the swarm because they did not reannounce themselves. + @param value A number of seconds. + """ + self.sessconfig['tracker_timeout_downloaders_interval'] = value + + def get_tracker_timeout_downloaders_interval(self): + """ Returns the number of seconds between checks for expired peers. + @return A number of seconds. """ + return self.sessconfig['tracker_timeout_downloaders_interval'] + + def set_tracker_timeout_check_interval(self,value): + """ Time to wait between checking if any connections to the internal + tracker have timed out. + @param value A number of seconds. 
+ """ + self.sessconfig['tracker_timeout_check_interval'] = value + + def get_tracker_timeout_check_interval(self): + """ Returns timeout for connections to the internal tracker. + @return A number of seconds. """ + return self.sessconfig['tracker_timeout_check_interval'] + + def set_tracker_min_time_between_cache_refreshes(self,value): + """ Minimum time before a cache is considered stale and is + flushed. + @param value A number of seconds. + """ + self.sessconfig['tracker_min_time_between_cache_refreshes'] = value + + def get_tracker_min_time_between_cache_refreshes(self): + """ Return the minimum time between cache refreshes. + @return A number of seconds. """ + return self.sessconfig['tracker_min_time_between_cache_refreshes'] + + + # + # BitTornado's Multitracker feature + # + def set_tracker_multitracker_enabled(self,value): + """ Whether to enable multitracker operation in which multiple + trackers are used to register the peers for a specific torrent. + @param value Boolean. + """ + self.sessconfig['tracker_multitracker_enabled'] = value + + def get_tracker_multitracker_enabled(self): + """ Returns whether multitracking is enabled. + @return Boolean. """ + return self.sessconfig['tracker_multitracker_enabled'] + + def set_tracker_multitracker_allowed(self,value): + """ Whether to allow incoming tracker announces. +
+         * ITRACKMULTI_ALLOW_NONE: Don't allow.
+         * ITRACKMULTI_ALLOW_AUTODETECT: Allow for allowed torrents (see set_tracker_allowed_dir())
+         * ITRACKMULTI_ALLOW_ALL: Allow for all. 
+        
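+        For instance (sketch; the constant is assumed to be exported by
+        Tribler.Core.simpledefs):
+
+            sscfg = SessionStartupConfig()
+            sscfg.set_tracker_multitracker_allowed(ITRACKMULTI_ALLOW_AUTODETECT)
+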
+ @param value ITRACKMULTI_ALLOW_* + """ + self.sessconfig['tracker_multitracker_allowed'] = value + + def get_tracker_multitracker_allowed(self): + """ Returns the multitracker allow policy of the internal tracker. + @return ITRACKMULTI_ALLOW_* """ + return self.sessconfig['tracker_multitracker_allowed'] + + def set_tracker_multitracker_reannounce_interval(self,value): + """ Seconds between outgoing tracker announces to the other trackers in + a multi-tracker setup. + @param value A number of seconds. + """ + self.sessconfig['tracker_multitracker_reannounce_interval'] = value + + def get_tracker_multitracker_reannounce_interval(self): + """ Returns the multitracker reannouce interval. + @return A number of seconds. """ + return self.sessconfig['tracker_multitracker_reannounce_interval'] + + def set_tracker_multitracker_maxpeers(self,value): + """ Number of peers to retrieve from the other trackers in a tracker + announce in a multi-tracker setup. + @param value A number of peers. + """ + self.sessconfig['tracker_multitracker_maxpeers'] = value + + def get_tracker_multitracker_maxpeers(self): + """ Returns the number of peers to retrieve from another tracker. + @return A number of peers. """ + return self.sessconfig['tracker_multitracker_maxpeers'] + + def set_tracker_aggregate_forward(self,value): + """ Set an URL to which, if set, all non-multitracker requests are + forwarded, with a password added (optional). + @param value A 2-item list with format: [,|None] + """ + self.sessconfig['tracker_aggregate_forward'] = value + + def get_tracker_aggregate_forward(self): + """ Returns the aggregate forward URL and optional password as a 2-item + list. + @return URL """ + return self.sessconfig['tracker_aggregate_forward'] + + def set_tracker_aggregator(self,value): + """ Whether to act as a data aggregator rather than a tracker. + To enable, set to True or ; if password is set, then an + incoming password is required for access. + @param value Boolean or string. + """ + self.sessconfig['tracker_aggregator'] = value + + def get_tracker_aggregator(self): + """ Returns the tracker aggregator parameter. + @return Boolean or string. """ + return self.sessconfig['tracker_aggregator'] + + def set_tracker_multitracker_http_timeout(self,value): + """ Time to wait before assuming that an HTTP connection + to another tracker in a multi-tracker setup has timed out. + @param value A number of seconds. + """ + self.sessconfig['tracker_multitracker_http_timeout'] = value + + def get_tracker_multitracker_http_timeout(self): + """ Returns timeout for inter-multi-tracker HTTP connections. + @return A number of seconds. """ + return self.sessconfig['tracker_multitracker_http_timeout'] + + + # + # For Tribler superpeer servers + # + def set_superpeer(self,value): + """ Run Session in super peer mode (default = disabled). + @param value Boolean. + """ + self.sessconfig['superpeer'] = value + + def get_superpeer(self): + """ Returns whether the Session runs in superpeer mode. + @return Boolean. """ + return self.sessconfig['superpeer'] + + def set_superpeer_file(self,value): + """ File with addresses of superpeers (default = install_dir+ + Tribler/Core/superpeer.txt). + @param value An absolute path name. + """ + self.sessconfig['superpeer_file'] = value + + def get_superpeer_file(self): + """ Returns the superpeer file. + @return An absolute path name. 
""" + return self.sessconfig['superpeer_file'] + + def set_overlay_log(self,value): + """ File to log message to in super peer mode (default = No logging) + @param value An absolute path name. + """ + + print_stack() + + self.sessconfig['overlay_log'] = value + + def get_overlay_log(self): + """ Returns the file to log messages to or None. + @return An absolute path name. """ + return self.sessconfig['overlay_log'] + + def set_coopdlconfig(self,dscfg): + """ Sets the DownloadStartupConfig with which to start Downloads + when you are asked to help in a cooperative download. + """ + c = dscfg.copy() + self.sessconfig['coopdlconfig'] = c.dlconfig # copy internal dict + + def get_coopdlconfig(self): + """ Return the DownloadStartupConfig that is used when helping others + in a cooperative download. + @return DownloadStartupConfig + """ + dlconfig = self.sessconfig['coopdlconfig'] + if dlconfig is None: + return None + else: + from Tribler.Core.DownloadConfig import DownloadStartupConfig + return DownloadStartupConfig(dlconfig) + + + # + # NAT Puncturing servers information setting + # + def set_nat_detect(self,value): + """ Whether to try to detect the type of Network Address Translator + in place. + @param value Boolean. + """ + self.sessconfig['nat_detect'] = value + + def set_puncturing_internal_port(self, puncturing_internal_port): + """ The listening port of the puncturing module. + @param puncturing_internal_port integer. """ + self.sessconfig['puncturing_internal_port'] = puncturing_internal_port + + def set_stun_servers(self, stun_servers): + """ The addresses of the STUN servers (at least 2) + @param stun_servers List of (hostname/ip,port) tuples. """ + self.sessconfig['stun_servers'] = stun_servers + + def set_pingback_servers(self, pingback_servers): + """ The addresses of the pingback servers (at least 1) + @param pingback_servers List of (hostname/ip,port) tuples. """ + self.sessconfig['pingback_servers'] = pingback_servers + + # Puncturing servers information retrieval + def get_nat_detect(self): + """ Whether to try to detect the type of Network Address Translator + in place. + @return Boolean + """ + return self.sessconfig['nat_detect'] + + def get_puncturing_internal_port(self): + """ Returns the listening port of the puncturing module. + @return integer. """ + return self.sessconfig['puncturing_internal_port'] + + def get_stun_servers(self): + """ Returns the addresses of the STUN servers. + @return List of (hostname/ip,port) tuples. """ + return self.sessconfig['stun_servers'] + + def get_pingback_servers(self): + """ Returns the addresses of the pingback servers. + @return List of (hostname/ip,port) tuples. """ + return self.sessconfig['pingback_servers'] + + # + # Crawler + # + def set_crawler(self, value): + """ Handle crawler messages when received (default = True) + @param value Boolean + """ + self.sessconfig['crawler'] = value + + def get_crawler(self): + """ Whether crawler messages are processed + @return Boolean. 
""" + return self.sessconfig['crawler'] + + # + # RSS feed settings + # + def set_rss_reload_frequency(self, frequency): + """ reload a rss source every n seconds """ + self.sessconfig['rss_reload_frequency'] = frequency + + def get_rss_reload_frequency(self): + """ Returns the reload frequency for a rss source """ + return self.sessconfig['rss_reload_frequency'] + + def set_rss_check_frequency(self, frequency): + """ test a potential .torrent in a rss source every n seconds """ + self.sessconfig['rss_check_frequency'] = frequency + + def get_rss_check_frequency(self): + """ Returns the check frequency for a potential .torrent in a rss source """ + return self.sessconfig['rss_check_frequency'] + + # + # ModerationCast + # + def set_moderationcast_recent_own_moderations_per_have(self, n): + self.sessconfig['moderationcast_recent_own_moderations_per_have'] = n + + def get_moderationcast_recent_own_moderations_per_have(self): + return self.sessconfig['moderationcast_recent_own_moderations_per_have'] + + def set_moderationcast_random_own_moderations_per_have(self, n): + self.sessconfig['moderationcast_random_own_moderations_per_have'] = n + + def get_moderationcast_random_own_moderations_per_have(self): + return self.sessconfig['moderationcast_random_own_moderations_per_have'] + + def set_moderationcast_recent_forward_moderations_per_have(self, n): + self.sessconfig['moderationcast_recent_forward_moderations_per_have'] = n + + def get_moderationcast_recent_forward_moderations_per_have(self): + return self.sessconfig['moderationcast_recent_forward_moderations_per_have'] + + def set_moderationcast_random_forward_moderations_per_have(self, n): + self.sessconfig['moderationcast_random_forward_moderations_per_have'] = n + + def get_moderationcast_random_forward_moderations_per_have(self): + return self.sessconfig['moderationcast_random_forward_moderations_per_have'] + + def get_moderationcast_moderations_per_have(self): + return self.sessconfig['moderationcast_recent_own_moderations_per_have'] + \ + self.sessconfig['moderationcast_random_own_moderations_per_have'] + \ + self.sessconfig['moderationcast_recent_forward_moderations_per_have'] + \ + self.sessconfig['moderationcast_random_forward_moderations_per_have'] + + def set_moderationcast_upload_bandwidth_limit(self, limit): + self.sessconfig['moderationcast_upload_bandwidth_limit'] = limit + + def get_moderationcast_upload_bandwidth_limit(self): + return self.sessconfig['moderationcast_upload_bandwidth_limit'] + + def set_moderationcast_download_bandwidth_limit(self, limit): + self.sessconfig['moderationcast_download_bandwidth_limit'] = limit + + def get_moderationcast_download_bandwidth_limit(self): + return self.sessconfig['moderationcast_download_bandwidth_limit'] + + + +class SessionStartupConfig(SessionConfigInterface,Copyable,Serializable): + """ Class to configure a Session """ + + def __init__(self,sessconfig=None): + SessionConfigInterface.__init__(self,sessconfig) + + # + # Class method + # + def load(filename): + """ + Load a saved SessionStartupConfig from disk. + + @param filename An absolute Unicode filename + @return SessionStartupConfig object + """ + # Class method, no locking required + f = open(filename,"rb") + sessconfig = pickle.load(f) + sscfg = SessionStartupConfig(sessconfig) + f.close() + return sscfg + load = staticmethod(load) + + def save(self,filename): + """ Save the SessionStartupConfig to disk. 
+ @param filename An absolute Unicode filename + """ + # Called by any thread + f = open(filename,"wb") + pickle.dump(self.sessconfig,f) + f.close() + + # + # Copyable interface + # + def copy(self): + config = copy.copy(self.sessconfig) + return SessionStartupConfig(config) diff --git a/tribler-mod/Tribler/Core/SessionConfig.py.bak b/tribler-mod/Tribler/Core/SessionConfig.py.bak new file mode 100644 index 0000000..d7dcbea --- /dev/null +++ b/tribler-mod/Tribler/Core/SessionConfig.py.bak @@ -0,0 +1,1239 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information +""" Controls the operation of a Session """ + +import sys +import copy +import pickle + +from Tribler.Core.simpledefs import * +from Tribler.Core.defaults import sessdefaults +from Tribler.Core.Base import * +from Tribler.Core.BitTornado.RawServer import autodetect_socket_style +from Tribler.Core.Utilities.utilities import find_prog_in_PATH + + +class SessionConfigInterface: + """ + (key,value) pair config of global parameters, + e.g. PermID keypair, listen port, max upload speed, etc. + + Use SessionStartupConfig from creating and manipulation configurations + before session startup time. This is just a parent class. + """ + def __init__(self,sessconfig=None): + """ Constructor. + @param sessconfig Optional dictionary used internally + to make this a copy constructor. + """ + + if sessconfig is not None: # copy constructor + self.sessconfig = sessconfig + return + + self.sessconfig = {} + + # Define the built-in default here + self.sessconfig.update(sessdefaults) + + # Set video_analyser_path + if sys.platform == 'win32': + ffmpegname = "ffmpeg.exe" + else: + ffmpegname = "ffmpeg" + + ffmpegpath = find_prog_in_PATH(ffmpegname) + if ffmpegpath is None: + if sys.platform == 'win32': + self.sessconfig['videoanalyserpath'] = ffmpegname + elif sys.platform == 'darwin': + self.sessconfig['videoanalyserpath'] = "macbinaries/ffmpeg" + else: + self.sessconfig['videoanalyserpath'] = ffmpegname + else: + self.sessconfig['videoanalyserpath'] = ffmpegpath + + self.sessconfig['ipv6_binds_v4'] = autodetect_socket_style() + + + + def set_state_dir(self,statedir): + """ Set the directory to store the Session's state in. + @param statedir A preferably absolute path name. If the directory + does not yet exist it will be created at Session create time. + """ + self.sessconfig['state_dir'] = statedir + + def get_state_dir(self): + """ Returns the directory the Session stores its state in. + @return An absolute path name. """ + return self.sessconfig['state_dir'] + + def set_install_dir(self,installdir): + """ Set the directory in which the Tribler Core software is installed. + @param installdir An absolute path name + """ + self.sessconfig['install_dir'] = installdir + + def get_install_dir(self): + """ Returns the directory the Tribler Core software is installed in. + @return An absolute path name. """ + return self.sessconfig['install_dir'] + + + def set_permid_keypair_filename(self,keypairfilename): + """ Set the filename containing the Elliptic Curve keypair to use for + PermID-based authentication in this Session. + + Note: if a Session is started with a SessionStartupConfig that + points to an existing state dir and that state dir contains a saved + keypair, that keypair will be used unless a different keypair is + explicitly configured via this method. + """ + self.sessconfig['eckeypairfilename'] = keypairfilename + + def get_permid_keypair_filename(self): + """ Returns the filename of the Session's keypair. 
+ @return An absolute path name. """ + return self.sessconfig['eckeypairfilename'] + + + def set_listen_port(self,port): + """ Set the UDP and TCP listen port for this Session. + @param port A port number. + """ + self.sessconfig['minport'] = port + self.sessconfig['maxport'] = port + + def get_listen_port(self): + """ Returns the current UDP/TCP listen port. + @return Port number. """ + return self.sessconfig['minport'] + + # + # Advanced network settings + # + def set_ip_for_tracker(self,value): + """ IP address to report to the tracker (default = set automatically). + @param value An IP address as string. """ + self.sessconfig['ip'] = value + + def get_ip_for_tracker(self): + """ Returns the IP address being reported to the tracker. + @return String """ + return self.sessconfig['ip'] + + def set_bind_to_addresses(self,value): + """ Set the list of IP addresses/hostnames to bind to locally. + @param value A list of IP addresses as strings. """ + self.sessconfig['bind'] = value + + def get_bind_to_addresses(self): + """ Returns the list of IP addresses bound to. + @return list """ + return self.sessconfig['bind'] + + def set_upnp_mode(self,value): + """ Use to autoconfigure a UPnP router to forward the UDP/TCP listen + port to this host: +
+         * UPNPMODE_DISABLED: Autoconfigure turned off.
+         * UPNPMODE_WIN32_HNetCfg_NATUPnP: Use Windows COM interface (slow)
+         * UPNPMODE_WIN32_UPnP_UPnPDeviceFinder: Use Windows COM interface (faster)
+         * UPNPMODE_UNIVERSAL_DIRECT: Talk UPnP directly to the network (best)
+        
+ @param value UPNPMODE_* + """ + self.sessconfig['upnp_nat_access'] = value + + def get_upnp_mode(self): + """ Returns the UPnP mode set. + @return UPNPMODE_* """ + return self.sessconfig['upnp_nat_access'] + + def set_autoclose_timeout(self,value): + """ Time to wait between closing sockets which nothing has been received + on. + @param value A number of seconds. + """ + self.sessconfig['timeout'] = value + + def get_autoclose_timeout(self): + """ Returns the autoclose timeout. + @return A number of seconds. """ + return self.sessconfig['timeout'] + + def set_autoclose_check_interval(self,value): + """ Time to wait between checking if any connections have timed out. + @param value A number of seconds. + """ + self.sessconfig['timeout_check_interval'] = value + + def get_autoclose_check_interval(self): + """ Returns the autoclose check interval. + @return A number of seconds. """ + return self.sessconfig['timeout_check_interval'] + + # + # Enable/disable Tribler features + # + def set_megacache(self,value): + """ Enable megacache databases to cache peers, torrent files and + preferences (default = True). + @param value Boolean. """ + self.sessconfig['megacache'] = value + + def get_megacache(self): + """ Returns whether Megacache is enabled. + @return Boolean. """ + return self.sessconfig['megacache'] + + # + # Secure Overlay + # + def set_overlay(self,value): + """ Enable overlay swarm to enable Tribler's special features + (default = True). + @param value Boolean. + """ + self.sessconfig['overlay'] = value + + def get_overlay(self): + """ Returns whether overlay-swarm extension is enabled. The overlay + swarm allows strong authentication of peers and is used for all + Tribler-specific messages. + @return Boolean. """ + return self.sessconfig['overlay'] + + def set_overlay_max_message_length(self,value): + """ Maximal message length for messages sent over the secure overlay. + @param value A number of bytes. + """ + self.sessconfig['overlay_max_message_length'] = value + + def get_overlay_max_message_length(self): + """ Returns the maximum overlay-message length. + @return A number of bytes. """ + return self.sessconfig['overlay_max_message_length'] + + + # + # Buddycast + # + def set_buddycast(self,value): + """ Enable buddycast recommendation system at startup (default = True) + @param value Boolean. + """ + self.sessconfig['buddycast'] = value + + def get_buddycast(self): + """ Returns whether buddycast is enabled at startup. + @return Boolean.""" + return self.sessconfig['buddycast'] + + def set_start_recommender(self,value): + """ Buddycast can be temporarily disabled via this parameter + (default = True). Must have been enabled at startup, see + set_buddycast(). + @param value Boolean. + """ + self.sessconfig['start_recommender'] = value + + def get_start_recommender(self): + """ Returns whether Buddycast is temporarily enabled. + @return Boolean.""" + return self.sessconfig['start_recommender'] + + def set_buddycast_interval(self,value): + """ Number of seconds to pause between exchanging preference with a + peer in Buddycast. + @param value A number of seconds. + """ + self.sessconfig['buddycast_interval'] = value + + def get_buddycast_interval(self): + """ Returns the number of seconds between Buddycast pref. exchanges. + @return A number of seconds. """ + return self.sessconfig['buddycast_interval'] + + def set_buddycast_collecting_solution(self,value): + """ Set the Buddycast collecting solution. Only one policy implemented + at the moment: +
+         * BCCOLPOLICY_SIMPLE: Simplest solution: collect at most one torrent per buddycasted peer per 4 hours,
+         
+ @param value BCCOLPOLICY_* + """ + self.sessconfig['buddycast_collecting_solution'] = value + + def get_buddycast_collecting_solution(self): + """ Returns the Buddycast collecting solution. + @return BCOLPOLICY_* """ + return self.sessconfig['buddycast_collecting_solution'] + + def set_buddycast_max_peers(self,value): + """ Set max number of peers to use for Buddycast recommendations """ + self.sessconfig['buddycast_max_peers'] = value + + def get_buddycast_max_peers(self): + """ Return the max number of peers to use for Buddycast recommendations. + @return A number of peers. + """ + return self.sessconfig['buddycast_max_peers'] + + # + # Download helper / cooperative download + # + def set_download_help(self,value): + """ Enable download helping/cooperative download (default = True). + @param value Boolean. """ + self.sessconfig['download_help'] = value + + def get_download_help(self): + """ Returns whether download help is enabled. + @return Boolean. """ + return self.sessconfig['download_help'] + + def set_download_help_dir(self,value): + """ Set the directory for storing state and content for download + helping (default = Default destination dir (see get_default_dest_dir() + +'downloadhelp'. + @param value An absolute path. """ + self.sessconfig['download_help_dir'] = value + + def get_download_help_dir(self): + """ Returns the directory for download helping storage. + @return An absolute path name. """ + return self.sessconfig['download_help_dir'] + + # + # Torrent file collecting + # + def set_torrent_collecting(self,value): + """ Automatically collect torrents from peers in the network (default = + True). + @param value Boolean. + """ + self.sessconfig['torrent_collecting'] = value + + def get_torrent_collecting(self): + """ Returns whether to automatically collect torrents. + @return Boolean. """ + return self.sessconfig['torrent_collecting'] + + def set_torrent_collecting_max_torrents(self,value): + """ Set the maximum number of torrents to collect from other peers. + @param value A number of torrents. + """ + self.sessconfig['torrent_collecting_max_torrents'] = value + + def get_torrent_collecting_max_torrents(self): + """ Returns the maximum number of torrents to collect. + @return A number of torrents. """ + return self.sessconfig['torrent_collecting_max_torrents'] + + def set_torrent_collecting_dir(self,value): + """ Where to place collected torrents? (default is state_dir + 'collected_torrent_files') + @param value An absolute path. + """ + self.sessconfig['torrent_collecting_dir'] = value + + def get_torrent_collecting_dir(self): + """ Returns the directory to save collected torrents. + @return An absolute path name. """ + return self.sessconfig['torrent_collecting_dir'] + + def set_torrent_collecting_rate(self,value): + """ Maximum download rate to use for torrent collecting. + @param value A rate in KB/s. """ + self.sessconfig['torrent_collecting_rate'] = value + + def get_torrent_collecting_rate(self): + """ Returns the download rate to use for torrent collecting. + @return A rate in KB/s. """ + return self.sessconfig['torrent_collecting_rate'] + + def set_torrent_checking(self,value): + """ Whether to automatically check the health of collected torrents by + contacting their trackers (default = True). + @param value Boolean + """ + self.sessconfig['torrent_checking'] = value + + def get_torrent_checking(self): + """ Returns whether to check health of collected torrents. + @return Boolean. 
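As a usage sketch of the torrent-collecting knobs above: a SessionStartupConfig (defined at the end of this module) is created and configured before the Session starts. The import path and the concrete values are assumptions for illustration only.

    from Tribler.Core.SessionConfig import SessionStartupConfig  # module path assumed

    sscfg = SessionStartupConfig()
    sscfg.set_download_help(True)                    # allow cooperative downloads
    sscfg.set_torrent_collecting(True)               # gather torrents from peers
    sscfg.set_torrent_collecting_max_torrents(5000)  # illustrative cap
    sscfg.set_torrent_collecting_rate(10)            # KB/s budget for collecting
    sscfg.set_torrent_checking(True)                 # periodically contact trackers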
""" + return self.sessconfig['torrent_checking'] + + def set_torrent_checking_period(self,value): + """ Interval between automatic torrent health checks. + @param value An interval in seconds. + """ + self.sessconfig['torrent_checking_period'] = value + + def get_torrent_checking_period(self): + """ Returns the check interval. + @return A number of seconds. """ + return self.sessconfig['torrent_checking_period'] + + def set_stop_collecting_threshold(self,value): + """ Stop collecting more torrents if the disk has less than this limit + @param value A limit in MB. + """ + self.sessconfig['stop_collecting_threshold'] = value + + def get_stop_collecting_threshold(self): + """ Returns the disk-space limit when to stop collecting torrents. + @return A number of megabytes. """ + return self.sessconfig['stop_collecting_threshold'] + + + # + # The Tribler dialback mechanism is used to test whether a Session is + # reachable from the outside and what its external IP address is. + # + def set_dialback(self,value): + """ Use other peers to determine external IP address (default = True) + @param value Boolean + """ + self.sessconfig['dialback'] = value + + def get_dialback(self): + """ Returns whether to use the dialback mechanism. + @return Boolean. """ + return self.sessconfig['dialback'] + + # + # Tribler's social networking feature transmits a nickname and picture + # to all Tribler peers it meets. + # + def set_social_networking(self,value): + """ Enable social networking. If enabled, a message containing the + user's nickname and icon is sent to each Tribler peer met + (default = True). + @param value Boolean + """ + self.sessconfig['socnet'] = value + + def get_social_networking(self): + """ Returns whether social network is enabled. + @return Boolean. """ + return self.sessconfig['socnet'] + + def set_nickname(self,value): + """ The nickname you want to show to others. + @param value A string. + """ + self.sessconfig['nickname'] = value + + def get_nickname(self): + """ Returns the set nickname. + @return String. """ + return self.sessconfig['nickname'] + + def set_mugshot(self,value, mime = 'image/jpeg'): + """ The picture of yourself you want to show to others. + @param value A string of binary data of your image. + @param mime A string of the mimetype of the data + """ + self.sessconfig['mugshot'] = (mime, value) + + def get_mugshot(self): + """ Returns binary image data and mime-type of your picture. + @return (String, String) value and mimetype. """ + if self.sessconfig['mugshot'] is None: + return None, None + else: + return self.sessconfig['mugshot'] + + def set_peer_icon_path(self,value): + """ Directory to store received peer icons (Default is statedir + + STATEDIR_PEERICON_DIR). + @param value An absolute path. """ + self.sessconfig['peer_icon_path'] = value + + def get_peer_icon_path(self): + """ Returns the directory to store peer icons. + @return An absolute path name. """ + return self.sessconfig['peer_icon_path'] + + # + # Tribler remote query: ask other peers when looking for a torrent file + # or peer + # + def set_remote_query(self,value): + """ Enable queries from other peers. At the moment peers can ask + whether this Session has collected or opened a torrent that matches + a specified keyword query. (default = True) + @param value Boolean""" + self.sessconfig['rquery'] = value + + def get_remote_query(self): + """ Returns whether remote query is enabled. + @return Boolean. 
""" + return self.sessconfig['rquery'] + + # + # BarterCast + # + def set_bartercast(self,value): + """ Exchange upload/download statistics with peers (default = True) + @param value Boolean + """ + self.sessconfig['bartercast'] = value + + def get_bartercast(self): + """ Returns to exchange statistics with peers. + @return Boolean. """ + return self.sessconfig['bartercast'] + + + # + # For Tribler Video-On-Demand + # + def set_video_analyser_path(self,value): + """ Path to video analyser FFMPEG. The analyser is used to guess the + bitrate of a video if that information is not present in the torrent + definition. (default = look for it in $PATH) + @param value An absolute path name. + """ + self.sessconfig['videoanalyserpath'] = value + + def get_video_analyser_path(self): + """ Returns the path of the FFMPEG video analyser. + @return An absolute path name. """ + return self.sessconfig['videoanalyserpath'] # strings immutable + + + # + # Tribler's internal tracker + # + def set_internal_tracker(self,value): + """ Enable internal tracker (default = True) + @param value Boolean. + """ + self.sessconfig['internaltracker'] = value + + def get_internal_tracker(self): + """ Returns whether the internal tracker is enabled. + @return Boolean. """ + return self.sessconfig['internaltracker'] + + def set_internal_tracker_url(self,value): + """ Set the internal tracker URL (default = determined dynamically + from Session's IP+port) + @param value URL. + """ + self.sessconfig['tracker_url'] = value + + def get_internal_tracker_url(self): + """ Returns the URL of the tracker as set by set_internal_tracker_url(). + Overridden at runtime by Session class. + @return URL. """ + return self.sessconfig['tracker_url'] + + + def set_mainline_dht(self,value): + """ Enable mainline DHT support (default = True) + @param value Boolean. + """ + self.sessconfig['mainline_dht'] = value + + def get_mainline_dht(self): + """ Returns whether mainline DHT support is enabled. + @return Boolean. """ + return self.sessconfig['mainline_dht'] + + + # + # Internal tracker access control settings + # + def set_tracker_allowed_dir(self,value): + """ Only accept tracking requests for torrent in this dir (default is + Session state-dir + STATEDIR_ITRACKER_DIR + @param value An absolute path name. + """ + self.sessconfig['tracker_allowed_dir'] = value + + def get_tracker_allowed_dir(self): + """ Returns the internal tracker's directory of allowed torrents. + @return An absolute path name. """ + return self.sessconfig['tracker_allowed_dir'] + + def set_tracker_allowed_list(self,value): + """ Only allow peers to register for torrents that appear in the + specified file. Cannot be used in combination with set_tracker_allowed_dir() + @param value An absolute filename containing a list of torrent infohashes in hex format, one per + line. """ + self.sessconfig['tracker_allowed_list'] = value + + def get_tracker_allowed_list(self): + """ Returns the filename of the list of allowed torrents. + @return An absolute path name. """ + return self.sessconfig['tracker_allowed_list'] + + def set_tracker_allowed_controls(self,value): + """ Allow special keys in torrents in the allowed_dir to affect tracker + access. + @param value Boolean + """ + self.sessconfig['tracker_allowed_controls'] = value + + def get_tracker_allowed_controls(self): + """ Returns whether to allow allowed torrents to control tracker access. + @return Boolean. 
""" + return self.sessconfig['tracker_allowed_controls'] + + def set_tracker_allowed_ips(self,value): + """ Only allow connections from IPs specified in the given file; file + contains subnet data in the format: aa.bb.cc.dd/len. + @param value An absolute path name. + """ + self.sessconfig['tracker_allowed_ips'] = value + + def get_tracker_allowed_ips(self): + """ Returns the filename containing allowed IP addresses. + @return An absolute path name.""" + return self.sessconfig['tracker_allowed_ips'] + + def set_tracker_banned_ips(self,value): + """ Don't allow connections from IPs specified in the given file; file + contains IP range data in the format: xxx:xxx:ip1-ip2 + @param value An absolute path name. + """ + self.sessconfig['tracker_banned_ips'] = value + + def get_tracker_banned_ips(self): + """ Returns the filename containing banned IP addresses. + @return An absolute path name. """ + return self.sessconfig['tracker_banned_ips'] + + def set_tracker_only_local_override_ip(self,value): + """ Ignore the 'ip' parameter in the GET announce from machines which + aren't on local network IPs. +
+         * ITRACK_IGNORE_ANNOUNCEIP_NEVER: Never ignore the 'ip' parameter.
+         * ITRACK_IGNORE_ANNOUNCEIP_ALWAYS: Always ignore it and use the connection's source address instead.
+         * ITRACK_IGNORE_ANNOUNCEIP_IFNONATCHECK: Ignore it only when NAT checking is not enabled (see set_tracker_nat_check()).
+        
+ @param value ITRACK_IGNORE_ANNOUNCEIP* + """ + self.sessconfig['tracker_only_local_override_ip'] = value + + def get_tracker_only_local_override_ip(self): + """ Returns the ignore policy for 'ip' parameters in announces. + @return ITRACK_IGNORE_ANNOUNCEIP_* """ + return self.sessconfig['tracker_only_local_override_ip'] + + def set_tracker_parse_dir_interval(self,value): + """ Seconds between reloading of allowed_dir or allowed_file and + allowed_ips and banned_ips lists. + @param value A number of seconds. + """ + self.sessconfig['tracker_parse_dir_interval'] = value + + def get_tracker_parse_dir_interval(self): + """ Returns the number of seconds between refreshes of access control + info. + @return A number of seconds. """ + return self.sessconfig['tracker_parse_dir_interval'] + + def set_tracker_scrape_allowed(self,value): + """ Allow scrape access on the internal tracker (with a scrape request + a BitTorrent client can retrieve information about how many peers are + downloading the content. +
+        * ITRACKSCRAPE_ALLOW_NONE: Don't allow scrape requests.
+        * ITRACKSCRAPE_ALLOW_SPECIFIC: Allow scrape requests for a specific torrent.
+        * ITRACKSCRAPE_ALLOW_FULL: Allow scrape of all torrents at once.
+        
+ @param value ITRACKSCRAPE_* + """ + self.sessconfig['tracker_scrape_allowed'] = value + + def get_tracker_scrape_allowed(self): + """ Returns the scrape access policy. + @return ITRACKSCRAPE_ALLOW_* """ + return self.sessconfig['tracker_scrape_allowed'] + + def set_tracker_allow_get(self,value): + """ Setting this parameter adds a /file?hash={hash} links to the + overview page that the internal tracker makes available via HTTP + at hostname:listenport. These links allow users to download the + torrent file from the internal tracker. Use with 'allowed_dir' parameter. + @param value Boolean. + """ + self.sessconfig['tracker_allow_get'] = value + + def get_tracker_allow_get(self): + """ Returns whether to allow HTTP torrent-file downloads from the + internal tracker. + @return Boolean. """ + return self.sessconfig['tracker_allow_get'] + + + # + # Controls for internal tracker's output as Web server + # + def set_tracker_favicon(self,value): + """ File containing image/x-icon data to return when browser requests + favicon.ico from the internal tracker. (Default = Tribler/Images/tribler.ico) + @param value An absolute filename. + """ + self.sessconfig['tracker_favicon'] = value + + def get_tracker_favicon(self): + """ Returns the filename of the internal tracker favicon. + @return An absolute path name. """ + return self.sessconfig['tracker_favicon'] + + def set_tracker_show_infopage(self,value): + """ Whether to display an info page when the tracker's root dir is + requested via HTTP. + @param value Boolean + """ + self.sessconfig['tracker_show_infopage'] = value + + def get_tracker_show_infopage(self): + """ Returns whether to show an info page on the internal tracker. + @return Boolean. """ + return self.sessconfig['tracker_show_infopage'] + + def set_tracker_infopage_redirect(self,value): + """ A URL to redirect the request for an info page to. + @param value URL. + """ + self.sessconfig['tracker_infopage_redirect'] = value + + def get_tracker_infopage_redirect(self): + """ Returns the URL to redirect request for info pages to. + @return URL """ + return self.sessconfig['tracker_infopage_redirect'] + + def set_tracker_show_names(self,value): + """ Whether to display names from the 'allowed dir'. + @param value Boolean. + """ + self.sessconfig['tracker_show_names'] = value + + def get_tracker_show_names(self): + """ Returns whether the tracker displays names from the 'allowed dir'. + @return Boolean. """ + return self.sessconfig['tracker_show_names'] + + def set_tracker_keep_dead(self,value): + """ Keep dead torrents after they expire (so they still show up on your + /scrape and web page) + @param value Boolean. + """ + self.sessconfig['tracker_keep_dead'] = value + + def get_tracker_keep_dead(self): + """ Returns whether to keep dead torrents for statistics. + @return Boolean. """ + return self.sessconfig['tracker_keep_dead'] + + # + # Controls for internal tracker replies + # + def set_tracker_reannounce_interval(self,value): + """ Seconds downloaders should wait between reannouncing themselves + to the internal tracker. + @param value A number of seconds. + """ + self.sessconfig['tracker_reannounce_interval'] = value + + def get_tracker_reannounce_interval(self): + """ Returns the reannounce interval for the internal tracker. + @return A number of seconds. 
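Combining the internal-tracker options into one sketch, again on the sscfg from the earlier sketch. The ITRACKSCRAPE_* constant is assumed to be exported by Tribler.Core.simpledefs, and the directory and intervals are illustrative.

    from Tribler.Core.simpledefs import ITRACKSCRAPE_ALLOW_FULL  # assumed location

    sscfg.set_internal_tracker(True)
    sscfg.set_tracker_allowed_dir('/data/torrents')   # only track torrents in this dir
    sscfg.set_tracker_allow_get(True)                 # let peers fetch .torrent files over HTTP
    sscfg.set_tracker_scrape_allowed(ITRACKSCRAPE_ALLOW_FULL)
    sscfg.set_tracker_show_infopage(True)
    sscfg.set_tracker_reannounce_interval(30 * 60)    # 30 minutes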
""" + return self.sessconfig['tracker_reannounce_interval'] + + def set_tracker_response_size(self,value): + """ Number of peers to send to a peer in a reply to its announce + at the internal tracker (i.e., in the info message) + @param value A number of peers. + """ + self.sessconfig['tracker_response_size'] = value + + def get_tracker_response_size(self): + """ Returns the number of peers to send in a tracker reply. + @return A number of peers. """ + return self.sessconfig['tracker_response_size'] + + def set_tracker_nat_check(self,value): + """ How many times the internal tracker should attempt to check if a + downloader is behind a Network Address Translator (NAT) or firewall. + If it is, the downloader won't be registered at the tracker, as other + peers can probably not contact it. + @param value A number of times, 0 = don't check. + """ + self.sessconfig['tracker_nat_check'] = value + + def get_tracker_nat_check(self): + """ Returns the number of times to check for a firewall. + @return A number of times. """ + return self.sessconfig['tracker_nat_check'] + + + # + # Internal tracker persistence + # + def set_tracker_dfile(self,value): + """ File to store recent downloader info in (default = Session state + dir + STATEDIR_ITRACKER_DIR + tracker.db + @param value An absolute path name. + """ + self.sessconfig['tracker_dfile'] = value + + def get_tracker_dfile(self): + """ Returns the tracker database file. + @return An absolute path name. """ + return self.sessconfig['tracker_dfile'] + + def set_tracker_dfile_format(self,value): + """ Format of the tracker database file. *_PICKLE is needed when Unicode + filenames may appear in the tracker's state (=default). +
+         * ITRACKDBFORMAT_BENCODE: Use BitTorrent bencoding to store records.
+         * ITRACKDBFORMAT_PICKLE: Use Python pickling to store records.
+        
+ @param value ITRACKDBFORFMAT_* + """ + self.sessconfig['tracker_dfile_format'] = value + + def get_tracker_dfile_format(self): + """ Returns the format of the tracker database file. + @return ITRACKDBFORMAT_* """ + return self.sessconfig['tracker_dfile_format'] + + def set_tracker_save_dfile_interval(self,value): + """ The interval between saving the internal tracker's state to + the tracker database (see set_tracker_dfile()). + @param value A number of seconds. + """ + self.sessconfig['tracker_save_dfile_interval'] = value + + def get_tracker_save_dfile_interval(self): + """ Returns the tracker-database save interval. + @return A number of seconds. """ + return self.sessconfig['tracker_save_dfile_interval'] + + def set_tracker_logfile(self,value): + """ File to write the tracker logs to (default is NIL: or /dev/null). + @param value A device name. + """ + self.sessconfig['tracker_logfile'] = value + + def get_tracker_logfile(self): + """ Returns the device name to write log messages to. + @return A device name. """ + return self.sessconfig['tracker_logfile'] + + def set_tracker_min_time_between_log_flushes(self,value): + """ Minimum time between flushes of the tracker log. + @param value A number of seconds. + """ + self.sessconfig['tracker_min_time_between_log_flushes'] = value + + def get_tracker_min_time_between_log_flushes(self): + """ Returns time between tracker log flushes. + @return A number of seconds. """ + return self.sessconfig['tracker_min_time_between_log_flushes'] + + def set_tracker_log_nat_checks(self,value): + """ Whether to add entries to the tracker log for NAT-check results. + @param value Boolean + """ + self.sessconfig['tracker_log_nat_checks'] = value + + def get_tracker_log_nat_checks(self): + """ Returns whether to log NAT-check attempts to the tracker log. + @return Boolean. """ + return self.sessconfig['tracker_log_nat_checks'] + + def set_tracker_hupmonitor(self,value): + """ Whether to reopen the tracker log file upon receipt of a SIGHUP + signal (Mac/UNIX only). + @param value Boolean. + """ + self.sessconfig['tracker_hupmonitor'] = value + + def get_tracker_hupmonitor(self): + """ Returns whether to reopen the tracker log file upon receipt of a + SIGHUP signal. + @return Boolean. """ + return self.sessconfig['tracker_hupmonitor'] + + + # + # Esoteric tracker config parameters + # + def set_tracker_socket_timeout(self,value): + """ Set timeout for closing connections to trackers. + @param value A number of seconds. + """ + self.sessconfig['tracker_socket_timeout'] = value + + def get_tracker_socket_timeout(self): + """ Returns the tracker socket timeout. + @return A number of seconds. """ + return self.sessconfig['tracker_socket_timeout'] + + def set_tracker_timeout_downloaders_interval(self,value): + """ Interval between checks for expired downloaders, i.e., peers + no longer in the swarm because they did not reannounce themselves. + @param value A number of seconds. + """ + self.sessconfig['tracker_timeout_downloaders_interval'] = value + + def get_tracker_timeout_downloaders_interval(self): + """ Returns the number of seconds between checks for expired peers. + @return A number of seconds. """ + return self.sessconfig['tracker_timeout_downloaders_interval'] + + def set_tracker_timeout_check_interval(self,value): + """ Time to wait between checking if any connections to the internal + tracker have timed out. + @param value A number of seconds. 
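A short persistence sketch: keep the pickle format whenever Unicode torrent names can occur, and pick how often the tracker state is flushed to disk. The constant's module is assumed and the interval is illustrative; sscfg as before.

    from Tribler.Core.simpledefs import ITRACKDBFORMAT_PICKLE  # assumed location

    sscfg.set_tracker_dfile_format(ITRACKDBFORMAT_PICKLE)  # safe with Unicode filenames
    sscfg.set_tracker_save_dfile_interval(5 * 60)          # flush tracker state every 5 minutes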
+ """ + self.sessconfig['tracker_timeout_check_interval'] = value + + def get_tracker_timeout_check_interval(self): + """ Returns timeout for connections to the internal tracker. + @return A number of seconds. """ + return self.sessconfig['tracker_timeout_check_interval'] + + def set_tracker_min_time_between_cache_refreshes(self,value): + """ Minimum time before a cache is considered stale and is + flushed. + @param value A number of seconds. + """ + self.sessconfig['tracker_min_time_between_cache_refreshes'] = value + + def get_tracker_min_time_between_cache_refreshes(self): + """ Return the minimum time between cache refreshes. + @return A number of seconds. """ + return self.sessconfig['tracker_min_time_between_cache_refreshes'] + + + # + # BitTornado's Multitracker feature + # + def set_tracker_multitracker_enabled(self,value): + """ Whether to enable multitracker operation in which multiple + trackers are used to register the peers for a specific torrent. + @param value Boolean. + """ + self.sessconfig['tracker_multitracker_enabled'] = value + + def get_tracker_multitracker_enabled(self): + """ Returns whether multitracking is enabled. + @return Boolean. """ + return self.sessconfig['tracker_multitracker_enabled'] + + def set_tracker_multitracker_allowed(self,value): + """ Whether to allow incoming tracker announces. +
+         * ITRACKMULTI_ALLOW_NONE: Don't allow.
+         * ITRACKMULTI_ALLOW_AUTODETECT: Allow for allowed torrents (see set_tracker_allowed_dir())
+         * ITRACKMULTI_ALLOW_ALL: Allow for all. 
+        
+ @param value ITRACKMULTI_ALLOW_* + """ + self.sessconfig['tracker_multitracker_allowed'] = value + + def get_tracker_multitracker_allowed(self): + """ Returns the multitracker allow policy of the internal tracker. + @return ITRACKMULTI_ALLOW_* """ + return self.sessconfig['tracker_multitracker_allowed'] + + def set_tracker_multitracker_reannounce_interval(self,value): + """ Seconds between outgoing tracker announces to the other trackers in + a multi-tracker setup. + @param value A number of seconds. + """ + self.sessconfig['tracker_multitracker_reannounce_interval'] = value + + def get_tracker_multitracker_reannounce_interval(self): + """ Returns the multitracker reannouce interval. + @return A number of seconds. """ + return self.sessconfig['tracker_multitracker_reannounce_interval'] + + def set_tracker_multitracker_maxpeers(self,value): + """ Number of peers to retrieve from the other trackers in a tracker + announce in a multi-tracker setup. + @param value A number of peers. + """ + self.sessconfig['tracker_multitracker_maxpeers'] = value + + def get_tracker_multitracker_maxpeers(self): + """ Returns the number of peers to retrieve from another tracker. + @return A number of peers. """ + return self.sessconfig['tracker_multitracker_maxpeers'] + + def set_tracker_aggregate_forward(self,value): + """ Set an URL to which, if set, all non-multitracker requests are + forwarded, with a password added (optional). + @param value A 2-item list with format: [,|None] + """ + self.sessconfig['tracker_aggregate_forward'] = value + + def get_tracker_aggregate_forward(self): + """ Returns the aggregate forward URL and optional password as a 2-item + list. + @return URL """ + return self.sessconfig['tracker_aggregate_forward'] + + def set_tracker_aggregator(self,value): + """ Whether to act as a data aggregator rather than a tracker. + To enable, set to True or ; if password is set, then an + incoming password is required for access. + @param value Boolean or string. + """ + self.sessconfig['tracker_aggregator'] = value + + def get_tracker_aggregator(self): + """ Returns the tracker aggregator parameter. + @return Boolean or string. """ + return self.sessconfig['tracker_aggregator'] + + def set_tracker_multitracker_http_timeout(self,value): + """ Time to wait before assuming that an HTTP connection + to another tracker in a multi-tracker setup has timed out. + @param value A number of seconds. + """ + self.sessconfig['tracker_multitracker_http_timeout'] = value + + def get_tracker_multitracker_http_timeout(self): + """ Returns timeout for inter-multi-tracker HTTP connections. + @return A number of seconds. """ + return self.sessconfig['tracker_multitracker_http_timeout'] + + + # + # For Tribler superpeer servers + # + def set_superpeer(self,value): + """ Run Session in super peer mode (default = disabled). + @param value Boolean. + """ + self.sessconfig['superpeer'] = value + + def get_superpeer(self): + """ Returns whether the Session runs in superpeer mode. + @return Boolean. """ + return self.sessconfig['superpeer'] + + def set_superpeer_file(self,value): + """ File with addresses of superpeers (default = install_dir+ + Tribler/Core/superpeer.txt). + @param value An absolute path name. + """ + self.sessconfig['superpeer_file'] = value + + def get_superpeer_file(self): + """ Returns the superpeer file. + @return An absolute path name. 
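Reading the docstring above, the aggregate-forward value is a 2-item list of the target URL and an optional password (None when no password is used). A multitracker sketch with made-up values, on the same sscfg; the ITRACKMULTI_* constant is assumed to come from Tribler.Core.simpledefs.

    from Tribler.Core.simpledefs import ITRACKMULTI_ALLOW_AUTODETECT  # assumed location

    sscfg.set_tracker_multitracker_enabled(True)
    sscfg.set_tracker_multitracker_allowed(ITRACKMULTI_ALLOW_AUTODETECT)
    sscfg.set_tracker_multitracker_reannounce_interval(2 * 60)
    sscfg.set_tracker_aggregate_forward(['http://tracker.example.org/announce', None])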
""" + return self.sessconfig['superpeer_file'] + + def set_overlay_log(self,value): + """ File to log message to in super peer mode (default = No logging) + @param value An absolute path name. + """ + + print_stack() + + self.sessconfig['overlay_log'] = value + + def get_overlay_log(self): + """ Returns the file to log messages to or None. + @return An absolute path name. """ + return self.sessconfig['overlay_log'] + + def set_coopdlconfig(self,dscfg): + """ Sets the DownloadStartupConfig with which to start Downloads + when you are asked to help in a cooperative download. + """ + c = dscfg.copy() + self.sessconfig['coopdlconfig'] = c.dlconfig # copy internal dict + + def get_coopdlconfig(self): + """ Return the DownloadStartupConfig that is used when helping others + in a cooperative download. + @return DownloadStartupConfig + """ + dlconfig = self.sessconfig['coopdlconfig'] + if dlconfig is None: + return None + else: + from Tribler.Core.DownloadConfig import DownloadStartupConfig + return DownloadStartupConfig(dlconfig) + + + # + # NAT Puncturing servers information setting + # + def set_nat_detect(self,value): + """ Whether to try to detect the type of Network Address Translator + in place. + @param value Boolean. + """ + self.sessconfig['nat_detect'] = value + + def set_puncturing_internal_port(self, puncturing_internal_port): + """ The listening port of the puncturing module. + @param puncturing_internal_port integer. """ + self.sessconfig['puncturing_internal_port'] = puncturing_internal_port + + def set_stun_servers(self, stun_servers): + """ The addresses of the STUN servers (at least 2) + @param stun_servers List of (hostname/ip,port) tuples. """ + self.sessconfig['stun_servers'] = stun_servers + + def set_pingback_servers(self, pingback_servers): + """ The addresses of the pingback servers (at least 1) + @param pingback_servers List of (hostname/ip,port) tuples. """ + self.sessconfig['pingback_servers'] = pingback_servers + + # Puncturing servers information retrieval + def get_nat_detect(self): + """ Whether to try to detect the type of Network Address Translator + in place. + @return Boolean + """ + return self.sessconfig['nat_detect'] + + def get_puncturing_internal_port(self): + """ Returns the listening port of the puncturing module. + @return integer. """ + return self.sessconfig['puncturing_internal_port'] + + def get_stun_servers(self): + """ Returns the addresses of the STUN servers. + @return List of (hostname/ip,port) tuples. """ + return self.sessconfig['stun_servers'] + + def get_pingback_servers(self): + """ Returns the addresses of the pingback servers. + @return List of (hostname/ip,port) tuples. """ + return self.sessconfig['pingback_servers'] + + # + # Crawler + # + def set_crawler(self, value): + """ Handle crawler messages when received (default = True) + @param value Boolean + """ + self.sessconfig['crawler'] = value + + def get_crawler(self): + """ Whether crawler messages are processed + @return Boolean. 
""" + return self.sessconfig['crawler'] + + # + # RSS feed settings + # + def set_rss_reload_frequency(self, frequency): + """ reload a rss source every n seconds """ + self.sessconfig['rss_reload_frequency'] = frequency + + def get_rss_reload_frequency(self): + """ Returns the reload frequency for a rss source """ + return self.sessconfig['rss_reload_frequency'] + + def set_rss_check_frequency(self, frequency): + """ test a potential .torrent in a rss source every n seconds """ + self.sessconfig['rss_check_frequency'] = frequency + + def get_rss_check_frequency(self): + """ Returns the check frequency for a potential .torrent in a rss source """ + return self.sessconfig['rss_check_frequency'] + + # + # ModerationCast + # + def set_moderationcast_recent_own_moderations_per_have(self, n): + self.sessconfig['moderationcast_recent_own_moderations_per_have'] = n + + def get_moderationcast_recent_own_moderations_per_have(self): + return self.sessconfig['moderationcast_recent_own_moderations_per_have'] + + def set_moderationcast_random_own_moderations_per_have(self, n): + self.sessconfig['moderationcast_random_own_moderations_per_have'] = n + + def get_moderationcast_random_own_moderations_per_have(self): + return self.sessconfig['moderationcast_random_own_moderations_per_have'] + + def set_moderationcast_recent_forward_moderations_per_have(self, n): + self.sessconfig['moderationcast_recent_forward_moderations_per_have'] = n + + def get_moderationcast_recent_forward_moderations_per_have(self): + return self.sessconfig['moderationcast_recent_forward_moderations_per_have'] + + def set_moderationcast_random_forward_moderations_per_have(self, n): + self.sessconfig['moderationcast_random_forward_moderations_per_have'] = n + + def get_moderationcast_random_forward_moderations_per_have(self): + return self.sessconfig['moderationcast_random_forward_moderations_per_have'] + + def get_moderationcast_moderations_per_have(self): + return self.sessconfig['moderationcast_recent_own_moderations_per_have'] + \ + self.sessconfig['moderationcast_random_own_moderations_per_have'] + \ + self.sessconfig['moderationcast_recent_forward_moderations_per_have'] + \ + self.sessconfig['moderationcast_random_forward_moderations_per_have'] + + def set_moderationcast_upload_bandwidth_limit(self, limit): + self.sessconfig['moderationcast_upload_bandwidth_limit'] = limit + + def get_moderationcast_upload_bandwidth_limit(self): + return self.sessconfig['moderationcast_upload_bandwidth_limit'] + + def set_moderationcast_download_bandwidth_limit(self, limit): + self.sessconfig['moderationcast_download_bandwidth_limit'] = limit + + def get_moderationcast_download_bandwidth_limit(self): + return self.sessconfig['moderationcast_download_bandwidth_limit'] + + + +class SessionStartupConfig(SessionConfigInterface,Copyable,Serializable): + """ Class to configure a Session """ + + def __init__(self,sessconfig=None): + SessionConfigInterface.__init__(self,sessconfig) + + # + # Class method + # + def load(filename): + """ + Load a saved SessionStartupConfig from disk. + + @param filename An absolute Unicode filename + @return SessionStartupConfig object + """ + # Class method, no locking required + f = open(filename,"rb") + sessconfig = pickle.load(f) + sscfg = SessionStartupConfig(sessconfig) + f.close() + return sscfg + load = staticmethod(load) + + def save(self,filename): + """ Save the SessionStartupConfig to disk. 
+ @param filename An absolute Unicode filename + """ + # Called by any thread + f = open(filename,"wb") + pickle.dump(self.sessconfig,f) + f.close() + + # + # Copyable interface + # + def copy(self): + config = copy.copy(self.sessconfig) + return SessionStartupConfig(config) diff --git a/tribler-mod/Tribler/Core/SocialNetwork/FriendshipMsgHandler.py b/tribler-mod/Tribler/Core/SocialNetwork/FriendshipMsgHandler.py new file mode 100644 index 0000000..c5634da --- /dev/null +++ b/tribler-mod/Tribler/Core/SocialNetwork/FriendshipMsgHandler.py @@ -0,0 +1,875 @@ +from time import localtime, strftime +# Written by Ali Abbas, Arno Bakker +# see LICENSE.txt for license information + +# TODO: either maintain connections to friends always or supplement the +# list of friends with a number of on-line taste buddies. +# +# TODO: at least add fifo order to msgs, otherwise clicking +# "make friend", "delete friend", "make friend" could arive in wrong order +# due to forwarding. +# + +import threading +import sys +import os +import random +import cPickle +from time import time +from types import DictType +from traceback import print_exc +from sets import Set + +from Tribler.Core.simpledefs import * +from Tribler.Core.BitTornado.bencode import bencode, bdecode + +from Tribler.Core.BitTornado.BT1.MessageID import * +from Tribler.Core.CacheDB.CacheDBHandler import PeerDBHandler, FriendDBHandler +from Tribler.Core.CacheDB.SqliteFriendshipStatsCacheDB import FriendshipStatisticsDBHandler +from Tribler.Core.CacheDB.sqlitecachedb import bin2str +from Tribler.Core.Utilities.utilities import * + +from Tribler.Core.Overlay.SecureOverlay import OLPROTO_VER_SEVENTH + +DEBUG = False + +""" +State diagram: + +NOFRIEND -> I_INVITED or HE_INVITED +I_INVITED -> APPROVED or HE_DENIED +HE_INVITED -> APPROVED +HE_INVITED -> I_DENIED + +In theory it could happen that he sends an response=1 RESP, in which case +he approved us. 
I consider that an HE_INIVITE +""" + +RESCHEDULE_INTERVAL = 60 +RESEND_INTERVAL = 5*60 + + +class FriendshipMsgHandler: + __singleton = None + __lock = threading.Lock() + + @classmethod + def getInstance(cls, *args, **kargs): + if not cls.__singleton: + cls.__lock.acquire() + try: + if not cls.__singleton: + cls.__singleton = cls(*args, **kargs) + finally: + cls.__lock.release() + return cls.__singleton + + def __init__(self): + if FriendshipMsgHandler.__singleton: + raise RuntimeError, "FriendshipMsgHandler is singleton" + self.overlay_bridge = None + self.currmsgs = {} + self.online_fsext_peers = Set() # online peers that speak FRIENDSHIP ext + self.peerdb = PeerDBHandler.getInstance() + self.frienddb = FriendDBHandler.getInstance() + self.friendshipStatistics_db = FriendshipStatisticsDBHandler.getInstance() + self.list_no_of_conn_attempts_per_target= {} + self.usercallback = None + + def register(self, overlay_bridge, session): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "friendship: register" + self.overlay_bridge = overlay_bridge + self.session = session + try: + self.load_checkpoint() + except: + print_exc() + self.overlay_bridge.add_task(self.reschedule_connects,RESCHEDULE_INTERVAL) + + + def shutdown(self): + """ + Delegate all outstanding messages to others + """ + # Called by OverlayThread + self.delegate_friendship_making() + self.checkpoint() + + + def register_usercallback(self,usercallback): + self.usercallback = usercallback + + def anythread_send_friendship_msg(self,permid,type,params): + """ Called when user adds someone from the person found, or by + explicity adding someone with her credentials + It establishes overlay connection with the target peer """ + # Called by any thread + + olthread_func = lambda:self.send_friendship_msg(permid,type,params,submit=True) + self.overlay_bridge.add_task(olthread_func,0) + + + def send_friendship_msg(self,permid,type,params,submit=False): + # Called by overlay thread + + if submit: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: send_friendship_msg: Saving msg",show_permid_short(permid) + self.save_msg(permid,type,params) + + if type == F_REQUEST_MSG: + # Make him my friend, pending his approval + self.frienddb.setFriendState(permid, commit=True,state=FS_I_INVITED) + elif type == F_RESPONSE_MSG: + # Mark response in DB + if params['response']: + state = FS_MUTUAL + else: + state = FS_I_DENIED + self.frienddb.setFriendState(permid, commit=True,state=state) + + func = lambda exc,dns,permid,selversion:self.fmsg_connect_callback(exc, dns, permid, selversion, type) + self.overlay_bridge.connect(permid,self.fmsg_connect_callback) + + + def fmsg_connect_callback(self,exc,dns,permid,selversion, type = None): + """ Callback function for the overlay connect function """ + # Called by OverlayThread + + if exc is None: + if selversion < OLPROTO_VER_SEVENTH: + self.remove_msgs_for_ltv7_peer(permid) + return + + # Reached him + sendlist = self.get_msgs_as_sendlist(targetpermid=permid) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'friendship: fmsg_connect_callback: sendlist len',len(sendlist) + #print_stack() + + for i in range(0,len(sendlist)): + tuple = sendlist[i] + + permid,msgid,msg = tuple + send_callback = lambda exc,permid:self.fmsg_send_callback(exc,permid,msgid) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: fmsg_connect_callback: Sending",`msg`,msgid + + mypermid = 
self.session.get_permid() + + commit = (i == len(sendlist)-1) + isForwarder = 0 + no_of_helpers = 0 +# if type == F_REQUEST_MSG: +# print +# elif type == F_RESPONSE_MSG: +# print + #Set forwarder to True and also no of helpers to 10 + if type == F_FORWARD_MSG: + isForwarder = 1 + no_of_helpers = 10 + + + no_of_attempts = 0 + if permid in self.currmsgs: + msgid2rec = self.currmsgs[permid] + if msgid in msgid2rec: + msgrec = msgid2rec[msgid] + no_of_attempts = msgrec['attempt'] + +# insertFriendshipStatistics(self, my_permid, target_permid, current_time, isForwarder = 0, no_of_attempts = 0, no_of_helpers = 0, commit = True): + + self.friendshipStatistics_db.insertOrUpdateFriendshipStatistics( bin2str(mypermid), + bin2str(permid), + int(time()), + isForwarder, + no_of_attempts , + no_of_helpers, + commit=commit) + + self.overlay_bridge.send(permid, FRIENDSHIP + bencode(msg), send_callback) + + + else: + if DEBUG: + peer = self.peerdb.getPeer(permid) + if peer is None: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'friendship: Could not connect to peer', show_permid_short(permid),peer + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'friendship: Could not connect to peer', show_permid_short(permid),peer['name'] + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",exc + + mypermid = self.session.get_permid() + + isForwarder = 0 + no_of_helpers = 0 + if type == F_FORWARD_MSG: + isForwarder = 1 + no_of_helpers = 10 + + + no_of_attempts = 0 + if permid in self.currmsgs: + msgid2rec = self.currmsgs[permid] + for msgid in msgid2rec: + msgrec = msgid2rec[msgid] + no_of_attempts = msgrec['attempt'] + + + self.friendshipStatistics_db.insertOrUpdateFriendshipStatistics( bin2str(mypermid), + bin2str(permid), + int(time()), + isForwarder, + no_of_attempts , + no_of_helpers) + + + + + def fmsg_send_callback(self,exc,permid,msgid): + + # If an exception arises + if exc is None: + self.delete_msg(permid,msgid) + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'friendship: Could not send to ',show_permid_short(permid) + print_exc() + + mypermid = self.session.get_permid() + + no_of_attempts = 0 + no_of_helpers = 10 + isForwarder = False + if permid in self.currmsgs: + msgid2rec = self.currmsgs[permid] + for msgid in msgid2rec: + msgrec = msgid2rec[msgid] + no_of_attempts = msgrec['attempt'] + if msgrec['forwarded'] == True: + isForwarder = 1 + + + self.friendshipStatistics_db.insertOrUpdateFriendshipStatistics( bin2str(mypermid), + bin2str(permid), + int(time()), + isForwarder, + no_of_attempts , + no_of_helpers) + + + def remove_msgs_for_ltv7_peer(self,permid): + """ Remove messages destined for a peer that does not speak >= v7 of + the overlay protocol + """ + sendlist = self.get_msgs_as_sendlist(targetpermid=permid) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'friendship: remove_msgs_for_ltv7_peer: sendlist len',len(sendlist) + + for i in range(0,len(sendlist)): + tuple = sendlist[i] + + permid,msgid,msg = tuple + self.delete_msg(permid,msgid) + + + # + # Incoming connections + # + def handleConnection(self, exc, permid, selversion, locally_initiated): + + if selversion < OLPROTO_VER_SEVENTH: + return True + + if exc is None: + self.online_fsext_peers.add(permid) + + # if we meet peer otherwise, dequeue messages + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: Met peer, attempting to deliver 
msgs",show_permid_short(permid) + + # If we're initiating the connection from this handler, the + # fmsg_connect_callback will get called twice: + # 1. here + # 2. just a bit later when the callback for a successful connect() + # is called. + # Solution: we delay this call, which should give 2. the time to + # run and remove msgs from the queue. + # + # Better: remove msgs from queue when sent and reinsert if send fails + # + friendship_delay_func = lambda:self.fmsg_connect_callback(None,None,permid,selversion) + self.overlay_bridge.add_task(friendship_delay_func,4) + else: + try: + self.online_fsext_peers.remove(permid) + except: + pass + + return True + + + # + # Incoming messages + # + def handleMessage(self, permid, selversion, message): + """ Handle incoming Friend Request, and their response""" + + if selversion < OLPROTO_VER_SEVENTH: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: Got FRIENDSHIP msg from peer with old protocol",show_permid_short(permid) + return False + + try: + d = bdecode(message[1:]) + except: + print_exc() + return False + + return self.process_message(permid,selversion,d) + + + def process_message(self,permid,selversion,d): + + if self.isValidFriendMsg(d): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: Got FRIENDSHIP msg",d['msg type'] + + # If the message is to become a friend, i.e., a friendship request + if d['msg type'] == F_REQUEST_MSG: + self.process_request(permid,d) + + # If the message is to have a response on friend request + elif d['msg type'] == F_RESPONSE_MSG: + self.process_response(permid,d) + + # If the receiving message is to delegate the Friendship request to the target peer + elif d['msg type'] == F_FORWARD_MSG: + return self.process_forward(permid,selversion,d) + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: Got unknown msg type",d['msg type'] + return False + + return True + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: Got bad FRIENDSHIP message" + return False + + def process_request(self,permid,d): + # to see that the following peer is already a friend, or not + fs = self.frienddb.getFriendState(permid) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: process_request: Got request, fs",show_permid_short(permid),fs + + + if fs == FS_NOFRIEND or fs == FS_HE_DENIED: + # not on HE_INVITED, to filter out duplicates + + # And if that peer is not already added as a friend, either approved, or unapproved + # call friend dialog + self.frienddb.setFriendState(permid, commit=True, state = FS_HE_INVITED) + + # FUTURE: always do callback, such that we also know about failed + # attempts + if self.usercallback is not None: + friendship_usercallback = lambda:self.usercallback(permid,[]) + self.session.uch.perform_usercallback(friendship_usercallback) + elif fs == FS_I_INVITED: + # In case, requestee is already added as friend, just make this + # requestee as an approved friend + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: process_request: Got request but I already invited him" + + self.frienddb.setFriendState(permid, commit=True, state = FS_MUTUAL) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: process_request: Got request but I already invited him: sending reply" + + 
self.send_friendship_msg(permid,F_RESPONSE_MSG,{'response':1},submit=True) + elif fs == FS_MUTUAL: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: process_request: Got request but already approved" + elif fs == FS_I_DENIED: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: process_request: Got request but I already denied" + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: process_request: Got request, but fs is",fs + + def process_response(self,permid,d): + + mypermid = self.session.get_permid() + + + self.friendshipStatistics_db.updateFriendshipResponseTime( bin2str(mypermid), + bin2str(permid), + int(time())) + + + fs = self.frienddb.getFriendState(permid) + + # If the request to add has been approved + if d['response'] == 1: + if fs == FS_I_INVITED: + self.frienddb.setFriendState(permid, commit=True, state = FS_MUTUAL) + elif fs != FS_MUTUAL: + # Unsollicited response, consider this an invite, if not already friend + self.frienddb.setFriendState(permid, commit=True, state = FS_HE_INVITED) + else: + # He denied our friendship + self.frienddb.setFriendState(permid, commit=True, state = FS_HE_DENIED) + + + def process_forward(self,permid,selversion,d): + + mypermid = self.session.get_permid() + if d['dest']['permid'] == mypermid: + # This is a forward containing a message meant for me + + # First add original sender to DB so we can connect back to it + self.addPeerToDB(d['source']) + + self.process_message(d['source']['permid'],selversion,d['msg']) + + return True + + + else: + # Queue and forward + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: process_fwd: Forwarding immediately to",show_permid_short(d['dest']['permid']) + + if permid != d['source']['permid']: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: process_fwd: Forwarding: Illegal, source is not sender, and dest is not me" + return False + # First add dest to DB so we can connect to it + + # FUTURE: don't let just any peer overwrite the IP+port of a peer + # if self.peer_db.hasPeer(d['dest']['permid']): + self.addPeerToDB(d['dest']) + + self.send_friendship_msg(d['dest']['permid'],d['msg type'],d,submit=True) + return True + + def addPeerToDB(self,mpeer): + peer = {} + peer['permid'] = mpeer['permid'] + peer['ip'] = mpeer['ip'] + peer['port'] = mpeer['port'] + peer['last_seen'] = 0 + self.peerdb.addPeer(mpeer['permid'],peer,update_dns=True,commit=True) + + + def create_friendship_msg(self,type,params): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: create_fs_msg:",type,`params` + + mypermid = self.session.get_permid() + myip = self.session.get_external_ip() + myport = self.session.get_listen_port() + + d ={'msg type':type} + if type == F_RESPONSE_MSG: + d['response'] = params['response'] + elif type == F_FORWARD_MSG: + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: create: fwd: params",`params` + peer = self.peerdb.getPeer(params['destpermid']) # ,keys=['ip', 'port']) + if peer is None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "friendship: create msg: Don't know IP + port of peer", show_permid_short(params['destpermid']) + return + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "friendship: create msg: Peer at",peer + + # FUTURE: add 
signatures on ip+port + src = {'permid':mypermid,'ip':myip,'port':myport} + dst = {'permid':params['destpermid'],'ip':peer['ip'],'port':peer['port']} + d.update({'source':src,'dest':dst,'msg':params['msg']}) + return d + + + + def isValidFriendMsg(self,d): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: msg: payload is",`d` + + + if type(d) != DictType: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: msg: payload is not bencoded dict" + return False + if not 'msg type' in d: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: msg: dict misses key",'msg type' + return False + + if d['msg type'] == F_REQUEST_MSG: + keys = d.keys()[:] + if len(keys)-1 != 0: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: msg: REQ: contains superfluous keys",keys + return False + return True + + if d['msg type'] == F_RESPONSE_MSG: + if (d.has_key('response') and (d['response'] == 1 or d['response'] == 0)): + return True + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: msg: RESP: something wrong",`d` + return False + + if d['msg type'] == F_FORWARD_MSG: + if not self.isValidPeer(d['source']): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: msg: FWD: source bad",`d` + return False + if not self.isValidPeer(d['dest']): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: msg: FWD: dest bad",`d` + return False + if not 'msg' in d: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: msg: FWD: no msg",`d` + return False + if not self.isValidFriendMsg(d['msg']): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: msg: FWD: bad msg",`d` + return False + if d['msg']['msg type'] == F_FORWARD_MSG: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: msg: FWD: cannot contain fwd",`d` + return False + return True + + return False + + + def isValidPeer(self,d): + if (d.has_key('ip') and d.has_key('port') and d.has_key('permid') + and validPermid(d['permid']) + and validIP(d['ip'])and validPort(d['port'])): + return True + else: + return False + + + def save_msg(self,permid,type,params): + + if not permid in self.currmsgs: + self.currmsgs[permid] = {} + + mypermid = self.session.get_permid() + now = time() + attempt = 1 + + base = mypermid+permid+str(now)+str(random.random()) + msgid = sha(base).hexdigest() + msgrec = {'permid':permid,'type':type,'params':params,'attempt':attempt,'t':now,'forwarded':False} + + msgid2rec = self.currmsgs[permid] + msgid2rec[msgid] = msgrec + + def delete_msg(self,permid,msgid): + try: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: Deleting msg",show_permid_short(permid),msgid + msgid2rec = self.currmsgs[permid] + del msgid2rec[msgid] + except: + #print_exc() + pass + + def set_msg_forwarded(self,permid,msgid): + try: + msgid2rec = self.currmsgs[permid] + msgid2rec[msgid]['forwarded'] = True + except: + print_exc() + + def reschedule_connects(self): + """ This function is run periodically and reconnects to peers when + messages meant for it are due to be retried + """ + now = time() + delmsgids = [] + reconnectpermids = Set() + for permid in self.currmsgs: + msgid2rec = self.currmsgs[permid] + for msgid in 
msgid2rec: + msgrec = msgid2rec[msgid] + + eta = self.calc_eta(msgrec) + + if DEBUG: + diff = None + if eta is not None: + diff = eta - now + + if DEBUG: + peer = self.peerdb.getPeer(permid) + if peer is None: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: reschedule: ETA: wtf, peer not in DB!",show_permid_short(permid) + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: reschedule: ETA",show_permid_short(permid),peer['name'],diff + + if eta is None: + delmsgids.append((permid,msgid)) + elif now > eta-1.0: # -1 for round off + # reconnect + reconnectpermids.add(permid) + msgrec['attempt'] = msgrec['attempt'] + 1 + + # Delegate + if msgrec['type'] == F_REQUEST_MSG and msgrec['attempt'] == 2: + self.delegate_friendship_making(targetpermid=permid,targetmsgid=msgid) + + # Remove timed out messages + for permid,msgid in delmsgids: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: reschedule: Deleting",show_permid_short(permid),msgid + self.delete_msg(permid,msgid) + + # Initiate connections to peers for which we have due messages + for permid in reconnectpermids: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: reschedule: Reconnect to",show_permid_short(permid) + + self.overlay_bridge.connect(permid,self.fmsg_connect_callback) + + # Reschedule this periodic task + self.overlay_bridge.add_task(self.reschedule_connects,RESCHEDULE_INTERVAL) + + + def calc_eta(self,msgrec): + if msgrec['type'] == F_FORWARD_MSG: + if msgrec['attempt'] >= 10: + # Stop trying to forward after a given period + return None + # exponential backoff, on 10th attempt we would wait 24hrs + eta = msgrec['t'] + pow(3.116,msgrec['attempt']) + else: + if msgrec['attempt'] >= int(7*24*3600/RESEND_INTERVAL): + # Stop trying to forward after a given period = 1 week + return None + + eta = msgrec['t'] + msgrec['attempt']*RESEND_INTERVAL + return eta + + + def get_msgs_as_sendlist(self,targetpermid=None): + + sendlist = [] + if targetpermid is None: + permids = self.currmsgs.keys() + else: + permids = [targetpermid] + + for permid in permids: + msgid2rec = self.currmsgs.get(permid,{}) + for msgid in msgid2rec: + msgrec = msgid2rec[msgid] + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: get_msgs: Creating",msgrec['type'],`msgrec['params']`,msgid + if msgrec['type'] == F_FORWARD_MSG: + msg = msgrec['params'] + else: + msg = self.create_friendship_msg(msgrec['type'],msgrec['params']) + tuple = (permid,msgid,msg) + sendlist.append(tuple) + return sendlist + + + def get_msgs_as_fwd_sendlist(self,targetpermid=None,targetmsgid=None): + + sendlist = [] + if targetpermid is None: + permids = self.currmsgs.keys() + else: + permids = [targetpermid] + + for permid in permids: + msgid2rec = self.currmsgs.get(permid,{}) + for msgid in msgid2rec: + if targetmsgid is None or msgid == targetmsgid: + msgrec = msgid2rec[msgid] + if msgrec['type'] != F_FORWARD_MSG and msgrec['forwarded'] == False: + # Don't forward forwards, or messages already forwarded + + # Create forward message for original + params = {} + params['destpermid'] = permid + params['msg'] = self.create_friendship_msg(msgrec['type'],msgrec['params']) + + msg = self.create_friendship_msg(F_FORWARD_MSG,params) + tuple = (permid,msgid,msg) + sendlist.append(tuple) + return sendlist + + + + def delegate_friendship_making(self,targetpermid=None,targetmsgid=None): + if DEBUG: + print 
>>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: delegate:",show_permid_short(targetpermid),targetmsgid + + # 1. See if there are undelivered msgs + sendlist = self.get_msgs_as_fwd_sendlist(targetpermid=targetpermid,targetmsgid=targetmsgid) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: delegate: Number of messages queued",len(sendlist) + + if len(sendlist) == 0: + return + + # 2. Get friends, not necess. online + friend_permids = self.frienddb.getFriends() + + if DEBUG: + l = len(friend_permids) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: delegate: friend helpers",l + for permid in friend_permids: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: delegate: friend helper",show_permid_short(permid) + + # 3. Sort online peers on similarity, highly similar should be tastebuddies + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: delegate: Number of online v7 peers",len(self.online_fsext_peers) + tastebuddies = self.peerdb.getPeers(list(self.online_fsext_peers),['similarity','name']) + tastebuddies.sort(sim_desc_cmp) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: delegate: Sorted tastebuddies",`tastebuddies` + + tastebuddies_permids = [] + size = min(10,len(tastebuddies)) + for i in xrange(0,size): + peer = tastebuddies[i] + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: delegate: buddy helper",show_permid_short(peer['permid']) + tastebuddies_permids.append(peer['permid']) + + # 4. Create list of helpers: + # + # Policy: Helpers are a mix of friends and online tastebuddies + # with 70% friends (if avail) and 30% tastebuddies + # + # I chose this policy because friends are not guaranteed to be online + # and waiting to see if we can connect to them before switching to + # the online taste buddies is complex code-wise and time-consuming. 
+ # We don't have a lot of time when this thing is called by Session.shutdown() + # + nwant = 10 + nfriends = int(nwant * .7) + nbuddies = int(nwant * .3) + + part1 = sampleorlist(friend_permids,nfriends) + fill = nfriends-len(part1) # if no friends, use tastebuddies + part2 = sampleorlist(tastebuddies_permids,nbuddies+fill) + helpers = part1 + part2 + + if DEBUG: + l = len(helpers) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: delegate: end helpers",l + for permid in helpers: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: delegate: end helper",show_permid_short(permid),self.frienddb.getFriendState(permid),self.peerdb.getPeers([permid],['similarity','name']) + + + for tuple in sendlist: + destpermid,msgid,msg = tuple + for helperpermid in helpers: + if destpermid != helperpermid: + connect_callback = lambda exc,dns,permid,selversion:self.forward_connect_callback(exc,dns,permid,selversion,destpermid,msgid,msg) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: delegate: Connecting to",show_permid_short(helperpermid) + + self.overlay_bridge.connect(helperpermid, connect_callback) + + + def forward_connect_callback(self,exc,dns,permid,selversion,destpermid,msgid,msg): + if exc is None: + + if selversion < OLPROTO_VER_SEVENTH: + return + + send_callback = lambda exc,permid:self.forward_send_callback(exc,permid,destpermid,msgid) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: forward_connect_callback: Sending",`msg` + self.overlay_bridge.send(permid, FRIENDSHIP + bencode(msg), send_callback) + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: forward: Could not connect to helper",show_permid_short(permid) + + + def forward_send_callback(self,exc,permid,destpermid,msgid): + if DEBUG: + if exc is None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: forward: Success forwarding to helper",show_permid_short(permid) + self.set_msg_forwarded(destpermid,msgid) + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: forward: Failed to forward to helper",show_permid_short(permid) + + def checkpoint(self): + statedir = self.session.get_state_dir() + newfilename = os.path.join(statedir,'new-friendship-msgs.pickle') + finalfilename = os.path.join(statedir,'friendship-msgs.pickle') + try: + f = open(newfilename,"wb") + cPickle.dump(self.currmsgs,f) + f.close() + try: + os.remove(finalfilename) + except: + # If first time, it doesn't exist + print_exc() + os.rename(newfilename,finalfilename) + except: + print_exc() + + def load_checkpoint(self): + statedir = self.session.get_state_dir() + finalfilename = os.path.join(statedir,'friendship-msgs.pickle') + try: + f = open(finalfilename,"rb") + self.currmsgs = cPickle.load(f) + except: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "friendship: could not read previous messages from", finalfilename + + # Increase # attempts till current time + now = time() + for permid in self.currmsgs: + msgid2rec = self.currmsgs[permid] + for msgid in msgid2rec: + msgrec = msgid2rec[msgid] + diff = now - msgrec['t'] + a = int(diff/RESEND_INTERVAL) + a += 1 + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: load_checkp: Changing #attempts from",msgrec['attempt'],a + msgrec['attempt'] = a + + +def 
sim_desc_cmp(peera,peerb): + if peera['similarity'] < peerb['similarity']: + return 1 + elif peera['similarity'] > peerb['similarity']: + return -1 + else: + return 0 + +def sampleorlist(z,k): + if len(z) < k: + return z + else: + return random.sample(k) diff --git a/tribler-mod/Tribler/Core/SocialNetwork/FriendshipMsgHandler.py.bak b/tribler-mod/Tribler/Core/SocialNetwork/FriendshipMsgHandler.py.bak new file mode 100644 index 0000000..060b891 --- /dev/null +++ b/tribler-mod/Tribler/Core/SocialNetwork/FriendshipMsgHandler.py.bak @@ -0,0 +1,874 @@ +# Written by Ali Abbas, Arno Bakker +# see LICENSE.txt for license information + +# TODO: either maintain connections to friends always or supplement the +# list of friends with a number of on-line taste buddies. +# +# TODO: at least add fifo order to msgs, otherwise clicking +# "make friend", "delete friend", "make friend" could arive in wrong order +# due to forwarding. +# + +import threading +import sys +import os +import random +import cPickle +from time import time +from types import DictType +from traceback import print_exc +from sets import Set + +from Tribler.Core.simpledefs import * +from Tribler.Core.BitTornado.bencode import bencode, bdecode + +from Tribler.Core.BitTornado.BT1.MessageID import * +from Tribler.Core.CacheDB.CacheDBHandler import PeerDBHandler, FriendDBHandler +from Tribler.Core.CacheDB.SqliteFriendshipStatsCacheDB import FriendshipStatisticsDBHandler +from Tribler.Core.CacheDB.sqlitecachedb import bin2str +from Tribler.Core.Utilities.utilities import * + +from Tribler.Core.Overlay.SecureOverlay import OLPROTO_VER_SEVENTH + +DEBUG = False + +""" +State diagram: + +NOFRIEND -> I_INVITED or HE_INVITED +I_INVITED -> APPROVED or HE_DENIED +HE_INVITED -> APPROVED +HE_INVITED -> I_DENIED + +In theory it could happen that he sends an response=1 RESP, in which case +he approved us. 
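Read as a transition table over the FS_* constants the handler checks (the diagram's APPROVED is FS_MUTUAL in the code), the state diagram above amounts to the sketch below; the explicit imports are an assumption, the module itself relies on the star import of Tribler.Core.simpledefs:

from Tribler.Core.simpledefs import (FS_NOFRIEND, FS_I_INVITED, FS_HE_INVITED,
                                     FS_MUTUAL, FS_I_DENIED, FS_HE_DENIED)

# (current state, event) -> next state
FRIENDSHIP_TRANSITIONS = {
    (FS_NOFRIEND,   'we invite'):   FS_I_INVITED,
    (FS_NOFRIEND,   'he invites'):  FS_HE_INVITED,
    (FS_I_INVITED,  'he approves'): FS_MUTUAL,
    (FS_I_INVITED,  'he denies'):   FS_HE_DENIED,
    (FS_HE_INVITED, 'we approve'):  FS_MUTUAL,
    (FS_HE_INVITED, 'we deny'):     FS_I_DENIED,
}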
I consider that an HE_INIVITE +""" + +RESCHEDULE_INTERVAL = 60 +RESEND_INTERVAL = 5*60 + + +class FriendshipMsgHandler: + __singleton = None + __lock = threading.Lock() + + @classmethod + def getInstance(cls, *args, **kargs): + if not cls.__singleton: + cls.__lock.acquire() + try: + if not cls.__singleton: + cls.__singleton = cls(*args, **kargs) + finally: + cls.__lock.release() + return cls.__singleton + + def __init__(self): + if FriendshipMsgHandler.__singleton: + raise RuntimeError, "FriendshipMsgHandler is singleton" + self.overlay_bridge = None + self.currmsgs = {} + self.online_fsext_peers = Set() # online peers that speak FRIENDSHIP ext + self.peerdb = PeerDBHandler.getInstance() + self.frienddb = FriendDBHandler.getInstance() + self.friendshipStatistics_db = FriendshipStatisticsDBHandler.getInstance() + self.list_no_of_conn_attempts_per_target= {} + self.usercallback = None + + def register(self, overlay_bridge, session): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "friendship: register" + self.overlay_bridge = overlay_bridge + self.session = session + try: + self.load_checkpoint() + except: + print_exc() + self.overlay_bridge.add_task(self.reschedule_connects,RESCHEDULE_INTERVAL) + + + def shutdown(self): + """ + Delegate all outstanding messages to others + """ + # Called by OverlayThread + self.delegate_friendship_making() + self.checkpoint() + + + def register_usercallback(self,usercallback): + self.usercallback = usercallback + + def anythread_send_friendship_msg(self,permid,type,params): + """ Called when user adds someone from the person found, or by + explicity adding someone with her credentials + It establishes overlay connection with the target peer """ + # Called by any thread + + olthread_func = lambda:self.send_friendship_msg(permid,type,params,submit=True) + self.overlay_bridge.add_task(olthread_func,0) + + + def send_friendship_msg(self,permid,type,params,submit=False): + # Called by overlay thread + + if submit: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: send_friendship_msg: Saving msg",show_permid_short(permid) + self.save_msg(permid,type,params) + + if type == F_REQUEST_MSG: + # Make him my friend, pending his approval + self.frienddb.setFriendState(permid, commit=True,state=FS_I_INVITED) + elif type == F_RESPONSE_MSG: + # Mark response in DB + if params['response']: + state = FS_MUTUAL + else: + state = FS_I_DENIED + self.frienddb.setFriendState(permid, commit=True,state=state) + + func = lambda exc,dns,permid,selversion:self.fmsg_connect_callback(exc, dns, permid, selversion, type) + self.overlay_bridge.connect(permid,self.fmsg_connect_callback) + + + def fmsg_connect_callback(self,exc,dns,permid,selversion, type = None): + """ Callback function for the overlay connect function """ + # Called by OverlayThread + + if exc is None: + if selversion < OLPROTO_VER_SEVENTH: + self.remove_msgs_for_ltv7_peer(permid) + return + + # Reached him + sendlist = self.get_msgs_as_sendlist(targetpermid=permid) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'friendship: fmsg_connect_callback: sendlist len',len(sendlist) + #print_stack() + + for i in range(0,len(sendlist)): + tuple = sendlist[i] + + permid,msgid,msg = tuple + send_callback = lambda exc,permid:self.fmsg_send_callback(exc,permid,msgid) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: fmsg_connect_callback: Sending",`msg`,msgid + + mypermid = 
self.session.get_permid() + + commit = (i == len(sendlist)-1) + isForwarder = 0 + no_of_helpers = 0 +# if type == F_REQUEST_MSG: +# print +# elif type == F_RESPONSE_MSG: +# print + #Set forwarder to True and also no of helpers to 10 + if type == F_FORWARD_MSG: + isForwarder = 1 + no_of_helpers = 10 + + + no_of_attempts = 0 + if permid in self.currmsgs: + msgid2rec = self.currmsgs[permid] + if msgid in msgid2rec: + msgrec = msgid2rec[msgid] + no_of_attempts = msgrec['attempt'] + +# insertFriendshipStatistics(self, my_permid, target_permid, current_time, isForwarder = 0, no_of_attempts = 0, no_of_helpers = 0, commit = True): + + self.friendshipStatistics_db.insertOrUpdateFriendshipStatistics( bin2str(mypermid), + bin2str(permid), + int(time()), + isForwarder, + no_of_attempts , + no_of_helpers, + commit=commit) + + self.overlay_bridge.send(permid, FRIENDSHIP + bencode(msg), send_callback) + + + else: + if DEBUG: + peer = self.peerdb.getPeer(permid) + if peer is None: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'friendship: Could not connect to peer', show_permid_short(permid),peer + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'friendship: Could not connect to peer', show_permid_short(permid),peer['name'] + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",exc + + mypermid = self.session.get_permid() + + isForwarder = 0 + no_of_helpers = 0 + if type == F_FORWARD_MSG: + isForwarder = 1 + no_of_helpers = 10 + + + no_of_attempts = 0 + if permid in self.currmsgs: + msgid2rec = self.currmsgs[permid] + for msgid in msgid2rec: + msgrec = msgid2rec[msgid] + no_of_attempts = msgrec['attempt'] + + + self.friendshipStatistics_db.insertOrUpdateFriendshipStatistics( bin2str(mypermid), + bin2str(permid), + int(time()), + isForwarder, + no_of_attempts , + no_of_helpers) + + + + + def fmsg_send_callback(self,exc,permid,msgid): + + # If an exception arises + if exc is None: + self.delete_msg(permid,msgid) + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'friendship: Could not send to ',show_permid_short(permid) + print_exc() + + mypermid = self.session.get_permid() + + no_of_attempts = 0 + no_of_helpers = 10 + isForwarder = False + if permid in self.currmsgs: + msgid2rec = self.currmsgs[permid] + for msgid in msgid2rec: + msgrec = msgid2rec[msgid] + no_of_attempts = msgrec['attempt'] + if msgrec['forwarded'] == True: + isForwarder = 1 + + + self.friendshipStatistics_db.insertOrUpdateFriendshipStatistics( bin2str(mypermid), + bin2str(permid), + int(time()), + isForwarder, + no_of_attempts , + no_of_helpers) + + + def remove_msgs_for_ltv7_peer(self,permid): + """ Remove messages destined for a peer that does not speak >= v7 of + the overlay protocol + """ + sendlist = self.get_msgs_as_sendlist(targetpermid=permid) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'friendship: remove_msgs_for_ltv7_peer: sendlist len',len(sendlist) + + for i in range(0,len(sendlist)): + tuple = sendlist[i] + + permid,msgid,msg = tuple + self.delete_msg(permid,msgid) + + + # + # Incoming connections + # + def handleConnection(self, exc, permid, selversion, locally_initiated): + + if selversion < OLPROTO_VER_SEVENTH: + return True + + if exc is None: + self.online_fsext_peers.add(permid) + + # if we meet peer otherwise, dequeue messages + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: Met peer, attempting to deliver 
msgs",show_permid_short(permid) + + # If we're initiating the connection from this handler, the + # fmsg_connect_callback will get called twice: + # 1. here + # 2. just a bit later when the callback for a successful connect() + # is called. + # Solution: we delay this call, which should give 2. the time to + # run and remove msgs from the queue. + # + # Better: remove msgs from queue when sent and reinsert if send fails + # + friendship_delay_func = lambda:self.fmsg_connect_callback(None,None,permid,selversion) + self.overlay_bridge.add_task(friendship_delay_func,4) + else: + try: + self.online_fsext_peers.remove(permid) + except: + pass + + return True + + + # + # Incoming messages + # + def handleMessage(self, permid, selversion, message): + """ Handle incoming Friend Request, and their response""" + + if selversion < OLPROTO_VER_SEVENTH: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: Got FRIENDSHIP msg from peer with old protocol",show_permid_short(permid) + return False + + try: + d = bdecode(message[1:]) + except: + print_exc() + return False + + return self.process_message(permid,selversion,d) + + + def process_message(self,permid,selversion,d): + + if self.isValidFriendMsg(d): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: Got FRIENDSHIP msg",d['msg type'] + + # If the message is to become a friend, i.e., a friendship request + if d['msg type'] == F_REQUEST_MSG: + self.process_request(permid,d) + + # If the message is to have a response on friend request + elif d['msg type'] == F_RESPONSE_MSG: + self.process_response(permid,d) + + # If the receiving message is to delegate the Friendship request to the target peer + elif d['msg type'] == F_FORWARD_MSG: + return self.process_forward(permid,selversion,d) + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: Got unknown msg type",d['msg type'] + return False + + return True + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: Got bad FRIENDSHIP message" + return False + + def process_request(self,permid,d): + # to see that the following peer is already a friend, or not + fs = self.frienddb.getFriendState(permid) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: process_request: Got request, fs",show_permid_short(permid),fs + + + if fs == FS_NOFRIEND or fs == FS_HE_DENIED: + # not on HE_INVITED, to filter out duplicates + + # And if that peer is not already added as a friend, either approved, or unapproved + # call friend dialog + self.frienddb.setFriendState(permid, commit=True, state = FS_HE_INVITED) + + # FUTURE: always do callback, such that we also know about failed + # attempts + if self.usercallback is not None: + friendship_usercallback = lambda:self.usercallback(permid,[]) + self.session.uch.perform_usercallback(friendship_usercallback) + elif fs == FS_I_INVITED: + # In case, requestee is already added as friend, just make this + # requestee as an approved friend + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: process_request: Got request but I already invited him" + + self.frienddb.setFriendState(permid, commit=True, state = FS_MUTUAL) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: process_request: Got request but I already invited him: sending reply" + + 
self.send_friendship_msg(permid,F_RESPONSE_MSG,{'response':1},submit=True) + elif fs == FS_MUTUAL: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: process_request: Got request but already approved" + elif fs == FS_I_DENIED: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: process_request: Got request but I already denied" + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: process_request: Got request, but fs is",fs + + def process_response(self,permid,d): + + mypermid = self.session.get_permid() + + + self.friendshipStatistics_db.updateFriendshipResponseTime( bin2str(mypermid), + bin2str(permid), + int(time())) + + + fs = self.frienddb.getFriendState(permid) + + # If the request to add has been approved + if d['response'] == 1: + if fs == FS_I_INVITED: + self.frienddb.setFriendState(permid, commit=True, state = FS_MUTUAL) + elif fs != FS_MUTUAL: + # Unsollicited response, consider this an invite, if not already friend + self.frienddb.setFriendState(permid, commit=True, state = FS_HE_INVITED) + else: + # He denied our friendship + self.frienddb.setFriendState(permid, commit=True, state = FS_HE_DENIED) + + + def process_forward(self,permid,selversion,d): + + mypermid = self.session.get_permid() + if d['dest']['permid'] == mypermid: + # This is a forward containing a message meant for me + + # First add original sender to DB so we can connect back to it + self.addPeerToDB(d['source']) + + self.process_message(d['source']['permid'],selversion,d['msg']) + + return True + + + else: + # Queue and forward + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: process_fwd: Forwarding immediately to",show_permid_short(d['dest']['permid']) + + if permid != d['source']['permid']: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: process_fwd: Forwarding: Illegal, source is not sender, and dest is not me" + return False + # First add dest to DB so we can connect to it + + # FUTURE: don't let just any peer overwrite the IP+port of a peer + # if self.peer_db.hasPeer(d['dest']['permid']): + self.addPeerToDB(d['dest']) + + self.send_friendship_msg(d['dest']['permid'],d['msg type'],d,submit=True) + return True + + def addPeerToDB(self,mpeer): + peer = {} + peer['permid'] = mpeer['permid'] + peer['ip'] = mpeer['ip'] + peer['port'] = mpeer['port'] + peer['last_seen'] = 0 + self.peerdb.addPeer(mpeer['permid'],peer,update_dns=True,commit=True) + + + def create_friendship_msg(self,type,params): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: create_fs_msg:",type,`params` + + mypermid = self.session.get_permid() + myip = self.session.get_external_ip() + myport = self.session.get_listen_port() + + d ={'msg type':type} + if type == F_RESPONSE_MSG: + d['response'] = params['response'] + elif type == F_FORWARD_MSG: + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: create: fwd: params",`params` + peer = self.peerdb.getPeer(params['destpermid']) # ,keys=['ip', 'port']) + if peer is None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "friendship: create msg: Don't know IP + port of peer", show_permid_short(params['destpermid']) + return + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "friendship: create msg: Peer at",peer + + # FUTURE: add 
signatures on ip+port + src = {'permid':mypermid,'ip':myip,'port':myport} + dst = {'permid':params['destpermid'],'ip':peer['ip'],'port':peer['port']} + d.update({'source':src,'dest':dst,'msg':params['msg']}) + return d + + + + def isValidFriendMsg(self,d): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: msg: payload is",`d` + + + if type(d) != DictType: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: msg: payload is not bencoded dict" + return False + if not 'msg type' in d: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: msg: dict misses key",'msg type' + return False + + if d['msg type'] == F_REQUEST_MSG: + keys = d.keys()[:] + if len(keys)-1 != 0: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: msg: REQ: contains superfluous keys",keys + return False + return True + + if d['msg type'] == F_RESPONSE_MSG: + if (d.has_key('response') and (d['response'] == 1 or d['response'] == 0)): + return True + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: msg: RESP: something wrong",`d` + return False + + if d['msg type'] == F_FORWARD_MSG: + if not self.isValidPeer(d['source']): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: msg: FWD: source bad",`d` + return False + if not self.isValidPeer(d['dest']): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: msg: FWD: dest bad",`d` + return False + if not 'msg' in d: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: msg: FWD: no msg",`d` + return False + if not self.isValidFriendMsg(d['msg']): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: msg: FWD: bad msg",`d` + return False + if d['msg']['msg type'] == F_FORWARD_MSG: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: msg: FWD: cannot contain fwd",`d` + return False + return True + + return False + + + def isValidPeer(self,d): + if (d.has_key('ip') and d.has_key('port') and d.has_key('permid') + and validPermid(d['permid']) + and validIP(d['ip'])and validPort(d['port'])): + return True + else: + return False + + + def save_msg(self,permid,type,params): + + if not permid in self.currmsgs: + self.currmsgs[permid] = {} + + mypermid = self.session.get_permid() + now = time() + attempt = 1 + + base = mypermid+permid+str(now)+str(random.random()) + msgid = sha(base).hexdigest() + msgrec = {'permid':permid,'type':type,'params':params,'attempt':attempt,'t':now,'forwarded':False} + + msgid2rec = self.currmsgs[permid] + msgid2rec[msgid] = msgrec + + def delete_msg(self,permid,msgid): + try: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: Deleting msg",show_permid_short(permid),msgid + msgid2rec = self.currmsgs[permid] + del msgid2rec[msgid] + except: + #print_exc() + pass + + def set_msg_forwarded(self,permid,msgid): + try: + msgid2rec = self.currmsgs[permid] + msgid2rec[msgid]['forwarded'] = True + except: + print_exc() + + def reschedule_connects(self): + """ This function is run periodically and reconnects to peers when + messages meant for it are due to be retried + """ + now = time() + delmsgids = [] + reconnectpermids = Set() + for permid in self.currmsgs: + msgid2rec = self.currmsgs[permid] + for msgid in 
msgid2rec: + msgrec = msgid2rec[msgid] + + eta = self.calc_eta(msgrec) + + if DEBUG: + diff = None + if eta is not None: + diff = eta - now + + if DEBUG: + peer = self.peerdb.getPeer(permid) + if peer is None: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: reschedule: ETA: wtf, peer not in DB!",show_permid_short(permid) + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: reschedule: ETA",show_permid_short(permid),peer['name'],diff + + if eta is None: + delmsgids.append((permid,msgid)) + elif now > eta-1.0: # -1 for round off + # reconnect + reconnectpermids.add(permid) + msgrec['attempt'] = msgrec['attempt'] + 1 + + # Delegate + if msgrec['type'] == F_REQUEST_MSG and msgrec['attempt'] == 2: + self.delegate_friendship_making(targetpermid=permid,targetmsgid=msgid) + + # Remove timed out messages + for permid,msgid in delmsgids: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: reschedule: Deleting",show_permid_short(permid),msgid + self.delete_msg(permid,msgid) + + # Initiate connections to peers for which we have due messages + for permid in reconnectpermids: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: reschedule: Reconnect to",show_permid_short(permid) + + self.overlay_bridge.connect(permid,self.fmsg_connect_callback) + + # Reschedule this periodic task + self.overlay_bridge.add_task(self.reschedule_connects,RESCHEDULE_INTERVAL) + + + def calc_eta(self,msgrec): + if msgrec['type'] == F_FORWARD_MSG: + if msgrec['attempt'] >= 10: + # Stop trying to forward after a given period + return None + # exponential backoff, on 10th attempt we would wait 24hrs + eta = msgrec['t'] + pow(3.116,msgrec['attempt']) + else: + if msgrec['attempt'] >= int(7*24*3600/RESEND_INTERVAL): + # Stop trying to forward after a given period = 1 week + return None + + eta = msgrec['t'] + msgrec['attempt']*RESEND_INTERVAL + return eta + + + def get_msgs_as_sendlist(self,targetpermid=None): + + sendlist = [] + if targetpermid is None: + permids = self.currmsgs.keys() + else: + permids = [targetpermid] + + for permid in permids: + msgid2rec = self.currmsgs.get(permid,{}) + for msgid in msgid2rec: + msgrec = msgid2rec[msgid] + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: get_msgs: Creating",msgrec['type'],`msgrec['params']`,msgid + if msgrec['type'] == F_FORWARD_MSG: + msg = msgrec['params'] + else: + msg = self.create_friendship_msg(msgrec['type'],msgrec['params']) + tuple = (permid,msgid,msg) + sendlist.append(tuple) + return sendlist + + + def get_msgs_as_fwd_sendlist(self,targetpermid=None,targetmsgid=None): + + sendlist = [] + if targetpermid is None: + permids = self.currmsgs.keys() + else: + permids = [targetpermid] + + for permid in permids: + msgid2rec = self.currmsgs.get(permid,{}) + for msgid in msgid2rec: + if targetmsgid is None or msgid == targetmsgid: + msgrec = msgid2rec[msgid] + if msgrec['type'] != F_FORWARD_MSG and msgrec['forwarded'] == False: + # Don't forward forwards, or messages already forwarded + + # Create forward message for original + params = {} + params['destpermid'] = permid + params['msg'] = self.create_friendship_msg(msgrec['type'],msgrec['params']) + + msg = self.create_friendship_msg(F_FORWARD_MSG,params) + tuple = (permid,msgid,msg) + sendlist.append(tuple) + return sendlist + + + + def delegate_friendship_making(self,targetpermid=None,targetmsgid=None): + if DEBUG: + print 
>>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: delegate:",show_permid_short(targetpermid),targetmsgid + + # 1. See if there are undelivered msgs + sendlist = self.get_msgs_as_fwd_sendlist(targetpermid=targetpermid,targetmsgid=targetmsgid) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: delegate: Number of messages queued",len(sendlist) + + if len(sendlist) == 0: + return + + # 2. Get friends, not necess. online + friend_permids = self.frienddb.getFriends() + + if DEBUG: + l = len(friend_permids) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: delegate: friend helpers",l + for permid in friend_permids: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: delegate: friend helper",show_permid_short(permid) + + # 3. Sort online peers on similarity, highly similar should be tastebuddies + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: delegate: Number of online v7 peers",len(self.online_fsext_peers) + tastebuddies = self.peerdb.getPeers(list(self.online_fsext_peers),['similarity','name']) + tastebuddies.sort(sim_desc_cmp) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: delegate: Sorted tastebuddies",`tastebuddies` + + tastebuddies_permids = [] + size = min(10,len(tastebuddies)) + for i in xrange(0,size): + peer = tastebuddies[i] + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: delegate: buddy helper",show_permid_short(peer['permid']) + tastebuddies_permids.append(peer['permid']) + + # 4. Create list of helpers: + # + # Policy: Helpers are a mix of friends and online tastebuddies + # with 70% friends (if avail) and 30% tastebuddies + # + # I chose this policy because friends are not guaranteed to be online + # and waiting to see if we can connect to them before switching to + # the online taste buddies is complex code-wise and time-consuming. 
+ # We don't have a lot of time when this thing is called by Session.shutdown() + # + nwant = 10 + nfriends = int(nwant * .7) + nbuddies = int(nwant * .3) + + part1 = sampleorlist(friend_permids,nfriends) + fill = nfriends-len(part1) # if no friends, use tastebuddies + part2 = sampleorlist(tastebuddies_permids,nbuddies+fill) + helpers = part1 + part2 + + if DEBUG: + l = len(helpers) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: delegate: end helpers",l + for permid in helpers: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: delegate: end helper",show_permid_short(permid),self.frienddb.getFriendState(permid),self.peerdb.getPeers([permid],['similarity','name']) + + + for tuple in sendlist: + destpermid,msgid,msg = tuple + for helperpermid in helpers: + if destpermid != helperpermid: + connect_callback = lambda exc,dns,permid,selversion:self.forward_connect_callback(exc,dns,permid,selversion,destpermid,msgid,msg) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: delegate: Connecting to",show_permid_short(helperpermid) + + self.overlay_bridge.connect(helperpermid, connect_callback) + + + def forward_connect_callback(self,exc,dns,permid,selversion,destpermid,msgid,msg): + if exc is None: + + if selversion < OLPROTO_VER_SEVENTH: + return + + send_callback = lambda exc,permid:self.forward_send_callback(exc,permid,destpermid,msgid) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: forward_connect_callback: Sending",`msg` + self.overlay_bridge.send(permid, FRIENDSHIP + bencode(msg), send_callback) + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: forward: Could not connect to helper",show_permid_short(permid) + + + def forward_send_callback(self,exc,permid,destpermid,msgid): + if DEBUG: + if exc is None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: forward: Success forwarding to helper",show_permid_short(permid) + self.set_msg_forwarded(destpermid,msgid) + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: forward: Failed to forward to helper",show_permid_short(permid) + + def checkpoint(self): + statedir = self.session.get_state_dir() + newfilename = os.path.join(statedir,'new-friendship-msgs.pickle') + finalfilename = os.path.join(statedir,'friendship-msgs.pickle') + try: + f = open(newfilename,"wb") + cPickle.dump(self.currmsgs,f) + f.close() + try: + os.remove(finalfilename) + except: + # If first time, it doesn't exist + print_exc() + os.rename(newfilename,finalfilename) + except: + print_exc() + + def load_checkpoint(self): + statedir = self.session.get_state_dir() + finalfilename = os.path.join(statedir,'friendship-msgs.pickle') + try: + f = open(finalfilename,"rb") + self.currmsgs = cPickle.load(f) + except: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "friendship: could not read previous messages from", finalfilename + + # Increase # attempts till current time + now = time() + for permid in self.currmsgs: + msgid2rec = self.currmsgs[permid] + for msgid in msgid2rec: + msgrec = msgid2rec[msgid] + diff = now - msgrec['t'] + a = int(diff/RESEND_INTERVAL) + a += 1 + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","friendship: load_checkp: Changing #attempts from",msgrec['attempt'],a + msgrec['attempt'] = a + + +def 
sim_desc_cmp(peera,peerb): + if peera['similarity'] < peerb['similarity']: + return 1 + elif peera['similarity'] > peerb['similarity']: + return -1 + else: + return 0 + +def sampleorlist(z,k): + if len(z) < k: + return z + else: + return random.sample(k) diff --git a/tribler-mod/Tribler/Core/SocialNetwork/OverlapMsgHandler.py b/tribler-mod/Tribler/Core/SocialNetwork/OverlapMsgHandler.py new file mode 100644 index 0000000..69c6efe --- /dev/null +++ b/tribler-mod/Tribler/Core/SocialNetwork/OverlapMsgHandler.py @@ -0,0 +1,274 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information + +import sys +from time import time +from traceback import print_exc + +from Tribler.Core.BitTornado.bencode import bencode, bdecode +from Tribler.Core.BitTornado.BT1.MessageID import * + +from Tribler.Core.Utilities.utilities import * +from Tribler.Core.Utilities.unicode import str2unicode + +DEBUG = False + +MIN_OVERLAP_WAIT = 12.0*3600.0 # half a day in seconds + +ICON_MAX_SIZE = 10*1024 + +class OverlapMsgHandler: + + def __init__(self): + + self.recentpeers = {} + + def register(self, overlay_bridge, launchmany): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: bootstrap: overlap" + self.mypermid = launchmany.session.get_permid() + self.session = launchmany.session + self.peer_db = launchmany.peer_db + self.superpeer_db = launchmany.superpeer_db + self.overlay_bridge = overlay_bridge + + # + # Incoming SOCIAL_OVERLAP + # + def recv_overlap(self,permid,message,selversion): + # 1. Check syntax + try: + oldict = bdecode(message[1:]) + except: + print_exc() + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: SOCIAL_OVERLAP: error becoding" + return False + + if not isValidDict(oldict,permid): + return False + + # 2. Process + self.process_overlap(permid,oldict) + return True + + def process_overlap(self,permid,oldict): + #self.print_hashdict(oldict['hashnetwork']) + + # 1. Clean recently contacted admin + self.clean_recentpeers() + + # 3. Save persinfo + hrwidinfo + ipinfo + if self.peer_db.hasPeer(permid): + save_ssocnet_peer(self,permid,oldict,False,False,False) + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: overlap: peer unknown?! Weird, we just established connection" + + # 6. Reply + if not (permid in self.recentpeers.keys()): + self.recentpeers[permid] = time() + self.reply_to_overlap(permid) + + def clean_recentpeers(self): + newdict = {} + for permid2,t in self.recentpeers.iteritems(): + if (t+MIN_OVERLAP_WAIT) > time(): + newdict[permid2] = t + #elif DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: overlap: clean recent: not keeping",show_permid_short(permid2) + + self.recentpeers = newdict + + def reply_to_overlap(self,permid): + oldict = self.create_oldict() + self.send_overlap(permid,oldict) + + # + # At overlay-connection establishment time. 
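The overlap exchange initiated below puts a single bencoded dict behind the SOCIAL_OVERLAP message id. A hand-built payload that the isValidDict()/isValidPersinfo() checks further down would accept, sketched with placeholder icon bytes and assuming SOCIAL_OVERLAP is importable by name from MessageID:

from Tribler.Core.BitTornado.bencode import bencode
from Tribler.Core.BitTornado.BT1.MessageID import SOCIAL_OVERLAP

oldict = {
    'persinfo': {
        'name': 'alice',              # required, plain str
        'icontype': 'image/jpeg',     # MIME type, exactly one '/'
        'icondata': '\xff\xd8' * 64,  # placeholder bytes; real code uses session.get_mugshot(), max ICON_MAX_SIZE
    }
}
wire_message = SOCIAL_OVERLAP + bencode(oldict)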
+ # + def initiate_overlap(self,permid,locally_initiated): + self.clean_recentpeers() + if not (permid in self.recentpeers.keys() or permid in self.superpeer_db.getSuperPeers()): + if locally_initiated: + # Make sure only one sends it + self.recentpeers[permid] = time() + self.reply_to_overlap(permid) + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: overlap: active: he should initiate" + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: overlap: active: peer recently contacted already" + + # + # General + # + def create_oldict(self): + """ + Send: + * Personal info: name, picture, rwidhashes + * IP info: IP + port + Both are individually signed by us so dest can safely + propagate. We distinguish between what a peer said + is his IP+port and the information obtained from the network + or from other peers (i.e. BUDDYCAST) + """ + + persinfo = {'name':self.session.get_nickname()} + # See if we can find icon + iconmime, icondata = self.session.get_mugshot() + if icondata: + persinfo.update({'icontype':iconmime, 'icondata':icondata}) + + oldict = {} + oldict['persinfo'] = persinfo + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'Overlap: Sending oldict: %s' % `oldict` + + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: overlap: active: sending hashdict" + # self.print_hashdict(oldict['hashnetwork']) + + return oldict + + + def send_overlap(self,permid,oldict): + try: + body = bencode(oldict) + ## Optimization: we know we're currently connected + self.overlay_bridge.send(permid, SOCIAL_OVERLAP + body,self.send_callback) + except: + if DEBUG: + print_exc(file=sys.stderr) + + + def send_callback(self,exc,permid): + if exc is not None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: SOCIAL_OVERLAP: error sending to",show_permid_short(permid),exc + + # + # Internal methods + # + + +def isValidDict(oldict,source_permid): + if not isinstance(oldict, dict): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: SOCIAL_OVERLAP: not a dict" + return False + k = oldict.keys() + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: SOCIAL_OVERLAP: keys",k + + if not ('persinfo' in k) or not isValidPersinfo(oldict['persinfo'],False): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: SOCIAL_OVERLAP: key 'persinfo' missing or value wrong type in dict" + return False + + for key in k: + if key not in ['persinfo']: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: SOCIAL_OVERLAP: unknown key",key,"in dict" + return False + + return True + + + +def isValidPersinfo(persinfo,signed): + if not isinstance(persinfo,dict): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: SOCIAL_*: persinfo: not a dict" + return False + + k = persinfo.keys() + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: SOCIAL_*: persinfo: keys are",k + if not ('name' in k) or not isinstance(persinfo['name'],str): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: SOCIAL_*: persinfo: key 'name' missing or value wrong type" + return False + + if 'icontype' in k and not isValidIconType(persinfo['icontype']): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: 
SOCIAL_*: persinfo: key 'icontype' value wrong type" + return False + + if 'icondata' in k and not isValidIconData(persinfo['icondata']): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: SOCIAL_*: persinfo: key 'icondata' value wrong type" + return False + + if ('icontype' in k and not ('icondata' in k)) or ('icondata' in k and not ('icontype' in k)): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: SOCIAL_*: persinfo: key 'icontype' without 'icondata' or vice versa" + return False + + if signed: + if not ('insert_time' in k) or not isinstance(persinfo['insert_time'],int): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: SOCIAL_*: persinfo: key 'insert_time' missing or value wrong type" + return False + + for key in k: + if key not in ['name','icontype','icondata','insert_time']: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: SOCIAL_*: persinfo: unknown key",key,"in dict" + return False + + return True + + +def isValidIconType(type): + """ MIME-type := type "/" subtype ... """ + if not isinstance(type,str): + return False + idx = type.find('/') + ridx = type.rfind('/') + return idx != -1 and idx == ridx + +def isValidIconData(data): + if not isinstance(data,str): + return False + +# if DEBUG: +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: SOCIAL_*: persinfo: IconData length is",len(data) + + return len(data) <= ICON_MAX_SIZE + + + +def save_ssocnet_peer(self,permid,record,persinfo_ignore,hrwidinfo_ignore,ipinfo_ignore): + """ This function is used by both BootstrapMsgHandler and + OverlapMsgHandler, and uses their database pointers. Hence the self + parameter. persinfo_ignore and ipinfo_ignore are booleans that + indicate whether to ignore the personal info, resp. ip info in + this record, because they were unsigned in the message and + we already received signed versions before. + """ + if permid == self.mypermid: + return + + # 1. Save persinfo + if not persinfo_ignore: + persinfo = record['persinfo'] + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: Got persinfo",persinfo.keys() + if len(persinfo.keys()) > 1: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: Got persinfo THUMB THUMB THUMB THUMB" + + # Arno, 2008-08-22: to avoid UnicodeDecode errors when commiting + # on sqlite + name = str2unicode(persinfo['name']) + + if self.peer_db.hasPeer(permid): + self.peer_db.updatePeer(permid, name=name) + else: + self.peer_db.addPeer(permid,{'name':name}) + + # b. 
Save icon + if 'icontype' in persinfo and 'icondata' in persinfo: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: saving icon for",show_permid_short(permid),name + self.peer_db.updatePeerIcon(permid, persinfo['icontype'],persinfo['icondata']) diff --git a/tribler-mod/Tribler/Core/SocialNetwork/OverlapMsgHandler.py.bak b/tribler-mod/Tribler/Core/SocialNetwork/OverlapMsgHandler.py.bak new file mode 100644 index 0000000..310a9c0 --- /dev/null +++ b/tribler-mod/Tribler/Core/SocialNetwork/OverlapMsgHandler.py.bak @@ -0,0 +1,273 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information + +import sys +from time import time +from traceback import print_exc + +from Tribler.Core.BitTornado.bencode import bencode, bdecode +from Tribler.Core.BitTornado.BT1.MessageID import * + +from Tribler.Core.Utilities.utilities import * +from Tribler.Core.Utilities.unicode import str2unicode + +DEBUG = False + +MIN_OVERLAP_WAIT = 12.0*3600.0 # half a day in seconds + +ICON_MAX_SIZE = 10*1024 + +class OverlapMsgHandler: + + def __init__(self): + + self.recentpeers = {} + + def register(self, overlay_bridge, launchmany): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: bootstrap: overlap" + self.mypermid = launchmany.session.get_permid() + self.session = launchmany.session + self.peer_db = launchmany.peer_db + self.superpeer_db = launchmany.superpeer_db + self.overlay_bridge = overlay_bridge + + # + # Incoming SOCIAL_OVERLAP + # + def recv_overlap(self,permid,message,selversion): + # 1. Check syntax + try: + oldict = bdecode(message[1:]) + except: + print_exc() + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: SOCIAL_OVERLAP: error becoding" + return False + + if not isValidDict(oldict,permid): + return False + + # 2. Process + self.process_overlap(permid,oldict) + return True + + def process_overlap(self,permid,oldict): + #self.print_hashdict(oldict['hashnetwork']) + + # 1. Clean recently contacted admin + self.clean_recentpeers() + + # 3. Save persinfo + hrwidinfo + ipinfo + if self.peer_db.hasPeer(permid): + save_ssocnet_peer(self,permid,oldict,False,False,False) + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: overlap: peer unknown?! Weird, we just established connection" + + # 6. Reply + if not (permid in self.recentpeers.keys()): + self.recentpeers[permid] = time() + self.reply_to_overlap(permid) + + def clean_recentpeers(self): + newdict = {} + for permid2,t in self.recentpeers.iteritems(): + if (t+MIN_OVERLAP_WAIT) > time(): + newdict[permid2] = t + #elif DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: overlap: clean recent: not keeping",show_permid_short(permid2) + + self.recentpeers = newdict + + def reply_to_overlap(self,permid): + oldict = self.create_oldict() + self.send_overlap(permid,oldict) + + # + # At overlay-connection establishment time. 
+ # + def initiate_overlap(self,permid,locally_initiated): + self.clean_recentpeers() + if not (permid in self.recentpeers.keys() or permid in self.superpeer_db.getSuperPeers()): + if locally_initiated: + # Make sure only one sends it + self.recentpeers[permid] = time() + self.reply_to_overlap(permid) + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: overlap: active: he should initiate" + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: overlap: active: peer recently contacted already" + + # + # General + # + def create_oldict(self): + """ + Send: + * Personal info: name, picture, rwidhashes + * IP info: IP + port + Both are individually signed by us so dest can safely + propagate. We distinguish between what a peer said + is his IP+port and the information obtained from the network + or from other peers (i.e. BUDDYCAST) + """ + + persinfo = {'name':self.session.get_nickname()} + # See if we can find icon + iconmime, icondata = self.session.get_mugshot() + if icondata: + persinfo.update({'icontype':iconmime, 'icondata':icondata}) + + oldict = {} + oldict['persinfo'] = persinfo + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'Overlap: Sending oldict: %s' % `oldict` + + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: overlap: active: sending hashdict" + # self.print_hashdict(oldict['hashnetwork']) + + return oldict + + + def send_overlap(self,permid,oldict): + try: + body = bencode(oldict) + ## Optimization: we know we're currently connected + self.overlay_bridge.send(permid, SOCIAL_OVERLAP + body,self.send_callback) + except: + if DEBUG: + print_exc(file=sys.stderr) + + + def send_callback(self,exc,permid): + if exc is not None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: SOCIAL_OVERLAP: error sending to",show_permid_short(permid),exc + + # + # Internal methods + # + + +def isValidDict(oldict,source_permid): + if not isinstance(oldict, dict): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: SOCIAL_OVERLAP: not a dict" + return False + k = oldict.keys() + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: SOCIAL_OVERLAP: keys",k + + if not ('persinfo' in k) or not isValidPersinfo(oldict['persinfo'],False): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: SOCIAL_OVERLAP: key 'persinfo' missing or value wrong type in dict" + return False + + for key in k: + if key not in ['persinfo']: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: SOCIAL_OVERLAP: unknown key",key,"in dict" + return False + + return True + + + +def isValidPersinfo(persinfo,signed): + if not isinstance(persinfo,dict): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: SOCIAL_*: persinfo: not a dict" + return False + + k = persinfo.keys() + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: SOCIAL_*: persinfo: keys are",k + if not ('name' in k) or not isinstance(persinfo['name'],str): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: SOCIAL_*: persinfo: key 'name' missing or value wrong type" + return False + + if 'icontype' in k and not isValidIconType(persinfo['icontype']): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: 
SOCIAL_*: persinfo: key 'icontype' value wrong type" + return False + + if 'icondata' in k and not isValidIconData(persinfo['icondata']): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: SOCIAL_*: persinfo: key 'icondata' value wrong type" + return False + + if ('icontype' in k and not ('icondata' in k)) or ('icondata' in k and not ('icontype' in k)): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: SOCIAL_*: persinfo: key 'icontype' without 'icondata' or vice versa" + return False + + if signed: + if not ('insert_time' in k) or not isinstance(persinfo['insert_time'],int): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: SOCIAL_*: persinfo: key 'insert_time' missing or value wrong type" + return False + + for key in k: + if key not in ['name','icontype','icondata','insert_time']: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: SOCIAL_*: persinfo: unknown key",key,"in dict" + return False + + return True + + +def isValidIconType(type): + """ MIME-type := type "/" subtype ... """ + if not isinstance(type,str): + return False + idx = type.find('/') + ridx = type.rfind('/') + return idx != -1 and idx == ridx + +def isValidIconData(data): + if not isinstance(data,str): + return False + +# if DEBUG: +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: SOCIAL_*: persinfo: IconData length is",len(data) + + return len(data) <= ICON_MAX_SIZE + + + +def save_ssocnet_peer(self,permid,record,persinfo_ignore,hrwidinfo_ignore,ipinfo_ignore): + """ This function is used by both BootstrapMsgHandler and + OverlapMsgHandler, and uses their database pointers. Hence the self + parameter. persinfo_ignore and ipinfo_ignore are booleans that + indicate whether to ignore the personal info, resp. ip info in + this record, because they were unsigned in the message and + we already received signed versions before. + """ + if permid == self.mypermid: + return + + # 1. Save persinfo + if not persinfo_ignore: + persinfo = record['persinfo'] + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: Got persinfo",persinfo.keys() + if len(persinfo.keys()) > 1: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: Got persinfo THUMB THUMB THUMB THUMB" + + # Arno, 2008-08-22: to avoid UnicodeDecode errors when commiting + # on sqlite + name = str2unicode(persinfo['name']) + + if self.peer_db.hasPeer(permid): + self.peer_db.updatePeer(permid, name=name) + else: + self.peer_db.addPeer(permid,{'name':name}) + + # b. Save icon + if 'icontype' in persinfo and 'icondata' in persinfo: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: saving icon for",show_permid_short(permid),name + self.peer_db.updatePeerIcon(permid, persinfo['icontype'],persinfo['icondata']) diff --git a/tribler-mod/Tribler/Core/SocialNetwork/RemoteQueryMsgHandler.py b/tribler-mod/Tribler/Core/SocialNetwork/RemoteQueryMsgHandler.py new file mode 100644 index 0000000..e8f5a64 --- /dev/null +++ b/tribler-mod/Tribler/Core/SocialNetwork/RemoteQueryMsgHandler.py @@ -0,0 +1,442 @@ +from time import localtime, strftime +# Written by Arno Bakker, Jie Yang +# see LICENSE.txt for license information +# +# Send free-form queries to all the peers you are connected to. +# +# TODO: make sure we return also items from download history, but need to verify if +# their status is still checked. 
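The protocol behind this handler is small: a QUERY message is the QUERY id byte followed by a bencoded dict holding the 'SIMPLE '-prefixed keyword string and a 20-byte random id that later ties the QUERY_REPLY back to the right user callback. A minimal sketch of what send_query() ends up putting on the wire, assuming QUERY can be imported by name from MessageID:

from M2Crypto import Rand
from Tribler.Core.BitTornado.bencode import bencode
from Tribler.Core.BitTornado.BT1.MessageID import QUERY

QUERY_ID_SIZE = 20  # same constant as below

def build_query_message(keywords):
    # 'SIMPLE ' + space-separated keywords is the only form process_query()
    # understands; the id is echoed verbatim in the reply.
    d = {'q': 'SIMPLE ' + ' '.join(keywords),
         'id': Rand.rand_bytes(QUERY_ID_SIZE)}
    return QUERY + bencode(d)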
+# +# + +import os +import sys +from time import time +from sets import Set +from traceback import print_stack, print_exc +import datetime +import time as T + +from M2Crypto import Rand + +from Tribler.Core.BitTornado.bencode import bencode,bdecode +from Tribler.Core.CacheDB.sqlitecachedb import bin2str, str2bin +from Tribler.Core.BitTornado.BT1.MessageID import * + +from Tribler.Core.Overlay.SecureOverlay import OLPROTO_VER_SIXTH, OLPROTO_VER_NINE +from Tribler.Core.Utilities.utilities import show_permid_short,show_permid +from Tribler.Core.Statistics.Logger import OverlayLogger +from Tribler.Core.Utilities.unicode import dunno2unicode +from Tribler.Core.Search.SearchManager import SearchManager + +MAX_RESULTS = 20 +QUERY_ID_SIZE = 20 +MAX_QUERY_REPLY_LEN = 100*1024 # 100K +MAX_PEERS_TO_QUERY = 10 + +DEBUG = False + +class FakeUtility: + + def __init__(self,config_path): + self.config_path = config_path + + def getConfigPath(self): + return self.config_path + + +class RemoteQueryMsgHandler: + + __single = None + + def __init__(self): + if RemoteQueryMsgHandler.__single: + raise RuntimeError, "RemoteQueryMsgHandler is singleton" + RemoteQueryMsgHandler.__single = self + + + self.connections = Set() # only connected remote_search_peers + self.query_ids2rec = {} # ARNOCOMMENT: TODO: purge old entries... + self.overlay_log = None + self.registered = False + self.logfile = None + + def getInstance(*args, **kw): + if RemoteQueryMsgHandler.__single is None: + RemoteQueryMsgHandler(*args, **kw) + return RemoteQueryMsgHandler.__single + getInstance = staticmethod(getInstance) + + + def register(self,overlay_bridge,launchmany,config,bc_fac,log=''): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rquery: register" + self.overlay_bridge = overlay_bridge + self.launchmany= launchmany + self.search_manager = SearchManager(launchmany.torrent_db) + self.peer_db = launchmany.peer_db + self.config = config + self.bc_fac = bc_fac # May be None + if log: + self.overlay_log = OverlayLogger.getInstance(log) + self.torrent_dir = os.path.abspath(self.config['torrent_collecting_dir']) + self.registered = True + + # + # Incoming messages + # + def handleMessage(self,permid,selversion,message): + if not self.registered: + return True + + t = message[0] + if t == QUERY: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rquery: Got QUERY",len(message) + return self.recv_query(permid,message,selversion) + if t == QUERY_REPLY: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rquery: Got QUERY_REPLY",len(message) + return self.recv_query_reply(permid,message,selversion) + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rquery: UNKNOWN OVERLAY MESSAGE", ord(t) + return False + + # + # Incoming connections + # + def handleConnection(self,exc,permid,selversion,locally_initiated): + if not self.registered: + return True + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rquery: handleConnection",exc,"v",selversion,"local",locally_initiated + if exc is not None: + return + + if selversion < OLPROTO_VER_SIXTH: + return True + + if exc is None: + self.connections.add(permid) + else: + self.connections.remove(permid) + + return True + + # + # Send query + # + def send_query(self,query,usercallback,max_peers_to_query=MAX_PEERS_TO_QUERY): + """ Called by GUI Thread """ + if max_peers_to_query is None: + max_peers_to_query = MAX_PEERS_TO_QUERY + if DEBUG: + 
print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rquery: send_query",query + if max_peers_to_query > 0: + send_query_func = lambda:self.network_send_query_callback(query,usercallback,max_peers_to_query) + self.overlay_bridge.add_task(send_query_func,0) + + + def network_send_query_callback(self,query,usercallback,max_peers_to_query): + """ Called by overlay thread """ + p = self.create_query(query,usercallback) + m = QUERY+p + query_conn_callback_lambda = lambda exc,dns,permid,selversion:self.conn_callback(exc,dns,permid,selversion,m) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rquery: send_query: Connected",len(self.connections),"peers" + + #print "******** send query net cb:", query, len(self.connections), self.connections + + peers_to_query = 0 + for permid in self.connections: + self.overlay_bridge.connect(permid,query_conn_callback_lambda) + peers_to_query += 1 + + if peers_to_query < max_peers_to_query and self.bc_fac and self.bc_fac.buddycast_core: + query_cand = self.bc_fac.buddycast_core.getRemoteSearchPeers(MAX_PEERS_TO_QUERY-peers_to_query) + for permid in query_cand: + if permid not in self.connections: # don't call twice + self.overlay_bridge.connect(permid,query_conn_callback_lambda) + peers_to_query += 1 + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rquery: send_query: Sent to",peers_to_query,"peers" + + def create_query(self,query,usercallback): + d = {} + d['q'] = query + d['id'] = self.create_and_register_query_id(query,usercallback) + return bencode(d) + + def create_and_register_query_id(self,query,usercallback): + id = Rand.rand_bytes(QUERY_ID_SIZE) + queryrec = {'query':query,'usercallback':usercallback} + self.query_ids2rec[id] = queryrec + return id + + def is_registered_query_id(self,id): + if id in self.query_ids2rec: + return self.query_ids2rec[id] + else: + return None + + def conn_callback(self,exc,dns,permid,selversion,message): + if exc is None and selversion >= OLPROTO_VER_SIXTH: + self.overlay_bridge.send(permid,message,self.send_callback) + + def send_callback(self,exc,permid): + #print "******* queury was sent to", show_permid_short(permid), exc + pass + + + # + # Receive query + # + + def recv_query(self,permid,message,selversion): + if selversion < OLPROTO_VER_SIXTH: + return False + + # Unpack + try: + d = bdecode(message[1:]) + except: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rquery: Cannot bdecode QUERY message" + #print_exc() + return False + + if not isValidQuery(d,selversion): + return False + + # ACCESS CONTROL, INCLUDING CHECKING IF PEER HAS NOT EXCEEDED + # QUERY QUOTUM IS DONE in Tribler/Core/RequestPolicy.py + # + + # Process + self.process_query(permid, d, selversion) + + return True + + def setLogFile(self, logfile): + self.logfile = open(logfile, "a") + + + def log(self, permid, decoded_message): + lt = T.localtime(T.time()) + timestamp = "%04d-%02d-%02d %02d:%02d:%02d" % (lt[0], lt[1], lt[2], lt[3], lt[4], lt[5]) + ip = self.peer_db.getPeer(permid, "ip") + #ip = "x.y.z.1" + s = "%s\t%s\t%s\t%s\n"% (timestamp, bin2str(permid), ip, decoded_message) + + print dunno2unicode(s) + self.logfile.write(dunno2unicode(s)) # bin2str( + self.logfile.flush() + + + # + # Send query reply + # + def process_query(self, permid, d, selversion): + q = d['q'][len('SIMPLE '):] + q = dunno2unicode(q) + # log incoming query, if logfile is set + if self.logfile: + self.log(permid, q) + + # Filter against bad input + if not 
q.isalnum(): + newq = u'' + for i in range(0,len(q)): + if q[i].isalnum(): + newq += q[i] + q = newq + + # Format: 'SIMPLE '+string of space separated keywords + # In the future we could support full SQL queries: + # SELECT infohash,torrent_name FROM torrent_db WHERE status = ALIVE + kws = q.split() + hits = self.search_manager.search(kws, maxhits=MAX_RESULTS) + + p = self.create_query_reply(d['id'],hits,selversion) + m = QUERY_REPLY+p + + if self.overlay_log: + nqueries = self.get_peer_nqueries(permid) + # RECV_MSG PERMID OVERSION NUM_QUERIES MSG + self.overlay_log('RECV_QRY', show_permid(permid), selversion, nqueries, repr(d)) + + # RPLY_QRY PERMID NUM_HITS MSG + self.overlay_log('RPLY_QRY', show_permid(permid), len(hits), repr(p)) + + self.overlay_bridge.send(permid, m, self.send_callback) + + self.inc_peer_nqueries(permid) + + + def create_query_reply(self,id,hits,selversion): + getsize = os.path.getsize + join = os.path.join + d = {} + d['id'] = id + d2 = {} + for torrent in hits: + r = {} + # NEWDBSTANDARD. Do not rename r's fields: they are part of the + # rquery protocol spec. + r['content_name'] = torrent['name'] + r['length'] = torrent['length'] + r['leecher'] = torrent['num_leechers'] + r['seeder'] = torrent['num_seeders'] + # Arno: TODO: sending category doesn't make sense as that's user-defined + # leaving it now because of time constraints + r['category'] = torrent['category'] + if selversion >= OLPROTO_VER_NINE: + print os.listdir(self.torrent_dir) + r['torrent_size'] = getsize(join(self.torrent_dir, torrent['torrent_file_name'])) + d2[torrent['infohash']] = r + d['a'] = d2 + return bencode(d) + + + # + # Receive query reply + # + + def recv_query_reply(self,permid,message,selversion): + + #print "****** recv query reply", len(message) + + if selversion < OLPROTO_VER_SIXTH: + return False + + if len(message) > MAX_QUERY_REPLY_LEN: + return True # don't close + + # Unpack + try: + d = bdecode(message[1:]) + except: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rquery: Cannot bdecode QUERY_REPLY message" + return False + + if not isValidQueryReply(d,selversion): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rquery: not valid QUERY_REPLY message" + return False + + # Check auth + queryrec = self.is_registered_query_id(d['id']) + if not queryrec: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rquery: QUERY_REPLY has unknown query ID" + return False + + # Process + self.process_query_reply(permid,queryrec['query'],queryrec['usercallback'],d) + return True + + + def process_query_reply(self,permid,query,usercallback,d): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rquery: process_query_reply:",show_permid_short(permid),query,d + + if len(d['a']) > 0: + remote_query_usercallback_lambda = lambda:usercallback(permid,query,d['a']) + self.launchmany.session.uch.perform_usercallback(remote_query_usercallback_lambda) + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rquery: QUERY_REPLY: no results found" + + + + def inc_peer_nqueries(self, permid): + peer = self.peer_db.getPeer(permid) + try: + if peer is not None: + nqueries = peer['num_queries'] + if nqueries is None: + nqueries = 0 + self.peer_db.updatePeer(permid, num_queries=nqueries+1) + except: + print_exc() + + def get_peer_nqueries(self, permid): + peer = self.peer_db.getPeer(permid) + if peer is None: + return 0 + else: + return peer['num_queries'] + 
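+# Wire-format sketch for the messages handled above (a minimal illustration
+# using only names defined in this module; the literal values are made up):
+#
+#   query_payload = bencode({'q': 'SIMPLE sintel trailer',
+#                            'id': 'x' * QUERY_ID_SIZE})
+#   query_msg     = QUERY + query_payload        # what conn_callback() sends
+#   assert isValidQuery(bdecode(query_msg[1:]), OLPROTO_VER_SIXTH)
+#
+# A QUERY_REPLY carries bencode({'id': <same id>, 'a': {infohash: hit, ...}}),
+# where every hit dict has 'content_name', 'length', 'leecher', 'seeder' and
+# 'category', plus 'torrent_size' from OLPROTO_VER_NINE onwards; the helpers
+# below check exactly that shape before a reply is processed.
+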
+ +def isValidQuery(d,selversion): + if not isinstance(d,dict): + return False + if not ('q' in d and 'id' in d): + return False + if not (isinstance(d['q'],str) and isinstance(d['id'],str)): + return False + if len(d['q']) == 0: + return False + if len(d) > 2: # no other keys + return False + return True + +def isValidQueryReply(d,selversion): + if not isinstance(d,dict): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rqmh: reply: not dict" + return False + if not ('a' in d and 'id' in d): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rqmh: reply: a or id key missing" + return False + if not (isinstance(d['a'],dict) and isinstance(d['id'],str)): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rqmh: reply: a or id key not dict/str" + return False + if not isValidHits(d['a'],selversion): + return False + if len(d) > 2: # no other keys + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rqmh: reply: too many keys, got",d.keys() + return False + return True + +def isValidHits(d,selversion): + if not isinstance(d,dict): + return False + for key in d.keys(): +# if len(key) != 20: +# return False + val = d[key] + if not isValidVal(val,selversion): + return False + return True + +def isValidVal(d,selversion): + if not isinstance(d,dict): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rqmh: reply: a: value not dict" + return False + if selversion >= OLPROTO_VER_NINE: + if not ('content_name' in d and 'length' in d and 'leecher' in d and 'seeder' in d and 'category' in d and 'torrent_size' in d): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rqmh: reply: a: key missing, got",d.keys() + return False + else: + if not ('content_name' in d and 'length' in d and 'leecher' in d and 'seeder' in d and 'category' in d): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rqmh: reply: a: key missing, got",d.keys() + return False +# if not (isinstance(d['content_name'],str) and isinstance(d['length'],int) and isinstance(d['leecher'],int) and isinstance(d['seeder'],int)): +# return False +# if len(d) > 4: # no other keys +# return False + return True + + diff --git a/tribler-mod/Tribler/Core/SocialNetwork/RemoteQueryMsgHandler.py.bak b/tribler-mod/Tribler/Core/SocialNetwork/RemoteQueryMsgHandler.py.bak new file mode 100644 index 0000000..18c6a43 --- /dev/null +++ b/tribler-mod/Tribler/Core/SocialNetwork/RemoteQueryMsgHandler.py.bak @@ -0,0 +1,441 @@ +# Written by Arno Bakker, Jie Yang +# see LICENSE.txt for license information +# +# Send free-form queries to all the peers you are connected to. +# +# TODO: make sure we return also items from download history, but need to verify if +# their status is still checked. 
+# +# + +import os +import sys +from time import time +from sets import Set +from traceback import print_stack, print_exc +import datetime +import time as T + +from M2Crypto import Rand + +from Tribler.Core.BitTornado.bencode import bencode,bdecode +from Tribler.Core.CacheDB.sqlitecachedb import bin2str, str2bin +from Tribler.Core.BitTornado.BT1.MessageID import * + +from Tribler.Core.Overlay.SecureOverlay import OLPROTO_VER_SIXTH, OLPROTO_VER_NINE +from Tribler.Core.Utilities.utilities import show_permid_short,show_permid +from Tribler.Core.Statistics.Logger import OverlayLogger +from Tribler.Core.Utilities.unicode import dunno2unicode +from Tribler.Core.Search.SearchManager import SearchManager + +MAX_RESULTS = 20 +QUERY_ID_SIZE = 20 +MAX_QUERY_REPLY_LEN = 100*1024 # 100K +MAX_PEERS_TO_QUERY = 10 + +DEBUG = False + +class FakeUtility: + + def __init__(self,config_path): + self.config_path = config_path + + def getConfigPath(self): + return self.config_path + + +class RemoteQueryMsgHandler: + + __single = None + + def __init__(self): + if RemoteQueryMsgHandler.__single: + raise RuntimeError, "RemoteQueryMsgHandler is singleton" + RemoteQueryMsgHandler.__single = self + + + self.connections = Set() # only connected remote_search_peers + self.query_ids2rec = {} # ARNOCOMMENT: TODO: purge old entries... + self.overlay_log = None + self.registered = False + self.logfile = None + + def getInstance(*args, **kw): + if RemoteQueryMsgHandler.__single is None: + RemoteQueryMsgHandler(*args, **kw) + return RemoteQueryMsgHandler.__single + getInstance = staticmethod(getInstance) + + + def register(self,overlay_bridge,launchmany,config,bc_fac,log=''): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rquery: register" + self.overlay_bridge = overlay_bridge + self.launchmany= launchmany + self.search_manager = SearchManager(launchmany.torrent_db) + self.peer_db = launchmany.peer_db + self.config = config + self.bc_fac = bc_fac # May be None + if log: + self.overlay_log = OverlayLogger.getInstance(log) + self.torrent_dir = os.path.abspath(self.config['torrent_collecting_dir']) + self.registered = True + + # + # Incoming messages + # + def handleMessage(self,permid,selversion,message): + if not self.registered: + return True + + t = message[0] + if t == QUERY: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rquery: Got QUERY",len(message) + return self.recv_query(permid,message,selversion) + if t == QUERY_REPLY: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rquery: Got QUERY_REPLY",len(message) + return self.recv_query_reply(permid,message,selversion) + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rquery: UNKNOWN OVERLAY MESSAGE", ord(t) + return False + + # + # Incoming connections + # + def handleConnection(self,exc,permid,selversion,locally_initiated): + if not self.registered: + return True + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rquery: handleConnection",exc,"v",selversion,"local",locally_initiated + if exc is not None: + return + + if selversion < OLPROTO_VER_SIXTH: + return True + + if exc is None: + self.connections.add(permid) + else: + self.connections.remove(permid) + + return True + + # + # Send query + # + def send_query(self,query,usercallback,max_peers_to_query=MAX_PEERS_TO_QUERY): + """ Called by GUI Thread """ + if max_peers_to_query is None: + max_peers_to_query = MAX_PEERS_TO_QUERY + if DEBUG: + 
print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rquery: send_query",query + if max_peers_to_query > 0: + send_query_func = lambda:self.network_send_query_callback(query,usercallback,max_peers_to_query) + self.overlay_bridge.add_task(send_query_func,0) + + + def network_send_query_callback(self,query,usercallback,max_peers_to_query): + """ Called by overlay thread """ + p = self.create_query(query,usercallback) + m = QUERY+p + query_conn_callback_lambda = lambda exc,dns,permid,selversion:self.conn_callback(exc,dns,permid,selversion,m) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rquery: send_query: Connected",len(self.connections),"peers" + + #print "******** send query net cb:", query, len(self.connections), self.connections + + peers_to_query = 0 + for permid in self.connections: + self.overlay_bridge.connect(permid,query_conn_callback_lambda) + peers_to_query += 1 + + if peers_to_query < max_peers_to_query and self.bc_fac and self.bc_fac.buddycast_core: + query_cand = self.bc_fac.buddycast_core.getRemoteSearchPeers(MAX_PEERS_TO_QUERY-peers_to_query) + for permid in query_cand: + if permid not in self.connections: # don't call twice + self.overlay_bridge.connect(permid,query_conn_callback_lambda) + peers_to_query += 1 + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rquery: send_query: Sent to",peers_to_query,"peers" + + def create_query(self,query,usercallback): + d = {} + d['q'] = query + d['id'] = self.create_and_register_query_id(query,usercallback) + return bencode(d) + + def create_and_register_query_id(self,query,usercallback): + id = Rand.rand_bytes(QUERY_ID_SIZE) + queryrec = {'query':query,'usercallback':usercallback} + self.query_ids2rec[id] = queryrec + return id + + def is_registered_query_id(self,id): + if id in self.query_ids2rec: + return self.query_ids2rec[id] + else: + return None + + def conn_callback(self,exc,dns,permid,selversion,message): + if exc is None and selversion >= OLPROTO_VER_SIXTH: + self.overlay_bridge.send(permid,message,self.send_callback) + + def send_callback(self,exc,permid): + #print "******* queury was sent to", show_permid_short(permid), exc + pass + + + # + # Receive query + # + + def recv_query(self,permid,message,selversion): + if selversion < OLPROTO_VER_SIXTH: + return False + + # Unpack + try: + d = bdecode(message[1:]) + except: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rquery: Cannot bdecode QUERY message" + #print_exc() + return False + + if not isValidQuery(d,selversion): + return False + + # ACCESS CONTROL, INCLUDING CHECKING IF PEER HAS NOT EXCEEDED + # QUERY QUOTUM IS DONE in Tribler/Core/RequestPolicy.py + # + + # Process + self.process_query(permid, d, selversion) + + return True + + def setLogFile(self, logfile): + self.logfile = open(logfile, "a") + + + def log(self, permid, decoded_message): + lt = T.localtime(T.time()) + timestamp = "%04d-%02d-%02d %02d:%02d:%02d" % (lt[0], lt[1], lt[2], lt[3], lt[4], lt[5]) + ip = self.peer_db.getPeer(permid, "ip") + #ip = "x.y.z.1" + s = "%s\t%s\t%s\t%s\n"% (timestamp, bin2str(permid), ip, decoded_message) + + print dunno2unicode(s) + self.logfile.write(dunno2unicode(s)) # bin2str( + self.logfile.flush() + + + # + # Send query reply + # + def process_query(self, permid, d, selversion): + q = d['q'][len('SIMPLE '):] + q = dunno2unicode(q) + # log incoming query, if logfile is set + if self.logfile: + self.log(permid, q) + + # Filter against bad input + if not 
q.isalnum(): + newq = u'' + for i in range(0,len(q)): + if q[i].isalnum(): + newq += q[i] + q = newq + + # Format: 'SIMPLE '+string of space separated keywords + # In the future we could support full SQL queries: + # SELECT infohash,torrent_name FROM torrent_db WHERE status = ALIVE + kws = q.split() + hits = self.search_manager.search(kws, maxhits=MAX_RESULTS) + + p = self.create_query_reply(d['id'],hits,selversion) + m = QUERY_REPLY+p + + if self.overlay_log: + nqueries = self.get_peer_nqueries(permid) + # RECV_MSG PERMID OVERSION NUM_QUERIES MSG + self.overlay_log('RECV_QRY', show_permid(permid), selversion, nqueries, repr(d)) + + # RPLY_QRY PERMID NUM_HITS MSG + self.overlay_log('RPLY_QRY', show_permid(permid), len(hits), repr(p)) + + self.overlay_bridge.send(permid, m, self.send_callback) + + self.inc_peer_nqueries(permid) + + + def create_query_reply(self,id,hits,selversion): + getsize = os.path.getsize + join = os.path.join + d = {} + d['id'] = id + d2 = {} + for torrent in hits: + r = {} + # NEWDBSTANDARD. Do not rename r's fields: they are part of the + # rquery protocol spec. + r['content_name'] = torrent['name'] + r['length'] = torrent['length'] + r['leecher'] = torrent['num_leechers'] + r['seeder'] = torrent['num_seeders'] + # Arno: TODO: sending category doesn't make sense as that's user-defined + # leaving it now because of time constraints + r['category'] = torrent['category'] + if selversion >= OLPROTO_VER_NINE: + print os.listdir(self.torrent_dir) + r['torrent_size'] = getsize(join(self.torrent_dir, torrent['torrent_file_name'])) + d2[torrent['infohash']] = r + d['a'] = d2 + return bencode(d) + + + # + # Receive query reply + # + + def recv_query_reply(self,permid,message,selversion): + + #print "****** recv query reply", len(message) + + if selversion < OLPROTO_VER_SIXTH: + return False + + if len(message) > MAX_QUERY_REPLY_LEN: + return True # don't close + + # Unpack + try: + d = bdecode(message[1:]) + except: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rquery: Cannot bdecode QUERY_REPLY message" + return False + + if not isValidQueryReply(d,selversion): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rquery: not valid QUERY_REPLY message" + return False + + # Check auth + queryrec = self.is_registered_query_id(d['id']) + if not queryrec: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rquery: QUERY_REPLY has unknown query ID" + return False + + # Process + self.process_query_reply(permid,queryrec['query'],queryrec['usercallback'],d) + return True + + + def process_query_reply(self,permid,query,usercallback,d): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rquery: process_query_reply:",show_permid_short(permid),query,d + + if len(d['a']) > 0: + remote_query_usercallback_lambda = lambda:usercallback(permid,query,d['a']) + self.launchmany.session.uch.perform_usercallback(remote_query_usercallback_lambda) + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rquery: QUERY_REPLY: no results found" + + + + def inc_peer_nqueries(self, permid): + peer = self.peer_db.getPeer(permid) + try: + if peer is not None: + nqueries = peer['num_queries'] + if nqueries is None: + nqueries = 0 + self.peer_db.updatePeer(permid, num_queries=nqueries+1) + except: + print_exc() + + def get_peer_nqueries(self, permid): + peer = self.peer_db.getPeer(permid) + if peer is None: + return 0 + else: + return peer['num_queries'] + 
+ +def isValidQuery(d,selversion): + if not isinstance(d,dict): + return False + if not ('q' in d and 'id' in d): + return False + if not (isinstance(d['q'],str) and isinstance(d['id'],str)): + return False + if len(d['q']) == 0: + return False + if len(d) > 2: # no other keys + return False + return True + +def isValidQueryReply(d,selversion): + if not isinstance(d,dict): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rqmh: reply: not dict" + return False + if not ('a' in d and 'id' in d): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rqmh: reply: a or id key missing" + return False + if not (isinstance(d['a'],dict) and isinstance(d['id'],str)): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rqmh: reply: a or id key not dict/str" + return False + if not isValidHits(d['a'],selversion): + return False + if len(d) > 2: # no other keys + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rqmh: reply: too many keys, got",d.keys() + return False + return True + +def isValidHits(d,selversion): + if not isinstance(d,dict): + return False + for key in d.keys(): +# if len(key) != 20: +# return False + val = d[key] + if not isValidVal(val,selversion): + return False + return True + +def isValidVal(d,selversion): + if not isinstance(d,dict): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rqmh: reply: a: value not dict" + return False + if selversion >= OLPROTO_VER_NINE: + if not ('content_name' in d and 'length' in d and 'leecher' in d and 'seeder' in d and 'category' in d and 'torrent_size' in d): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rqmh: reply: a: key missing, got",d.keys() + return False + else: + if not ('content_name' in d and 'length' in d and 'leecher' in d and 'seeder' in d and 'category' in d): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rqmh: reply: a: key missing, got",d.keys() + return False +# if not (isinstance(d['content_name'],str) and isinstance(d['length'],int) and isinstance(d['leecher'],int) and isinstance(d['seeder'],int)): +# return False +# if len(d) > 4: # no other keys +# return False + return True + + diff --git a/tribler-mod/Tribler/Core/SocialNetwork/RemoteTorrentHandler.py b/tribler-mod/Tribler/Core/SocialNetwork/RemoteTorrentHandler.py new file mode 100644 index 0000000..6f61604 --- /dev/null +++ b/tribler-mod/Tribler/Core/SocialNetwork/RemoteTorrentHandler.py @@ -0,0 +1,73 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information +# +# Handles the case where the user did a remote query and now selected one of the +# returned torrents for download. 
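+# Rough flow, as implemented below: download_torrent() runs on the GUI thread
+# and schedules olthread_download_torrent_callback() on the overlay thread,
+# which records the usercallback per infohash and asks the MetadataHandler to
+# request the .torrent from the peer; when it arrives,
+# metadatahandler_got_torrent() pops that entry and fires
+# usercallback(infohash, metadata, filename) via the session callback handler.
+#
+# Usage sketch (start_actual_download is a hypothetical caller-side helper):
+#   rth = RemoteTorrentHandler.getInstance()
+#   rth.register(overlay_bridge, metadatahandler, session)
+#   rth.download_torrent(permid, infohash,
+#       lambda infohash, metadata, filename: start_actual_download(filename))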
+# + +import sys + +from Tribler.Core.CacheDB.CacheDBHandler import TorrentDBHandler + +DEBUG = False + +class RemoteTorrentHandler: + + __single = None + + def __init__(self): + if RemoteTorrentHandler.__single: + raise RuntimeError, "RemoteTorrentHandler is singleton" + RemoteTorrentHandler.__single = self + self.torrent_db = TorrentDBHandler.getInstance() + self.requestedtorrents = {} + + def getInstance(*args, **kw): + if RemoteTorrentHandler.__single is None: + RemoteTorrentHandler(*args, **kw) + return RemoteTorrentHandler.__single + getInstance = staticmethod(getInstance) + + + def register(self,overlay_bridge,metadatahandler,session): + self.overlay_bridge = overlay_bridge + self.metadatahandler = metadatahandler + self.session = session + + def download_torrent(self,permid,infohash,usercallback): + """ The user has selected a torrent referred to by a peer in a query + reply. Try to obtain the actual .torrent file from the peer and then + start the actual download. + """ + # Called by GUI thread + + olthread_remote_torrent_download_lambda = lambda:self.olthread_download_torrent_callback(permid,infohash,usercallback) + self.overlay_bridge.add_task(olthread_remote_torrent_download_lambda,0) + + def olthread_download_torrent_callback(self,permid,infohash,usercallback): + """ Called by overlay thread """ + + #if infohash in self.requestedtorrents: + # return # TODO RS:the previous request could have failed + + self.requestedtorrents[infohash] = usercallback + + self.metadatahandler.send_metadata_request(permid,infohash,caller="rquery") + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'rtorrent: download: Requested torrent: %s' % `infohash` + + def metadatahandler_got_torrent(self,infohash,metadata,filename): + """ Called by MetadataHandler when the requested torrent comes in """ + #Called by overlay thread + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rtorrent: got requested torrent from peer, wanted", infohash in self.requestedtorrents, `self.requestedtorrents` + if infohash not in self.requestedtorrents: + return + + usercallback = self.requestedtorrents[infohash] + del self.requestedtorrents[infohash] + + remote_torrent_usercallback_lambda = lambda:usercallback(infohash,metadata,filename) + self.session.uch.perform_usercallback(remote_torrent_usercallback_lambda) diff --git a/tribler-mod/Tribler/Core/SocialNetwork/RemoteTorrentHandler.py.bak b/tribler-mod/Tribler/Core/SocialNetwork/RemoteTorrentHandler.py.bak new file mode 100644 index 0000000..6bce997 --- /dev/null +++ b/tribler-mod/Tribler/Core/SocialNetwork/RemoteTorrentHandler.py.bak @@ -0,0 +1,72 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information +# +# Handles the case where the user did a remote query and now selected one of the +# returned torrents for download. 
+# + +import sys + +from Tribler.Core.CacheDB.CacheDBHandler import TorrentDBHandler + +DEBUG = False + +class RemoteTorrentHandler: + + __single = None + + def __init__(self): + if RemoteTorrentHandler.__single: + raise RuntimeError, "RemoteTorrentHandler is singleton" + RemoteTorrentHandler.__single = self + self.torrent_db = TorrentDBHandler.getInstance() + self.requestedtorrents = {} + + def getInstance(*args, **kw): + if RemoteTorrentHandler.__single is None: + RemoteTorrentHandler(*args, **kw) + return RemoteTorrentHandler.__single + getInstance = staticmethod(getInstance) + + + def register(self,overlay_bridge,metadatahandler,session): + self.overlay_bridge = overlay_bridge + self.metadatahandler = metadatahandler + self.session = session + + def download_torrent(self,permid,infohash,usercallback): + """ The user has selected a torrent referred to by a peer in a query + reply. Try to obtain the actual .torrent file from the peer and then + start the actual download. + """ + # Called by GUI thread + + olthread_remote_torrent_download_lambda = lambda:self.olthread_download_torrent_callback(permid,infohash,usercallback) + self.overlay_bridge.add_task(olthread_remote_torrent_download_lambda,0) + + def olthread_download_torrent_callback(self,permid,infohash,usercallback): + """ Called by overlay thread """ + + #if infohash in self.requestedtorrents: + # return # TODO RS:the previous request could have failed + + self.requestedtorrents[infohash] = usercallback + + self.metadatahandler.send_metadata_request(permid,infohash,caller="rquery") + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'rtorrent: download: Requested torrent: %s' % `infohash` + + def metadatahandler_got_torrent(self,infohash,metadata,filename): + """ Called by MetadataHandler when the requested torrent comes in """ + #Called by overlay thread + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","rtorrent: got requested torrent from peer, wanted", infohash in self.requestedtorrents, `self.requestedtorrents` + if infohash not in self.requestedtorrents: + return + + usercallback = self.requestedtorrents[infohash] + del self.requestedtorrents[infohash] + + remote_torrent_usercallback_lambda = lambda:usercallback(infohash,metadata,filename) + self.session.uch.perform_usercallback(remote_torrent_usercallback_lambda) diff --git a/tribler-mod/Tribler/Core/SocialNetwork/SocialNetworkMsgHandler.py b/tribler-mod/Tribler/Core/SocialNetwork/SocialNetworkMsgHandler.py new file mode 100644 index 0000000..2958559 --- /dev/null +++ b/tribler-mod/Tribler/Core/SocialNetwork/SocialNetworkMsgHandler.py @@ -0,0 +1,80 @@ +from time import localtime, strftime +# Written by Arno Bakker, Jie Yang +# see LICENSE.txt for license information + + +import sys + +from Tribler.Core.BitTornado.BT1.MessageID import * +from Tribler.Core.Overlay.SecureOverlay import OLPROTO_VER_FIFTH +from Tribler.Core.SocialNetwork.OverlapMsgHandler import OverlapMsgHandler + +DEBUG = False + +class SocialNetworkMsgHandler: + + __single = None + + def __init__(self): + if SocialNetworkMsgHandler.__single: + raise RuntimeError, "SocialNetworkMsgHandler is singleton" + SocialNetworkMsgHandler.__single = self + self.overlap = OverlapMsgHandler() + + def getInstance(*args, **kw): + if SocialNetworkMsgHandler.__single is None: + SocialNetworkMsgHandler(*args, **kw) + return SocialNetworkMsgHandler.__single + getInstance = staticmethod(getInstance) + + + def register(self,overlay_bridge,launchmany,config): + if DEBUG: 
+ print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: register" + self.overlay_bridge = overlay_bridge + self.config = config + self.overlap.register(overlay_bridge,launchmany) + + # + # Incoming messages + # + def handleMessage(self,permid,selversion,message): + + t = message[0] + if t == SOCIAL_OVERLAP: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: Got SOCIAL_OVERLAP",len(message) + if self.config['superpeer']: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: overlap: Ignoring, we are superpeer" + return True + else: + return self.overlap.recv_overlap(permid,message,selversion) + + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: UNKNOWN OVERLAY MESSAGE", ord(t) + return False + + # + # Incoming connections + # + def handleConnection(self,exc,permid,selversion,locally_initiated): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: handleConnection",exc,"v",selversion,"local",locally_initiated + if exc is not None: + return + + if selversion < OLPROTO_VER_FIFTH: + return True + + if self.config['superpeer']: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: overlap: Ignoring connection, we are superpeer" + return True + + self.overlap.initiate_overlap(permid,locally_initiated) + return True + + \ No newline at end of file diff --git a/tribler-mod/Tribler/Core/SocialNetwork/SocialNetworkMsgHandler.py.bak b/tribler-mod/Tribler/Core/SocialNetwork/SocialNetworkMsgHandler.py.bak new file mode 100644 index 0000000..bcba5ef --- /dev/null +++ b/tribler-mod/Tribler/Core/SocialNetwork/SocialNetworkMsgHandler.py.bak @@ -0,0 +1,79 @@ +# Written by Arno Bakker, Jie Yang +# see LICENSE.txt for license information + + +import sys + +from Tribler.Core.BitTornado.BT1.MessageID import * +from Tribler.Core.Overlay.SecureOverlay import OLPROTO_VER_FIFTH +from Tribler.Core.SocialNetwork.OverlapMsgHandler import OverlapMsgHandler + +DEBUG = False + +class SocialNetworkMsgHandler: + + __single = None + + def __init__(self): + if SocialNetworkMsgHandler.__single: + raise RuntimeError, "SocialNetworkMsgHandler is singleton" + SocialNetworkMsgHandler.__single = self + self.overlap = OverlapMsgHandler() + + def getInstance(*args, **kw): + if SocialNetworkMsgHandler.__single is None: + SocialNetworkMsgHandler(*args, **kw) + return SocialNetworkMsgHandler.__single + getInstance = staticmethod(getInstance) + + + def register(self,overlay_bridge,launchmany,config): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: register" + self.overlay_bridge = overlay_bridge + self.config = config + self.overlap.register(overlay_bridge,launchmany) + + # + # Incoming messages + # + def handleMessage(self,permid,selversion,message): + + t = message[0] + if t == SOCIAL_OVERLAP: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: Got SOCIAL_OVERLAP",len(message) + if self.config['superpeer']: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: overlap: Ignoring, we are superpeer" + return True + else: + return self.overlap.recv_overlap(permid,message,selversion) + + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: UNKNOWN OVERLAY MESSAGE", ord(t) + return False + + # + # Incoming connections + # + def 
handleConnection(self,exc,permid,selversion,locally_initiated): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: handleConnection",exc,"v",selversion,"local",locally_initiated + if exc is not None: + return + + if selversion < OLPROTO_VER_FIFTH: + return True + + if self.config['superpeer']: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","socnet: overlap: Ignoring connection, we are superpeer" + return True + + self.overlap.initiate_overlap(permid,locally_initiated) + return True + + \ No newline at end of file diff --git a/tribler-mod/Tribler/Core/SocialNetwork/__init__.py b/tribler-mod/Tribler/Core/SocialNetwork/__init__.py new file mode 100644 index 0000000..7c348ca --- /dev/null +++ b/tribler-mod/Tribler/Core/SocialNetwork/__init__.py @@ -0,0 +1,4 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information + diff --git a/tribler-mod/Tribler/Core/SocialNetwork/__init__.py.bak b/tribler-mod/Tribler/Core/SocialNetwork/__init__.py.bak new file mode 100644 index 0000000..86ac17b --- /dev/null +++ b/tribler-mod/Tribler/Core/SocialNetwork/__init__.py.bak @@ -0,0 +1,3 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information + diff --git a/tribler-mod/Tribler/Core/Statistics/Crawler.py b/tribler-mod/Tribler/Core/Statistics/Crawler.py new file mode 100644 index 0000000..7019d0b --- /dev/null +++ b/tribler-mod/Tribler/Core/Statistics/Crawler.py @@ -0,0 +1,559 @@ +from time import localtime, strftime +# Written by Boudewijn Schoon +# see LICENSE.txt for license information + +# todo +# - try to connect first, than start the initiator. now we start the +# initiator and we often fail to connect + +from traceback import print_exc +import random +import sys +import time + +from Tribler.Core.BitTornado.BT1.MessageID import CRAWLER_REQUEST, CRAWLER_REPLY, getMessageName +from Tribler.Core.CacheDB.SqliteCacheDBHandler import CrawlerDBHandler +from Tribler.Core.Overlay.OverlayThreadingBridge import OverlayThreadingBridge +from Tribler.Core.Overlay.SecureOverlay import OLPROTO_VER_SEVENTH +from Tribler.Core.Utilities.utilities import show_permid_short + +DEBUG = False + +# when a message payload exceedes 32KB it is divided into multiple +# messages +MAX_PAYLOAD_LENGTH = 32 * 1024 + +# after 1 hour the channels for any outstanding CRAWLER_REQUEST +# messages will be closed +CHANNEL_TIMEOUT = 60 * 60 + +# the FREQUENCY_FLEXIBILITY tels the client how strict it must adhere +# to the frequency. 
the value indicates how many seconds a request +# will be allowed before the actual frequency deadline +FREQUENCY_FLEXIBILITY = 5 + +# Do not attempt to re-initiate communication after more than x +# connection failures +MAX_ALLOWED_FAILURES = 26 + +class Crawler: + __singleton = None + + @classmethod + def get_instance(cls, *args, **kargs): + if not cls.__singleton: + cls.__singleton = cls(*args, **kargs) + return cls.__singleton + + def __init__(self, session): + if self.__singleton: + raise RuntimeError, "Crawler is Singleton" + Crawler.__single = self + self._overlay_bridge = OverlayThreadingBridge.getInstance() + self._session = session + self._crawler_db = CrawlerDBHandler.getInstance() + + # _message_handlers contains message-id:(request-callback, reply-callback, last-request-timestamp) + # the handlers are called when either a CRAWL_REQUEST or CRAWL_REPLY message is received + self._message_handlers = {} + + # _crawl_initiators is a list with (initiator-callback, + # frequency, accept_frequency) tuples the initiators are called + # when a new connection is received + self._crawl_initiators = [] + + # _initiator_dealines contains [deadline, frequency, + # accept_frequency, initiator-callback, permid, selversion, + # failure-counter] deadlines register information on when to + # call the crawl initiators again for a specific permid + self._initiator_deadlines = [] + + # _dialback_deadlines contains message_id:(deadline, permid) pairs + # client peers should connect back to -a- crawler indicated by + # permid after deadline expired + self._dialback_deadlines = {} + + # _channels contains permid:buffer-dict pairs. Where + # buffer_dict contains channel-id:(timestamp, buffer, + # channel_data) pairs. Where buffer is the payload from + # multipart messages that are received so far. Channels are + # used to match outstanding replies to given requests + self._channels = {} + + # start checking for expired deadlines + self._check_deadlines(True) + + # start checking for ancient channels + self._check_channels() + + def register_crawl_initiator(self, initiator_callback, frequency=3600, accept_frequency=None): + """ + Register a callback that is called each time a new connection + is made and subsequently each FREQUENCY seconds. + + ACCEPT_FREQUENCY defaults to FREQUENCY and indicates the + minimum seconds that must expire before a crawler request + message is accepted. + + Giving FREQUENCY = 10 and ACCEPT_FREQUENCY = 0 will call + INITIATOR_CALLBACK every 10 seconds and will let the receiving + peers accept allways. + + Giving FREQUENCY = 10 and ACCEPT_FREQUENCY = 20 will call + INITIATOR_CALLBACK every 10 seconds and will cause frequency + errors 50% of the time. + """ + if accept_frequency is None: + accept_frequency = frequency + self._crawl_initiators.append((initiator_callback, frequency, accept_frequency)) + + def register_message_handler(self, id_, request_callback, reply_callback): + self._message_handlers[id_] = (request_callback, reply_callback, 0) + + def am_crawler(self): + """ + Returns True if this running Tribler is a Crawler + """ + return self._session.get_permid() in self._crawler_db.getCrawlers() + + def _acquire_channel_id(self, permid, channel_data): + """ + Claim a unique one-byte id to match a request to a reply. 
+ + PERMID the peer to communicate with + CHANNEL_DATA optional data associated with this channel + """ + if permid in self._channels: + channels = self._channels[permid] + else: + channels = {} + self._channels[permid] = channels + + # find a free channel-id randomly + channel_id = random.randint(1, 255) + attempt = 0 + while channel_id in channels: + attempt += 1 + if attempt > 64: + channel_id = 0 + break + channel_id = random.randint(1, 255) + + if channel_id == 0: + # find a free channel-id sequentialy + channel_id = 255 + while channel_id in channels and channel_id != 0: + channel_id -= 1 + + if channel_id: + # create a buffer to receive the reply + channels[channel_id] = [time.time() + CHANNEL_TIMEOUT, "", channel_data] + + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: _acquire_channel_id:", show_permid_short(permid), len(channels), "channels used" + + # a valid channel-id or 0 when no channel-id is left + return channel_id + + def _release_channel_id(self, permid, channel_id): + if permid in self._channels: + if channel_id in self._channels[permid]: + del self._channels[permid][channel_id] + if not self._channels[permid]: + del self._channels[permid] + + def _post_connection_attempt(self, permid, success): + """ + This method is called after a succesfull or failed connection + attempt + """ + if success: + # reset all failure counters for this permid + for tup in (tup for tup in self._initiator_deadlines if tup[4] == permid): + tup[6] = 0 + + else: + def increase_failure_counter(tup): + if tup[4] == permid: + if tup[6] > MAX_ALLOWED_FAILURES: + # remove from self._initiator_deadlines + return False + else: + # increase counter but leave in self._initiator_deadlines + tup[6] += 1 + return True + else: + return True + + self._initiator_deadlines = filter(increase_failure_counter, self._initiator_deadlines) + + def send_request(self, permid, message_id, payload, frequency=3600, callback=None, channel_data=None): + """ + This method ensures that a connection to PERMID exists before + sending the message + + Returns the channel-id. + + MESSAGE_ID is a one character crawler specific ID (defined in MessageID.py). + PAYLOAD is message specific sting. + FREQUENCY is an integer defining the time, in seconds, until a next message with MESSAGE_ID is accepted by the client-side crawler. + CALLBACK is either None or callable. Called with parameters EXC and PERMID. EXC is None for success or an Exception for failure. + CHANNEL_DATA can be anything related to this specific request. It is supplied with the handle-reply callback. + """ + # reserve a new channel-id + channel_id = self._acquire_channel_id(permid, channel_data) + + def _after_connect(exc, dns, permid, selversion): + self._post_connection_attempt(permid, not exc) + if exc: + # could not connect. 
+ if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: could not connect", dns, show_permid_short(permid), exc + self._release_channel_id(permid, channel_id) + if callback: + callback(exc, permid) + else: + self._send_request(permid, message_id, channel_id, payload, frequency=frequency, callback=callback) + +# if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: connecting (send_request)...", show_permid_short(permid) + if channel_id == 0: + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: send_request: Can not acquire channel-id", show_permid_short(permid) + else: + self._overlay_bridge.connect(permid, _after_connect) + return channel_id + + def _send_request(self, permid, message_id, channel_id, payload, frequency=3600, callback=None): + """ + Send a CRAWLER_REQUEST message to permid. This method assumes + that connection exists to the permid. + + @param permid The destination peer + @param message_id The message id + @param payload The message content + @param frequency Destination peer will return a frequency-error when this message_id has been received within the last frequency seconds + @param callback Callable function/method is called when request is send with 2 paramaters (exc, permid) + @return The message channel-id > 0 on success, and 0 on failure + """ + # Sending a request from a Crawler to a Tribler peer + # SIZE INDEX + # 1 byte: 0 CRAWLER_REQUEST (from Tribler.Core.BitTornado.BT1.MessageID) + # 1 byte: 1 --MESSAGE-SPECIFIC-ID-- + # 1 byte: 2 Channel id + # 2 byte: 3+4 Frequency + # n byte: 5... Request payload + def _after_send_request(exc, permid): + if DEBUG: + if exc: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: could not send request to", show_permid_short(permid), exc + if exc: + self._release_channel_id(permid, channel_id) + + # call the optional callback supplied with send_request + if callback: + callback(exc, permid) + + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: sending", getMessageName(CRAWLER_REQUEST+message_id), "with", len(payload), "bytes payload to", show_permid_short(permid) + self._overlay_bridge.send(permid, "".join((CRAWLER_REQUEST, + message_id, + chr(channel_id & 0xFF), + chr((frequency >> 8) & 0xFF) + chr(frequency & 0xFF), + str(payload))), _after_send_request) + return channel_id + + def handle_request(self, permid, selversion, message): + """ + Received CRAWLER_REQUEST message from OverlayApps + """ + if selversion >= OLPROTO_VER_SEVENTH and len(message) >= 5: + if message[1] in self._message_handlers: + + message_id = message[1] + channel_id = ord(message[2]) + frequency = ord(message[3]) << 8 | ord(message[4]) + now = time.time() + request_callback, reply_callback, last_request_timestamp = self._message_handlers[message_id] + + # frequency: we will report a requency error when we have + # received this request within FREQUENCY seconds + if last_request_timestamp + frequency < now + FREQUENCY_FLEXIBILITY: + + if not permid in self._channels: + self._channels[permid] = {} + self._channels[permid][channel_id] = [time.time() + CHANNEL_TIMEOUT, "", None] + + # store the new timestamp + self._message_handlers[message_id] = (request_callback, reply_callback, now) + + def send_reply_helper(payload="", error=0, callback=None): + return self.send_reply(permid, message_id, channel_id, payload, error=error, callback=callback) + + # 20/10/08. 
Boudewijn: We will no longer disconnect + # based on the return value from the message handler + try: + request_callback(permid, selversion, channel_id, message[5:], send_reply_helper) + except: + print_exc() + + # 11/11/08. Boudewijn: Because the client peers may + # not always be connectable, the client peers will + # actively seek to connect to -a- crawler after + # frequency expires. + self._dialback_deadlines[message_id] = (now + frequency, permid) + + return True + + else: + # frequency error + self.send_reply(permid, message_id, channel_id, "frequency error", error=254) + return True + else: + # invalid / unknown message. may be caused by a + # crawler sending newly introduced messages + self.send_reply(permid, message_id, channel_id, "unknown message", error=253) + return True + else: + # protocol version conflict or invalid message + return False + + def send_reply(self, permid, message_id, channel_id, payload, error=0, callback=None): + """ + This method ensures that a connection to PERMID exists before sending the message + """ + def _after_connect(exc, dns, permid, selversion): + self._post_connection_attempt(permid, not exc) + if exc: + # could not connect. + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: could not connect", dns, show_permid_short(permid), exc + if callback: + callback(exc, permid) + else: + self._send_reply(permid, message_id, channel_id, payload, error=error, callback=callback) + +# if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: connecting... (send_reply)", show_permid_short(permid) + self._overlay_bridge.connect(permid, _after_connect) + + def _send_reply(self, permid, message_id, channel_id, payload, error=0, callback=None): + """ + Send a CRAWLER_REPLY message to permid. This method assumes + that connection exists to the permid. + + @param permid The destination peer + @param message_id The message id + @param channel_id The channel id. Used to match replies to requests + @param payload The message content + @param error The error code. (0: no-error, 253: unknown-message, 254: frequency-error, 255: reserved) + @param callback Callable function/method is called when request is send with 2 paramaters (exc, permid) + @return The message channel-id > 0 on success, and 0 on failure + """ + # Sending a reply from a Tribler peer to a Crawler + # SIZE INDEX + # 1 byte: 0 CRAWLER_REPLY (from Tribler.Core.BitTornado.BT1.MessageID) + # 1 byte: 1 --MESSAGE-SPECIFIC-ID-- + # 1 byte: 2 Channel id + # 1 byte: 3 Parts left + # 1 byte: 4 Indicating success (0) or failure (non 0) + # n byte: 5... 
Reply payload + if len(payload) > MAX_PAYLOAD_LENGTH: + remaining_payload = payload[MAX_PAYLOAD_LENGTH:] + + def _after_send_reply(exc, permid): + """ + Called after the overlay attempted to send a reply message + """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: _after_send_reply", show_permid_short(permid), exc + if not exc: + self.send_reply(permid, message_id, channel_id, remaining_payload, error=error) + # call the optional callback supplied with send_request + if callback: + callback(exc, permid) + + parts_left = int(len(payload) / MAX_PAYLOAD_LENGTH) + payload = payload[:MAX_PAYLOAD_LENGTH] + + else: + def _after_send_reply(exc, permid): + if DEBUG: + if exc: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: could not send request", show_permid_short(permid), exc + # call the optional callback supplied with send_request + if callback: + callback(exc, permid) + + parts_left = 0 + + # remove from self._channels if it is still there (could + # have been remove during periodic timeout check) + if permid in self._channels and channel_id in self._channels[permid]: + del self._channels[permid][channel_id] + if not self._channels[permid]: + del self._channels[permid] + + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: sending", getMessageName(CRAWLER_REPLY+message_id), "with", len(payload), "bytes payload to", show_permid_short(permid) + self._overlay_bridge.send(permid, "".join((CRAWLER_REPLY, + message_id, + chr(channel_id & 0xFF), + chr(parts_left & 0xFF), + chr(error & 0xFF), + str(payload))), _after_send_reply) + return channel_id + + def handle_reply(self, permid, selversion, message): + """ + Received CRAWLER_REPLY message from OverlayApps + """ + if selversion >= OLPROTO_VER_SEVENTH and len(message) >= 5 and message[1] in self._message_handlers: + + message_id = message[1] + channel_id = ord(message[2]) + parts_left = ord(message[3]) + error = ord(message[4]) + + # A request must exist in self._channels, otherwise we did + # not request this reply + if permid in self._channels and channel_id in self._channels[permid]: + + # add part to buffer + self._channels[permid][channel_id][1] += message[5:] + + if parts_left: + # todo: register some event to remove the buffer + # after a time (in case connection is lost before + # all parts are received) + + # Can't do anything until all parts have been received + return True + else: + timestamp, payload, channel_data = self._channels[permid].pop(channel_id) + if DEBUG: + if error == 253: + # unknown message error (probably because + # the crawler is newer than the peer) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: received", getMessageName(CRAWLER_REPLY+message_id), "with", len(message), "bytes payload from", show_permid_short(permid), "indicating an unknown message error" + if error == 254: + # frequency error (we did this request recently) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: received", getMessageName(CRAWLER_REPLY+message_id), "with", len(message), "bytes payload from", show_permid_short(permid), "indicating a frequency error" + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: received", getMessageName(CRAWLER_REPLY+message_id), "with", len(payload), "bytes payload from", show_permid_short(permid) + if not self._channels[permid]: + del self._channels[permid] + + def send_request_helper(message_id, payload, 
frequency=3600, callback=None, channel_data=None): + return self.send_request(permid, message_id, payload, frequency=frequency, callback=callback, channel_data=channel_data) + + # 20/10/08. Boudewijn: We will no longer + # disconnect based on the return value from the + # message handler + try: + # todo: update all code to always accept the channel_data parameter + if channel_data: + self._message_handlers[message_id][1](permid, selversion, channel_id, channel_data, error, payload, send_request_helper) + else: + self._message_handlers[message_id][1](permid, selversion, channel_id, error, payload, send_request_helper) + except: + print_exc() + return True + else: + # reply from unknown permid or channel + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: received", getMessageName(CRAWLER_REPLY+message_id), "with", len(payload), "bytes payload from", show_permid_short(permid), "from unknown peer or unused channel" + + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: received", getMessageName(CRAWLER_REPLY+message_id), "with", len(payload), "bytes payload from", show_permid_short(permid), "from unknown peer or unused channel" + return False + + def handle_connection(self, exc, permid, selversion, locally_initiated): + """ + Called when overlay received a connection. Note that this + method is only registered with OverlayApps when the command + line option 'crawl' is used. + """ + if exc: + # connection lost + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: overlay connection lost", show_permid_short(permid), exc + + elif selversion >= OLPROTO_VER_SEVENTH: + # verify that we do not already have deadlines for this permid + already_known = False + for tup in self._initiator_deadlines: + if tup[4] == permid: + already_known = True + break + + if not already_known: + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: new overlay connection", show_permid_short(permid) + for initiator_callback, frequency, accept_frequency in self._crawl_initiators: + self._initiator_deadlines.append([0, frequency, accept_frequency, initiator_callback, permid, selversion, 0]) + + self._initiator_deadlines.sort() + + # Start sending crawler requests + self._check_deadlines(False) + else: + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: new overlay connection (can not use version %d)" % selversion, show_permid_short(permid) + + def _check_deadlines(self, resubmit): + """ + Send requests to permid and re-register to be called again + after frequency seconds + """ + now = time.time() + + # crawler side deadlines... + if self._initiator_deadlines: + for tup in self._initiator_deadlines: + deadline, frequency, accept_frequency, initiator_callback, permid, selversion, failure_counter = tup + if now > deadline + FREQUENCY_FLEXIBILITY: + def send_request_helper(message_id, payload, frequency=accept_frequency, callback=None, channel_data=None): + return self.send_request(permid, message_id, payload, frequency=frequency, callback=callback, channel_data=channel_data) + # 20/10/08. Boudewijn: We will no longer disconnect + # based on the return value from the message handler + try: + initiator_callback(permid, selversion, send_request_helper) + except Exception: + print_exc() + + # set new deadline + tup[0] = now + frequency + else: + break + + # resort + self._initiator_deadlines.sort() + + # client side deadlines... 
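+        # Dialback sketch: for every accepted CRAWLER_REQUEST, handle_request()
+        # stored (now + frequency, permid) under the message_id in
+        # self._dialback_deadlines. Once that deadline (plus
+        # FREQUENCY_FLEXIBILITY) has passed, this client connects back to the
+        # crawler's permid, so peers that are not connectable from the outside
+        # stay reachable for the next crawl round; the entry is then dropped
+        # until the next accepted request recreates it.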
+ if self._dialback_deadlines: + + def _after_connect(exc, dns, permid, selversion): + if DEBUG: + if exc: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: dialback to crawler failed", dns, show_permid_short(permid), exc + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: dialback to crawler established", dns, show_permid_short(permid) + + for message_id, (deadline, permid) in self._dialback_deadlines.items(): + if now > deadline + FREQUENCY_FLEXIBILITY: + self._overlay_bridge.connect(permid, _after_connect) + del self._dialback_deadlines[message_id] + + if resubmit: + self._overlay_bridge.add_task(lambda:self._check_deadlines(True), 5) + + def _check_channels(self): + """ + Periodically removes permids after no connection was + established for a long time + """ + now = time.time() + to_remove_permids = [] + for permid in self._channels: + to_remove_channel_ids = [] + for channel_id, (deadline, _, _) in self._channels[permid].iteritems(): + if now > deadline: + to_remove_channel_ids.append(channel_id) + for channel_id in to_remove_channel_ids: + del self._channels[permid][channel_id] + if not self._channels[permid]: + to_remove_permids.append(permid) + for permid in to_remove_permids: + del self._channels[permid] + + # resubmit + self._overlay_bridge.add_task(self._check_channels, 60) + diff --git a/tribler-mod/Tribler/Core/Statistics/Crawler.py.bak b/tribler-mod/Tribler/Core/Statistics/Crawler.py.bak new file mode 100644 index 0000000..e30c60d --- /dev/null +++ b/tribler-mod/Tribler/Core/Statistics/Crawler.py.bak @@ -0,0 +1,558 @@ +# Written by Boudewijn Schoon +# see LICENSE.txt for license information + +# todo +# - try to connect first, than start the initiator. now we start the +# initiator and we often fail to connect + +from traceback import print_exc +import random +import sys +import time + +from Tribler.Core.BitTornado.BT1.MessageID import CRAWLER_REQUEST, CRAWLER_REPLY, getMessageName +from Tribler.Core.CacheDB.SqliteCacheDBHandler import CrawlerDBHandler +from Tribler.Core.Overlay.OverlayThreadingBridge import OverlayThreadingBridge +from Tribler.Core.Overlay.SecureOverlay import OLPROTO_VER_SEVENTH +from Tribler.Core.Utilities.utilities import show_permid_short + +DEBUG = False + +# when a message payload exceedes 32KB it is divided into multiple +# messages +MAX_PAYLOAD_LENGTH = 32 * 1024 + +# after 1 hour the channels for any outstanding CRAWLER_REQUEST +# messages will be closed +CHANNEL_TIMEOUT = 60 * 60 + +# the FREQUENCY_FLEXIBILITY tels the client how strict it must adhere +# to the frequency. 
the value indicates how many seconds a request +# will be allowed before the actual frequency deadline +FREQUENCY_FLEXIBILITY = 5 + +# Do not attempt to re-initiate communication after more than x +# connection failures +MAX_ALLOWED_FAILURES = 26 + +class Crawler: + __singleton = None + + @classmethod + def get_instance(cls, *args, **kargs): + if not cls.__singleton: + cls.__singleton = cls(*args, **kargs) + return cls.__singleton + + def __init__(self, session): + if self.__singleton: + raise RuntimeError, "Crawler is Singleton" + Crawler.__single = self + self._overlay_bridge = OverlayThreadingBridge.getInstance() + self._session = session + self._crawler_db = CrawlerDBHandler.getInstance() + + # _message_handlers contains message-id:(request-callback, reply-callback, last-request-timestamp) + # the handlers are called when either a CRAWL_REQUEST or CRAWL_REPLY message is received + self._message_handlers = {} + + # _crawl_initiators is a list with (initiator-callback, + # frequency, accept_frequency) tuples the initiators are called + # when a new connection is received + self._crawl_initiators = [] + + # _initiator_dealines contains [deadline, frequency, + # accept_frequency, initiator-callback, permid, selversion, + # failure-counter] deadlines register information on when to + # call the crawl initiators again for a specific permid + self._initiator_deadlines = [] + + # _dialback_deadlines contains message_id:(deadline, permid) pairs + # client peers should connect back to -a- crawler indicated by + # permid after deadline expired + self._dialback_deadlines = {} + + # _channels contains permid:buffer-dict pairs. Where + # buffer_dict contains channel-id:(timestamp, buffer, + # channel_data) pairs. Where buffer is the payload from + # multipart messages that are received so far. Channels are + # used to match outstanding replies to given requests + self._channels = {} + + # start checking for expired deadlines + self._check_deadlines(True) + + # start checking for ancient channels + self._check_channels() + + def register_crawl_initiator(self, initiator_callback, frequency=3600, accept_frequency=None): + """ + Register a callback that is called each time a new connection + is made and subsequently each FREQUENCY seconds. + + ACCEPT_FREQUENCY defaults to FREQUENCY and indicates the + minimum seconds that must expire before a crawler request + message is accepted. + + Giving FREQUENCY = 10 and ACCEPT_FREQUENCY = 0 will call + INITIATOR_CALLBACK every 10 seconds and will let the receiving + peers accept allways. + + Giving FREQUENCY = 10 and ACCEPT_FREQUENCY = 20 will call + INITIATOR_CALLBACK every 10 seconds and will cause frequency + errors 50% of the time. + """ + if accept_frequency is None: + accept_frequency = frequency + self._crawl_initiators.append((initiator_callback, frequency, accept_frequency)) + + def register_message_handler(self, id_, request_callback, reply_callback): + self._message_handlers[id_] = (request_callback, reply_callback, 0) + + def am_crawler(self): + """ + Returns True if this running Tribler is a Crawler + """ + return self._session.get_permid() in self._crawler_db.getCrawlers() + + def _acquire_channel_id(self, permid, channel_data): + """ + Claim a unique one-byte id to match a request to a reply. 
+ + PERMID the peer to communicate with + CHANNEL_DATA optional data associated with this channel + """ + if permid in self._channels: + channels = self._channels[permid] + else: + channels = {} + self._channels[permid] = channels + + # find a free channel-id randomly + channel_id = random.randint(1, 255) + attempt = 0 + while channel_id in channels: + attempt += 1 + if attempt > 64: + channel_id = 0 + break + channel_id = random.randint(1, 255) + + if channel_id == 0: + # find a free channel-id sequentialy + channel_id = 255 + while channel_id in channels and channel_id != 0: + channel_id -= 1 + + if channel_id: + # create a buffer to receive the reply + channels[channel_id] = [time.time() + CHANNEL_TIMEOUT, "", channel_data] + + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: _acquire_channel_id:", show_permid_short(permid), len(channels), "channels used" + + # a valid channel-id or 0 when no channel-id is left + return channel_id + + def _release_channel_id(self, permid, channel_id): + if permid in self._channels: + if channel_id in self._channels[permid]: + del self._channels[permid][channel_id] + if not self._channels[permid]: + del self._channels[permid] + + def _post_connection_attempt(self, permid, success): + """ + This method is called after a succesfull or failed connection + attempt + """ + if success: + # reset all failure counters for this permid + for tup in (tup for tup in self._initiator_deadlines if tup[4] == permid): + tup[6] = 0 + + else: + def increase_failure_counter(tup): + if tup[4] == permid: + if tup[6] > MAX_ALLOWED_FAILURES: + # remove from self._initiator_deadlines + return False + else: + # increase counter but leave in self._initiator_deadlines + tup[6] += 1 + return True + else: + return True + + self._initiator_deadlines = filter(increase_failure_counter, self._initiator_deadlines) + + def send_request(self, permid, message_id, payload, frequency=3600, callback=None, channel_data=None): + """ + This method ensures that a connection to PERMID exists before + sending the message + + Returns the channel-id. + + MESSAGE_ID is a one character crawler specific ID (defined in MessageID.py). + PAYLOAD is message specific sting. + FREQUENCY is an integer defining the time, in seconds, until a next message with MESSAGE_ID is accepted by the client-side crawler. + CALLBACK is either None or callable. Called with parameters EXC and PERMID. EXC is None for success or an Exception for failure. + CHANNEL_DATA can be anything related to this specific request. It is supplied with the handle-reply callback. + """ + # reserve a new channel-id + channel_id = self._acquire_channel_id(permid, channel_data) + + def _after_connect(exc, dns, permid, selversion): + self._post_connection_attempt(permid, not exc) + if exc: + # could not connect. 
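+ # The connect failed: release the channel-id that was reserved for this
+ # request so it can be reused, and report the failure through the optional
+ # callback supplied to send_request.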
+ if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: could not connect", dns, show_permid_short(permid), exc + self._release_channel_id(permid, channel_id) + if callback: + callback(exc, permid) + else: + self._send_request(permid, message_id, channel_id, payload, frequency=frequency, callback=callback) + +# if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: connecting (send_request)...", show_permid_short(permid) + if channel_id == 0: + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: send_request: Can not acquire channel-id", show_permid_short(permid) + else: + self._overlay_bridge.connect(permid, _after_connect) + return channel_id + + def _send_request(self, permid, message_id, channel_id, payload, frequency=3600, callback=None): + """ + Send a CRAWLER_REQUEST message to permid. This method assumes + that connection exists to the permid. + + @param permid The destination peer + @param message_id The message id + @param payload The message content + @param frequency Destination peer will return a frequency-error when this message_id has been received within the last frequency seconds + @param callback Callable function/method is called when request is send with 2 paramaters (exc, permid) + @return The message channel-id > 0 on success, and 0 on failure + """ + # Sending a request from a Crawler to a Tribler peer + # SIZE INDEX + # 1 byte: 0 CRAWLER_REQUEST (from Tribler.Core.BitTornado.BT1.MessageID) + # 1 byte: 1 --MESSAGE-SPECIFIC-ID-- + # 1 byte: 2 Channel id + # 2 byte: 3+4 Frequency + # n byte: 5... Request payload + def _after_send_request(exc, permid): + if DEBUG: + if exc: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: could not send request to", show_permid_short(permid), exc + if exc: + self._release_channel_id(permid, channel_id) + + # call the optional callback supplied with send_request + if callback: + callback(exc, permid) + + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: sending", getMessageName(CRAWLER_REQUEST+message_id), "with", len(payload), "bytes payload to", show_permid_short(permid) + self._overlay_bridge.send(permid, "".join((CRAWLER_REQUEST, + message_id, + chr(channel_id & 0xFF), + chr((frequency >> 8) & 0xFF) + chr(frequency & 0xFF), + str(payload))), _after_send_request) + return channel_id + + def handle_request(self, permid, selversion, message): + """ + Received CRAWLER_REQUEST message from OverlayApps + """ + if selversion >= OLPROTO_VER_SEVENTH and len(message) >= 5: + if message[1] in self._message_handlers: + + message_id = message[1] + channel_id = ord(message[2]) + frequency = ord(message[3]) << 8 | ord(message[4]) + now = time.time() + request_callback, reply_callback, last_request_timestamp = self._message_handlers[message_id] + + # frequency: we will report a requency error when we have + # received this request within FREQUENCY seconds + if last_request_timestamp + frequency < now + FREQUENCY_FLEXIBILITY: + + if not permid in self._channels: + self._channels[permid] = {} + self._channels[permid][channel_id] = [time.time() + CHANNEL_TIMEOUT, "", None] + + # store the new timestamp + self._message_handlers[message_id] = (request_callback, reply_callback, now) + + def send_reply_helper(payload="", error=0, callback=None): + return self.send_reply(permid, message_id, channel_id, payload, error=error, callback=callback) + + # 20/10/08. 
Boudewijn: We will no longer disconnect + # based on the return value from the message handler + try: + request_callback(permid, selversion, channel_id, message[5:], send_reply_helper) + except: + print_exc() + + # 11/11/08. Boudewijn: Because the client peers may + # not always be connectable, the client peers will + # actively seek to connect to -a- crawler after + # frequency expires. + self._dialback_deadlines[message_id] = (now + frequency, permid) + + return True + + else: + # frequency error + self.send_reply(permid, message_id, channel_id, "frequency error", error=254) + return True + else: + # invalid / unknown message. may be caused by a + # crawler sending newly introduced messages + self.send_reply(permid, message_id, channel_id, "unknown message", error=253) + return True + else: + # protocol version conflict or invalid message + return False + + def send_reply(self, permid, message_id, channel_id, payload, error=0, callback=None): + """ + This method ensures that a connection to PERMID exists before sending the message + """ + def _after_connect(exc, dns, permid, selversion): + self._post_connection_attempt(permid, not exc) + if exc: + # could not connect. + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: could not connect", dns, show_permid_short(permid), exc + if callback: + callback(exc, permid) + else: + self._send_reply(permid, message_id, channel_id, payload, error=error, callback=callback) + +# if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: connecting... (send_reply)", show_permid_short(permid) + self._overlay_bridge.connect(permid, _after_connect) + + def _send_reply(self, permid, message_id, channel_id, payload, error=0, callback=None): + """ + Send a CRAWLER_REPLY message to permid. This method assumes + that connection exists to the permid. + + @param permid The destination peer + @param message_id The message id + @param channel_id The channel id. Used to match replies to requests + @param payload The message content + @param error The error code. (0: no-error, 253: unknown-message, 254: frequency-error, 255: reserved) + @param callback Callable function/method is called when request is send with 2 paramaters (exc, permid) + @return The message channel-id > 0 on success, and 0 on failure + """ + # Sending a reply from a Tribler peer to a Crawler + # SIZE INDEX + # 1 byte: 0 CRAWLER_REPLY (from Tribler.Core.BitTornado.BT1.MessageID) + # 1 byte: 1 --MESSAGE-SPECIFIC-ID-- + # 1 byte: 2 Channel id + # 1 byte: 3 Parts left + # 1 byte: 4 Indicating success (0) or failure (non 0) + # n byte: 5... 
Reply payload + if len(payload) > MAX_PAYLOAD_LENGTH: + remaining_payload = payload[MAX_PAYLOAD_LENGTH:] + + def _after_send_reply(exc, permid): + """ + Called after the overlay attempted to send a reply message + """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: _after_send_reply", show_permid_short(permid), exc + if not exc: + self.send_reply(permid, message_id, channel_id, remaining_payload, error=error) + # call the optional callback supplied with send_request + if callback: + callback(exc, permid) + + parts_left = int(len(payload) / MAX_PAYLOAD_LENGTH) + payload = payload[:MAX_PAYLOAD_LENGTH] + + else: + def _after_send_reply(exc, permid): + if DEBUG: + if exc: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: could not send request", show_permid_short(permid), exc + # call the optional callback supplied with send_request + if callback: + callback(exc, permid) + + parts_left = 0 + + # remove from self._channels if it is still there (could + # have been remove during periodic timeout check) + if permid in self._channels and channel_id in self._channels[permid]: + del self._channels[permid][channel_id] + if not self._channels[permid]: + del self._channels[permid] + + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: sending", getMessageName(CRAWLER_REPLY+message_id), "with", len(payload), "bytes payload to", show_permid_short(permid) + self._overlay_bridge.send(permid, "".join((CRAWLER_REPLY, + message_id, + chr(channel_id & 0xFF), + chr(parts_left & 0xFF), + chr(error & 0xFF), + str(payload))), _after_send_reply) + return channel_id + + def handle_reply(self, permid, selversion, message): + """ + Received CRAWLER_REPLY message from OverlayApps + """ + if selversion >= OLPROTO_VER_SEVENTH and len(message) >= 5 and message[1] in self._message_handlers: + + message_id = message[1] + channel_id = ord(message[2]) + parts_left = ord(message[3]) + error = ord(message[4]) + + # A request must exist in self._channels, otherwise we did + # not request this reply + if permid in self._channels and channel_id in self._channels[permid]: + + # add part to buffer + self._channels[permid][channel_id][1] += message[5:] + + if parts_left: + # todo: register some event to remove the buffer + # after a time (in case connection is lost before + # all parts are received) + + # Can't do anything until all parts have been received + return True + else: + timestamp, payload, channel_data = self._channels[permid].pop(channel_id) + if DEBUG: + if error == 253: + # unknown message error (probably because + # the crawler is newer than the peer) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: received", getMessageName(CRAWLER_REPLY+message_id), "with", len(message), "bytes payload from", show_permid_short(permid), "indicating an unknown message error" + if error == 254: + # frequency error (we did this request recently) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: received", getMessageName(CRAWLER_REPLY+message_id), "with", len(message), "bytes payload from", show_permid_short(permid), "indicating a frequency error" + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: received", getMessageName(CRAWLER_REPLY+message_id), "with", len(payload), "bytes payload from", show_permid_short(permid) + if not self._channels[permid]: + del self._channels[permid] + + def send_request_helper(message_id, payload, 
frequency=3600, callback=None, channel_data=None): + return self.send_request(permid, message_id, payload, frequency=frequency, callback=callback, channel_data=channel_data) + + # 20/10/08. Boudewijn: We will no longer + # disconnect based on the return value from the + # message handler + try: + # todo: update all code to always accept the channel_data parameter + if channel_data: + self._message_handlers[message_id][1](permid, selversion, channel_id, channel_data, error, payload, send_request_helper) + else: + self._message_handlers[message_id][1](permid, selversion, channel_id, error, payload, send_request_helper) + except: + print_exc() + return True + else: + # reply from unknown permid or channel + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: received", getMessageName(CRAWLER_REPLY+message_id), "with", len(payload), "bytes payload from", show_permid_short(permid), "from unknown peer or unused channel" + + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: received", getMessageName(CRAWLER_REPLY+message_id), "with", len(payload), "bytes payload from", show_permid_short(permid), "from unknown peer or unused channel" + return False + + def handle_connection(self, exc, permid, selversion, locally_initiated): + """ + Called when overlay received a connection. Note that this + method is only registered with OverlayApps when the command + line option 'crawl' is used. + """ + if exc: + # connection lost + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: overlay connection lost", show_permid_short(permid), exc + + elif selversion >= OLPROTO_VER_SEVENTH: + # verify that we do not already have deadlines for this permid + already_known = False + for tup in self._initiator_deadlines: + if tup[4] == permid: + already_known = True + break + + if not already_known: + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: new overlay connection", show_permid_short(permid) + for initiator_callback, frequency, accept_frequency in self._crawl_initiators: + self._initiator_deadlines.append([0, frequency, accept_frequency, initiator_callback, permid, selversion, 0]) + + self._initiator_deadlines.sort() + + # Start sending crawler requests + self._check_deadlines(False) + else: + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: new overlay connection (can not use version %d)" % selversion, show_permid_short(permid) + + def _check_deadlines(self, resubmit): + """ + Send requests to permid and re-register to be called again + after frequency seconds + """ + now = time.time() + + # crawler side deadlines... + if self._initiator_deadlines: + for tup in self._initiator_deadlines: + deadline, frequency, accept_frequency, initiator_callback, permid, selversion, failure_counter = tup + if now > deadline + FREQUENCY_FLEXIBILITY: + def send_request_helper(message_id, payload, frequency=accept_frequency, callback=None, channel_data=None): + return self.send_request(permid, message_id, payload, frequency=frequency, callback=callback, channel_data=channel_data) + # 20/10/08. Boudewijn: We will no longer disconnect + # based on the return value from the message handler + try: + initiator_callback(permid, selversion, send_request_helper) + except Exception: + print_exc() + + # set new deadline + tup[0] = now + frequency + else: + break + + # resort + self._initiator_deadlines.sort() + + # client side deadlines... 
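+ # A peer that answered a CRAWLER_REQUEST registers a dialback deadline in
+ # handle_request (message_id -> (deadline, permid)); once that deadline plus
+ # FREQUENCY_FLEXIBILITY has passed, the peer actively connects back to the
+ # crawler, so peers that cannot be reached directly still offer the crawler
+ # a connection for its next request. With the default frequency of 3600
+ # seconds the dialback happens roughly an hour after the request was served.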
+ if self._dialback_deadlines: + + def _after_connect(exc, dns, permid, selversion): + if DEBUG: + if exc: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: dialback to crawler failed", dns, show_permid_short(permid), exc + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: dialback to crawler established", dns, show_permid_short(permid) + + for message_id, (deadline, permid) in self._dialback_deadlines.items(): + if now > deadline + FREQUENCY_FLEXIBILITY: + self._overlay_bridge.connect(permid, _after_connect) + del self._dialback_deadlines[message_id] + + if resubmit: + self._overlay_bridge.add_task(lambda:self._check_deadlines(True), 5) + + def _check_channels(self): + """ + Periodically removes permids after no connection was + established for a long time + """ + now = time.time() + to_remove_permids = [] + for permid in self._channels: + to_remove_channel_ids = [] + for channel_id, (deadline, _, _) in self._channels[permid].iteritems(): + if now > deadline: + to_remove_channel_ids.append(channel_id) + for channel_id in to_remove_channel_ids: + del self._channels[permid][channel_id] + if not self._channels[permid]: + to_remove_permids.append(permid) + for permid in to_remove_permids: + del self._channels[permid] + + # resubmit + self._overlay_bridge.add_task(self._check_channels, 60) + diff --git a/tribler-mod/Tribler/Core/Statistics/DatabaseCrawler.py b/tribler-mod/Tribler/Core/Statistics/DatabaseCrawler.py new file mode 100644 index 0000000..87fe086 --- /dev/null +++ b/tribler-mod/Tribler/Core/Statistics/DatabaseCrawler.py @@ -0,0 +1,103 @@ +from time import localtime, strftime +# Written by Boudewijn Schoon +# see LICENSE.txt for license information + +import sys +import cPickle +from time import strftime + +from Tribler.Core.BitTornado.BT1.MessageID import CRAWLER_DATABASE_QUERY +from Tribler.Core.CacheDB.sqlitecachedb import SQLiteCacheDB +from Tribler.Core.Utilities.utilities import show_permid, show_permid_short +from Tribler.Core.Statistics.Crawler import Crawler + +DEBUG = False + +class DatabaseCrawler: + __single = None + + @classmethod + def get_instance(cls, *args, **kargs): + if not cls.__single: + cls.__single = cls(*args, **kargs) + return cls.__single + + def __init__(self): + self._sqlite_cache_db = SQLiteCacheDB.getInstance() + + crawler = Crawler.get_instance() + if crawler.am_crawler(): + self._file = open("databasecrawler.txt", "a") + self._file.write("".join(("# ", "*" * 80, "\n# ", strftime("%Y/%m/%d %H:%M:%S"), " Crawler started\n"))) + self._file.flush() + else: + self._file = None + + def query_initiator(self, permid, selversion, request_callback): + """ + Established a new connection. Send a CRAWLER_DATABASE_QUERY request. 
+ @param permid The Tribler peer permid + @param selversion The oberlay protocol version + @param request_callback Call this function one or more times to send the requests: request_callback(message_id, payload) + """ + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "databasecrawler: query_initiator", show_permid_short(permid) + request_callback(CRAWLER_DATABASE_QUERY, "SELECT 'peer_count', count(*) FROM Peer; SELECT 'torrent_count', count(*) FROM Torrent; SELECT 'moderations_count', count(*) FROM ModerationCast; SELECT 'positive_votes_count', count(*) FROM Moderators where status=1; SELECT 'negative_votes_count', count(*) FROM Moderators where status=-1", callback=self._after_request_callback) + + def _after_request_callback(self, exc, permid): + """ + Called by the Crawler with the result of the request_callback + call in the query_initiator method. + """ + if not exc: + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "databasecrawler: request send to", show_permid_short(permid) + self._file.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), "REQUEST", show_permid(permid), "\n"))) + self._file.flush() + + def handle_crawler_request(self, permid, selversion, channel_id, message, reply_callback): + """ + Received a CRAWLER_DATABASE_QUERY request. + @param permid The Crawler permid + @param selversion The overlay protocol version + @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair + @param message The message payload + @param reply_callback Call this function once to send the reply: reply_callback(payload [, error=123]) + """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "databasecrawler: handle_crawler_request", show_permid_short(permid), message + + # execute the sql + try: + cursor = self._sqlite_cache_db.execute_read(message) + + except Exception, e: + reply_callback(str(e), error=1) + else: + if cursor: + reply_callback(cPickle.dumps(list(cursor), 2)) + else: + reply_callback("error", error=2) + + def handle_crawler_reply(self, permid, selversion, channel_id, error, message, request_callback): + """ + Received a CRAWLER_DATABASE_QUERY reply. + @param permid The Crawler permid + @param selversion The overlay protocol version + @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair + @param error The error value. 0 indicates success. 
+ @param message The message payload + @param request_callback Call this function one or more times to send the requests: request_callback(message_id, payload) + """ + if error: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "databasecrawler: handle_crawler_reply", error, message + + self._file.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), " REPLY", show_permid(permid), str(error), message, "\n"))) + self._file.flush() + + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "databasecrawler: handle_crawler_reply", show_permid_short(permid), cPickle.loads(message) + + self._file.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), " REPLY", show_permid(permid), str(error), str(cPickle.loads(message)), "\n"))) + self._file.flush() + diff --git a/tribler-mod/Tribler/Core/Statistics/DatabaseCrawler.py.bak b/tribler-mod/Tribler/Core/Statistics/DatabaseCrawler.py.bak new file mode 100644 index 0000000..6dad3d7 --- /dev/null +++ b/tribler-mod/Tribler/Core/Statistics/DatabaseCrawler.py.bak @@ -0,0 +1,102 @@ +# Written by Boudewijn Schoon +# see LICENSE.txt for license information + +import sys +import cPickle +from time import strftime + +from Tribler.Core.BitTornado.BT1.MessageID import CRAWLER_DATABASE_QUERY +from Tribler.Core.CacheDB.sqlitecachedb import SQLiteCacheDB +from Tribler.Core.Utilities.utilities import show_permid, show_permid_short +from Tribler.Core.Statistics.Crawler import Crawler + +DEBUG = False + +class DatabaseCrawler: + __single = None + + @classmethod + def get_instance(cls, *args, **kargs): + if not cls.__single: + cls.__single = cls(*args, **kargs) + return cls.__single + + def __init__(self): + self._sqlite_cache_db = SQLiteCacheDB.getInstance() + + crawler = Crawler.get_instance() + if crawler.am_crawler(): + self._file = open("databasecrawler.txt", "a") + self._file.write("".join(("# ", "*" * 80, "\n# ", strftime("%Y/%m/%d %H:%M:%S"), " Crawler started\n"))) + self._file.flush() + else: + self._file = None + + def query_initiator(self, permid, selversion, request_callback): + """ + Established a new connection. Send a CRAWLER_DATABASE_QUERY request. + @param permid The Tribler peer permid + @param selversion The oberlay protocol version + @param request_callback Call this function one or more times to send the requests: request_callback(message_id, payload) + """ + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "databasecrawler: query_initiator", show_permid_short(permid) + request_callback(CRAWLER_DATABASE_QUERY, "SELECT 'peer_count', count(*) FROM Peer; SELECT 'torrent_count', count(*) FROM Torrent; SELECT 'moderations_count', count(*) FROM ModerationCast; SELECT 'positive_votes_count', count(*) FROM Moderators where status=1; SELECT 'negative_votes_count', count(*) FROM Moderators where status=-1", callback=self._after_request_callback) + + def _after_request_callback(self, exc, permid): + """ + Called by the Crawler with the result of the request_callback + call in the query_initiator method. + """ + if not exc: + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "databasecrawler: request send to", show_permid_short(permid) + self._file.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), "REQUEST", show_permid(permid), "\n"))) + self._file.flush() + + def handle_crawler_request(self, permid, selversion, channel_id, message, reply_callback): + """ + Received a CRAWLER_DATABASE_QUERY request. 
+ @param permid The Crawler permid + @param selversion The overlay protocol version + @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair + @param message The message payload + @param reply_callback Call this function once to send the reply: reply_callback(payload [, error=123]) + """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "databasecrawler: handle_crawler_request", show_permid_short(permid), message + + # execute the sql + try: + cursor = self._sqlite_cache_db.execute_read(message) + + except Exception, e: + reply_callback(str(e), error=1) + else: + if cursor: + reply_callback(cPickle.dumps(list(cursor), 2)) + else: + reply_callback("error", error=2) + + def handle_crawler_reply(self, permid, selversion, channel_id, error, message, request_callback): + """ + Received a CRAWLER_DATABASE_QUERY reply. + @param permid The Crawler permid + @param selversion The overlay protocol version + @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair + @param error The error value. 0 indicates success. + @param message The message payload + @param request_callback Call this function one or more times to send the requests: request_callback(message_id, payload) + """ + if error: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "databasecrawler: handle_crawler_reply", error, message + + self._file.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), " REPLY", show_permid(permid), str(error), message, "\n"))) + self._file.flush() + + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "databasecrawler: handle_crawler_reply", show_permid_short(permid), cPickle.loads(message) + + self._file.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), " REPLY", show_permid(permid), str(error), str(cPickle.loads(message)), "\n"))) + self._file.flush() + diff --git a/tribler-mod/Tribler/Core/Statistics/FriendshipCrawler.py b/tribler-mod/Tribler/Core/Statistics/FriendshipCrawler.py new file mode 100644 index 0000000..681beb7 --- /dev/null +++ b/tribler-mod/Tribler/Core/Statistics/FriendshipCrawler.py @@ -0,0 +1,122 @@ +from time import localtime, strftime +# Written by Ali Abbas +# see LICENSE.txt for license information + +import sys +import time +from traceback import print_exc + +from Tribler.Core.BitTornado.BT1.MessageID import CRAWLER_FRIENDSHIP_STATS +from Tribler.Core.BitTornado.bencode import bencode,bdecode +from Tribler.Core.CacheDB.SqliteFriendshipStatsCacheDB import FriendshipStatisticsDBHandler +from Tribler.Core.CacheDB.sqlitecachedb import bin2str + +DEBUG = False + +class FriendshipCrawler: + __single = None + + @classmethod + def get_instance(cls, *args, **kargs): + if not cls.__single: + cls.__single = cls(*args, **kargs) + return cls.__single + + def __init__(self,session): + self.session = session + self.friendshipStatistics_db = FriendshipStatisticsDBHandler.getInstance() + + def query_initiator(self, permid, selversion, request_callback): + """ + Established a new connection. Send a CRAWLER_DATABASE_QUERY request. 
+ @param permid The Tribler peer permid + @param selversion The oberlay protocol version + @param request_callback Call this function one or more times to send the requests: request_callback(message_id, payload) + """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "FriendshipCrawler: friendship_query_initiator" + + get_last_updated_time = self.friendshipStatistics_db.getLastUpdateTimeOfThePeer(permid) + + msg_dict = {'current time':get_last_updated_time} + msg = bencode(msg_dict) + return request_callback(CRAWLER_FRIENDSHIP_STATS,msg) + + def handle_crawler_request(self, permid, selversion, channel_id, message, reply_callback): + """ + Received a CRAWLER_FRIENDSHIP_QUERY request. + @param permid The Crawler permid + @param selversion The overlay protocol version + @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair + @param message The message payload + @param reply_callback Call this function once to send the reply: reply_callback(payload [, error=123]) + """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "FriendshipCrawler: handle_friendship_crawler_database_query_request", message + + try: + d = bdecode(message) + + stats = self.getStaticsFromFriendshipStatisticsTable(self.session.get_permid(),d['current time']) + msg_dict = {'current time':d['current time'],'stats':stats} + msg = bencode(msg_dict) + reply_callback(msg) + + except Exception, e: + print_exc() + reply_callback(str(e), 1) + + return True + + def handle_crawler_reply(self, permid, selversion, channel_id, error, message, request_callback): + """ + Received a CRAWLER_FRIENDSHIP_STATS request. + @param permid The Crawler permid + @param selversion The overlay protocol version + @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair + @param error The error value. 0 indicates success. + @param message The message payload + @param request_callback Call this function one or more times to send the requests: request_callback(message_id, payload) + """ + + if error: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "friendshipcrawler: handle_crawler_reply" + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "friendshipcrawler: error", error, message + + else: + try: + d = bdecode(message) + except Exception: + print_exc() + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "friendshipcrawler: handle_crawler_reply" + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "friendshipcrawler: friendship: Got",`d` + + self.saveFriendshipStatistics(permid,d['current time'],d['stats']) + + return True + + def getStaticsFromFriendshipStatisticsTable(self, mypermid, last_update_time): + return self.friendshipStatistics_db.getAllFriendshipStatistics(mypermid, last_update_time) + + def saveFriendshipStatistics(self,permid,currentTime,stats): + if stats: + # 20/10/08. Boudewijn: A mistake in the code results in + # only 7 items in the list instead of 8. We add one here + # to get things working. 
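+ # Pad each statistics row to the expected 8 fields and tag it with the
+ # reporting peer's permid (as a printable string) before the rows are
+ # stored in the friendship statistics database.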
+ for stat in stats: + if len(stat) == 7: + stat.append(0) + if len(stat) == 7 or len(stat) == 8: + stat.append(bin2str(permid)) + + self.friendshipStatistics_db.saveFriendshipStatisticData(stats) + + def getLastUpdateTime(self, permid): + + mypermid = self.session.get_permid() + + return self.friendshipStatistics_db.getLastUpdatedTime(permid) + diff --git a/tribler-mod/Tribler/Core/Statistics/FriendshipCrawler.py.bak b/tribler-mod/Tribler/Core/Statistics/FriendshipCrawler.py.bak new file mode 100644 index 0000000..005a756 --- /dev/null +++ b/tribler-mod/Tribler/Core/Statistics/FriendshipCrawler.py.bak @@ -0,0 +1,121 @@ +# Written by Ali Abbas +# see LICENSE.txt for license information + +import sys +import time +from traceback import print_exc + +from Tribler.Core.BitTornado.BT1.MessageID import CRAWLER_FRIENDSHIP_STATS +from Tribler.Core.BitTornado.bencode import bencode,bdecode +from Tribler.Core.CacheDB.SqliteFriendshipStatsCacheDB import FriendshipStatisticsDBHandler +from Tribler.Core.CacheDB.sqlitecachedb import bin2str + +DEBUG = False + +class FriendshipCrawler: + __single = None + + @classmethod + def get_instance(cls, *args, **kargs): + if not cls.__single: + cls.__single = cls(*args, **kargs) + return cls.__single + + def __init__(self,session): + self.session = session + self.friendshipStatistics_db = FriendshipStatisticsDBHandler.getInstance() + + def query_initiator(self, permid, selversion, request_callback): + """ + Established a new connection. Send a CRAWLER_DATABASE_QUERY request. + @param permid The Tribler peer permid + @param selversion The oberlay protocol version + @param request_callback Call this function one or more times to send the requests: request_callback(message_id, payload) + """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "FriendshipCrawler: friendship_query_initiator" + + get_last_updated_time = self.friendshipStatistics_db.getLastUpdateTimeOfThePeer(permid) + + msg_dict = {'current time':get_last_updated_time} + msg = bencode(msg_dict) + return request_callback(CRAWLER_FRIENDSHIP_STATS,msg) + + def handle_crawler_request(self, permid, selversion, channel_id, message, reply_callback): + """ + Received a CRAWLER_FRIENDSHIP_QUERY request. + @param permid The Crawler permid + @param selversion The overlay protocol version + @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair + @param message The message payload + @param reply_callback Call this function once to send the reply: reply_callback(payload [, error=123]) + """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "FriendshipCrawler: handle_friendship_crawler_database_query_request", message + + try: + d = bdecode(message) + + stats = self.getStaticsFromFriendshipStatisticsTable(self.session.get_permid(),d['current time']) + msg_dict = {'current time':d['current time'],'stats':stats} + msg = bencode(msg_dict) + reply_callback(msg) + + except Exception, e: + print_exc() + reply_callback(str(e), 1) + + return True + + def handle_crawler_reply(self, permid, selversion, channel_id, error, message, request_callback): + """ + Received a CRAWLER_FRIENDSHIP_STATS request. + @param permid The Crawler permid + @param selversion The overlay protocol version + @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair + @param error The error value. 0 indicates success. 
+ @param message The message payload + @param request_callback Call this function one or more times to send the requests: request_callback(message_id, payload) + """ + + if error: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "friendshipcrawler: handle_crawler_reply" + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "friendshipcrawler: error", error, message + + else: + try: + d = bdecode(message) + except Exception: + print_exc() + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "friendshipcrawler: handle_crawler_reply" + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "friendshipcrawler: friendship: Got",`d` + + self.saveFriendshipStatistics(permid,d['current time'],d['stats']) + + return True + + def getStaticsFromFriendshipStatisticsTable(self, mypermid, last_update_time): + return self.friendshipStatistics_db.getAllFriendshipStatistics(mypermid, last_update_time) + + def saveFriendshipStatistics(self,permid,currentTime,stats): + if stats: + # 20/10/08. Boudewijn: A mistake in the code results in + # only 7 items in the list instead of 8. We add one here + # to get things working. + for stat in stats: + if len(stat) == 7: + stat.append(0) + if len(stat) == 7 or len(stat) == 8: + stat.append(bin2str(permid)) + + self.friendshipStatistics_db.saveFriendshipStatisticData(stats) + + def getLastUpdateTime(self, permid): + + mypermid = self.session.get_permid() + + return self.friendshipStatistics_db.getLastUpdatedTime(permid) + diff --git a/tribler-mod/Tribler/Core/Statistics/Logger.py b/tribler-mod/Tribler/Core/Statistics/Logger.py new file mode 100644 index 0000000..6340f4c --- /dev/null +++ b/tribler-mod/Tribler/Core/Statistics/Logger.py @@ -0,0 +1,222 @@ +from time import localtime, strftime + +# Written by Jie Yang +# see LICENSE.txt for license information +# +# Log version 3 = BuddyCast message V8 + +import sys +import os +import time +import socket +import threading +from traceback import print_exc + +DEBUG = True #False + +log_separator = ' ' +logger = None + +# To be compatible with Logger from http://linux.duke.edu/projects/mini/logger/ +# for 2fastbt (revision <=825). +def create_logger(file_name): + global logger + + logger = Logger(3, file_name) + + +def get_logger(): + global logger + + if logger is None: + create_logger("global.log") + + return logger + +def get_today(): # UTC based + return time.gmtime(time.time())[:3] + +class Logger: + """ + Atrributes (defulat value): + threshold (): message will not be logged if its output_level is bigger + than this threshould + file_name (): log file name + file_dir ('.'): directory of log file. It can be absolute or relative path. + prefix (''): prefix of log file + prefix_date (False): if it is True, insert 'YYYYMMDD-' between prefix + and file_name, e.g., sp-20060302-buddycast.log given + prefix = 'sp-' and file_name = 'buddycast.log' + open_mode ('a+b'): mode for open. 
+ """ + + def __init__(self, threshold, file_name, file_dir = '.', prefix = '', + prefix_date = False, open_mode = 'a+b'): + + self.threshold = threshold + self.Log = self.log + if file_name == '': + self.logfile = sys.stderr + else: + try: + if not os.access(file_dir, os.F_OK): + try: + os.mkdir(file_dir) + except os.error, msg: + raise "logger: mkdir error: " + msg + file_path = self.get_file_path(file_dir, prefix, + prefix_date, file_name) + self.logfile = open(file_path, open_mode) + except Exception, msg: + self.logfile = None + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "logger: cannot open log file", \ + file_name, file_dir, prefix, prefix_date, msg + print_exc() + + def __del__(self): + self.close() + + def get_file_path(self, file_dir, prefix, prefix_date, file_name): + if prefix_date is True: # create a new file for each day + today = get_today() + date = "%04d%02d%02d" % today + else: + date = '' + return os.path.join(file_dir, prefix + date + file_name) + + def log(self, level, msg, showtime=True): + if level <= self.threshold: + if self.logfile is None: + return + if showtime: + time_stamp = "%.01f"%time.time() + self.logfile.write(time_stamp + log_separator) + if isinstance(msg, str): + self.logfile.write(msg) + else: + self.logfile.write(repr(msg)) + self.logfile.write('\n') + self.logfile.flush() + + def close(self): + if self.logfile is not None: + self.logfile.close() + + +class OverlayLogger: + __single = None + __lock = threading.RLock() + + def __init__(self, file_name, file_dir = '.'): + if OverlayLogger.__single: + raise RuntimeError, "OverlayLogger is singleton2" + + self.file_name = file_name + self.file_dir = file_dir + OverlayLogger.__single = self + self.Log = self.log + self.__call__ = self.log + + def getInstance(*args, **kw): + OverlayLogger.__lock.acquire() + try: + if OverlayLogger.__single is None: + OverlayLogger(*args, **kw) + return OverlayLogger.__single + finally: + OverlayLogger.__lock.release() + getInstance = staticmethod(getInstance) + + def log(self, *msgs): + """ + # MSG must be the last one. Permid should be in the rear to be readable + BuddyCast log for superpeer format: (V2) + CONN_TRY IP PORT PERMID + CONN_ADD IP PORT PERMID SELVERSION + CONN_DEL IP PORT PERMID REASON + SEND_MSG IP PORT PERMID SELVERSION MSG_ID MSG + RECV_MSG IP PORT PERMID SELVERSION MSG_ID MSG + + #BUCA_CON Permid1, Permid2, ... + + BUCA_STA xx xx xx ... # BuddyCast status + 1 Pr # nPeer + 2 Pf # nPref + 3 Tr # nTorrent + + #4 Cc # nConntionCandidates (this one was missed before v4.1, and will not be included either in this version) + 4 Bs # nBlockSendList + 5 Br # nBlockRecvList + + 6 SO # nConnectionsInSecureOver + 7 Co # nConnectionsInBuddyCast + + 8 Ct # nTasteConnectionList + 9 Cr # nRandomConnectionList + 10 Cu # nUnconnectableConnectionList + """ + + log_msg = '' + nmsgs = len(msgs) + if nmsgs < 2: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Error message for log", msgs + return + + else: + for i in range(nmsgs): + if isinstance(msgs[i], tuple) or isinstance(msgs[i], list): + log_msg += log_separator + for msg in msgs[i]: + try: + log_msg += str(msg) + except: + log_msg += repr(msg) + log_msg += log_separator + else: + try: + log_msg += str(msgs[i]) + except: + log_msg += repr(msgs[i]) + log_msg += log_separator + + if log_msg: + self._write_log(log_msg) + + def _write_log(self, msg): + # one logfile per day. 
+ today = get_today() + if not hasattr(self, 'today'): + self.logger = self._make_logger(today) + elif today != self.today: # make a new log if a new day comes + self.logger.close() + self.logger = self._make_logger(today) + self.logger.log(3, msg) + + def _make_logger(self, today): + self.today = today + hostname = socket.gethostname() + logger = Logger(3, self.file_name, self.file_dir, hostname, True) + logger.log(3, '# Tribler Overlay Log Version 3', showtime=False) # mention the log version at the first line + logger.log(3, '# BUCA_STA: nRound nPeer nPref nTorrent ' + \ + 'nBlockSendList nBlockRecvList ' + \ + 'nConnectionsInSecureOver nConnectionsInBuddyCast ' + \ + 'nTasteConnectionList nRandomConnectionList nUnconnectableConnectionList', + showtime=False) + logger.log(3, '# BUCA_STA: Rd Pr Pf Tr Bs Br SO Co Ct Cr Cu', showtime=False) + return logger + +if __name__ == '__main__': + create_logger('test.log') + get_logger().log(1, 'abc' + ' ' + str(['abc', 1, (2,3)])) + get_logger().log(0, [1,'a',{(2,3):'asfadf'}]) + #get_logger().log(1, open('log').read()) + + ol = OverlayLogger('overlay.log') + ol.log('CONN_TRY', '123.34.3.45', 34, 'asdfasdfasdfasdfsadf') + ol.log('CONN_ADD', '123.34.3.45', 36, 'asdfasdfasdfasdfsadf', 3) + ol.log('CONN_DEL', '123.34.3.45', 38, 'asdfasdfasdfasdfsadf', 'asbc') + ol.log('SEND_MSG', '123.34.3.45', 39, 'asdfasdfasdfasdfsadf', 2, 'BC', 'abadsfasdfasf') + ol.log('RECV_MSG', '123.34.3.45', 30, 'asdfasdfasdfasdfsadf', 3, 'BC', 'bbbbbbbbbbbbb') + ol.log('BUCA_STA', (1,2,3), (4,5,6), (7,8), (9,10,11)) + ol.log('BUCA_CON', ['asfd','bsdf','wevs','wwrewv']) + \ No newline at end of file diff --git a/tribler-mod/Tribler/Core/Statistics/Logger.py.bak b/tribler-mod/Tribler/Core/Statistics/Logger.py.bak new file mode 100644 index 0000000..3b0a682 --- /dev/null +++ b/tribler-mod/Tribler/Core/Statistics/Logger.py.bak @@ -0,0 +1,221 @@ + +# Written by Jie Yang +# see LICENSE.txt for license information +# +# Log version 3 = BuddyCast message V8 + +import sys +import os +import time +import socket +import threading +from traceback import print_exc + +DEBUG = True #False + +log_separator = ' ' +logger = None + +# To be compatible with Logger from http://linux.duke.edu/projects/mini/logger/ +# for 2fastbt (revision <=825). +def create_logger(file_name): + global logger + + logger = Logger(3, file_name) + + +def get_logger(): + global logger + + if logger is None: + create_logger("global.log") + + return logger + +def get_today(): # UTC based + return time.gmtime(time.time())[:3] + +class Logger: + """ + Atrributes (defulat value): + threshold (): message will not be logged if its output_level is bigger + than this threshould + file_name (): log file name + file_dir ('.'): directory of log file. It can be absolute or relative path. + prefix (''): prefix of log file + prefix_date (False): if it is True, insert 'YYYYMMDD-' between prefix + and file_name, e.g., sp-20060302-buddycast.log given + prefix = 'sp-' and file_name = 'buddycast.log' + open_mode ('a+b'): mode for open. 
+ """ + + def __init__(self, threshold, file_name, file_dir = '.', prefix = '', + prefix_date = False, open_mode = 'a+b'): + + self.threshold = threshold + self.Log = self.log + if file_name == '': + self.logfile = sys.stderr + else: + try: + if not os.access(file_dir, os.F_OK): + try: + os.mkdir(file_dir) + except os.error, msg: + raise "logger: mkdir error: " + msg + file_path = self.get_file_path(file_dir, prefix, + prefix_date, file_name) + self.logfile = open(file_path, open_mode) + except Exception, msg: + self.logfile = None + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "logger: cannot open log file", \ + file_name, file_dir, prefix, prefix_date, msg + print_exc() + + def __del__(self): + self.close() + + def get_file_path(self, file_dir, prefix, prefix_date, file_name): + if prefix_date is True: # create a new file for each day + today = get_today() + date = "%04d%02d%02d" % today + else: + date = '' + return os.path.join(file_dir, prefix + date + file_name) + + def log(self, level, msg, showtime=True): + if level <= self.threshold: + if self.logfile is None: + return + if showtime: + time_stamp = "%.01f"%time.time() + self.logfile.write(time_stamp + log_separator) + if isinstance(msg, str): + self.logfile.write(msg) + else: + self.logfile.write(repr(msg)) + self.logfile.write('\n') + self.logfile.flush() + + def close(self): + if self.logfile is not None: + self.logfile.close() + + +class OverlayLogger: + __single = None + __lock = threading.RLock() + + def __init__(self, file_name, file_dir = '.'): + if OverlayLogger.__single: + raise RuntimeError, "OverlayLogger is singleton2" + + self.file_name = file_name + self.file_dir = file_dir + OverlayLogger.__single = self + self.Log = self.log + self.__call__ = self.log + + def getInstance(*args, **kw): + OverlayLogger.__lock.acquire() + try: + if OverlayLogger.__single is None: + OverlayLogger(*args, **kw) + return OverlayLogger.__single + finally: + OverlayLogger.__lock.release() + getInstance = staticmethod(getInstance) + + def log(self, *msgs): + """ + # MSG must be the last one. Permid should be in the rear to be readable + BuddyCast log for superpeer format: (V2) + CONN_TRY IP PORT PERMID + CONN_ADD IP PORT PERMID SELVERSION + CONN_DEL IP PORT PERMID REASON + SEND_MSG IP PORT PERMID SELVERSION MSG_ID MSG + RECV_MSG IP PORT PERMID SELVERSION MSG_ID MSG + + #BUCA_CON Permid1, Permid2, ... + + BUCA_STA xx xx xx ... # BuddyCast status + 1 Pr # nPeer + 2 Pf # nPref + 3 Tr # nTorrent + + #4 Cc # nConntionCandidates (this one was missed before v4.1, and will not be included either in this version) + 4 Bs # nBlockSendList + 5 Br # nBlockRecvList + + 6 SO # nConnectionsInSecureOver + 7 Co # nConnectionsInBuddyCast + + 8 Ct # nTasteConnectionList + 9 Cr # nRandomConnectionList + 10 Cu # nUnconnectableConnectionList + """ + + log_msg = '' + nmsgs = len(msgs) + if nmsgs < 2: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Error message for log", msgs + return + + else: + for i in range(nmsgs): + if isinstance(msgs[i], tuple) or isinstance(msgs[i], list): + log_msg += log_separator + for msg in msgs[i]: + try: + log_msg += str(msg) + except: + log_msg += repr(msg) + log_msg += log_separator + else: + try: + log_msg += str(msgs[i]) + except: + log_msg += repr(msgs[i]) + log_msg += log_separator + + if log_msg: + self._write_log(log_msg) + + def _write_log(self, msg): + # one logfile per day. 
+ today = get_today() + if not hasattr(self, 'today'): + self.logger = self._make_logger(today) + elif today != self.today: # make a new log if a new day comes + self.logger.close() + self.logger = self._make_logger(today) + self.logger.log(3, msg) + + def _make_logger(self, today): + self.today = today + hostname = socket.gethostname() + logger = Logger(3, self.file_name, self.file_dir, hostname, True) + logger.log(3, '# Tribler Overlay Log Version 3', showtime=False) # mention the log version at the first line + logger.log(3, '# BUCA_STA: nRound nPeer nPref nTorrent ' + \ + 'nBlockSendList nBlockRecvList ' + \ + 'nConnectionsInSecureOver nConnectionsInBuddyCast ' + \ + 'nTasteConnectionList nRandomConnectionList nUnconnectableConnectionList', + showtime=False) + logger.log(3, '# BUCA_STA: Rd Pr Pf Tr Bs Br SO Co Ct Cr Cu', showtime=False) + return logger + +if __name__ == '__main__': + create_logger('test.log') + get_logger().log(1, 'abc' + ' ' + str(['abc', 1, (2,3)])) + get_logger().log(0, [1,'a',{(2,3):'asfadf'}]) + #get_logger().log(1, open('log').read()) + + ol = OverlayLogger('overlay.log') + ol.log('CONN_TRY', '123.34.3.45', 34, 'asdfasdfasdfasdfsadf') + ol.log('CONN_ADD', '123.34.3.45', 36, 'asdfasdfasdfasdfsadf', 3) + ol.log('CONN_DEL', '123.34.3.45', 38, 'asdfasdfasdfasdfsadf', 'asbc') + ol.log('SEND_MSG', '123.34.3.45', 39, 'asdfasdfasdfasdfsadf', 2, 'BC', 'abadsfasdfasf') + ol.log('RECV_MSG', '123.34.3.45', 30, 'asdfasdfasdfasdfsadf', 3, 'BC', 'bbbbbbbbbbbbb') + ol.log('BUCA_STA', (1,2,3), (4,5,6), (7,8), (9,10,11)) + ol.log('BUCA_CON', ['asfd','bsdf','wevs','wwrewv']) + \ No newline at end of file diff --git a/tribler-mod/Tribler/Core/Statistics/SeedingStatsCrawler.py b/tribler-mod/Tribler/Core/Statistics/SeedingStatsCrawler.py new file mode 100644 index 0000000..4ecf2b8 --- /dev/null +++ b/tribler-mod/Tribler/Core/Statistics/SeedingStatsCrawler.py @@ -0,0 +1,179 @@ +from time import localtime, strftime +# Written by Boxun Zhang, Boudewijn Schoon +# see LICENSE.txt for license information + +import sys +import cPickle + +from Tribler.Core.BitTornado.BT1.MessageID import CRAWLER_SEEDINGSTATS_QUERY +from Tribler.Core.CacheDB.SqliteSeedingStatsCacheDB import * + +DEBUG = False + +class SeedingStatsCrawler: + __single = None + + @classmethod + def get_instance(cls, *args, **kargs): + if not cls.__single: + cls.__single = cls(*args, **kargs) + return cls.__single + + def __init__(self): + self._sqlite_cache_db = SQLiteSeedingStatsCacheDB.getInstance() + + def query_initiator(self, permid, selversion, request_callback): + """ + Established a new connection. Send a CRAWLER_DATABASE_QUERY request. + @param permid The Tribler peer permid + @param selversion The oberlay protocol version + @param request_callback Call this function one or more times to send the requests: request_callback(message_id, payload) + """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: SeedingStatsDB_update_settings_initiator" + read_query = "SELECT * FROM SeedingStats WHERE crawled = 0" + write_query = "UPDATE SeedingStats SET crawled = 1 WHERE crawled = 0" + return request_callback(CRAWLER_SEEDINGSTATS_QUERY, cPickle.dumps([("read", read_query), ("write", write_query)], 2)) + + def update_settings_initiator(self, permid, selversion, request_callback): + """ + Established a new connection. Send a CRAWLER_DATABASE_QUERY request. 
+ @param permid The Tribler peer permid + @param selversion The oberlay protocol version + @param request_callback Call this function one or more times to send the requests: request_callback(message_id, payload) + """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: SeedingStatsDB_update_settings_initiator" + + try: + sql_update = "UPDATE SeedingStatsSettings SET crawling_interval=%s WHERE crawling_enabled=%s"%(1800, 1) + except: + print_exc() + else: + return request_callback(CRAWLER_SEEDINGSTATS_QUERY, cPickle.dumps(sql_update, 2)) + + + def handle_crawler_request(self, permid, selversion, channel_id, message, reply_callback): + """ + Received a CRAWLER_DATABASE_QUERY request. + @param permid The Crawler permid + @param selversion The overlay protocol version + @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair + @param message The message payload + @param reply_callback Call this function once to send the reply: reply_callback(payload [, error=123]) + + MESSAGE contains a cPickled list. Each list element is a + tuple. Each tuple consists of a string (either 'read' or + 'write') and a string (the query) + """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: handle_crawler_request", len(message) + + results = [] + try: + items = cPickle.loads(message) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: handle_crawler_request", items + + for action, query in items: + if action == "read": + cursor = self._sqlite_cache_db.execute_read(query) + elif action == "write": + cursor = self._sqlite_cache_db.execute_write(query) + else: + raise Exception("invalid payload") + + if cursor: + results.append(list(cursor)) + else: + results.append(None) + except Exception, e: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: handle_crawler_request", e + results.append(str(e)) + reply_callback(cPickle.dumps(results, 2), 1) + else: + reply_callback(cPickle.dumps(results, 2)) + + return True + + + def handle_crawler_reply(self, permid, selversion, channel_id, error, message, reply_callback): + """ + Received a CRAWLER_DATABASE_QUERY request. + @param permid The Crawler permid + @param selversion The overlay protocol version + @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair + @param error The error value. 0 indicates success. + @param message The message payload + @param request_callback Call this function one or more times to send the requests: request_callback(message_id, payload) + """ + if error: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "seedingstatscrawler: handle_crawler_reply" + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "seedingstatscrawler: error", error + + else: + try: + results = cPickle.loads(message) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "seedingstatscrawler: handle_crawler_reply" + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "seedingstatscrawler:", results + + # the first item in the list contains the results from the select query + if results[0]: + values = map(tuple, results[0]) + self._sqlite_cache_db.insertMany("SeedingStats", values) + except Exception, e: + + # 04/11/08 boudewijn: cPickle.loads(...) sometimes + # results in EOFError. This may be caused by message + # being interpreted as non-binary. 
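+ # Keep the raw payload around in a local dump file for post-mortem
+ # inspection before giving up on this reply.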
+ f = open("seedingstats-EOFError.data", "ab") + f.write("--\n%s\n--\n" % message) + f.close() + + print_exc() + return False + + return True + + + def handle_crawler_update_settings_request(self, permid, selversion, channel_id, message, reply_callback): + """ + Received a CRAWLER_DATABASE_QUERY request. + @param permid The Crawler permid + @param selversion The overlay protocol version + @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair + @param message The message payload + @param reply_callback Call this function once to send the reply: reply_callback(payload [, error=123]) + """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: handle_crawler_SeedingStats_request", message + + # execute the sql + sql_update = cPickle.loads(message) + + try: + self._sqlite_cache_db.execute_write(sql_query) + except Exception, e: + reply_callback(str(e), 1) + else: + reply_callback(cPickle.dumps('Update succeeded.', 2)) + + return True + + def handle_crawler_update_setings_reply(self, permid, selversion, channel_id, message, reply_callback): + """ + Received a CRAWLER_DATABASE_QUERY request. + @param permid The Crawler permid + @param selversion The overlay protocol version + @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair + @param message The message payload + @param request_callback Call this function one or more times to send the requests: request_callback(message_id, payload) + """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "olapps: handle_crawler_SeedingStats_reply" + + return True diff --git a/tribler-mod/Tribler/Core/Statistics/SeedingStatsCrawler.py.bak b/tribler-mod/Tribler/Core/Statistics/SeedingStatsCrawler.py.bak new file mode 100644 index 0000000..26a21c6 --- /dev/null +++ b/tribler-mod/Tribler/Core/Statistics/SeedingStatsCrawler.py.bak @@ -0,0 +1,178 @@ +# Written by Boxun Zhang, Boudewijn Schoon +# see LICENSE.txt for license information + +import sys +import cPickle + +from Tribler.Core.BitTornado.BT1.MessageID import CRAWLER_SEEDINGSTATS_QUERY +from Tribler.Core.CacheDB.SqliteSeedingStatsCacheDB import * + +DEBUG = False + +class SeedingStatsCrawler: + __single = None + + @classmethod + def get_instance(cls, *args, **kargs): + if not cls.__single: + cls.__single = cls(*args, **kargs) + return cls.__single + + def __init__(self): + self._sqlite_cache_db = SQLiteSeedingStatsCacheDB.getInstance() + + def query_initiator(self, permid, selversion, request_callback): + """ + Established a new connection. Send a CRAWLER_DATABASE_QUERY request. + @param permid The Tribler peer permid + @param selversion The oberlay protocol version + @param request_callback Call this function one or more times to send the requests: request_callback(message_id, payload) + """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: SeedingStatsDB_update_settings_initiator" + read_query = "SELECT * FROM SeedingStats WHERE crawled = 0" + write_query = "UPDATE SeedingStats SET crawled = 1 WHERE crawled = 0" + return request_callback(CRAWLER_SEEDINGSTATS_QUERY, cPickle.dumps([("read", read_query), ("write", write_query)], 2)) + + def update_settings_initiator(self, permid, selversion, request_callback): + """ + Established a new connection. Send a CRAWLER_DATABASE_QUERY request. 
+ @param permid The Tribler peer permid + @param selversion The oberlay protocol version + @param request_callback Call this function one or more times to send the requests: request_callback(message_id, payload) + """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: SeedingStatsDB_update_settings_initiator" + + try: + sql_update = "UPDATE SeedingStatsSettings SET crawling_interval=%s WHERE crawling_enabled=%s"%(1800, 1) + except: + print_exc() + else: + return request_callback(CRAWLER_SEEDINGSTATS_QUERY, cPickle.dumps(sql_update, 2)) + + + def handle_crawler_request(self, permid, selversion, channel_id, message, reply_callback): + """ + Received a CRAWLER_DATABASE_QUERY request. + @param permid The Crawler permid + @param selversion The overlay protocol version + @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair + @param message The message payload + @param reply_callback Call this function once to send the reply: reply_callback(payload [, error=123]) + + MESSAGE contains a cPickled list. Each list element is a + tuple. Each tuple consists of a string (either 'read' or + 'write') and a string (the query) + """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: handle_crawler_request", len(message) + + results = [] + try: + items = cPickle.loads(message) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: handle_crawler_request", items + + for action, query in items: + if action == "read": + cursor = self._sqlite_cache_db.execute_read(query) + elif action == "write": + cursor = self._sqlite_cache_db.execute_write(query) + else: + raise Exception("invalid payload") + + if cursor: + results.append(list(cursor)) + else: + results.append(None) + except Exception, e: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: handle_crawler_request", e + results.append(str(e)) + reply_callback(cPickle.dumps(results, 2), 1) + else: + reply_callback(cPickle.dumps(results, 2)) + + return True + + + def handle_crawler_reply(self, permid, selversion, channel_id, error, message, reply_callback): + """ + Received a CRAWLER_DATABASE_QUERY request. + @param permid The Crawler permid + @param selversion The overlay protocol version + @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair + @param error The error value. 0 indicates success. + @param message The message payload + @param request_callback Call this function one or more times to send the requests: request_callback(message_id, payload) + """ + if error: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "seedingstatscrawler: handle_crawler_reply" + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "seedingstatscrawler: error", error + + else: + try: + results = cPickle.loads(message) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "seedingstatscrawler: handle_crawler_reply" + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "seedingstatscrawler:", results + + # the first item in the list contains the results from the select query + if results[0]: + values = map(tuple, results[0]) + self._sqlite_cache_db.insertMany("SeedingStats", values) + except Exception, e: + + # 04/11/08 boudewijn: cPickle.loads(...) sometimes + # results in EOFError. This may be caused by message + # being interpreted as non-binary. 
+ f = open("seedingstats-EOFError.data", "ab") + f.write("--\n%s\n--\n" % message) + f.close() + + print_exc() + return False + + return True + + + def handle_crawler_update_settings_request(self, permid, selversion, channel_id, message, reply_callback): + """ + Received a CRAWLER_DATABASE_QUERY request. + @param permid The Crawler permid + @param selversion The overlay protocol version + @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair + @param message The message payload + @param reply_callback Call this function once to send the reply: reply_callback(payload [, error=123]) + """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "crawler: handle_crawler_SeedingStats_request", message + + # execute the sql + sql_update = cPickle.loads(message) + + try: + self._sqlite_cache_db.execute_write(sql_query) + except Exception, e: + reply_callback(str(e), 1) + else: + reply_callback(cPickle.dumps('Update succeeded.', 2)) + + return True + + def handle_crawler_update_setings_reply(self, permid, selversion, channel_id, message, reply_callback): + """ + Received a CRAWLER_DATABASE_QUERY request. + @param permid The Crawler permid + @param selversion The overlay protocol version + @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair + @param message The message payload + @param request_callback Call this function one or more times to send the requests: request_callback(message_id, payload) + """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "olapps: handle_crawler_SeedingStats_reply" + + return True diff --git a/tribler-mod/Tribler/Core/Statistics/VideoPlaybackCrawler.py b/tribler-mod/Tribler/Core/Statistics/VideoPlaybackCrawler.py new file mode 100644 index 0000000..15242d6 --- /dev/null +++ b/tribler-mod/Tribler/Core/Statistics/VideoPlaybackCrawler.py @@ -0,0 +1,210 @@ +from time import localtime, strftime +""" +Crawling the VideoPlayback statistics database +""" + +import sys +import cPickle +import threading +from time import strftime + +from Tribler.Core.BitTornado.BT1.MessageID import CRAWLER_VIDEOPLAYBACK_INFO_QUERY, CRAWLER_VIDEOPLAYBACK_EVENT_QUERY +from Tribler.Core.CacheDB.SqliteVideoPlaybackStatsCacheDB import VideoPlaybackEventDBHandler, VideoPlaybackInfoDBHandler +from Tribler.Core.Utilities.utilities import show_permid, show_permid_short +from Tribler.Core.Statistics.Crawler import Crawler +from Tribler.Core.Overlay.SecureOverlay import OLPROTO_VER_EIGHTH + +DEBUG = False + +class VideoPlaybackCrawler: + __single = None # used for multi-threaded singletons pattern + lock = threading.Lock() + + @classmethod + def get_instance(cls, *args, **kargs): + # Singleton pattern with double-checking to ensure that it can only create one object + if cls.__single is None: + cls.lock.acquire() + try: + if cls.__single is None: + cls.__single = cls(*args, **kargs) + finally: + cls.lock.release() + return cls.__single + + def __init__(self): + if VideoPlaybackCrawler.__single is not None: + raise RuntimeError, "VideoPlaybackCrawler is singleton" + + crawler = Crawler.get_instance() + if crawler.am_crawler(): + self._file = open("videoplaybackcrawler.txt", "a") + self._file.write("".join(("# ", "*" * 80, "\n# ", strftime("%Y/%m/%d %H:%M:%S"), " Crawler started\n"))) + self._file.flush() + self._info_db = None + self._event_db = None + + else: + self._file = None + self._info_db = VideoPlaybackInfoDBHandler.get_instance() + self._event_db = VideoPlaybackEventDBHandler.get_instance() + + def 
query_info_initiator(self, permid, selversion, request_callback): + """ + <> + Established a new connection. Send a CRAWLER_VIDEOPLAYBACK_INFO_QUERY request. + @param permid The Tribler peer permid + @param selversion The oberlay protocol version + @param request_callback Call this function one or more times to send the requests: request_callback(message_id, payload) + """ + if selversion >= OLPROTO_VER_EIGHTH: + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "videoplaybackcrawler: query_info_initiator", show_permid_short(permid) + # boudewijn: order the result DESC! From the resulting + # list we will not remove the first entries from the + # database because this (being the last item added) may + # still be actively used. + request_callback(CRAWLER_VIDEOPLAYBACK_INFO_QUERY, "SELECT key, timestamp, piece_size, num_pieces, bitrate, nat FROM playback_info ORDER BY timestamp DESC LIMIT 50", callback=self._after_info_request_callback) + else: + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "videoplaybackcrawler: query_info_initiator", show_permid_short(permid), "unsupported overlay version" + + def _after_info_request_callback(self, exc, permid): + """ + <> + Called by the Crawler with the result of the request_callback + call in the query_initiator method. + """ + if not exc: + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "videoplaybackcrawler: request send to", show_permid_short(permid) + self._file.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), "INFO REQUEST", show_permid(permid), "\n"))) + self._file.flush() + + def handle_info_crawler_reply(self, permid, selversion, channel_id, error, message, request_callback): + """ + <> + Received a CRAWLER_VIDEOPLAYBACK_INFO_QUERY reply. + @param permid The Crawler permid + @param selversion The overlay protocol version + @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair + @param error The error value. 0 indicates success. + @param message The message payload + @param request_callback Call this function one or more times to send the requests: request_callback(message_id, payload) + """ + if error: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "videoplaybackcrawler: handle_crawler_reply", error, message + + self._file.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), " INFO REPLY", show_permid(permid), str(error), message, "\n"))) + self._file.flush() + + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "videoplaybackcrawler: handle_crawler_reply", show_permid_short(permid), cPickle.loads(message) + + info = cPickle.loads(message) + self._file.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), " INFO REPLY", show_permid(permid), str(error), str(info), "\n"))) + self._file.flush() + + i = 0 + for key, timestamp, piece_size, num_pieces, bitrate, nat in info: + i += 1 + # do not remove the first item. the list is ordered + # DESC so the first item is the last that is added to + # the database and we can't affored to remove it, as + # it may cause exceptions in the running playback. 
+ if i == 1: + sql = "SELECT timestamp, origin, event FROM playback_event WHERE key = '%s' ORDER BY timestamp ASC LIMIT 50" % key + else: + sql = "SELECT timestamp, origin, event FROM playback_event WHERE key = '%s' ORDER BY timestamp ASC LIMIT 50; DELETE FROM playback_event WHERE key = '%s'; DELETE FROM playback_info WHERE key = '%s';" % (key, key, key) + + # todo: optimize to not select key for each row + request_callback(CRAWLER_VIDEOPLAYBACK_EVENT_QUERY, sql, channel_data=key, callback=self._after_event_request_callback, frequency=0) + + def _after_event_request_callback(self, exc, permid): + """ + <> + Called by the Crawler with the result of the request_callback + call in the handle_crawler_reply method. + """ + if not exc: + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "videoplaybackcrawler: request send to", show_permid_short(permid) + self._file.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), " INFO REQUEST", show_permid(permid), "\n"))) + self._file.flush() + + def handle_event_crawler_reply(self, permid, selversion, channel_id, channel_data, error, message, request_callback): + """ + <> + Received a CRAWLER_VIDEOPLAYBACK_EVENT_QUERY reply. + @param permid The Crawler permid + @param selversion The overlay protocol version + @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair + @param channel_data Data associated with the request + @param error The error value. 0 indicates success. + @param message The message payload + @param request_callback Call this function one or more times to send the requests: request_callback(message_id, payload) + """ + if error: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "videoplaybackcrawler: handle_crawler_reply", error, message + + self._file.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), " EVENT REPLY", show_permid(permid), str(error), channel_data, message, "\n"))) + self._file.flush() + + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "videoplaybackcrawler: handle_crawler_reply", show_permid_short(permid), cPickle.loads(message) + + info = cPickle.loads(message) + self._file.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), " EVENT REPLY", show_permid(permid), str(error), channel_data, str(info), "\n"))) + self._file.flush() + + def handle_info_crawler_request(self, permid, selversion, channel_id, message, reply_callback): + """ + <> + Received a CRAWLER_VIDEOPLAYBACK_INFO_QUERY request. + @param permid The Crawler permid + @param selversion The overlay protocol version + @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair + @param message The message payload + @param reply_callback Call this function once to send the reply: reply_callback(payload [, error=123]) + """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "videoplaybackcrawler: handle_info_crawler_request", show_permid_short(permid), message + + # execute the sql + try: + cursor = self._info_db._db.execute_read(message) + + except Exception, e: + reply_callback(str(e), error=1) + else: + if cursor: + reply_callback(cPickle.dumps(list(cursor), 2)) + else: + reply_callback("error", error=2) + + def handle_event_crawler_request(self, permid, selversion, channel_id, message, reply_callback): + """ + <> + Received a CRAWLER_VIDEOPLAYBACK_EVENT_QUERY request. 
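The video-playback crawl is a two-step chain: the pickled rows returned for CRAWLER_VIDEOPLAYBACK_INFO_QUERY drive one CRAWLER_VIDEOPLAYBACK_EVENT_QUERY per key, and every key except the newest gets DELETE statements piggy-backed onto its SELECT. A standalone sketch of that derivation (the rows are made-up, in playback_info column order):

# (key, timestamp, piece_size, num_pieces, bitrate, nat)
example_info = [
    ("key-newest", 1234567900.0, 16384, 1000, 56000, "OPEN"),
    ("key-older", 1234560000.0, 16384, 1000, 56000, "OPEN"),
]

for i, row in enumerate(example_info):
    key = row[0]
    if i == 0:
        # newest entry: only read, never deleted, it may still be in active use
        sql = "SELECT timestamp, origin, event FROM playback_event WHERE key = '%s' ORDER BY timestamp ASC LIMIT 50" % key
    else:
        # older entries: read, then purge both tables for that key
        sql = ("SELECT timestamp, origin, event FROM playback_event WHERE key = '%s' ORDER BY timestamp ASC LIMIT 50; "
               "DELETE FROM playback_event WHERE key = '%s'; DELETE FROM playback_info WHERE key = '%s';" % (key, key, key))
    # each sql string becomes the payload of one CRAWLER_VIDEOPLAYBACK_EVENT_QUERY
    print sql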
+ @param permid The Crawler permid + @param selversion The overlay protocol version + @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair + @param message The message payload + @param reply_callback Call this function once to send the reply: reply_callback(payload [, error=123]) + """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "videoplaybackcrawler: handle_event_crawler_request", show_permid_short(permid), message + + # execute the sql + try: + cursor = self._event_db._db.execute_read(message) + + except Exception, e: + reply_callback(str(e), error=1) + else: + if cursor: + reply_callback(cPickle.dumps(list(cursor), 2)) + else: + reply_callback("error", error=2) + + diff --git a/tribler-mod/Tribler/Core/Statistics/VideoPlaybackCrawler.py.bak b/tribler-mod/Tribler/Core/Statistics/VideoPlaybackCrawler.py.bak new file mode 100644 index 0000000..3a0f69c --- /dev/null +++ b/tribler-mod/Tribler/Core/Statistics/VideoPlaybackCrawler.py.bak @@ -0,0 +1,209 @@ +""" +Crawling the VideoPlayback statistics database +""" + +import sys +import cPickle +import threading +from time import strftime + +from Tribler.Core.BitTornado.BT1.MessageID import CRAWLER_VIDEOPLAYBACK_INFO_QUERY, CRAWLER_VIDEOPLAYBACK_EVENT_QUERY +from Tribler.Core.CacheDB.SqliteVideoPlaybackStatsCacheDB import VideoPlaybackEventDBHandler, VideoPlaybackInfoDBHandler +from Tribler.Core.Utilities.utilities import show_permid, show_permid_short +from Tribler.Core.Statistics.Crawler import Crawler +from Tribler.Core.Overlay.SecureOverlay import OLPROTO_VER_EIGHTH + +DEBUG = False + +class VideoPlaybackCrawler: + __single = None # used for multi-threaded singletons pattern + lock = threading.Lock() + + @classmethod + def get_instance(cls, *args, **kargs): + # Singleton pattern with double-checking to ensure that it can only create one object + if cls.__single is None: + cls.lock.acquire() + try: + if cls.__single is None: + cls.__single = cls(*args, **kargs) + finally: + cls.lock.release() + return cls.__single + + def __init__(self): + if VideoPlaybackCrawler.__single is not None: + raise RuntimeError, "VideoPlaybackCrawler is singleton" + + crawler = Crawler.get_instance() + if crawler.am_crawler(): + self._file = open("videoplaybackcrawler.txt", "a") + self._file.write("".join(("# ", "*" * 80, "\n# ", strftime("%Y/%m/%d %H:%M:%S"), " Crawler started\n"))) + self._file.flush() + self._info_db = None + self._event_db = None + + else: + self._file = None + self._info_db = VideoPlaybackInfoDBHandler.get_instance() + self._event_db = VideoPlaybackEventDBHandler.get_instance() + + def query_info_initiator(self, permid, selversion, request_callback): + """ + <> + Established a new connection. Send a CRAWLER_VIDEOPLAYBACK_INFO_QUERY request. + @param permid The Tribler peer permid + @param selversion The oberlay protocol version + @param request_callback Call this function one or more times to send the requests: request_callback(message_id, payload) + """ + if selversion >= OLPROTO_VER_EIGHTH: + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "videoplaybackcrawler: query_info_initiator", show_permid_short(permid) + # boudewijn: order the result DESC! From the resulting + # list we will not remove the first entries from the + # database because this (being the last item added) may + # still be actively used. 
+ request_callback(CRAWLER_VIDEOPLAYBACK_INFO_QUERY, "SELECT key, timestamp, piece_size, num_pieces, bitrate, nat FROM playback_info ORDER BY timestamp DESC LIMIT 50", callback=self._after_info_request_callback) + else: + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "videoplaybackcrawler: query_info_initiator", show_permid_short(permid), "unsupported overlay version" + + def _after_info_request_callback(self, exc, permid): + """ + <> + Called by the Crawler with the result of the request_callback + call in the query_initiator method. + """ + if not exc: + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "videoplaybackcrawler: request send to", show_permid_short(permid) + self._file.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), "INFO REQUEST", show_permid(permid), "\n"))) + self._file.flush() + + def handle_info_crawler_reply(self, permid, selversion, channel_id, error, message, request_callback): + """ + <> + Received a CRAWLER_VIDEOPLAYBACK_INFO_QUERY reply. + @param permid The Crawler permid + @param selversion The overlay protocol version + @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair + @param error The error value. 0 indicates success. + @param message The message payload + @param request_callback Call this function one or more times to send the requests: request_callback(message_id, payload) + """ + if error: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "videoplaybackcrawler: handle_crawler_reply", error, message + + self._file.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), " INFO REPLY", show_permid(permid), str(error), message, "\n"))) + self._file.flush() + + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "videoplaybackcrawler: handle_crawler_reply", show_permid_short(permid), cPickle.loads(message) + + info = cPickle.loads(message) + self._file.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), " INFO REPLY", show_permid(permid), str(error), str(info), "\n"))) + self._file.flush() + + i = 0 + for key, timestamp, piece_size, num_pieces, bitrate, nat in info: + i += 1 + # do not remove the first item. the list is ordered + # DESC so the first item is the last that is added to + # the database and we can't affored to remove it, as + # it may cause exceptions in the running playback. + if i == 1: + sql = "SELECT timestamp, origin, event FROM playback_event WHERE key = '%s' ORDER BY timestamp ASC LIMIT 50" % key + else: + sql = "SELECT timestamp, origin, event FROM playback_event WHERE key = '%s' ORDER BY timestamp ASC LIMIT 50; DELETE FROM playback_event WHERE key = '%s'; DELETE FROM playback_info WHERE key = '%s';" % (key, key, key) + + # todo: optimize to not select key for each row + request_callback(CRAWLER_VIDEOPLAYBACK_EVENT_QUERY, sql, channel_data=key, callback=self._after_event_request_callback, frequency=0) + + def _after_event_request_callback(self, exc, permid): + """ + <> + Called by the Crawler with the result of the request_callback + call in the handle_crawler_reply method. 
+ """ + if not exc: + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "videoplaybackcrawler: request send to", show_permid_short(permid) + self._file.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), " INFO REQUEST", show_permid(permid), "\n"))) + self._file.flush() + + def handle_event_crawler_reply(self, permid, selversion, channel_id, channel_data, error, message, request_callback): + """ + <> + Received a CRAWLER_VIDEOPLAYBACK_EVENT_QUERY reply. + @param permid The Crawler permid + @param selversion The overlay protocol version + @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair + @param channel_data Data associated with the request + @param error The error value. 0 indicates success. + @param message The message payload + @param request_callback Call this function one or more times to send the requests: request_callback(message_id, payload) + """ + if error: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "videoplaybackcrawler: handle_crawler_reply", error, message + + self._file.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), " EVENT REPLY", show_permid(permid), str(error), channel_data, message, "\n"))) + self._file.flush() + + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "videoplaybackcrawler: handle_crawler_reply", show_permid_short(permid), cPickle.loads(message) + + info = cPickle.loads(message) + self._file.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), " EVENT REPLY", show_permid(permid), str(error), channel_data, str(info), "\n"))) + self._file.flush() + + def handle_info_crawler_request(self, permid, selversion, channel_id, message, reply_callback): + """ + <> + Received a CRAWLER_VIDEOPLAYBACK_INFO_QUERY request. + @param permid The Crawler permid + @param selversion The overlay protocol version + @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair + @param message The message payload + @param reply_callback Call this function once to send the reply: reply_callback(payload [, error=123]) + """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "videoplaybackcrawler: handle_info_crawler_request", show_permid_short(permid), message + + # execute the sql + try: + cursor = self._info_db._db.execute_read(message) + + except Exception, e: + reply_callback(str(e), error=1) + else: + if cursor: + reply_callback(cPickle.dumps(list(cursor), 2)) + else: + reply_callback("error", error=2) + + def handle_event_crawler_request(self, permid, selversion, channel_id, message, reply_callback): + """ + <> + Received a CRAWLER_VIDEOPLAYBACK_EVENT_QUERY request. 
+ @param permid The Crawler permid + @param selversion The overlay protocol version + @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair + @param message The message payload + @param reply_callback Call this function once to send the reply: reply_callback(payload [, error=123]) + """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "videoplaybackcrawler: handle_event_crawler_request", show_permid_short(permid), message + + # execute the sql + try: + cursor = self._event_db._db.execute_read(message) + + except Exception, e: + reply_callback(str(e), error=1) + else: + if cursor: + reply_callback(cPickle.dumps(list(cursor), 2)) + else: + reply_callback("error", error=2) + + diff --git a/tribler-mod/Tribler/Core/Statistics/__init__.py b/tribler-mod/Tribler/Core/Statistics/__init__.py new file mode 100644 index 0000000..b7ef832 --- /dev/null +++ b/tribler-mod/Tribler/Core/Statistics/__init__.py @@ -0,0 +1,3 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information diff --git a/tribler-mod/Tribler/Core/Statistics/__init__.py.bak b/tribler-mod/Tribler/Core/Statistics/__init__.py.bak new file mode 100644 index 0000000..395f8fb --- /dev/null +++ b/tribler-mod/Tribler/Core/Statistics/__init__.py.bak @@ -0,0 +1,2 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information diff --git a/tribler-mod/Tribler/Core/Statistics/crawler.txt b/tribler-mod/Tribler/Core/Statistics/crawler.txt new file mode 100644 index 0000000..fec1ba1 --- /dev/null +++ b/tribler-mod/Tribler/Core/Statistics/crawler.txt @@ -0,0 +1,31 @@ +# +# Anonymous performance data gathering +# +# For improvements to our algorithms the scientists behind this +# software need to have some insight how the P2P network operates. +# The PermIDs listed in this file have some access to internal P2P +# statistics when conducting a network crawl. +# Collected data will never be shared with third parties or used +# for non-scientific purposes. +# +# Please delete the PermIDs to disable this feature. +# +# permid +# lucia's old crawler +MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAKa2aWZv65UoFv0OR8BbVSnlmTPrYKcwwpGHEhK3AO2PpxiGlv/Y2mTP2kg+VXLaBBmfpdYWPA4eSdpq +MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAe6fNHWhKsReFj8/RIN6rBHWRzT4VkLddvhJZ5jmAQf5c7ZmqkdFQ/F21DKbC8V1Otmf6YO00ufe5D/o + +# General crawler PermIDs: DO NOT CHANGE OR ADD WITHOUT CONSULTION! 
+MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOydlMAfRpmhT+jKr0gI8EanNLyt+Y/FEFcjTAoFAKCmNGMGrBl22ZICZBi+oPo0p6FpWECrf2oGg2WM +MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAK1cQH+R2B6oOPNgCcgiAruKlWAYZGzryZm6P0B3AMzocJszITiPPIsGujeg0saYZ6+VmzuncOCvVOWY +MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAPKqwAWYmpi3yjhnQTV1kOHU3y8gbNVyFGbAJaQMAAQjDYrSOHJTeKIAaYZFieGU6K8FnmJKlC4qLHxh +MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAMOfw9qZb/9Eqqy+75FWQGOi8vAkt7P32S+EEjVbAN67PY2fTjHNdFlZlhjqotTzJdYc1299OWCV3Nf+ +MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAL2CbiMwdGFLFQK93Je7H1a+Gi2QWV8B9n+Fwdq6AdAH04s1unhfTEP6cw1UlAdg4rZEY27GsINGsmD+ +MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEABZzmfFlN7PryBasdECMITSm8XJEQ4WU2Te99YeqARS2i2aLDxPYhFTOfBuYN4MrFLwpDxmRm7Gvdp2m +MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbR4tcEbSSikh7oULmXjpl5tYKdKvR3Qn1UH913lAW2GK0k2bF8hO7RIdu971gZpgNUew33kiWE/IREP +MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXvNe65EBsnBAy/s4dp1kJDa9KXnfTHAOO8OADt+Abm83AAXdeeTwyBboyioaMMlIuUyS/9GwXay2ZLA +MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHXzQ+9sH0II55c3TfpFz+LZwqNpHCOHYq0iXkmFALZKYSNA3/WvyncKCh9mbpWUtbusf06/HYhHHxUg +MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAFuyErJwV2MBqhjLbjXA0D5PkvY1O9thUbx4QB3CAQOxYlZUtgUP09mc8K+uEuoHzOKdN2h4KoB/G8Ae + + + diff --git a/tribler-mod/Tribler/Core/Statistics/tribler_friendship_stats_sdb.sql b/tribler-mod/Tribler/Core/Statistics/tribler_friendship_stats_sdb.sql new file mode 100644 index 0000000..f215540 --- /dev/null +++ b/tribler-mod/Tribler/Core/Statistics/tribler_friendship_stats_sdb.sql @@ -0,0 +1,38 @@ +-- Tribler Friendship Statistics Database + +BEGIN TRANSACTION create_table; + +---------------------------------------- + +CREATE TABLE FriendshipStatistics ( + source_permid text NOT NULL, + target_permid text NOT NULL, + isForwarder integer DEFAULT 0, + request_time numeric, + response_time numeric, + no_of_attempts integer DEFAULT 0, + no_of_helpers integer DEFAULT 0, + modified_on numeric, + crawled_permid text NOT NULL DEFAULT client +); + +---------------------------------------- + +CREATE TABLE MyInfo ( + entry PRIMARY KEY, + value text +); + +---------------------------------------- + +COMMIT TRANSACTION create_table; + +---------------------------------------- + +BEGIN TRANSACTION init_values; + +-- Version 1: Initial version, published in Tribler 4.5.0 +-- Version 2: Added crawled_permid to FriendshipStatistics table. 
+INSERT INTO MyInfo VALUES ('version', 2); + +COMMIT TRANSACTION init_values; \ No newline at end of file diff --git a/tribler-mod/Tribler/Core/Statistics/tribler_seedingstats_sdb.sql b/tribler-mod/Tribler/Core/Statistics/tribler_seedingstats_sdb.sql new file mode 100644 index 0000000..b2bad53 --- /dev/null +++ b/tribler-mod/Tribler/Core/Statistics/tribler_seedingstats_sdb.sql @@ -0,0 +1,41 @@ +-- Tribler Seeding Statistics Database + +BEGIN TRANSACTION create_table; + +---------------------------------------- + +CREATE TABLE SeedingStats ( + timestamp real, + permID text, + info_hash text, + seeding_time real, + reputation real, + crawled integer +); + +---------------------------------------- + +CREATE TABLE SeedingStatsSettings ( + version integer PRIMARY KEY, + crawling_interval integer, + crawling_enabled integer +); + +---------------------------------------- + +CREATE TABLE MyInfo ( + entry PRIMARY KEY, + value text +); + +---------------------------------------- +COMMIT TRANSACTION create_table; + +---------------------------------------- + +BEGIN TRANSACTION init_values; + +INSERT INTO MyInfo VALUES ('version', 1); +INSERT INTO SeedingStatsSettings VALUES (1, 1800, 1); + +COMMIT TRANSACTION init_values; diff --git a/tribler-mod/Tribler/Core/Statistics/tribler_videoplayback_stats.sql b/tribler-mod/Tribler/Core/Statistics/tribler_videoplayback_stats.sql new file mode 100644 index 0000000..2cdb14c --- /dev/null +++ b/tribler-mod/Tribler/Core/Statistics/tribler_videoplayback_stats.sql @@ -0,0 +1,49 @@ +-- Tribler Video Playback Statistics Database + +BEGIN TRANSACTION create_table; + +---------------------------------------- + +CREATE TABLE playback_info ( + key text PRIMARY KEY NOT NULL, + timestamp real NOT NULL, + piece_size integer, + num_pieces integer, + bitrate integer, + nat text +); + +CREATE INDEX playback_info_idx + ON playback_info (timestamp); + +---------------------------------------- + +CREATE TABLE playback_event ( + key text NOT NULL, + timestamp real NOT NULL, + origin text NOT NULL, + event text NOT NULL +); + +CREATE INDEX playback_event_idx + ON playback_event (key, timestamp); + +---------------------------------------- + +CREATE TABLE MyInfo ( + entry PRIMARY KEY, + value text +); + +---------------------------------------- + +COMMIT TRANSACTION create_table; + +---------------------------------------- + +BEGIN TRANSACTION init_values; + +-- Version 1: Initial version, published in Tribler 5.0.0 +INSERT INTO MyInfo VALUES ('version', 1); + +COMMIT TRANSACTION init_values; diff --git a/tribler-mod/Tribler/Core/TorrentDef.py b/tribler-mod/Tribler/Core/TorrentDef.py new file mode 100644 index 0000000..6a1cfb7 --- /dev/null +++ b/tribler-mod/Tribler/Core/TorrentDef.py @@ -0,0 +1,738 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information +""" Definition of a torrent, that is, a collection of files or a live stream. 
""" +import sys +import os +#import time +import copy +import sha +from types import StringType,ListType,IntType,LongType + +from Tribler.Core.BitTornado.bencode import bencode,bdecode + +from Tribler.Core.simpledefs import * +from Tribler.Core.defaults import * +from Tribler.Core.exceptions import * +from Tribler.Core.Base import * + +import Tribler.Core.APIImplementation.maketorrent as maketorrent +from Tribler.Core.APIImplementation.miscutils import * + +from Tribler.Core.Utilities.utilities import validTorrentFile,isValidURL +from Tribler.Core.Utilities.unicode import metainfoname2unicode +from Tribler.Core.Utilities.timeouturlopen import urlOpenTimeout +from Tribler.Core.osutils import * + +class TorrentDef(Serializable,Copyable): + """ + Definition of a torrent, that is, all params required for a torrent file, + plus optional params such as thumbnail, playtime, etc. + + Note: to add fields to the torrent definition which are not supported + by its API, first create the torrent def, finalize it, then add the + fields to the metainfo, and create a new torrent def from that + upgraded metainfo using TorrentDef.load_from_dict() + + cf. libtorrent torrent_info + """ + def __init__(self,input=None,metainfo=None,infohash=None): + """ Normal constructor for TorrentDef (The input, metainfo and infohash + parameters are used internally to make this a copy constructor) """ + + self.readonly = False + if input is not None: # copy constructor + self.input = input + # self.metainfo_valid set in copy() + self.metainfo = metainfo + self.infohash = infohash + return + + self.input = {} # fields added by user, waiting to be turned into torrent file + # Define the built-in default here + self.input.update(tdefdefaults) + try: + self.input['encoding'] = sys.getfilesystemencoding() + except: + self.input['encoding'] = sys.getdefaultencoding() + + self.input['files'] = [] + + + self.metainfo_valid = False + self.metainfo = None # copy of loaded or last saved torrent dict + self.infohash = None # only valid if metainfo_valid + + + # We cannot set a built-in default for a tracker here, as it depends on + # a Session. Alternatively, the tracker will be set to the internal + # tracker by default when Session::start_download() is called, if the + # 'announce' field is the empty string. + + # + # Class methods for creating a TorrentDef from a .torrent file + # + def load(filename): + """ + Load a BT .torrent or Tribler .tribe file from disk and convert + it into a finalized TorrentDef. + + @param filename An absolute Unicode filename + @return TorrentDef + """ + # Class method, no locking required + f = open(filename,"rb") + return TorrentDef._read(f) + load = staticmethod(load) + + def _read(stream): + """ Internal class method that reads a torrent file from stream, + checks it for correctness and sets self.input and self.metainfo + accordingly. """ + bdata = stream.read() + stream.close() + data = bdecode(bdata) + return TorrentDef._create(data) + _read = staticmethod(_read) + + def _create(metainfo): # TODO: replace with constructor + # raises ValueErrors if not good + validTorrentFile(metainfo) + + t = TorrentDef() + t.metainfo = metainfo + t.metainfo_valid = True + t.infohash = sha.sha(bencode(metainfo['info'])).digest() + + # copy stuff into self.input + maketorrent.copy_metainfo_to_input(t.metainfo,t.input) + + return t + _create = staticmethod(_create) + + def load_from_url(url): + """ + Load a BT .torrent or Tribler .tribe file from the URL and convert + it into a TorrentDef. 
+ + @param url URL + @return TorrentDef. + """ + # Class method, no locking required + f = urlOpenTimeout(url) + return TorrentDef._read(f) + load_from_url = staticmethod(load_from_url) + + + def load_from_dict(metainfo): + """ + Load a BT .torrent or Tribler .tribe file from the metainfo dictionary + it into a TorrentDef + + @param metainfo A dictionary following the BT torrent file spec. + @return TorrentDef. + """ + # Class method, no locking required + return TorrentDef._create(metainfo) + load_from_dict = staticmethod(load_from_dict) + + + # + # Convenience instance methods for publishing new content + # + def add_content(self,inpath,outpath=None,playtime=None): + """ + Add a file or directory to this torrent definition. When adding a + directory, all files in that directory will be added to the torrent. + + One can add multiple files and directories to a torrent definition. + In that case the "outpath" parameter must be used to indicate how + the files/dirs should be named in the torrent. The outpaths used must + start with a common prefix which will become the "name" field of the + torrent. + + To seed the torrent via the core (as opposed to e.g. HTTP) you will + need to start the download with the dest_dir set to the top-level + directory containing the files and directories to seed. For example, + a file "c:\Videos\file.avi" is seeded as follows: +
+            tdef = TorrentDef()
+            tdef.add_content("c:\Videos\file.avi",playtime="1:59:20")
+            tdef.set_tracker(s.get_internal_tracker_url())
+            tdef.finalize()
+            dscfg = DownloadStartupConfig()
+            dscfg.set_dest_dir("c:\Videos")
+            s.start_download(tdef,dscfg)
+        
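For the multi-file case described above, each outpath carries the in-torrent name and all outpaths must share the prefix that becomes the torrent name. A sketch under the same conventions as the example above (paths, playtimes and the Session instance s are illustrative; imports are omitted as in that example):

# All outpaths share the prefix "MyCollection", which becomes the "name" field.
tdef = TorrentDef()
tdef.add_content("c:\\Videos\\file1.avi", outpath="MyCollection/file1.avi", playtime="0:30:00")
tdef.add_content("c:\\Videos\\file2.avi", outpath="MyCollection/file2.avi")
tdef.set_tracker(s.get_internal_tracker_url())
tdef.finalize()
dscfg = DownloadStartupConfig()
dscfg.set_dest_dir("c:\\Videos")     # directory that contains the seeded files
s.start_download(tdef, dscfg)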
+ @param inpath Absolute name of file or directory on local filesystem, + as Unicode string. + @param outpath (optional) Name of the content to use in the torrent def + as Unicode string. + @param playtime (optional) String representing the duration of the + multimedia file when played, in [hh:]mm:ss format. + """ + if self.readonly: + raise OperationNotPossibleAtRuntimeException() + + s = os.stat(inpath) + d = {'inpath':inpath,'outpath':outpath,'playtime':playtime,'length':s.st_size} + self.input['files'].append(d) + + self.metainfo_valid = False + + + def remove_content(self,inpath): + """ Remove a file or directory from this torrent definition + + @param inpath Absolute name of file or directory on local filesystem, + as Unicode string. + """ + if self.readonly: + raise OperationNotPossibleAtRuntimeException() + + for d in self.input['files']: + if d['inpath'] == inpath: + self.input['files'].remove(d) + break + + def create_live(self,name,bitrate,playtime="1:00:00",authconfig=None): + """ Create a live streaming multimedia torrent with a specific bitrate. + + The authconfig is a subclass LiveSourceAuthConfig with the key + information required to allow authentication of packets from the source, + or None. In the latter case there is no source authentication. The other + current legal value is an instance of ECDSALiveSourceAuthConfig. When + using this method, a sequence number, real-time timestamp and an ECDSA + signature of 64 bytes is put in each piece. As a result, the content in + each packet is get_piece_length()-81, so that this into account when + selecting the bitrate. + + The info from the authconfig is stored in the 'info' part of the + torrent file when finalized, so changing the authentication info changes + the identity (infohash) of the torrent. + + @param name The name of the stream. + @param bitrate The desired bitrate in bytes per second. + @param playtime The virtual playtime of the stream as a string in + [hh:]mm:ss format. + @param authconfig Parameters for the authentication of the source + """ + self.input['bps'] = bitrate + self.input['playtime'] = playtime # size of virtual content + + # For source auth + authparams = {} + if authconfig is None: + authparams['authmethod'] = LIVE_AUTHMETHOD_NONE + else: + authparams['authmethod'] = authconfig.get_method() + authparams['pubkey'] = str(authconfig.get_pubkey()) + + self.input['live'] = authparams + + d = {'inpath':name,'outpath':None,'playtime':None,'length':None} + self.input['files'].append(d) + + # + # Torrent attributes + # + def set_encoding(self,enc): + """ Set the character encoding for e.g. the 'name' field """ + self.input['encoding'] = enc + self.metainfo_valid = False + + def get_encoding(self): + return self.input['encoding'] + + def set_thumbnail(self,thumbfilename): + """ + Reads image from file and turns it into a torrent thumbnail + The file should contain an image in JPEG format, preferably 171x96. + + @param thumbfilename Absolute name of image file, as Unicode string. + """ + if self.readonly: + raise OperationNotPossibleAtRuntimeException() + + f = open(thumbfilename,"rb") + data = f.read() + f.close() + self.input['thumb'] = data + self.metainfo_valid = False + + + def get_thumbnail(self): + """ Returns (MIME type,thumbnail data) if present or (None,None) + @return A tuple. 
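The get_piece_length()-81 remark in the create_live() docstring above can be made concrete: 81 bytes of each piece go to the sequence number, timestamp and 64-byte ECDSA signature, and the remainder carries content. A small worked example with illustrative numbers:

piece_length = 32768           # bytes per piece (illustrative)
overhead = 81                  # sequence number + timestamp + 64-byte ECDSA signature
payload_per_piece = piece_length - overhead    # 32687 content bytes per piece

bitrate = 16384                # desired stream rate in bytes/second (128 kbit/s)
pieces_per_second = bitrate / float(payload_per_piece)
print "%.3f pieces per second carry a %d byte/s stream" % (pieces_per_second, bitrate)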
""" + if 'thumb' not in self.input or self.input['thumb'] is None: + return (None,None) + else: + thumb = self.input['thumb'] # buffer/string immutable + return ('image/jpeg',thumb) + + + def set_tracker(self,url): + """ Sets the tracker (i.e. the torrent file's 'announce' field). + @param url The announce URL. + """ + if self.readonly: + raise OperationNotPossibleAtRuntimeException() + + if not isValidURL(url): + raise ValueError("Invalid URL") + + if url.endswith('/'): + # Some tracker code can't deal with / at end + url = url[:-1] + self.input['announce'] = url + self.metainfo_valid = False + + def get_tracker(self): + """ Returns the announce URL. + @return URL """ + return self.input['announce'] + + def set_tracker_hierarchy(self,hier): + """ Set hierarchy of trackers (announce-list) following the spec + at http://www.bittornado.com/docs/multitracker-spec.txt + @param hier A hierarchy of trackers as a list of lists. + """ + if self.readonly: + raise OperationNotPossibleAtRuntimeException() + + # TODO: check input, in particular remove / at end + newhier = [] + if type(hier) != ListType: + raise ValueError("hierarchy is not a list") + for tier in hier: + if type(tier) != ListType: + raise ValueError("tier is not a list") + newtier = [] + for url in tier: + if not isValidURL(url): + raise ValueError("Invalid URL: "+`url`) + + if url.endswith('/'): + # Some tracker code can't deal with / at end + url = url[:-1] + newtier.append(url) + newhier.append(newtier) + + self.input['announce-list'] = newhier + self.metainfo_valid = False + + def get_tracker_hierarchy(self): + """ Returns the hierarchy of trackers. + @return A list of lists. """ + return self.input['announce-list'] + + def set_dht_nodes(self,nodes): + """ Sets the DHT nodes required by the mainline DHT support, + See http://www.bittorrent.org/beps/bep_0005.html + @param nodes A list of [hostname,port] lists. + """ + if self.readonly: + raise OperationNotPossibleAtRuntimeException() + + # Check input + if type(nodes) != ListType: + raise ValueError("nodes not a list") + else: + for node in nodes: + if type(node) != ListType and len(node) != 2: + raise ValueError("node in nodes not a 2-item list: "+`node`) + if type(node[0]) != StringType: + raise ValueError("host in node is not string:"+`node`) + if type(node[1]) != IntType: + raise ValueError("port in node is not int:"+`node`) + + self.input['nodes'] = nodes + self.metainfo_valid = False + + def get_dht_nodes(self): + """ Returns the DHT nodes set. + @return A list of [hostname,port] lists. """ + return self.input['nodes'] + + def set_comment(self,value): + """ Set comment field. + @param value A Unicode string. + """ + if self.readonly: + raise OperationNotPossibleAtRuntimeException() + + self.input['comment'] = value + self.metainfo_valid = False + + def get_comment(self): + """ Returns the comment field of the def. + @return A Unicode string. """ + return self.input['comment'] + + def set_created_by(self,value): + """ Set 'created by' field. + @param value A Unicode string. + """ + if self.readonly: + raise OperationNotPossibleAtRuntimeException() + + self.input['created by'] = value + self.metainfo_valid = False + + def get_created_by(self): + """ Returns the 'created by' field. + @return Unicode string. """ + return self.input['created by'] + + def set_httpseeds(self,value): + """ Set list of HTTP seeds following the spec at + http://www.bittornado.com/docs/webseed-spec.txt + @param value A list of URLs. 
+ """ + if self.readonly: + raise OperationNotPossibleAtRuntimeException() + + for url in value: + if not isValidURL(url): + raise ValueError("Invalid URL: "+`url`) + + self.input['httpseeds'] = value + self.metainfo_valid = False + + def get_httpseeds(self): + """ Returns the list of HTTP seeds. + @return A list of URLs. """ + return self.input['httpseeds'] + + def set_piece_length(self,value): + """ Set the size of the pieces in which the content is traded. + The piece size must be a multiple of the chunk size, the unit in which + it is transmitted, which is 16K by default (see + DownloadConfig.set_download_slice_size()). The default is automatic + (value 0). + @param value A number of bytes as per the text. + """ + if self.readonly: + raise OperationNotPossibleAtRuntimeException() + + if not (type(value) == IntType or type(value) == LongType): + raise ValueError("Piece length not an int/long") + + self.input['piece length'] = value + self.metainfo_valid = False + + def get_piece_length(self): + """ Returns the piece size. + @return A number of bytes. """ + return self.input['piece length'] + + def set_add_md5hash(self,value): + """ Whether to add an end-to-end MD5 checksum to the def. + @param value Boolean. + """ + if self.readonly: + raise OperationNotPossibleAtRuntimeException() + + self.input['makehash_md5'] = value + self.metainfo_valid = False + + def get_add_md5hash(self): + """ Returns whether to add an MD5 checksum. """ + return self.input['makehash_md5'] + + def set_add_crc32(self,value): + """ Whether to add an end-to-end CRC32 checksum to the def. + @param value Boolean. + """ + if self.readonly: + raise OperationNotPossibleAtRuntimeException() + + self.input['makehash_crc32'] = value + self.metainfo_valid = False + + def get_add_crc32(self): + """ Returns whether to add an end-to-end CRC32 checksum to the def. + @return Boolean. """ + return self.input['makehash_crc32'] + + def set_add_sha1hash(self,value): + """ Whether to add end-to-end SHA1 checksum to the def. + @param value Boolean. + """ + if self.readonly: + raise OperationNotPossibleAtRuntimeException() + + self.input['makehash_sha1'] = value + self.metainfo_valid = False + + def get_add_sha1hash(self): + """ Returns whether to add an end-to-end SHA1 checksum to the def. + @return Boolean.""" + return self.input['makehash_sha1'] + + def set_create_merkle_torrent(self,value): + """ Create a Merkle torrent instead of a regular BT torrent. A Merkle + torrent uses a hash tree for checking the integrity of the content + received. As such it creates much smaller torrent files than the + regular method. Tribler-specific feature.""" + if self.readonly: + raise OperationNotPossibleAtRuntimeException() + + self.input['createmerkletorrent'] = value + self.metainfo_valid = False + + def get_create_merkle_torrent(self): + """ Returns whether to create a Merkle torrent. + @return Boolean. """ + return self.input['createmerkletorrent'] + + def set_signature_keypair_filename(self,value): + """ Set absolute filename of keypair to be used for signature. + When set, a signature will be added. + @param value A filename containing an Elliptic Curve keypair. + """ + if self.readonly: + raise OperationNotPossibleAtRuntimeException() + + self.input['torrentsigkeypairfilename'] = value + self.metainfo_valid = False + + def get_signature_keypair_filename(self): + """ Returns the filename containing the signing keypair or None. + @return Unicode String or None. 
""" + return self.input['torrentsigkeypairfilename'] + + def get_live(self): + """ Returns whether this definition is for a live torrent. + @return Boolean. """ + return 'live' in self.input and self.input['live'] + + def get_live_authmethod(self): + """ Returns the method for authenticating the source. +
+        LIVE_AUTHMETHOD_NONE or LIVE_AUTHMETHOD_ECDSA
+        
+ @return String + """ + return 'live' in self.input and self.input['live']['authmethod'] + + def get_live_pubkey(self): + """ Returns the public key used for authenticating packets from + the source. + @return A public key in DER. + """ + if 'live' in self.input and 'pubkey' in self.input['live']: + return self.input['live']['pubkey'] + else: + return None + + + def finalize(self,userabortflag=None,userprogresscallback=None): + """ Create BT torrent file by reading the files added with + add_content() and calculate the torrent file's infohash. + + Creating the torrent file can take a long time and will be carried out + by the calling thread. The process can be made interruptable by passing + a threading.Event() object via the userabortflag and setting it when + the process should be aborted. The also optional userprogresscallback + will be called by the calling thread periodically, with a progress + percentage as argument. + + The userprogresscallback function will be called by the calling thread. + + @param userabortflag threading.Event() object + @param userprogresscallback Function accepting a fraction as first + argument. + """ + if self.readonly: + raise OperationNotPossibleAtRuntimeException() + + if self.metainfo_valid: + return + + if 'live' in self.input: + # Make sure the duration is an integral number of pieces, for + # security (live source auth). + secs = parse_playtime_to_secs(self.input['playtime']) + pl = float(self.get_piece_length()) + length = float(self.input['bps']*secs) + + diff = length % pl + add = (pl - diff) % pl + newlen = int(length + add) + + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","CHECK INFO LENGTH",secs,newlen + + d = self.input['files'][0] + d['length'] = newlen + + + # Note: reading of all files and calc of hashes is done by calling + # thread. + (infohash,metainfo) = maketorrent.make_torrent_file(self.input,userabortflag=userabortflag,userprogresscallback=userprogresscallback) + if infohash is not None: + self.infohash = infohash + self.metainfo = metainfo + self.input['name'] = metainfo['info']['name'] + # May have been 0, meaning auto. + self.input['piece length'] = metainfo['info']['piece length'] + self.metainfo_valid = True + + def is_finalized(self): + """ Returns whether the TorrentDef is finalized or not. + @return Boolean. """ + return self.metainfo_valid + + # + # Operations on finalized TorrentDefs + # + def get_infohash(self): + """ Returns the infohash of the torrent. + @return A string of length 20. """ + if self.metainfo_valid: + return self.infohash + else: + raise TorrentDefNotFinalizedException() + + def get_metainfo(self): + """ Returns the torrent definition as a dictionary that follows the BT + spec for torrent files. + @return dict + """ + if self.metainfo_valid: + return self.metainfo + else: + raise TorrentDefNotFinalizedException() + + def get_name(self): + """ Returns the info['name'] field as raw string of bytes. + @return String """ + if self.metainfo_valid: + return self.input['name'] # string immutable + else: + raise TorrentDefNotFinalizedException() + + def set_name(self,name): + """ Set the name of this torrent + @param name name of torrent as String + """ + if self.readonly: + raise OperationNotPossibleAtRuntimeException() + + self.input['name'] = name + self.metainfo_valid = False + + + def get_name_as_unicode(self): + """ Returns the info['name'] field as Unicode string. + @return Unicode string. 
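The length adjustment that finalize() performs for live torrents simply rounds the virtual content size up to a whole number of pieces, so that per-piece source authentication lines up. A worked example with made-up numbers:

bps = 16384                # self.input['bps']: stream rate in bytes/second
secs = 3600                # parse_playtime_to_secs("1:00:00")
pl = 32768.0               # piece length in bytes

length = float(bps * secs) # 58982400.0 bytes of virtual content
diff = length % pl         # 0.0 here: already a whole number of pieces
add = (pl - diff) % pl     # 0.0
newlen = int(length + add) # 58982400

# With bps = 10000 instead: length = 36000000.0, diff = 20736.0,
# add = 12032.0, newlen = 36012032 = 1099 * 32768, i.e. rounded up to the
# next piece boundary.
print newlen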
""" + if self.metainfo_valid: + (namekey,uniname) = metainfoname2unicode(self.metainfo) + return uniname + else: + raise TorrentDefNotFinalizedException() + + def verify_torrent_signature(self): + """ Verify the signature on the finalized torrent definition. Returns + whether the signature was valid. + @return Boolean. + """ + if self.metainfo_valid: + return Tribler.Core.Overlay.permid.verify_torrent_signature(self.metainfo) + else: + raise TorrentDefNotFinalizedException() + + + def save(self,filename): + """ + Finalizes the torrent def and writes a torrent file i.e., bencoded dict + following BT spec) to the specified filename. Note this make take a + long time when the torrent def is not yet finalized. + + @param filename An absolute Unicode path name. + """ + if not self.readonly: + self.finalize() + + bdata = bencode(self.metainfo) + f = open(filename,"wb") + f.write(bdata) + f.close() + + + def get_bitrate(self,file=None): + """ Returns the bitrate of the specified file. If no file is specified, + we assume this is a single-file torrent. + + @param file (Optional) the file in the torrent to retrieve the bitrate of. + @return The bitrate in bytes per second or None. + """ + if not self.metainfo_valid: + raise NotYetImplementedException() # must save first + + return maketorrent.get_bitrate_from_metainfo(file,self.metainfo) + + def get_files(self,exts=None): + """ The list of files in the finalized torrent def. + @param exts (Optional) list of filename extensions (without leading .) + to search for. + @return A list of filenames. + """ + if not self.metainfo_valid: + raise NotYetImplementedException() # must save first + + return maketorrent.get_files(self.metainfo,exts) + + def get_length(self,selectedfiles=None): + """ Returns the total size of the content in the torrent. If the + optional selectedfiles argument is specified, the method returns + the total size of only those files. + @return A length (long) + """ + if not self.metainfo_valid: + raise NotYetImplementedException() # must save first + + (length,filepieceranges) = maketorrent.get_length_filepieceranges_from_metainfo(self.metainfo,selectedfiles) + return length + + def is_multifile_torrent(self): + """ Returns whether this TorrentDef is a multi-file torrent. + @return Boolean + """ + if not self.metainfo_valid: + raise NotYetImplementedException() # must save first + + return 'files' in self.metainfo['info'] + + + # + # Internal methods + # + def get_index_of_file_in_files(self,file): + if not self.metainfo_valid: + raise NotYetImplementedException() # must save first + + info = self.metainfo['info'] + + if file is not None and 'files' in info: + for i in range(len(info['files'])): + x = info['files'][i] + + intorrentpath = maketorrent.pathlist2filename(x['path']) + if intorrentpath == file: + return i + return ValueError("File not found in torrent") + else: + raise ValueError("File not found in single-file torrent") + + # + # Copyable interface + # + def copy(self): + input = copy.copy(self.input) + metainfo = copy.copy(self.metainfo) + infohash = self.infohash + t = TorrentDef(input,metainfo,infohash) + t.metainfo_valid = self.metainfo_valid + return t diff --git a/tribler-mod/Tribler/Core/TorrentDef.py.bak b/tribler-mod/Tribler/Core/TorrentDef.py.bak new file mode 100644 index 0000000..0c22fa2 --- /dev/null +++ b/tribler-mod/Tribler/Core/TorrentDef.py.bak @@ -0,0 +1,737 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information +""" Definition of a torrent, that is, a collection of files or a live stream. 
""" +import sys +import os +#import time +import copy +import sha +from types import StringType,ListType,IntType,LongType + +from Tribler.Core.BitTornado.bencode import bencode,bdecode + +from Tribler.Core.simpledefs import * +from Tribler.Core.defaults import * +from Tribler.Core.exceptions import * +from Tribler.Core.Base import * + +import Tribler.Core.APIImplementation.maketorrent as maketorrent +from Tribler.Core.APIImplementation.miscutils import * + +from Tribler.Core.Utilities.utilities import validTorrentFile,isValidURL +from Tribler.Core.Utilities.unicode import metainfoname2unicode +from Tribler.Core.Utilities.timeouturlopen import urlOpenTimeout +from Tribler.Core.osutils import * + +class TorrentDef(Serializable,Copyable): + """ + Definition of a torrent, that is, all params required for a torrent file, + plus optional params such as thumbnail, playtime, etc. + + Note: to add fields to the torrent definition which are not supported + by its API, first create the torrent def, finalize it, then add the + fields to the metainfo, and create a new torrent def from that + upgraded metainfo using TorrentDef.load_from_dict() + + cf. libtorrent torrent_info + """ + def __init__(self,input=None,metainfo=None,infohash=None): + """ Normal constructor for TorrentDef (The input, metainfo and infohash + parameters are used internally to make this a copy constructor) """ + + self.readonly = False + if input is not None: # copy constructor + self.input = input + # self.metainfo_valid set in copy() + self.metainfo = metainfo + self.infohash = infohash + return + + self.input = {} # fields added by user, waiting to be turned into torrent file + # Define the built-in default here + self.input.update(tdefdefaults) + try: + self.input['encoding'] = sys.getfilesystemencoding() + except: + self.input['encoding'] = sys.getdefaultencoding() + + self.input['files'] = [] + + + self.metainfo_valid = False + self.metainfo = None # copy of loaded or last saved torrent dict + self.infohash = None # only valid if metainfo_valid + + + # We cannot set a built-in default for a tracker here, as it depends on + # a Session. Alternatively, the tracker will be set to the internal + # tracker by default when Session::start_download() is called, if the + # 'announce' field is the empty string. + + # + # Class methods for creating a TorrentDef from a .torrent file + # + def load(filename): + """ + Load a BT .torrent or Tribler .tribe file from disk and convert + it into a finalized TorrentDef. + + @param filename An absolute Unicode filename + @return TorrentDef + """ + # Class method, no locking required + f = open(filename,"rb") + return TorrentDef._read(f) + load = staticmethod(load) + + def _read(stream): + """ Internal class method that reads a torrent file from stream, + checks it for correctness and sets self.input and self.metainfo + accordingly. """ + bdata = stream.read() + stream.close() + data = bdecode(bdata) + return TorrentDef._create(data) + _read = staticmethod(_read) + + def _create(metainfo): # TODO: replace with constructor + # raises ValueErrors if not good + validTorrentFile(metainfo) + + t = TorrentDef() + t.metainfo = metainfo + t.metainfo_valid = True + t.infohash = sha.sha(bencode(metainfo['info'])).digest() + + # copy stuff into self.input + maketorrent.copy_metainfo_to_input(t.metainfo,t.input) + + return t + _create = staticmethod(_create) + + def load_from_url(url): + """ + Load a BT .torrent or Tribler .tribe file from the URL and convert + it into a TorrentDef. 
+ + @param url URL + @return TorrentDef. + """ + # Class method, no locking required + f = urlOpenTimeout(url) + return TorrentDef._read(f) + load_from_url = staticmethod(load_from_url) + + + def load_from_dict(metainfo): + """ + Load a BT .torrent or Tribler .tribe file from the metainfo dictionary + it into a TorrentDef + + @param metainfo A dictionary following the BT torrent file spec. + @return TorrentDef. + """ + # Class method, no locking required + return TorrentDef._create(metainfo) + load_from_dict = staticmethod(load_from_dict) + + + # + # Convenience instance methods for publishing new content + # + def add_content(self,inpath,outpath=None,playtime=None): + """ + Add a file or directory to this torrent definition. When adding a + directory, all files in that directory will be added to the torrent. + + One can add multiple files and directories to a torrent definition. + In that case the "outpath" parameter must be used to indicate how + the files/dirs should be named in the torrent. The outpaths used must + start with a common prefix which will become the "name" field of the + torrent. + + To seed the torrent via the core (as opposed to e.g. HTTP) you will + need to start the download with the dest_dir set to the top-level + directory containing the files and directories to seed. For example, + a file "c:\Videos\file.avi" is seeded as follows: +
+            tdef = TorrentDef()
+            tdef.add_content("c:\Videos\file.avi",playtime="1:59:20")
+            tdef.set_tracker(s.get_internal_tracker_url())
+            tdef.finalize()
+            dscfg = DownloadStartupConfig()
+            dscfg.set_dest_dir("c:\Video")
+            s.start_download(tdef,dscfg)
+        
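+        A sketch of adding multiple files (paths are illustrative; "s" is the
+        Session as in the example above; the common outpath prefix "vids"
+        becomes the torrent's "name" field):
+
+            tdef = TorrentDef()
+            tdef.add_content("c:\Videos\file1.avi",outpath="vids\file1.avi")
+            tdef.add_content("c:\Videos\file2.avi",outpath="vids\file2.avi")
+            tdef.set_tracker(s.get_internal_tracker_url())
+            tdef.finalize()
+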
+ @param inpath Absolute name of file or directory on local filesystem, + as Unicode string. + @param outpath (optional) Name of the content to use in the torrent def + as Unicode string. + @param playtime (optional) String representing the duration of the + multimedia file when played, in [hh:]mm:ss format. + """ + if self.readonly: + raise OperationNotPossibleAtRuntimeException() + + s = os.stat(inpath) + d = {'inpath':inpath,'outpath':outpath,'playtime':playtime,'length':s.st_size} + self.input['files'].append(d) + + self.metainfo_valid = False + + + def remove_content(self,inpath): + """ Remove a file or directory from this torrent definition + + @param inpath Absolute name of file or directory on local filesystem, + as Unicode string. + """ + if self.readonly: + raise OperationNotPossibleAtRuntimeException() + + for d in self.input['files']: + if d['inpath'] == inpath: + self.input['files'].remove(d) + break + + def create_live(self,name,bitrate,playtime="1:00:00",authconfig=None): + """ Create a live streaming multimedia torrent with a specific bitrate. + + The authconfig is a subclass LiveSourceAuthConfig with the key + information required to allow authentication of packets from the source, + or None. In the latter case there is no source authentication. The other + current legal value is an instance of ECDSALiveSourceAuthConfig. When + using this method, a sequence number, real-time timestamp and an ECDSA + signature of 64 bytes is put in each piece. As a result, the content in + each packet is get_piece_length()-81, so that this into account when + selecting the bitrate. + + The info from the authconfig is stored in the 'info' part of the + torrent file when finalized, so changing the authentication info changes + the identity (infohash) of the torrent. + + @param name The name of the stream. + @param bitrate The desired bitrate in bytes per second. + @param playtime The virtual playtime of the stream as a string in + [hh:]mm:ss format. + @param authconfig Parameters for the authentication of the source + """ + self.input['bps'] = bitrate + self.input['playtime'] = playtime # size of virtual content + + # For source auth + authparams = {} + if authconfig is None: + authparams['authmethod'] = LIVE_AUTHMETHOD_NONE + else: + authparams['authmethod'] = authconfig.get_method() + authparams['pubkey'] = str(authconfig.get_pubkey()) + + self.input['live'] = authparams + + d = {'inpath':name,'outpath':None,'playtime':None,'length':None} + self.input['files'].append(d) + + # + # Torrent attributes + # + def set_encoding(self,enc): + """ Set the character encoding for e.g. the 'name' field """ + self.input['encoding'] = enc + self.metainfo_valid = False + + def get_encoding(self): + return self.input['encoding'] + + def set_thumbnail(self,thumbfilename): + """ + Reads image from file and turns it into a torrent thumbnail + The file should contain an image in JPEG format, preferably 171x96. + + @param thumbfilename Absolute name of image file, as Unicode string. + """ + if self.readonly: + raise OperationNotPossibleAtRuntimeException() + + f = open(thumbfilename,"rb") + data = f.read() + f.close() + self.input['thumb'] = data + self.metainfo_valid = False + + + def get_thumbnail(self): + """ Returns (MIME type,thumbnail data) if present or (None,None) + @return A tuple. 
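+
+        A usage sketch (the tdef variable and output filename are illustrative):
+
+            (mime,data) = tdef.get_thumbnail()
+            if data is not None:
+                open("thumb.jpg","wb").write(data)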
""" + if 'thumb' not in self.input or self.input['thumb'] is None: + return (None,None) + else: + thumb = self.input['thumb'] # buffer/string immutable + return ('image/jpeg',thumb) + + + def set_tracker(self,url): + """ Sets the tracker (i.e. the torrent file's 'announce' field). + @param url The announce URL. + """ + if self.readonly: + raise OperationNotPossibleAtRuntimeException() + + if not isValidURL(url): + raise ValueError("Invalid URL") + + if url.endswith('/'): + # Some tracker code can't deal with / at end + url = url[:-1] + self.input['announce'] = url + self.metainfo_valid = False + + def get_tracker(self): + """ Returns the announce URL. + @return URL """ + return self.input['announce'] + + def set_tracker_hierarchy(self,hier): + """ Set hierarchy of trackers (announce-list) following the spec + at http://www.bittornado.com/docs/multitracker-spec.txt + @param hier A hierarchy of trackers as a list of lists. + """ + if self.readonly: + raise OperationNotPossibleAtRuntimeException() + + # TODO: check input, in particular remove / at end + newhier = [] + if type(hier) != ListType: + raise ValueError("hierarchy is not a list") + for tier in hier: + if type(tier) != ListType: + raise ValueError("tier is not a list") + newtier = [] + for url in tier: + if not isValidURL(url): + raise ValueError("Invalid URL: "+`url`) + + if url.endswith('/'): + # Some tracker code can't deal with / at end + url = url[:-1] + newtier.append(url) + newhier.append(newtier) + + self.input['announce-list'] = newhier + self.metainfo_valid = False + + def get_tracker_hierarchy(self): + """ Returns the hierarchy of trackers. + @return A list of lists. """ + return self.input['announce-list'] + + def set_dht_nodes(self,nodes): + """ Sets the DHT nodes required by the mainline DHT support, + See http://www.bittorrent.org/beps/bep_0005.html + @param nodes A list of [hostname,port] lists. + """ + if self.readonly: + raise OperationNotPossibleAtRuntimeException() + + # Check input + if type(nodes) != ListType: + raise ValueError("nodes not a list") + else: + for node in nodes: + if type(node) != ListType and len(node) != 2: + raise ValueError("node in nodes not a 2-item list: "+`node`) + if type(node[0]) != StringType: + raise ValueError("host in node is not string:"+`node`) + if type(node[1]) != IntType: + raise ValueError("port in node is not int:"+`node`) + + self.input['nodes'] = nodes + self.metainfo_valid = False + + def get_dht_nodes(self): + """ Returns the DHT nodes set. + @return A list of [hostname,port] lists. """ + return self.input['nodes'] + + def set_comment(self,value): + """ Set comment field. + @param value A Unicode string. + """ + if self.readonly: + raise OperationNotPossibleAtRuntimeException() + + self.input['comment'] = value + self.metainfo_valid = False + + def get_comment(self): + """ Returns the comment field of the def. + @return A Unicode string. """ + return self.input['comment'] + + def set_created_by(self,value): + """ Set 'created by' field. + @param value A Unicode string. + """ + if self.readonly: + raise OperationNotPossibleAtRuntimeException() + + self.input['created by'] = value + self.metainfo_valid = False + + def get_created_by(self): + """ Returns the 'created by' field. + @return Unicode string. """ + return self.input['created by'] + + def set_httpseeds(self,value): + """ Set list of HTTP seeds following the spec at + http://www.bittornado.com/docs/webseed-spec.txt + @param value A list of URLs. 
+ """ + if self.readonly: + raise OperationNotPossibleAtRuntimeException() + + for url in value: + if not isValidURL(url): + raise ValueError("Invalid URL: "+`url`) + + self.input['httpseeds'] = value + self.metainfo_valid = False + + def get_httpseeds(self): + """ Returns the list of HTTP seeds. + @return A list of URLs. """ + return self.input['httpseeds'] + + def set_piece_length(self,value): + """ Set the size of the pieces in which the content is traded. + The piece size must be a multiple of the chunk size, the unit in which + it is transmitted, which is 16K by default (see + DownloadConfig.set_download_slice_size()). The default is automatic + (value 0). + @param value A number of bytes as per the text. + """ + if self.readonly: + raise OperationNotPossibleAtRuntimeException() + + if not (type(value) == IntType or type(value) == LongType): + raise ValueError("Piece length not an int/long") + + self.input['piece length'] = value + self.metainfo_valid = False + + def get_piece_length(self): + """ Returns the piece size. + @return A number of bytes. """ + return self.input['piece length'] + + def set_add_md5hash(self,value): + """ Whether to add an end-to-end MD5 checksum to the def. + @param value Boolean. + """ + if self.readonly: + raise OperationNotPossibleAtRuntimeException() + + self.input['makehash_md5'] = value + self.metainfo_valid = False + + def get_add_md5hash(self): + """ Returns whether to add an MD5 checksum. """ + return self.input['makehash_md5'] + + def set_add_crc32(self,value): + """ Whether to add an end-to-end CRC32 checksum to the def. + @param value Boolean. + """ + if self.readonly: + raise OperationNotPossibleAtRuntimeException() + + self.input['makehash_crc32'] = value + self.metainfo_valid = False + + def get_add_crc32(self): + """ Returns whether to add an end-to-end CRC32 checksum to the def. + @return Boolean. """ + return self.input['makehash_crc32'] + + def set_add_sha1hash(self,value): + """ Whether to add end-to-end SHA1 checksum to the def. + @param value Boolean. + """ + if self.readonly: + raise OperationNotPossibleAtRuntimeException() + + self.input['makehash_sha1'] = value + self.metainfo_valid = False + + def get_add_sha1hash(self): + """ Returns whether to add an end-to-end SHA1 checksum to the def. + @return Boolean.""" + return self.input['makehash_sha1'] + + def set_create_merkle_torrent(self,value): + """ Create a Merkle torrent instead of a regular BT torrent. A Merkle + torrent uses a hash tree for checking the integrity of the content + received. As such it creates much smaller torrent files than the + regular method. Tribler-specific feature.""" + if self.readonly: + raise OperationNotPossibleAtRuntimeException() + + self.input['createmerkletorrent'] = value + self.metainfo_valid = False + + def get_create_merkle_torrent(self): + """ Returns whether to create a Merkle torrent. + @return Boolean. """ + return self.input['createmerkletorrent'] + + def set_signature_keypair_filename(self,value): + """ Set absolute filename of keypair to be used for signature. + When set, a signature will be added. + @param value A filename containing an Elliptic Curve keypair. + """ + if self.readonly: + raise OperationNotPossibleAtRuntimeException() + + self.input['torrentsigkeypairfilename'] = value + self.metainfo_valid = False + + def get_signature_keypair_filename(self): + """ Returns the filename containing the signing keypair or None. + @return Unicode String or None. 
""" + return self.input['torrentsigkeypairfilename'] + + def get_live(self): + """ Returns whether this definition is for a live torrent. + @return Boolean. """ + return 'live' in self.input and self.input['live'] + + def get_live_authmethod(self): + """ Returns the method for authenticating the source. +
+        Currently either LIVE_AUTHMETHOD_NONE or LIVE_AUTHMETHOD_ECDSA.
+        
+ @return String + """ + return 'live' in self.input and self.input['live']['authmethod'] + + def get_live_pubkey(self): + """ Returns the public key used for authenticating packets from + the source. + @return A public key in DER. + """ + if 'live' in self.input and 'pubkey' in self.input['live']: + return self.input['live']['pubkey'] + else: + return None + + + def finalize(self,userabortflag=None,userprogresscallback=None): + """ Create BT torrent file by reading the files added with + add_content() and calculate the torrent file's infohash. + + Creating the torrent file can take a long time and will be carried out + by the calling thread. The process can be made interruptable by passing + a threading.Event() object via the userabortflag and setting it when + the process should be aborted. The also optional userprogresscallback + will be called by the calling thread periodically, with a progress + percentage as argument. + + The userprogresscallback function will be called by the calling thread. + + @param userabortflag threading.Event() object + @param userprogresscallback Function accepting a fraction as first + argument. + """ + if self.readonly: + raise OperationNotPossibleAtRuntimeException() + + if self.metainfo_valid: + return + + if 'live' in self.input: + # Make sure the duration is an integral number of pieces, for + # security (live source auth). + secs = parse_playtime_to_secs(self.input['playtime']) + pl = float(self.get_piece_length()) + length = float(self.input['bps']*secs) + + diff = length % pl + add = (pl - diff) % pl + newlen = int(length + add) + + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","CHECK INFO LENGTH",secs,newlen + + d = self.input['files'][0] + d['length'] = newlen + + + # Note: reading of all files and calc of hashes is done by calling + # thread. + (infohash,metainfo) = maketorrent.make_torrent_file(self.input,userabortflag=userabortflag,userprogresscallback=userprogresscallback) + if infohash is not None: + self.infohash = infohash + self.metainfo = metainfo + self.input['name'] = metainfo['info']['name'] + # May have been 0, meaning auto. + self.input['piece length'] = metainfo['info']['piece length'] + self.metainfo_valid = True + + def is_finalized(self): + """ Returns whether the TorrentDef is finalized or not. + @return Boolean. """ + return self.metainfo_valid + + # + # Operations on finalized TorrentDefs + # + def get_infohash(self): + """ Returns the infohash of the torrent. + @return A string of length 20. """ + if self.metainfo_valid: + return self.infohash + else: + raise TorrentDefNotFinalizedException() + + def get_metainfo(self): + """ Returns the torrent definition as a dictionary that follows the BT + spec for torrent files. + @return dict + """ + if self.metainfo_valid: + return self.metainfo + else: + raise TorrentDefNotFinalizedException() + + def get_name(self): + """ Returns the info['name'] field as raw string of bytes. + @return String """ + if self.metainfo_valid: + return self.input['name'] # string immutable + else: + raise TorrentDefNotFinalizedException() + + def set_name(self,name): + """ Set the name of this torrent + @param name name of torrent as String + """ + if self.readonly: + raise OperationNotPossibleAtRuntimeException() + + self.input['name'] = name + self.metainfo_valid = False + + + def get_name_as_unicode(self): + """ Returns the info['name'] field as Unicode string. + @return Unicode string. 
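+
+        A usage sketch (the tdef variable is illustrative):
+
+            uniname = tdef.get_name_as_unicode()
+            print uniname.encode('utf_8','replace')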
""" + if self.metainfo_valid: + (namekey,uniname) = metainfoname2unicode(self.metainfo) + return uniname + else: + raise TorrentDefNotFinalizedException() + + def verify_torrent_signature(self): + """ Verify the signature on the finalized torrent definition. Returns + whether the signature was valid. + @return Boolean. + """ + if self.metainfo_valid: + return Tribler.Core.Overlay.permid.verify_torrent_signature(self.metainfo) + else: + raise TorrentDefNotFinalizedException() + + + def save(self,filename): + """ + Finalizes the torrent def and writes a torrent file i.e., bencoded dict + following BT spec) to the specified filename. Note this make take a + long time when the torrent def is not yet finalized. + + @param filename An absolute Unicode path name. + """ + if not self.readonly: + self.finalize() + + bdata = bencode(self.metainfo) + f = open(filename,"wb") + f.write(bdata) + f.close() + + + def get_bitrate(self,file=None): + """ Returns the bitrate of the specified file. If no file is specified, + we assume this is a single-file torrent. + + @param file (Optional) the file in the torrent to retrieve the bitrate of. + @return The bitrate in bytes per second or None. + """ + if not self.metainfo_valid: + raise NotYetImplementedException() # must save first + + return maketorrent.get_bitrate_from_metainfo(file,self.metainfo) + + def get_files(self,exts=None): + """ The list of files in the finalized torrent def. + @param exts (Optional) list of filename extensions (without leading .) + to search for. + @return A list of filenames. + """ + if not self.metainfo_valid: + raise NotYetImplementedException() # must save first + + return maketorrent.get_files(self.metainfo,exts) + + def get_length(self,selectedfiles=None): + """ Returns the total size of the content in the torrent. If the + optional selectedfiles argument is specified, the method returns + the total size of only those files. + @return A length (long) + """ + if not self.metainfo_valid: + raise NotYetImplementedException() # must save first + + (length,filepieceranges) = maketorrent.get_length_filepieceranges_from_metainfo(self.metainfo,selectedfiles) + return length + + def is_multifile_torrent(self): + """ Returns whether this TorrentDef is a multi-file torrent. 
+ @return Boolean + """ + if not self.metainfo_valid: + raise NotYetImplementedException() # must save first + + return 'files' in self.metainfo['info'] + + + # + # Internal methods + # + def get_index_of_file_in_files(self,file): + if not self.metainfo_valid: + raise NotYetImplementedException() # must save first + + info = self.metainfo['info'] + + if file is not None and 'files' in info: + for i in range(len(info['files'])): + x = info['files'][i] + + intorrentpath = maketorrent.pathlist2filename(x['path']) + if intorrentpath == file: + return i + return ValueError("File not found in torrent") + else: + raise ValueError("File not found in single-file torrent") + + # + # Copyable interface + # + def copy(self): + input = copy.copy(self.input) + metainfo = copy.copy(self.metainfo) + infohash = self.infohash + t = TorrentDef(input,metainfo,infohash) + t.metainfo_valid = self.metainfo_valid + return t diff --git a/tribler-mod/Tribler/Core/Utilities/__init__.py b/tribler-mod/Tribler/Core/Utilities/__init__.py new file mode 100644 index 0000000..b7ef832 --- /dev/null +++ b/tribler-mod/Tribler/Core/Utilities/__init__.py @@ -0,0 +1,3 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information diff --git a/tribler-mod/Tribler/Core/Utilities/__init__.py.bak b/tribler-mod/Tribler/Core/Utilities/__init__.py.bak new file mode 100644 index 0000000..395f8fb --- /dev/null +++ b/tribler-mod/Tribler/Core/Utilities/__init__.py.bak @@ -0,0 +1,2 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information diff --git a/tribler-mod/Tribler/Core/Utilities/timeouturlopen.py b/tribler-mod/Tribler/Core/Utilities/timeouturlopen.py new file mode 100644 index 0000000..c81ac79 --- /dev/null +++ b/tribler-mod/Tribler/Core/Utilities/timeouturlopen.py @@ -0,0 +1,43 @@ +from time import localtime, strftime +# Written by Feek Zindel +# see LICENSE.txt for license information + +import httplib +import socket +import urllib2 + +def urlOpenTimeout(url,timeout=30,*data): + class TimeoutHTTPConnection(httplib.HTTPConnection): + def connect(self): + """Connect to the host and port specified in __init__.""" + msg = "getaddrinfo returns an empty list" + for res in socket.getaddrinfo(self.host, self.port, 0, + socket.SOCK_STREAM): + af, socktype, proto, canonname, sa = res + try: + self.sock = socket.socket(af,socktype, proto) + self.sock.settimeout(timeout) + if self.debuglevel > 0: + print "connect: (%s, %s)" % (self.host, self.port) + self.sock.connect(sa) + except socket.error, msg: + if self.debuglevel > 0: + print 'connect fail:', (self.host, self.port) + if self.sock: + self.sock.close() + self.sock = None + continue + break + if not self.sock: + raise socket.error, msg + + class TimeoutHTTPHandler(urllib2.HTTPHandler): + def http_open(self, req): + return self.do_open(TimeoutHTTPConnection, req) + + opener = urllib2.build_opener(TimeoutHTTPHandler, + urllib2.HTTPDefaultErrorHandler, + urllib2.HTTPRedirectHandler) + return opener.open(url,*data) + +#s = urlOpenTimeout("http://www.google.com",timeout=30) diff --git a/tribler-mod/Tribler/Core/Utilities/timeouturlopen.py.bak b/tribler-mod/Tribler/Core/Utilities/timeouturlopen.py.bak new file mode 100644 index 0000000..e27e670 --- /dev/null +++ b/tribler-mod/Tribler/Core/Utilities/timeouturlopen.py.bak @@ -0,0 +1,42 @@ +# Written by Feek Zindel +# see LICENSE.txt for license information + +import httplib +import socket +import urllib2 + +def urlOpenTimeout(url,timeout=30,*data): + class 
TimeoutHTTPConnection(httplib.HTTPConnection): + def connect(self): + """Connect to the host and port specified in __init__.""" + msg = "getaddrinfo returns an empty list" + for res in socket.getaddrinfo(self.host, self.port, 0, + socket.SOCK_STREAM): + af, socktype, proto, canonname, sa = res + try: + self.sock = socket.socket(af,socktype, proto) + self.sock.settimeout(timeout) + if self.debuglevel > 0: + print "connect: (%s, %s)" % (self.host, self.port) + self.sock.connect(sa) + except socket.error, msg: + if self.debuglevel > 0: + print 'connect fail:', (self.host, self.port) + if self.sock: + self.sock.close() + self.sock = None + continue + break + if not self.sock: + raise socket.error, msg + + class TimeoutHTTPHandler(urllib2.HTTPHandler): + def http_open(self, req): + return self.do_open(TimeoutHTTPConnection, req) + + opener = urllib2.build_opener(TimeoutHTTPHandler, + urllib2.HTTPDefaultErrorHandler, + urllib2.HTTPRedirectHandler) + return opener.open(url,*data) + +#s = urlOpenTimeout("http://www.google.com",timeout=30) diff --git a/tribler-mod/Tribler/Core/Utilities/unicode.py b/tribler-mod/Tribler/Core/Utilities/unicode.py new file mode 100644 index 0000000..9b9721c --- /dev/null +++ b/tribler-mod/Tribler/Core/Utilities/unicode.py @@ -0,0 +1,79 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information + +import sys + +def bin2unicode(bin,possible_encoding='utf_8'): + sysenc = sys.getfilesystemencoding() + if possible_encoding is None: + possible_encoding = sysenc + try: + return bin.decode(possible_encoding) + except: + try: + if possible_encoding == sysenc: + raise + return bin.decode(sysenc) + except: + try: + return bin.decode('utf_8') + except: + try: + return bin.decode('iso-8859-1') + except: + try: + return bin.decode(sys.getfilesystemencoding()) + except: + return bin.decode(sys.getdefaultencoding(), errors = 'replace') + + +def str2unicode(s): + try: + s = unicode(s) + except: + flag = 0 + for encoding in [sys.getfilesystemencoding(), 'utf_8', 'iso-8859-1', 'unicode-escape' ]: + try: + s = unicode(s, encoding) + flag = 1 + break + except: + pass + if flag == 0: + try: + s = unicode(s,sys.getdefaultencoding(), errors = 'replace') + except: + pass + return s + +def dunno2unicode(dunno): + newdunno = None + if isinstance(dunno,unicode): + newdunno = dunno + else: + try: + newdunno = bin2unicode(dunno) + except: + newdunno = str2unicode(dunno) + return newdunno + + +def metainfoname2unicode(metadata): + if metadata['info'].has_key('name.utf-8'): + namekey = 'name.utf-8' + else: + namekey = 'name' + if metadata.has_key('encoding'): + encoding = metadata['encoding'] + name = bin2unicode(metadata['info'][namekey],encoding) + else: + name = bin2unicode(metadata['info'][namekey]) + + return (namekey,name) + + +def unicode2str(s): + if not isinstance(s,unicode): + return s + return s.encode(sys.getfilesystemencoding()) \ No newline at end of file diff --git a/tribler-mod/Tribler/Core/Utilities/unicode.py.bak b/tribler-mod/Tribler/Core/Utilities/unicode.py.bak new file mode 100644 index 0000000..12691de --- /dev/null +++ b/tribler-mod/Tribler/Core/Utilities/unicode.py.bak @@ -0,0 +1,78 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information + +import sys + +def bin2unicode(bin,possible_encoding='utf_8'): + sysenc = sys.getfilesystemencoding() + if possible_encoding is None: + possible_encoding = sysenc + try: + return bin.decode(possible_encoding) + except: + try: + if possible_encoding == sysenc: + raise + 
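+            # possible_encoding was not the filesystem encoding, so try the
+            # filesystem encoding before the utf_8/iso-8859-1 fallbacks below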
return bin.decode(sysenc) + except: + try: + return bin.decode('utf_8') + except: + try: + return bin.decode('iso-8859-1') + except: + try: + return bin.decode(sys.getfilesystemencoding()) + except: + return bin.decode(sys.getdefaultencoding(), errors = 'replace') + + +def str2unicode(s): + try: + s = unicode(s) + except: + flag = 0 + for encoding in [sys.getfilesystemencoding(), 'utf_8', 'iso-8859-1', 'unicode-escape' ]: + try: + s = unicode(s, encoding) + flag = 1 + break + except: + pass + if flag == 0: + try: + s = unicode(s,sys.getdefaultencoding(), errors = 'replace') + except: + pass + return s + +def dunno2unicode(dunno): + newdunno = None + if isinstance(dunno,unicode): + newdunno = dunno + else: + try: + newdunno = bin2unicode(dunno) + except: + newdunno = str2unicode(dunno) + return newdunno + + +def metainfoname2unicode(metadata): + if metadata['info'].has_key('name.utf-8'): + namekey = 'name.utf-8' + else: + namekey = 'name' + if metadata.has_key('encoding'): + encoding = metadata['encoding'] + name = bin2unicode(metadata['info'][namekey],encoding) + else: + name = bin2unicode(metadata['info'][namekey]) + + return (namekey,name) + + +def unicode2str(s): + if not isinstance(s,unicode): + return s + return s.encode(sys.getfilesystemencoding()) \ No newline at end of file diff --git a/tribler-mod/Tribler/Core/Utilities/utilities.py b/tribler-mod/Tribler/Core/Utilities/utilities.py new file mode 100644 index 0000000..41feda5 --- /dev/null +++ b/tribler-mod/Tribler/Core/Utilities/utilities.py @@ -0,0 +1,541 @@ +from time import localtime, strftime + +# Written by Jie Yang +# see LICENSE.txt for license information + +import socket +from time import time, strftime, gmtime +from base64 import encodestring, decodestring +from sha import sha +import sys +import os +import copy +from types import UnicodeType, StringType, LongType, IntType, ListType, DictType +import urlparse +from traceback import print_exc + +STRICT_CHECK = False +DEBUG = True #False + +permid_len = 112 +infohash_len = 20 + +def bin2str(bin): + # Full BASE64-encoded + return encodestring(bin).replace("\n","") + +def str2bin(str): + return decodestring(str) + +def validName(name): + if not isinstance(name, str) and len(name) == 0: + raise RuntimeError, "invalid name: " + name + return True + +def validPort(port): + port = int(port) + if port < 0 or port > 65535: + raise RuntimeError, "invalid Port: " + str(port) + return True + +def validIP(ip): + try: + try: + # Is IPv4 addr? + socket.inet_aton(ip) + return True + except socket.error: + # Is hostname / IPv6? 
+ socket.getaddrinfo(ip, None) + return True + except: + print_exc() + raise RuntimeError, "invalid IP address: " + ip + + +def validPermid(permid): + if not isinstance(permid, str): + raise RuntimeError, "invalid permid: " + permid + if STRICT_CHECK and len(permid) != permid_len: + raise RuntimeError, "invalid permid: " + permid + return True + +def validInfohash(infohash): + if not isinstance(infohash, str): + raise RuntimeError, "invalid infohash " + infohash + if STRICT_CHECK and len(infohash) != infohash_len: + raise RuntimeError, "invalid infohash " + infohash + return True + +def isValidPermid(permid): + try: + return validPermid(permid) + except: + return False + +def isValidInfohash(infohash): + try: + return validInfohash(infohash) + except: + return False + +def isValidPort(port): + try: + return validPort(port) + except: + return False + +def isValidIP(ip): + try: + return validIP(ip) + except: + return False + +def isValidName(name): + try: + return validPort(name) + except: + return False + + +def validTorrentFile(metainfo): + # Jie: is this function too strict? Many torrents could not be downloaded + if type(metainfo) != DictType: + raise ValueError('metainfo not dict') + + + if 'info' not in metainfo: + raise ValueError('metainfo misses key info') + + if 'announce' in metainfo and not isValidURL(metainfo['announce']): + raise ValueError('announce URL bad') + + # http://www.bittorrent.org/DHT_protocol.html says both announce and nodes + # are not allowed, but some torrents (Azureus?) apparently violate this. + + #if 'announce' in metainfo and 'nodes' in metainfo: + # raise ValueError('both announce and nodes present') + + if 'nodes' in metainfo: + nodes = metainfo['nodes'] + if type(nodes) != ListType: + raise ValueError('nodes not list, but '+`type(nodes)`) + for pair in nodes: + if type(pair) != ListType and len(pair) != 2: + raise ValueError('node not 2-item list, but '+`type(pair)`) + host,port = pair + if type(host) != StringType: + raise ValueError('node host not string, but '+`type(host)`) + if type(port) != IntType: + raise ValueError('node port not int, but '+`type(port)`) + + if not ('announce' in metainfo or 'nodes' in metainfo): + raise ValueError('announce and nodes missing') + + info = metainfo['info'] + if type(info) != DictType: + raise ValueError('info not dict') + + if 'root hash' in info: + infokeys = ['name','piece length', 'root hash'] + elif 'live' in info: + infokeys = ['name','piece length', 'live'] + else: + infokeys = ['name','piece length', 'pieces'] + for key in infokeys: + if key not in info: + raise ValueError('info misses key '+key) + name = info['name'] + if type(name) != StringType: + raise ValueError('info name is not string but '+`type(name)`) + pl = info['piece length'] + if type(pl) != IntType and type(pl) != LongType: + raise ValueError('info piece size is not int, but '+`type(pl)`) + if 'root hash' in info: + rh = info['root hash'] + if type(rh) != StringType or len(rh) != 20: + raise ValueError('info roothash is not 20-byte string') + elif 'live' in info: + live = info['live'] + if type(live) != DictType: + raise ValueError('info live is not a dict') + else: + if 'authmethod' not in live: + raise ValueError('info live misses key'+'authmethod') + else: + p = info['pieces'] + if type(p) != StringType or len(p) % 20 != 0: + raise ValueError('info pieces is not multiple of 20 bytes') + + if 'length' in info: + # single-file torrent + if 'files' in info: + raise ValueError('info may not contain both files and length key') + + l = 
info['length'] + if type(l) != IntType and type(l) != LongType: + raise ValueError('info length is not int, but '+`type(l)`) + else: + # multi-file torrent + if 'length' in info: + raise ValueError('info may not contain both files and length key') + + files = info['files'] + if type(files) != ListType: + raise ValueError('info files not list, but '+`type(files)`) + + filekeys = ['path','length'] + for file in files: + for key in filekeys: + if key not in file: + raise ValueError('info files missing path or length key') + + p = file['path'] + if type(p) != ListType: + raise ValueError('info files path is not list, but '+`type(p)`) + for dir in p: + if type(dir) != StringType: + raise ValueError('info files path is not string, but '+`type(dir)`) + + l = file['length'] + if type(l) != IntType and type(l) != LongType: + raise ValueError('info files length is not int, but '+`type(l)`) + + # common additional fields + if 'announce-list' in metainfo: + al = metainfo['announce-list'] + if type(al) != ListType: + raise ValueError('announce-list is not list, but '+`type(al)`) + for tier in al: + if type(tier) != ListType: + raise ValueError('announce-list tier is not list '+`tier`) + # Jie: this limitation is not necessary +# for url in tier: +# if not isValidURL(url): +# raise ValueError('announce-list url is not valid '+`url`) + + if 'azureus_properties' in metainfo: + azprop = metainfo['azureus_properties'] + if type(azprop) != DictType: + raise ValueError('azureus_properties is not dict, but '+`type(azprop)`) + if 'Content' in azprop: + content = azprop['Content'] + if type(content) != DictType: + raise ValueError('azureus_properties content is not dict, but '+`type(content)`) + if 'thumbnail' in content: + thumb = content['thumbnail'] + if type(content) != StringType: + raise ValueError('azureus_properties content thumbnail is not string') + + +def isValidTorrentFile(metainfo): + try: + validTorrentFile(metainfo) + return True + except: + if DEBUG: + print_exc() + return False + + +def isValidURL(url): + if url.lower().startswith('udp'): # exception for udp + url = url.lower().replace('udp','http',1) + r = urlparse.urlsplit(url) + # if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","isValidURL:",r + + if r[0] == '' or r[1] == '': + return False + return True + +def show_permid(permid): + # Full BASE64-encoded. Must not be abbreviated in any way. + if not permid: + return 'None' + return encodestring(permid).replace("\n","") + # Short digest + ##return sha(permid).hexdigest() + +def show_permid_short(permid): + if not permid: + return 'None' + s = encodestring(permid).replace("\n","") + return s[-10:] + #return encodestring(sha(s).digest()).replace("\n","") + +def show_permid_shorter(permid): + if not permid: + return 'None' + s = encodestring(permid).replace("\n","") + return s[-5:] + +def readableBuddyCastMsg(buddycast_data,selversion): + """ Convert msg to readable format. + As this copies the original dict, and just transforms it, + most added info is already present and therefore logged + correctly. Exception is the OLPROTO_VER_EIGHTH which + modified the preferences list. 
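+
+        A minimal sketch (all field values are illustrative):
+
+            msg = {'permid':'PERMID','ip':'1.2.3.4','port':7762,'name':'peer',
+                   'preferences':[],'taste buddies':[],'random peers':[]}
+            print readableBuddyCastMsg(msg,8)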
""" + prefxchg_msg = copy.deepcopy(buddycast_data) + + if prefxchg_msg.has_key('permid'): + prefxchg_msg.pop('permid') + if prefxchg_msg.has_key('ip'): + prefxchg_msg.pop('ip') + if prefxchg_msg.has_key('port'): + prefxchg_msg.pop('port') + + name = repr(prefxchg_msg['name']) # avoid coding error + + if prefxchg_msg['preferences']: + prefs = [] + if selversion < 8: # OLPROTO_VER_EIGHTH: Can't use constant due to recursive import + for pref in prefxchg_msg['preferences']: + prefs.append(show_permid(pref)) + else: + for preftuple in prefxchg_msg['preferences']: + # Copy tuple and escape infohash + newlist = [] + for i in range(0,len(preftuple)): + if i == 0: + val = show_permid(preftuple[i]) + else: + val = preftuple[i] + newlist.append(val) + prefs.append(newlist) + + prefxchg_msg['preferences'] = prefs + + + if prefxchg_msg.get('taste buddies', []): + buddies = [] + for buddy in prefxchg_msg['taste buddies']: + buddy['permid'] = show_permid(buddy['permid']) + if buddy.get('preferences', []): + prefs = [] + for pref in buddy['preferences']: + prefs.append(show_permid(pref)) + buddy['preferences'] = prefs + buddies.append(buddy) + prefxchg_msg['taste buddies'] = buddies + + if prefxchg_msg.get('random peers', []): + peers = [] + for peer in prefxchg_msg['random peers']: + peer['permid'] = show_permid(peer['permid']) + peers.append(peer) + prefxchg_msg['random peers'] = peers + + return prefxchg_msg + +def print_prefxchg_msg(prefxchg_msg): + def show_permid(permid): + return permid + print "------- preference_exchange message ---------" + print prefxchg_msg + print "---------------------------------------------" + print "permid:", show_permid(prefxchg_msg['permid']) + print "name", prefxchg_msg['name'] + print "ip:", prefxchg_msg['ip'] + print "port:", prefxchg_msg['port'] + print "preferences:" + if prefxchg_msg['preferences']: + for pref in prefxchg_msg['preferences']: + print "\t", pref#, prefxchg_msg['preferences'][pref] + print "taste buddies:" + if prefxchg_msg['taste buddies']: + for buddy in prefxchg_msg['taste buddies']: + print "\t permid:", show_permid(buddy['permid']) + #print "\t permid:", buddy['permid'] + print "\t ip:", buddy['ip'] + print "\t port:", buddy['port'] + print "\t age:", buddy['age'] + print "\t preferences:" + if buddy['preferences']: + for pref in buddy['preferences']: + print "\t\t", pref#, buddy['preferences'][pref] + print + print "random peers:" + if prefxchg_msg['random peers']: + for peer in prefxchg_msg['random peers']: + print "\t permid:", show_permid(peer['permid']) + #print "\t permid:", peer['permid'] + print "\t ip:", peer['ip'] + print "\t port:", peer['port'] + print "\t age:", peer['age'] + print + +def print_dict(data, level=0): + if isinstance(data, dict): + print + for i in data: + print " "*level, str(i) + ':', + print_dict(data[i], level+1) + elif isinstance(data, list): + if not data: + print "[]" + else: + print + for i in xrange(len(data)): + print " "*level, '[' + str(i) + ']:', + print_dict(data[i], level+1) + else: + print data + +def friendly_time(old_time): + curr_time = time() + try: + old_time = int(old_time) + assert old_time > 0 + diff = int(curr_time - old_time) + except: + if isinstance(old_time, str): + return old_time + else: + return '?' + if diff < 0: + return '?' + elif diff < 2: + return str(diff) + " sec. ago" + elif diff < 60: + return str(diff) + " secs. ago" + elif diff < 120: + return "1 min. ago" + elif diff < 3600: + return str(int(diff/60)) + " mins. 
ago" + elif diff < 7200: + return "1 hour ago" + elif diff < 86400: + return str(int(diff/3600)) + " hours ago" + elif diff < 172800: + return "Yesterday" + elif diff < 259200: + return str(int(diff/86400)) + " days ago" + else: + return strftime("%d-%m-%Y", gmtime(old_time)) + +def sort_dictlist(dict_list, key, order='increase'): + + aux = [] + for i in xrange(len(dict_list)): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","sort_dictlist",key,"in",dict_list[i].keys(),"?" + if key in dict_list[i]: + aux.append((dict_list[i][key],i)) + aux.sort() + if order == 'decrease' or order == 1: # 0 - increase, 1 - decrease + aux.reverse() + return [dict_list[i] for x, i in aux] + + +def dict_compare(a, b, keys): + for key in keys: + order = 'increase' + if type(key) == tuple: + skey, order = key + else: + skey = key + + if a.get(skey) > b.get(skey): + if order == 'decrease' or order == 1: + return -1 + else: + return 1 + elif a.get(skey) < b.get(skey): + if order == 'decrease' or order == 1: + return 1 + else: + return -1 + + return 0 + + +def multisort_dictlist(dict_list, keys): + + listcopy = copy.copy(dict_list) + cmp = lambda a, b: dict_compare(a, b, keys) + listcopy.sort(cmp=cmp) + return listcopy + + +def find_content_in_dictlist(dict_list, content, key='infohash'): + title = content.get(key) + if not title: + print 'Error: content had no content_name' + return False + for i in xrange(len(dict_list)): + if title == dict_list[i].get(key): + return i + return -1 + +def remove_torrent_from_list(list, content, key = 'infohash'): + remove_data_from_list(list, content, key) + +def remove_data_from_list(list, content, key = 'infohash'): + index = find_content_in_dictlist(list, content, key) + if index != -1: + del list[index] + +def sortList(list_to_sort, list_key, order='decrease'): + aux = zip(list_key, list_to_sort) + aux.sort() + if order == 'decrease': + aux.reverse() + return [i for k, i in aux] + +def getPlural( n): + if n == 1: + return '' + else: + return 's' + + +def find_prog_in_PATH(prog): + envpath = os.path.expandvars('${PATH}') + if sys.platform == 'win32': + splitchar = ';' + else: + splitchar = ':' + paths = envpath.split(splitchar) + foundat = None + for path in paths: + fullpath = os.path.join(path,prog) + if os.access(fullpath,os.R_OK|os.X_OK): + foundat = fullpath + break + return foundat + +def hostname_or_ip2ip(hostname_or_ip): + # Arno: don't DNS resolve always, grabs lock on most systems + ip = None + try: + # test that hostname_or_ip contains a xxx.xxx.xxx.xxx string + socket.inet_aton(hostname_or_ip) + ip = hostname_or_ip + + except: + try: + # dns-lookup for hostname_or_ip into an ip address + ip = socket.gethostbyname(hostname_or_ip) + if not hostname_or_ip.startswith("superpeer"): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","hostname_or_ip2ip: resolved ip from hostname, an ip should have been provided", hostname_or_ip + + except: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","hostname_or_ip2ip: invalid hostname", hostname_or_ip + print_exc() + + return ip + + +if __name__=='__main__': + + torrenta = {'name':'a', 'swarmsize' : 12} + torrentb = {'name':'b', 'swarmsize' : 24} + torrentc = {'name':'c', 'swarmsize' : 18, 'Web2' : True} + torrentd = {'name':'b', 'swarmsize' : 36, 'Web2' : True} + + torrents = [torrenta, torrentb, torrentc, torrentd] + print multisort_dictlist(torrents, ["Web2", ("swarmsize", "decrease")]) + + + #d = {'a':1,'b':[1,2,3],'c':{'c':2,'d':[3,4],'k':{'c':2,'d':[3,4]}}} + 
#print_dict(d) diff --git a/tribler-mod/Tribler/Core/Utilities/utilities.py.bak b/tribler-mod/Tribler/Core/Utilities/utilities.py.bak new file mode 100644 index 0000000..e0fbbed --- /dev/null +++ b/tribler-mod/Tribler/Core/Utilities/utilities.py.bak @@ -0,0 +1,540 @@ + +# Written by Jie Yang +# see LICENSE.txt for license information + +import socket +from time import time, strftime, gmtime +from base64 import encodestring, decodestring +from sha import sha +import sys +import os +import copy +from types import UnicodeType, StringType, LongType, IntType, ListType, DictType +import urlparse +from traceback import print_exc + +STRICT_CHECK = False +DEBUG = True #False + +permid_len = 112 +infohash_len = 20 + +def bin2str(bin): + # Full BASE64-encoded + return encodestring(bin).replace("\n","") + +def str2bin(str): + return decodestring(str) + +def validName(name): + if not isinstance(name, str) and len(name) == 0: + raise RuntimeError, "invalid name: " + name + return True + +def validPort(port): + port = int(port) + if port < 0 or port > 65535: + raise RuntimeError, "invalid Port: " + str(port) + return True + +def validIP(ip): + try: + try: + # Is IPv4 addr? + socket.inet_aton(ip) + return True + except socket.error: + # Is hostname / IPv6? + socket.getaddrinfo(ip, None) + return True + except: + print_exc() + raise RuntimeError, "invalid IP address: " + ip + + +def validPermid(permid): + if not isinstance(permid, str): + raise RuntimeError, "invalid permid: " + permid + if STRICT_CHECK and len(permid) != permid_len: + raise RuntimeError, "invalid permid: " + permid + return True + +def validInfohash(infohash): + if not isinstance(infohash, str): + raise RuntimeError, "invalid infohash " + infohash + if STRICT_CHECK and len(infohash) != infohash_len: + raise RuntimeError, "invalid infohash " + infohash + return True + +def isValidPermid(permid): + try: + return validPermid(permid) + except: + return False + +def isValidInfohash(infohash): + try: + return validInfohash(infohash) + except: + return False + +def isValidPort(port): + try: + return validPort(port) + except: + return False + +def isValidIP(ip): + try: + return validIP(ip) + except: + return False + +def isValidName(name): + try: + return validPort(name) + except: + return False + + +def validTorrentFile(metainfo): + # Jie: is this function too strict? Many torrents could not be downloaded + if type(metainfo) != DictType: + raise ValueError('metainfo not dict') + + + if 'info' not in metainfo: + raise ValueError('metainfo misses key info') + + if 'announce' in metainfo and not isValidURL(metainfo['announce']): + raise ValueError('announce URL bad') + + # http://www.bittorrent.org/DHT_protocol.html says both announce and nodes + # are not allowed, but some torrents (Azureus?) apparently violate this. 
+ + #if 'announce' in metainfo and 'nodes' in metainfo: + # raise ValueError('both announce and nodes present') + + if 'nodes' in metainfo: + nodes = metainfo['nodes'] + if type(nodes) != ListType: + raise ValueError('nodes not list, but '+`type(nodes)`) + for pair in nodes: + if type(pair) != ListType and len(pair) != 2: + raise ValueError('node not 2-item list, but '+`type(pair)`) + host,port = pair + if type(host) != StringType: + raise ValueError('node host not string, but '+`type(host)`) + if type(port) != IntType: + raise ValueError('node port not int, but '+`type(port)`) + + if not ('announce' in metainfo or 'nodes' in metainfo): + raise ValueError('announce and nodes missing') + + info = metainfo['info'] + if type(info) != DictType: + raise ValueError('info not dict') + + if 'root hash' in info: + infokeys = ['name','piece length', 'root hash'] + elif 'live' in info: + infokeys = ['name','piece length', 'live'] + else: + infokeys = ['name','piece length', 'pieces'] + for key in infokeys: + if key not in info: + raise ValueError('info misses key '+key) + name = info['name'] + if type(name) != StringType: + raise ValueError('info name is not string but '+`type(name)`) + pl = info['piece length'] + if type(pl) != IntType and type(pl) != LongType: + raise ValueError('info piece size is not int, but '+`type(pl)`) + if 'root hash' in info: + rh = info['root hash'] + if type(rh) != StringType or len(rh) != 20: + raise ValueError('info roothash is not 20-byte string') + elif 'live' in info: + live = info['live'] + if type(live) != DictType: + raise ValueError('info live is not a dict') + else: + if 'authmethod' not in live: + raise ValueError('info live misses key'+'authmethod') + else: + p = info['pieces'] + if type(p) != StringType or len(p) % 20 != 0: + raise ValueError('info pieces is not multiple of 20 bytes') + + if 'length' in info: + # single-file torrent + if 'files' in info: + raise ValueError('info may not contain both files and length key') + + l = info['length'] + if type(l) != IntType and type(l) != LongType: + raise ValueError('info length is not int, but '+`type(l)`) + else: + # multi-file torrent + if 'length' in info: + raise ValueError('info may not contain both files and length key') + + files = info['files'] + if type(files) != ListType: + raise ValueError('info files not list, but '+`type(files)`) + + filekeys = ['path','length'] + for file in files: + for key in filekeys: + if key not in file: + raise ValueError('info files missing path or length key') + + p = file['path'] + if type(p) != ListType: + raise ValueError('info files path is not list, but '+`type(p)`) + for dir in p: + if type(dir) != StringType: + raise ValueError('info files path is not string, but '+`type(dir)`) + + l = file['length'] + if type(l) != IntType and type(l) != LongType: + raise ValueError('info files length is not int, but '+`type(l)`) + + # common additional fields + if 'announce-list' in metainfo: + al = metainfo['announce-list'] + if type(al) != ListType: + raise ValueError('announce-list is not list, but '+`type(al)`) + for tier in al: + if type(tier) != ListType: + raise ValueError('announce-list tier is not list '+`tier`) + # Jie: this limitation is not necessary +# for url in tier: +# if not isValidURL(url): +# raise ValueError('announce-list url is not valid '+`url`) + + if 'azureus_properties' in metainfo: + azprop = metainfo['azureus_properties'] + if type(azprop) != DictType: + raise ValueError('azureus_properties is not dict, but '+`type(azprop)`) + if 'Content' in azprop: + 
content = azprop['Content'] + if type(content) != DictType: + raise ValueError('azureus_properties content is not dict, but '+`type(content)`) + if 'thumbnail' in content: + thumb = content['thumbnail'] + if type(content) != StringType: + raise ValueError('azureus_properties content thumbnail is not string') + + +def isValidTorrentFile(metainfo): + try: + validTorrentFile(metainfo) + return True + except: + if DEBUG: + print_exc() + return False + + +def isValidURL(url): + if url.lower().startswith('udp'): # exception for udp + url = url.lower().replace('udp','http',1) + r = urlparse.urlsplit(url) + # if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","isValidURL:",r + + if r[0] == '' or r[1] == '': + return False + return True + +def show_permid(permid): + # Full BASE64-encoded. Must not be abbreviated in any way. + if not permid: + return 'None' + return encodestring(permid).replace("\n","") + # Short digest + ##return sha(permid).hexdigest() + +def show_permid_short(permid): + if not permid: + return 'None' + s = encodestring(permid).replace("\n","") + return s[-10:] + #return encodestring(sha(s).digest()).replace("\n","") + +def show_permid_shorter(permid): + if not permid: + return 'None' + s = encodestring(permid).replace("\n","") + return s[-5:] + +def readableBuddyCastMsg(buddycast_data,selversion): + """ Convert msg to readable format. + As this copies the original dict, and just transforms it, + most added info is already present and therefore logged + correctly. Exception is the OLPROTO_VER_EIGHTH which + modified the preferences list. """ + prefxchg_msg = copy.deepcopy(buddycast_data) + + if prefxchg_msg.has_key('permid'): + prefxchg_msg.pop('permid') + if prefxchg_msg.has_key('ip'): + prefxchg_msg.pop('ip') + if prefxchg_msg.has_key('port'): + prefxchg_msg.pop('port') + + name = repr(prefxchg_msg['name']) # avoid coding error + + if prefxchg_msg['preferences']: + prefs = [] + if selversion < 8: # OLPROTO_VER_EIGHTH: Can't use constant due to recursive import + for pref in prefxchg_msg['preferences']: + prefs.append(show_permid(pref)) + else: + for preftuple in prefxchg_msg['preferences']: + # Copy tuple and escape infohash + newlist = [] + for i in range(0,len(preftuple)): + if i == 0: + val = show_permid(preftuple[i]) + else: + val = preftuple[i] + newlist.append(val) + prefs.append(newlist) + + prefxchg_msg['preferences'] = prefs + + + if prefxchg_msg.get('taste buddies', []): + buddies = [] + for buddy in prefxchg_msg['taste buddies']: + buddy['permid'] = show_permid(buddy['permid']) + if buddy.get('preferences', []): + prefs = [] + for pref in buddy['preferences']: + prefs.append(show_permid(pref)) + buddy['preferences'] = prefs + buddies.append(buddy) + prefxchg_msg['taste buddies'] = buddies + + if prefxchg_msg.get('random peers', []): + peers = [] + for peer in prefxchg_msg['random peers']: + peer['permid'] = show_permid(peer['permid']) + peers.append(peer) + prefxchg_msg['random peers'] = peers + + return prefxchg_msg + +def print_prefxchg_msg(prefxchg_msg): + def show_permid(permid): + return permid + print "------- preference_exchange message ---------" + print prefxchg_msg + print "---------------------------------------------" + print "permid:", show_permid(prefxchg_msg['permid']) + print "name", prefxchg_msg['name'] + print "ip:", prefxchg_msg['ip'] + print "port:", prefxchg_msg['port'] + print "preferences:" + if prefxchg_msg['preferences']: + for pref in prefxchg_msg['preferences']: + print "\t", pref#, 
prefxchg_msg['preferences'][pref] + print "taste buddies:" + if prefxchg_msg['taste buddies']: + for buddy in prefxchg_msg['taste buddies']: + print "\t permid:", show_permid(buddy['permid']) + #print "\t permid:", buddy['permid'] + print "\t ip:", buddy['ip'] + print "\t port:", buddy['port'] + print "\t age:", buddy['age'] + print "\t preferences:" + if buddy['preferences']: + for pref in buddy['preferences']: + print "\t\t", pref#, buddy['preferences'][pref] + print + print "random peers:" + if prefxchg_msg['random peers']: + for peer in prefxchg_msg['random peers']: + print "\t permid:", show_permid(peer['permid']) + #print "\t permid:", peer['permid'] + print "\t ip:", peer['ip'] + print "\t port:", peer['port'] + print "\t age:", peer['age'] + print + +def print_dict(data, level=0): + if isinstance(data, dict): + print + for i in data: + print " "*level, str(i) + ':', + print_dict(data[i], level+1) + elif isinstance(data, list): + if not data: + print "[]" + else: + print + for i in xrange(len(data)): + print " "*level, '[' + str(i) + ']:', + print_dict(data[i], level+1) + else: + print data + +def friendly_time(old_time): + curr_time = time() + try: + old_time = int(old_time) + assert old_time > 0 + diff = int(curr_time - old_time) + except: + if isinstance(old_time, str): + return old_time + else: + return '?' + if diff < 0: + return '?' + elif diff < 2: + return str(diff) + " sec. ago" + elif diff < 60: + return str(diff) + " secs. ago" + elif diff < 120: + return "1 min. ago" + elif diff < 3600: + return str(int(diff/60)) + " mins. ago" + elif diff < 7200: + return "1 hour ago" + elif diff < 86400: + return str(int(diff/3600)) + " hours ago" + elif diff < 172800: + return "Yesterday" + elif diff < 259200: + return str(int(diff/86400)) + " days ago" + else: + return strftime("%d-%m-%Y", gmtime(old_time)) + +def sort_dictlist(dict_list, key, order='increase'): + + aux = [] + for i in xrange(len(dict_list)): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","sort_dictlist",key,"in",dict_list[i].keys(),"?" 
+ if key in dict_list[i]: + aux.append((dict_list[i][key],i)) + aux.sort() + if order == 'decrease' or order == 1: # 0 - increase, 1 - decrease + aux.reverse() + return [dict_list[i] for x, i in aux] + + +def dict_compare(a, b, keys): + for key in keys: + order = 'increase' + if type(key) == tuple: + skey, order = key + else: + skey = key + + if a.get(skey) > b.get(skey): + if order == 'decrease' or order == 1: + return -1 + else: + return 1 + elif a.get(skey) < b.get(skey): + if order == 'decrease' or order == 1: + return 1 + else: + return -1 + + return 0 + + +def multisort_dictlist(dict_list, keys): + + listcopy = copy.copy(dict_list) + cmp = lambda a, b: dict_compare(a, b, keys) + listcopy.sort(cmp=cmp) + return listcopy + + +def find_content_in_dictlist(dict_list, content, key='infohash'): + title = content.get(key) + if not title: + print 'Error: content had no content_name' + return False + for i in xrange(len(dict_list)): + if title == dict_list[i].get(key): + return i + return -1 + +def remove_torrent_from_list(list, content, key = 'infohash'): + remove_data_from_list(list, content, key) + +def remove_data_from_list(list, content, key = 'infohash'): + index = find_content_in_dictlist(list, content, key) + if index != -1: + del list[index] + +def sortList(list_to_sort, list_key, order='decrease'): + aux = zip(list_key, list_to_sort) + aux.sort() + if order == 'decrease': + aux.reverse() + return [i for k, i in aux] + +def getPlural( n): + if n == 1: + return '' + else: + return 's' + + +def find_prog_in_PATH(prog): + envpath = os.path.expandvars('${PATH}') + if sys.platform == 'win32': + splitchar = ';' + else: + splitchar = ':' + paths = envpath.split(splitchar) + foundat = None + for path in paths: + fullpath = os.path.join(path,prog) + if os.access(fullpath,os.R_OK|os.X_OK): + foundat = fullpath + break + return foundat + +def hostname_or_ip2ip(hostname_or_ip): + # Arno: don't DNS resolve always, grabs lock on most systems + ip = None + try: + # test that hostname_or_ip contains a xxx.xxx.xxx.xxx string + socket.inet_aton(hostname_or_ip) + ip = hostname_or_ip + + except: + try: + # dns-lookup for hostname_or_ip into an ip address + ip = socket.gethostbyname(hostname_or_ip) + if not hostname_or_ip.startswith("superpeer"): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","hostname_or_ip2ip: resolved ip from hostname, an ip should have been provided", hostname_or_ip + + except: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","hostname_or_ip2ip: invalid hostname", hostname_or_ip + print_exc() + + return ip + + +if __name__=='__main__': + + torrenta = {'name':'a', 'swarmsize' : 12} + torrentb = {'name':'b', 'swarmsize' : 24} + torrentc = {'name':'c', 'swarmsize' : 18, 'Web2' : True} + torrentd = {'name':'b', 'swarmsize' : 36, 'Web2' : True} + + torrents = [torrenta, torrentb, torrentc, torrentd] + print multisort_dictlist(torrents, ["Web2", ("swarmsize", "decrease")]) + + + #d = {'a':1,'b':[1,2,3],'c':{'c':2,'d':[3,4],'k':{'c':2,'d':[3,4]}}} + #print_dict(d) diff --git a/tribler-mod/Tribler/Core/Utilities/win32regchecker.py b/tribler-mod/Tribler/Core/Utilities/win32regchecker.py new file mode 100644 index 0000000..22a81b5 --- /dev/null +++ b/tribler-mod/Tribler/Core/Utilities/win32regchecker.py @@ -0,0 +1,114 @@ +from time import localtime, strftime +# Written by ABC authors and Arno Bakker +# see LICENSE.txt for license information + +import sys +import os +from traceback import print_exc + +if (sys.platform == 'win32'): + import _winreg + + 
# short for PyHKEY from "_winreg" module + HKCR = _winreg.HKEY_CLASSES_ROOT + HKLM = _winreg.HKEY_LOCAL_MACHINE + HKCU = _winreg.HKEY_CURRENT_USER +else: + HKCR = 0 + HKLM = 1 + HKCU = 2 + +DEBUG = False + +class Win32RegChecker: + def __init__(self): + pass + + def readRootKey(self,key_name,value_name=""): + return self.readKey(HKCR,key_name,value_name) + + def readKey(self,hkey,key_name,value_name=""): + if (sys.platform != 'win32'): + return None + + try: + # test that shell/open association with ABC exist + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","win32regcheck: Opening",key_name,value_name + full_key = _winreg.OpenKey(hkey, key_name, 0, _winreg.KEY_READ) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","win32regcheck: Open returned",full_key + + value_data, value_type = _winreg.QueryValueEx(full_key, value_name) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","win32regcheck: Read",value_data,value_type + _winreg.CloseKey(full_key) + + return value_data + except: + print_exc(file=sys.stderr) + # error, test failed, key don't exist + # (could also indicate a unicode error) + return None + + + def readKeyRecursively(self,hkey,key_name,value_name=""): + if (sys.platform != 'win32'): + return None + + lasthkey = hkey + try: + toclose = [] + keyparts = key_name.split('\\') + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","win32regcheck: keyparts",keyparts + for keypart in keyparts: + if keypart == '': + continue + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","win32regcheck: Opening",keypart + full_key = _winreg.OpenKey(lasthkey, keypart, 0, _winreg.KEY_READ) + lasthkey = full_key + toclose.append(full_key) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","win32regcheck: Open returned",full_key + + value_data, value_type = _winreg.QueryValueEx(full_key, value_name) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","win32regcheck: Read",value_data,value_type + for hkey in toclose: + _winreg.CloseKey(hkey) + + return value_data + except: + print_exc() + # error, test failed, key don't exist + # (could also indicate a unicode error) + return None + + + def writeKey(self,hkey,key_name,value_name,value_data,value_type): + try: + # kreate desired key in Windows register + full_key = _winreg.CreateKey(hkey, key_name) + except EnvironmentError: + return False; + # set desired value in created Windows register key + _winreg.SetValueEx(full_key, value_name, 0, value_type, value_data) + # close Windows register key + _winreg.CloseKey(full_key) + + return True + + + +if __name__ == "__main__": + w = Win32RegChecker() + winfiletype = w.readRootKey(".wmv") + playkey = winfiletype+"\shell\play\command" + urlplay = w.readRootKey(playkey) + print urlplay + openkey = winfiletype+"\shell\open\command" + urlopen = w.readRootKey(openkey) + print urlopen diff --git a/tribler-mod/Tribler/Core/Utilities/win32regchecker.py.bak b/tribler-mod/Tribler/Core/Utilities/win32regchecker.py.bak new file mode 100644 index 0000000..14005e8 --- /dev/null +++ b/tribler-mod/Tribler/Core/Utilities/win32regchecker.py.bak @@ -0,0 +1,113 @@ +# Written by ABC authors and Arno Bakker +# see LICENSE.txt for license information + +import sys +import os +from traceback import print_exc + +if (sys.platform == 'win32'): + import _winreg + + # short for PyHKEY from "_winreg" module + HKCR = _winreg.HKEY_CLASSES_ROOT + 
HKLM = _winreg.HKEY_LOCAL_MACHINE + HKCU = _winreg.HKEY_CURRENT_USER +else: + HKCR = 0 + HKLM = 1 + HKCU = 2 + +DEBUG = False + +class Win32RegChecker: + def __init__(self): + pass + + def readRootKey(self,key_name,value_name=""): + return self.readKey(HKCR,key_name,value_name) + + def readKey(self,hkey,key_name,value_name=""): + if (sys.platform != 'win32'): + return None + + try: + # test that shell/open association with ABC exist + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","win32regcheck: Opening",key_name,value_name + full_key = _winreg.OpenKey(hkey, key_name, 0, _winreg.KEY_READ) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","win32regcheck: Open returned",full_key + + value_data, value_type = _winreg.QueryValueEx(full_key, value_name) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","win32regcheck: Read",value_data,value_type + _winreg.CloseKey(full_key) + + return value_data + except: + print_exc(file=sys.stderr) + # error, test failed, key don't exist + # (could also indicate a unicode error) + return None + + + def readKeyRecursively(self,hkey,key_name,value_name=""): + if (sys.platform != 'win32'): + return None + + lasthkey = hkey + try: + toclose = [] + keyparts = key_name.split('\\') + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","win32regcheck: keyparts",keyparts + for keypart in keyparts: + if keypart == '': + continue + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","win32regcheck: Opening",keypart + full_key = _winreg.OpenKey(lasthkey, keypart, 0, _winreg.KEY_READ) + lasthkey = full_key + toclose.append(full_key) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","win32regcheck: Open returned",full_key + + value_data, value_type = _winreg.QueryValueEx(full_key, value_name) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","win32regcheck: Read",value_data,value_type + for hkey in toclose: + _winreg.CloseKey(hkey) + + return value_data + except: + print_exc() + # error, test failed, key don't exist + # (could also indicate a unicode error) + return None + + + def writeKey(self,hkey,key_name,value_name,value_data,value_type): + try: + # kreate desired key in Windows register + full_key = _winreg.CreateKey(hkey, key_name) + except EnvironmentError: + return False; + # set desired value in created Windows register key + _winreg.SetValueEx(full_key, value_name, 0, value_type, value_data) + # close Windows register key + _winreg.CloseKey(full_key) + + return True + + + +if __name__ == "__main__": + w = Win32RegChecker() + winfiletype = w.readRootKey(".wmv") + playkey = winfiletype+"\shell\play\command" + urlplay = w.readRootKey(playkey) + print urlplay + openkey = winfiletype+"\shell\open\command" + urlopen = w.readRootKey(openkey) + print urlopen diff --git a/tribler-mod/Tribler/Core/Video/LiveSourceAuth.py b/tribler-mod/Tribler/Core/Video/LiveSourceAuth.py new file mode 100644 index 0000000..1156aed --- /dev/null +++ b/tribler-mod/Tribler/Core/Video/LiveSourceAuth.py @@ -0,0 +1,315 @@ +from time import localtime, strftime +# written by Arno Bakker +# see LICENSE.txt for license information + +import sys +from traceback import print_exc +from cStringIO import StringIO +import struct +import time + +from sha import sha + + +from M2Crypto import EC +from Tribler.Core.Overlay.permid import sign_data,verify_data_pubkeyobj +from Tribler.Core.osutils import * + 
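
For reference, a minimal usage sketch of the Win32RegChecker added above (illustrative only; lookup_open_command and the .avi extension are arbitrary examples, not part of Tribler). It resolves the shell 'open' command registered for a file extension, the same pattern the __main__ block uses for .wmv, and returns None on non-Windows platforms or when the key is absent, as readKey() itself does.

# Illustrative sketch, not part of the patch.
from Tribler.Core.Utilities.win32regchecker import Win32RegChecker

def lookup_open_command(extension=".avi"):
    w = Win32RegChecker()
    filetype = w.readRootKey(extension)                  # HKCR\.avi -> ProgID
    if filetype is None:
        return None
    return w.readRootKey(filetype + r"\shell\open\command")

if __name__ == "__main__":
    print lookup_open_command()
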
+DEBUG = False + +class Authenticator: + + def __init__(self,piecelen,npieces): + self.piecelen = piecelen + self.npieces = npieces + + def get_piece_length(self): + return self.piecelen + + def get_npieces(self): + return self.npieces + + def get_content_blocksize(self): + pass + + def sign(self,content): + pass + + def verify(self,piece): + pass + + def get_content(self,piece): + pass + + +class NullAuthenticator(Authenticator): + + def __init__(self,piecelen,npieces): + Authenticator.__init__(self,piecelen,npieces) + self.contentblocksize = piecelen + + def get_content_blocksize(self): + return self.contentblocksize + + def sign(self,content): + return [content] + + def verify(self,piece): + return True + + def get_content(self,piece): + return piece + + +class ECDSAAuthenticator(Authenticator): + """ Authenticator who places a ECDSA signature in the last part of a + piece. In particular, the sig consists of: + - an 8 byte sequence number + - an 8 byte real-time timestamp + - a 1 byte length field followed by + - a variable-length ECDSA signature in ASN.1, (max 64 bytes) + - optionally 0x00 padding bytes, if the ECDSA sig is less than 64 bytes, + to give a total of 81 bytes. + """ + + SEQNUM_SIZE = 8 + RTSTAMP_SIZE = 8 + LENGTH_SIZE = 1 + MAX_ECDSA_ASN1_SIGSIZE = 64 + EXTRA_SIZE = SEQNUM_SIZE + RTSTAMP_SIZE + # = seqnum + rtstamp + 1 byte length + MAX_ECDSA, padded + # put seqnum + rtstamp directly after content, so we calc the sig directly + # from the received buffer. + OUR_SIGSIZE = EXTRA_SIZE+LENGTH_SIZE+MAX_ECDSA_ASN1_SIGSIZE + + def __init__(self,piecelen,npieces,keypair=None,pubkeypem=None): + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","ECDSAAuth: npieces",npieces + + Authenticator.__init__(self,piecelen,npieces) + self.contentblocksize = piecelen-self.OUR_SIGSIZE + self.keypair = keypair + if pubkeypem is not None: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","ECDSAAuth: pubkeypem",`pubkeypem` + self.pubkey = EC.pub_key_from_der(pubkeypem) + else: + self.pubkey = None + self.seqnum = 0L + + def get_content_blocksize(self): + return self.contentblocksize + + def sign(self,content): + rtstamp = time.time() + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","ECDSAAuth: sign: ts %.5f s" % rtstamp + + extra = struct.pack('>Qd', self.seqnum,rtstamp) + self.seqnum += 1L + + sig = sign_data(content,extra,self.keypair) + # The sig returned is either 64 or 63 bytes long (62 also possible I + # guess). Therefore we transmit size as 1 bytes and fill to 64 bytes. + lensig = chr(len(sig)) + if len(sig) != self.MAX_ECDSA_ASN1_SIGSIZE: + # Note: this is not official ASN.1 padding. Also need to modify + # the header length for that I assume. + diff = self.MAX_ECDSA_ASN1_SIGSIZE-len(sig) + padding = '\x00' * diff + return [content,extra,lensig,sig,padding] + else: + return [content,extra,lensig,sig] + + def verify(self,piece,index): + """ A piece is valid if: + - the signature is correct, + - the seqnum % npieces == piecenr. + - the seqnum is no older than self.seqnum - npieces + @param piece The piece data as received from peer + @param index The piece number as received from peer + @return Boolean + """ + try: + # Can we do this without memcpy? 
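
For reference, the fixed 81-byte trailer that ECDSAAuthenticator appends to each piece (8-byte sequence number, 8-byte timestamp, 1-byte signature length, ASN.1 signature padded to 64 bytes) can be illustrated with a short self-contained sketch. append_trailer and parse_trailer are illustrative names, and a dummy byte string stands in for a real ECDSA signature produced by sign_data().

# Illustrative sketch, not part of the patch: layout and parsing of the trailer.
import struct

SEQNUM_SIZE, RTSTAMP_SIZE, LENGTH_SIZE, MAX_SIG = 8, 8, 1, 64
TRAILER = SEQNUM_SIZE + RTSTAMP_SIZE + LENGTH_SIZE + MAX_SIG    # = 81 bytes

def append_trailer(content, seqnum, rtstamp, sig):
    extra = struct.pack('>Qd', seqnum, rtstamp)       # 8-byte seqnum + 8-byte timestamp
    padding = '\x00' * (MAX_SIG - len(sig))           # pad the sig up to 64 bytes
    return content + extra + chr(len(sig)) + sig + padding

def parse_trailer(piece):
    base = len(piece) - TRAILER
    seqnum, rtstamp = struct.unpack('>Qd', piece[base:base + SEQNUM_SIZE + RTSTAMP_SIZE])
    lensig = ord(piece[base + SEQNUM_SIZE + RTSTAMP_SIZE])
    sigstart = base + SEQNUM_SIZE + RTSTAMP_SIZE + LENGTH_SIZE
    return piece[:base], seqnum, rtstamp, piece[sigstart:sigstart + lensig]

piece = append_trailer('payload', 42, 1259340000.0, 'X' * 63)
assert parse_trailer(piece) == ('payload', 42, 1259340000.0, 'X' * 63)
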
+ #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","ECDSAAuth: verify",len(piece) + extra = piece[-self.OUR_SIGSIZE:-self.OUR_SIGSIZE+self.EXTRA_SIZE] + lensig = ord(piece[-self.OUR_SIGSIZE+self.EXTRA_SIZE]) + if lensig > self.MAX_ECDSA_ASN1_SIGSIZE: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","ECDSAAuth: @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ failed piece",index,"lensig wrong",lensig + return False + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","ECDSAAuth: verify lensig",lensig + diff = lensig-self.MAX_ECDSA_ASN1_SIGSIZE + if diff == 0: + sig = piece[-self.OUR_SIGSIZE+self.EXTRA_SIZE+self.LENGTH_SIZE:] + else: + sig = piece[-self.OUR_SIGSIZE+self.EXTRA_SIZE+self.LENGTH_SIZE:diff] + content = piece[:-self.OUR_SIGSIZE] + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","ECDSAAuth: verify piece",index,"sig",`sig` + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","ECDSAAuth: verify dig",sha(content).hexdigest() + + ret = verify_data_pubkeyobj(content,extra,self.pubkey,sig) + if ret: + (seqnum, rtstamp) = struct.unpack('>Qd',extra) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","ECDSAAuth: verify piece",index,"seq",seqnum,"ts %.5f s" % rtstamp,"ls",lensig + + mod = seqnum % self.get_npieces() + thres = self.seqnum - self.get_npieces()/2 + if seqnum <= thres: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","ECDSAAuth: @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ failed piece",index,"old seqnum",seqnum,"<<",self.seqnum + return False + elif mod != index: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","ECDSAAuth: @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ failed piece",index,"expected",mod + return False + else: + self.seqnum = max(self.seqnum,seqnum) + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","ECDSAAuth: @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ piece",index,"failed sig" + + return ret + except: + print_exc() + return False + + def get_content(self,piece): + return piece[:-self.OUR_SIGSIZE] + + # Extra fields + def get_seqnum(self,piece): + (seqnum, rtstamp) = self._decode_extra(piece) + return seqnum + + def get_rtstamp(self,piece): + (seqnum, rtstamp) = self._decode_extra(piece) + return rtstamp + + def _decode_extra(self,piece): + extra = piece[-self.OUR_SIGSIZE:-self.OUR_SIGSIZE+self.EXTRA_SIZE] + return struct.unpack('>Qd',extra) + + +def sign_data(plaintext,extra,ec_keypair): + digester = sha(plaintext) + digester.update(extra) + digest = digester.digest() + return ec_keypair.sign_dsa_asn1(digest) + +def verify_data_pubkeyobj(plaintext,extra,pubkey,blob): + digester = sha(plaintext) + digester.update(extra) + digest = digester.digest() + return pubkey.verify_dsa_asn1(digest,blob) + + +class AuthStreamWrapper: + """ Wrapper around the stream returned by VideoOnDemand/MovieOnDemandTransporter + that strips of the signature info + """ + + def __init__(self,inputstream,authenticator): + self.inputstream = inputstream + self.buffer = StringIO() + self.authenticator = authenticator + self.piecelen = authenticator.get_piece_length() + self.last_rtstamp = None + + def read(self,numbytes=None): + rawdata = self._readn(self.piecelen) + content = self.authenticator.get_content(rawdata) + self.last_rtstamp = self.authenticator.get_rtstamp(rawdata) + if numbytes is None or numbytes < 0: + raise ValueError('Stream has unlimited size, read all not supported.') + elif numbytes < 
len(content): + # TODO: buffer unread data for next read + raise ValueError('reading less than piecesize not supported yet') + else: + return content + + def get_generation_time(self): + """ Returns the time at which the last read piece was generated at the source. """ + return self.last_rtstamp + + def seek(self,pos,whence=os.SEEK_SET): + if pos == 0 and whence == os.SEEK_SET: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","authstream: seek: Ignoring seek 0 in live" + else: + raise ValueError("authstream does not support seek") + + def close(self): + self.inputstream.close() + + # Internal method + def _readn(self,n): + """ read exactly n bytes from inputstream, block if unavail """ + nwant = n + while True: + data = self.inputstream.read(nwant) + if len(data) == 0: + return data + nwant -= len(data) + self.buffer.write(data) + if nwant == 0: + break + self.buffer.seek(0) + data = self.buffer.read(n) + self.buffer.seek(0) + return data + + + +class VariableReadAuthStreamWrapper: + """ Wrapper around AuthStreamWrapper that allows reading of variable + number of bytes. TODO: optimize whole stack of AuthWrapper, + MovieTransportWrapper, MovieOnDemandTransporter + """ + + def __init__(self,inputstream,piecelen): + self.inputstream = inputstream + self.buffer = '' + self.piecelen = piecelen + + def read(self,numbytes=None): + if numbytes is None or numbytes < 0: + raise ValueError('Stream has unlimited size, read all not supported.') + return self._readn(numbytes) + + def get_generation_time(self): + """ Returns the time at which the last read piece was generated at the source. """ + return self.inputstream.get_generation_time() + + def seek(self,pos,whence=None): + return self.inputstream.seek(pos,whence=whence) + + def close(self): + self.inputstream.close() + + # Internal method + def _readn(self,nwant): + """ read *at most* nwant bytes from inputstream """ + + if len(self.buffer) == 0: + # Must read fixed size blocks from authwrapper + data = self.inputstream.read(self.piecelen) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","varread: Got",len(data),"want",nwant + if len(data) == 0: + return data + self.buffer = data + + lenb = len(self.buffer) + tosend = min(nwant,lenb) + + if tosend == lenb: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","varread: zero copy 2 lenb",lenb + pre = self.buffer + post = '' + else: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","varread: copy",tosend,"lenb",lenb + pre = self.buffer[0:tosend] + post = self.buffer[tosend:] + + self.buffer = post + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","varread: Returning",len(pre) + return pre + + diff --git a/tribler-mod/Tribler/Core/Video/LiveSourceAuth.py.bak b/tribler-mod/Tribler/Core/Video/LiveSourceAuth.py.bak new file mode 100644 index 0000000..c376897 --- /dev/null +++ b/tribler-mod/Tribler/Core/Video/LiveSourceAuth.py.bak @@ -0,0 +1,314 @@ +# written by Arno Bakker +# see LICENSE.txt for license information + +import sys +from traceback import print_exc +from cStringIO import StringIO +import struct +import time + +from sha import sha + + +from M2Crypto import EC +from Tribler.Core.Overlay.permid import sign_data,verify_data_pubkeyobj +from Tribler.Core.osutils import * + +DEBUG = False + +class Authenticator: + + def __init__(self,piecelen,npieces): + self.piecelen = piecelen + self.npieces = npieces + + def get_piece_length(self): + return self.piecelen + + def get_npieces(self): + return 
self.npieces + + def get_content_blocksize(self): + pass + + def sign(self,content): + pass + + def verify(self,piece): + pass + + def get_content(self,piece): + pass + + +class NullAuthenticator(Authenticator): + + def __init__(self,piecelen,npieces): + Authenticator.__init__(self,piecelen,npieces) + self.contentblocksize = piecelen + + def get_content_blocksize(self): + return self.contentblocksize + + def sign(self,content): + return [content] + + def verify(self,piece): + return True + + def get_content(self,piece): + return piece + + +class ECDSAAuthenticator(Authenticator): + """ Authenticator who places a ECDSA signature in the last part of a + piece. In particular, the sig consists of: + - an 8 byte sequence number + - an 8 byte real-time timestamp + - a 1 byte length field followed by + - a variable-length ECDSA signature in ASN.1, (max 64 bytes) + - optionally 0x00 padding bytes, if the ECDSA sig is less than 64 bytes, + to give a total of 81 bytes. + """ + + SEQNUM_SIZE = 8 + RTSTAMP_SIZE = 8 + LENGTH_SIZE = 1 + MAX_ECDSA_ASN1_SIGSIZE = 64 + EXTRA_SIZE = SEQNUM_SIZE + RTSTAMP_SIZE + # = seqnum + rtstamp + 1 byte length + MAX_ECDSA, padded + # put seqnum + rtstamp directly after content, so we calc the sig directly + # from the received buffer. + OUR_SIGSIZE = EXTRA_SIZE+LENGTH_SIZE+MAX_ECDSA_ASN1_SIGSIZE + + def __init__(self,piecelen,npieces,keypair=None,pubkeypem=None): + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","ECDSAAuth: npieces",npieces + + Authenticator.__init__(self,piecelen,npieces) + self.contentblocksize = piecelen-self.OUR_SIGSIZE + self.keypair = keypair + if pubkeypem is not None: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","ECDSAAuth: pubkeypem",`pubkeypem` + self.pubkey = EC.pub_key_from_der(pubkeypem) + else: + self.pubkey = None + self.seqnum = 0L + + def get_content_blocksize(self): + return self.contentblocksize + + def sign(self,content): + rtstamp = time.time() + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","ECDSAAuth: sign: ts %.5f s" % rtstamp + + extra = struct.pack('>Qd', self.seqnum,rtstamp) + self.seqnum += 1L + + sig = sign_data(content,extra,self.keypair) + # The sig returned is either 64 or 63 bytes long (62 also possible I + # guess). Therefore we transmit size as 1 bytes and fill to 64 bytes. + lensig = chr(len(sig)) + if len(sig) != self.MAX_ECDSA_ASN1_SIGSIZE: + # Note: this is not official ASN.1 padding. Also need to modify + # the header length for that I assume. + diff = self.MAX_ECDSA_ASN1_SIGSIZE-len(sig) + padding = '\x00' * diff + return [content,extra,lensig,sig,padding] + else: + return [content,extra,lensig,sig] + + def verify(self,piece,index): + """ A piece is valid if: + - the signature is correct, + - the seqnum % npieces == piecenr. + - the seqnum is no older than self.seqnum - npieces + @param piece The piece data as received from peer + @param index The piece number as received from peer + @return Boolean + """ + try: + # Can we do this without memcpy? 
+ #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","ECDSAAuth: verify",len(piece) + extra = piece[-self.OUR_SIGSIZE:-self.OUR_SIGSIZE+self.EXTRA_SIZE] + lensig = ord(piece[-self.OUR_SIGSIZE+self.EXTRA_SIZE]) + if lensig > self.MAX_ECDSA_ASN1_SIGSIZE: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","ECDSAAuth: @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ failed piece",index,"lensig wrong",lensig + return False + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","ECDSAAuth: verify lensig",lensig + diff = lensig-self.MAX_ECDSA_ASN1_SIGSIZE + if diff == 0: + sig = piece[-self.OUR_SIGSIZE+self.EXTRA_SIZE+self.LENGTH_SIZE:] + else: + sig = piece[-self.OUR_SIGSIZE+self.EXTRA_SIZE+self.LENGTH_SIZE:diff] + content = piece[:-self.OUR_SIGSIZE] + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","ECDSAAuth: verify piece",index,"sig",`sig` + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","ECDSAAuth: verify dig",sha(content).hexdigest() + + ret = verify_data_pubkeyobj(content,extra,self.pubkey,sig) + if ret: + (seqnum, rtstamp) = struct.unpack('>Qd',extra) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","ECDSAAuth: verify piece",index,"seq",seqnum,"ts %.5f s" % rtstamp,"ls",lensig + + mod = seqnum % self.get_npieces() + thres = self.seqnum - self.get_npieces()/2 + if seqnum <= thres: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","ECDSAAuth: @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ failed piece",index,"old seqnum",seqnum,"<<",self.seqnum + return False + elif mod != index: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","ECDSAAuth: @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ failed piece",index,"expected",mod + return False + else: + self.seqnum = max(self.seqnum,seqnum) + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","ECDSAAuth: @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ piece",index,"failed sig" + + return ret + except: + print_exc() + return False + + def get_content(self,piece): + return piece[:-self.OUR_SIGSIZE] + + # Extra fields + def get_seqnum(self,piece): + (seqnum, rtstamp) = self._decode_extra(piece) + return seqnum + + def get_rtstamp(self,piece): + (seqnum, rtstamp) = self._decode_extra(piece) + return rtstamp + + def _decode_extra(self,piece): + extra = piece[-self.OUR_SIGSIZE:-self.OUR_SIGSIZE+self.EXTRA_SIZE] + return struct.unpack('>Qd',extra) + + +def sign_data(plaintext,extra,ec_keypair): + digester = sha(plaintext) + digester.update(extra) + digest = digester.digest() + return ec_keypair.sign_dsa_asn1(digest) + +def verify_data_pubkeyobj(plaintext,extra,pubkey,blob): + digester = sha(plaintext) + digester.update(extra) + digest = digester.digest() + return pubkey.verify_dsa_asn1(digest,blob) + + +class AuthStreamWrapper: + """ Wrapper around the stream returned by VideoOnDemand/MovieOnDemandTransporter + that strips of the signature info + """ + + def __init__(self,inputstream,authenticator): + self.inputstream = inputstream + self.buffer = StringIO() + self.authenticator = authenticator + self.piecelen = authenticator.get_piece_length() + self.last_rtstamp = None + + def read(self,numbytes=None): + rawdata = self._readn(self.piecelen) + content = self.authenticator.get_content(rawdata) + self.last_rtstamp = self.authenticator.get_rtstamp(rawdata) + if numbytes is None or numbytes < 0: + raise ValueError('Stream has unlimited size, read all not supported.') + elif numbytes < 
len(content): + # TODO: buffer unread data for next read + raise ValueError('reading less than piecesize not supported yet') + else: + return content + + def get_generation_time(self): + """ Returns the time at which the last read piece was generated at the source. """ + return self.last_rtstamp + + def seek(self,pos,whence=os.SEEK_SET): + if pos == 0 and whence == os.SEEK_SET: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","authstream: seek: Ignoring seek 0 in live" + else: + raise ValueError("authstream does not support seek") + + def close(self): + self.inputstream.close() + + # Internal method + def _readn(self,n): + """ read exactly n bytes from inputstream, block if unavail """ + nwant = n + while True: + data = self.inputstream.read(nwant) + if len(data) == 0: + return data + nwant -= len(data) + self.buffer.write(data) + if nwant == 0: + break + self.buffer.seek(0) + data = self.buffer.read(n) + self.buffer.seek(0) + return data + + + +class VariableReadAuthStreamWrapper: + """ Wrapper around AuthStreamWrapper that allows reading of variable + number of bytes. TODO: optimize whole stack of AuthWrapper, + MovieTransportWrapper, MovieOnDemandTransporter + """ + + def __init__(self,inputstream,piecelen): + self.inputstream = inputstream + self.buffer = '' + self.piecelen = piecelen + + def read(self,numbytes=None): + if numbytes is None or numbytes < 0: + raise ValueError('Stream has unlimited size, read all not supported.') + return self._readn(numbytes) + + def get_generation_time(self): + """ Returns the time at which the last read piece was generated at the source. """ + return self.inputstream.get_generation_time() + + def seek(self,pos,whence=None): + return self.inputstream.seek(pos,whence=whence) + + def close(self): + self.inputstream.close() + + # Internal method + def _readn(self,nwant): + """ read *at most* nwant bytes from inputstream """ + + if len(self.buffer) == 0: + # Must read fixed size blocks from authwrapper + data = self.inputstream.read(self.piecelen) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","varread: Got",len(data),"want",nwant + if len(data) == 0: + return data + self.buffer = data + + lenb = len(self.buffer) + tosend = min(nwant,lenb) + + if tosend == lenb: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","varread: zero copy 2 lenb",lenb + pre = self.buffer + post = '' + else: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","varread: copy",tosend,"lenb",lenb + pre = self.buffer[0:tosend] + post = self.buffer[tosend:] + + self.buffer = post + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","varread: Returning",len(pre) + return pre + + diff --git a/tribler-mod/Tribler/Core/Video/MovieTransport.py b/tribler-mod/Tribler/Core/Video/MovieTransport.py new file mode 100644 index 0000000..fb6937b --- /dev/null +++ b/tribler-mod/Tribler/Core/Video/MovieTransport.py @@ -0,0 +1,67 @@ +from time import localtime, strftime +# Written by Jan David Mol, Arno Bakker +# see LICENSE.txt for license information + + +import os,sys + +if sys.version.startswith("2.4"): + os.SEEK_SET = 0 + os.SEEK_CUR = 1 + os.SEEK_END = 2 + +DEBUG = False + +class MovieTransport: + + def __init__(self): + pass + + def start( self, bytepos = 0 ): + pass + + def size(self ): + pass + + def read(self): + pass + + def stop(self): + pass + + def done(self): + pass + + def get_mimetype(self): + pass + + def set_mimetype(self,mimetype): + pass + + +class MovieTransportStreamWrapper: + """ 
Provide a file-like interface """ + def __init__(self,mt): + self.mt = mt + self.started = False + + def read(self,numbytes=None): + if not self.started: + self.mt.start(0) + self.started = True + if self.mt.done(): + return '' + data = self.mt.read(numbytes) + if data is None: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","MovieTransportStreamWrapper: mt read returns None" + data = '' + return data + + def seek(self,pos,whence=os.SEEK_SET): + # TODO: shift play_pos in PiecePicking + interpret whence + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","MovieTransportStreamWrapper: seek() CALLED",pos,"whence",whence + self.mt.seek(pos,whence=whence) + + def close(self): + self.mt.stop() + \ No newline at end of file diff --git a/tribler-mod/Tribler/Core/Video/MovieTransport.py.bak b/tribler-mod/Tribler/Core/Video/MovieTransport.py.bak new file mode 100644 index 0000000..55bb543 --- /dev/null +++ b/tribler-mod/Tribler/Core/Video/MovieTransport.py.bak @@ -0,0 +1,66 @@ +# Written by Jan David Mol, Arno Bakker +# see LICENSE.txt for license information + + +import os,sys + +if sys.version.startswith("2.4"): + os.SEEK_SET = 0 + os.SEEK_CUR = 1 + os.SEEK_END = 2 + +DEBUG = False + +class MovieTransport: + + def __init__(self): + pass + + def start( self, bytepos = 0 ): + pass + + def size(self ): + pass + + def read(self): + pass + + def stop(self): + pass + + def done(self): + pass + + def get_mimetype(self): + pass + + def set_mimetype(self,mimetype): + pass + + +class MovieTransportStreamWrapper: + """ Provide a file-like interface """ + def __init__(self,mt): + self.mt = mt + self.started = False + + def read(self,numbytes=None): + if not self.started: + self.mt.start(0) + self.started = True + if self.mt.done(): + return '' + data = self.mt.read(numbytes) + if data is None: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","MovieTransportStreamWrapper: mt read returns None" + data = '' + return data + + def seek(self,pos,whence=os.SEEK_SET): + # TODO: shift play_pos in PiecePicking + interpret whence + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","MovieTransportStreamWrapper: seek() CALLED",pos,"whence",whence + self.mt.seek(pos,whence=whence) + + def close(self): + self.mt.stop() + \ No newline at end of file diff --git a/tribler-mod/Tribler/Core/Video/PiecePickerStreaming.py b/tribler-mod/Tribler/Core/Video/PiecePickerStreaming.py new file mode 100644 index 0000000..192250c --- /dev/null +++ b/tribler-mod/Tribler/Core/Video/PiecePickerStreaming.py @@ -0,0 +1,649 @@ +from time import localtime, strftime + +# wRIsten by Jan David Mol, Arno Bakker +# see LICENSE.txt for license information + +import sys +import time +import random +from traceback import print_exc + +from Tribler.Core.BitTornado.BT1.PiecePicker import PiecePicker + +if __debug__: + from Tribler.Core.BitTornado.BT1.Downloader import print_chunks + +# percent piece loss to emulate -- we just don't request this percentage of the pieces +# only implemented for live streaming +PIECELOSS = 0 + +DEBUG = True #False +DEBUG_CHUNKS = True +DEBUGPP = False + +def rarest_first( has_dict, rarity_list, filter = lambda x: True ): + """ Select the rarest of pieces in has_dict, according + to the rarities in rarity_list. Breaks ties uniformly + at random. Additionally, `filter' is applied to select + the pieces we can return. 
""" + + """ Strategy: + - `choice' is the choice so far + - `n' is the number of pieces we could choose from so far + - `rarity' is the rarity of the choice so far + + Every time we see a rarer piece, we reset our choice. + Every time we see a piece of the same rarity we're looking for, + we select it (overriding the previous choice) with probability 1/n. + This leads to a uniformly selected piece in one pass, be it that + we need more random numbers than when doing two passes. """ + + choice = None + rarity = None + n = 0 + + for k in (x for x in has_dict if filter(x)): + r = rarity_list[k] + + if rarity is None or r < rarity: + rarity = r + n = 1 + choice = k + elif r == rarity: + n += 1 + if random.uniform(0,n) == 0: # uniform selects from [0,n) + choice = k + + return choice + +class PiecePickerStreaming(PiecePicker): + """ Implements piece picking for streaming video. Keeps track of playback + point and avoids requesting obsolete pieces. """ + + # order of initialisation and important function calls + # PiecePicker.__init__ (by BitTornado.BT1Download.__init__) + # PiecePicker.complete (by hash checker, for pieces on disk) + # MovieSelector.__init__ + # PiecePicker.set_download_range (indirectly by MovieSelector.__init__) + # MovieOnDemandTransporter.__init__ (by BitTornado.BT1Download.startEngine) + # PiecePicker.set_bitrate (by MovieOnDemandTransporter) + # PiecePicker.set_transporter (by MovieOnDemandTransporter) + # + # PiecePicker._next (once connections are set up) + # + # PiecePicker.complete (by hash checker, for pieces received) + + # relative size of mid-priority set + MU = 4 + + def __init__(self, numpieces, + rarest_first_cutoff = 1, rarest_first_priority_cutoff = 3, + priority_step = 20, helper = None, rate_predictor = None, piecesize = 0): + PiecePicker.__init__( self, numpieces, rarest_first_cutoff, rarest_first_priority_cutoff, + priority_step, helper, rate_predictor ) + + # maximum existing piece number, to avoid scanning beyond it in next() + self.maxhave = 0 + + # some statistics + self.stats = {} + self.stats["high"] = 0 + self.stats["mid"] = 0 + self.stats["low"] = 0 + + # playback module + self.transporter = None + + # self.outstanding_requests contains (piece-id, begin, + # length):timestamp pairs for each outstanding request. + self.outstanding_requests = {} + + # The playing_delay and buffering_delay give three values + # (min, max, offeset) in seconds. + # + # The min tells how long before the cancel policy is allowed + # to kick in. We can not expect to receive a piece instantly, + # so we have to wait this time before having a download speed + # estimation. + # + # The max tells how long before we cancel the request. The + # request may also be canceled because the chunk will not be + # completed given the current download speed. + # + # The offset gives a grace period that is taken into account + # when choosing to cancel a request. 
For instance, when the + # peer download speed is to low to receive the chunk within 10 + # seconds, a grace offset of 15 would ensure that the chunk is + # NOT canceled (usefull while buffering) + self.playing_delay = (5, 20, -0.5) + self.buffering_delay = (7.5, 30, 10) + + def set_transporter(self, transporter): + self.transporter = transporter + + # update its information -- pieces read from disk + if not self.videostatus.live_streaming: + for i in xrange(self.videostatus.first_piece,self.videostatus.last_piece+1): + if self.has[i]: + self.transporter.complete( i, downloaded=False ) + + def set_videostatus(self,videostatus): + """ Download in a wrap-around fashion between pieces [0,numpieces). + Look at most delta pieces ahead from download_range[0]. + """ + self.videostatus = videostatus + videostatus.add_playback_pos_observer( self.change_playback_pos ) + + def is_interesting(self,piece): + if PIECELOSS and piece % 100 < PIECELOSS: + return False + + if self.has[piece]: + return False + + if not self.videostatus or self.videostatus.in_download_range( piece ): + return True + + return False + + def change_playback_pos(self, oldpos, newpos): + if oldpos is None: + # (re)initialise + valid = self.is_interesting + + for d in self.peer_connections.values(): + interesting = {} + has = d["connection"].download.have + for i in xrange(self.videostatus.first_piece,self.videostatus.last_piece+1): + if has[i] and valid(i): + interesting[i] = 1 + + d["interesting"] = interesting + else: + # playback position incremented -- remove timed out piece + for d in self.peer_connections.values(): + d["interesting"].pop(oldpos,0) + + def got_have(self, piece, connection=None): + # if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PiecePickerStreaming: got_have:",piece + self.maxhave = max(self.maxhave,piece) + PiecePicker.got_have( self, piece, connection ) + if self.transporter: + self.transporter.got_have( piece ) + + if self.is_interesting(piece): + self.peer_connections[connection]["interesting"][piece] = 1 + + def got_seed(self): + self.maxhave = self.numpieces + PiecePicker.got_seed( self ) + + def lost_have(self, piece): + PiecePicker.lost_have( self, piece ) + + def got_peer(self, connection): + PiecePicker.got_peer( self, connection ) + + self.peer_connections[connection]["interesting"] = {} + + def lost_peer(self, connection): + PiecePicker.lost_peer( self, connection ) + + def got_piece(self, *request): + if request in self.outstanding_requests: + del self.outstanding_requests[request] + if self.transporter: + self.transporter.got_piece(*request) + + def complete(self, piece): + # if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PiecePickerStreaming: complete:",piece + PiecePicker.complete( self, piece ) + if self.transporter: + self.transporter.complete( piece ) + + for request in self.outstanding_requests.keys(): + if request[0] == piece: + del self.outstanding_requests[request] + + # don't consider this piece anymore + for d in self.peer_connections.itervalues(): + d["interesting"].pop(piece,0) + + def num_nonempty_neighbours(self): + # return #neighbours who have something + return len( [c for c in self.peer_connections if c.download.have.numfalse < c.download.have.length] ) + + def pos_is_sustainable(self,fudge=2): + """ + Returns whether we have enough data around us to support the current playback position. + If not, playback should pause, stall or reinitialised when pieces are lost. 
+ """ + vs = self.videostatus + + # only holds for live streaming for now. theoretically, vod can have the same problem + # since data can be seeded in a 'live' fashion + if not vs.live_streaming: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "PiecePickerStreaming: pos is sustainable: not streaming live" + return True + + # We assume the maximum piece number that is available at at least half of the neighbours + # to be sustainable. Although we only need a fixed number of neighbours with enough bandwidth, + # such neighbours may depart, hence we choose a relative trade-off. + + # this means that our current playback position is sustainable if any future piece + # is owned by at least half of the peers + + # ignore peers which have nothing + numconn = self.num_nonempty_neighbours() + + if not numconn: + # not sustainable, but nothing we can do. Return True to avoid pausing + # and getting out of sync. + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "PiecePickerStreaming: pos is sustainable: no neighbours with pieces" + return True + + half = max( 1, numconn/2 ) + skip = fudge # ignore the first 'fudge' pieces + + for x in vs.generate_range( vs.download_range() ): + if skip > 0: + skip -= 1 + elif self.numhaves[x] >= half: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "PiecePickerStreaming: pos is sustainable: piece %s @ %s>%s peers (fudge=%s)" % (x,self.numhaves[x],half,fudge) + return True + else: + pass + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "PiecePickerStreaming: pos is NOT sustainable playpos=%s fudge=%s numconn=%s half=%s numpeers=%s %s" % (vs.playback_pos,fudge,numconn,half,len(self.peer_connections),[x.get_ip() for x in self.peer_connections]) + + # too few neighbours own the future pieces. it's wise to pause and let neighbours catch up + # with us + return False + + + # next: selects next piece to download. adjusts wantfunc with filter for streaming; calls + # _next: selects next piece to download. completes partial downloads first, if needed, otherwise calls + # next_new: selects next piece to download. override this with the piece picking policy + + def next(self, haves, wantfunc, sdownload, complete_first = False, helper_con = False, slowpieces=[], willrequest=True,connection=None): + def newwantfunc( piece ): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","S",self.streaming_piece_filter( piece ),"!sP",not (piece in slowpieces),"w",wantfunc( piece ) + return not (piece in slowpieces) and wantfunc( piece ) + + # fallback: original piece picker + p = PiecePicker.next(self, haves, newwantfunc, sdownload, complete_first, helper_con, slowpieces=slowpieces, willrequest=willrequest,connection=connection) + if DEBUGPP and self.videostatus.prebuffering: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PiecePickerStreaming: original PP.next returns",p + if p is None and not self.videostatus.live_streaming: + # When the file we selected from a multi-file torrent is complete, + # we won't request anymore pieces, so the normal way of detecting + # we're done is not working and we won't tell the video player + # we're playable. Do it here instead. + self.transporter.notify_playable() + return p + + def _next(self, haves, wantfunc, complete_first, helper_con, willrequest=True, connection=None): + """ First, complete any partials if needed. Otherwise, select a new piece. 
""" + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PiecePickerStreaming: complete_first is",complete_first,"started",self.started + + # cutoff = True: random mode + # False: rarest-first mode + cutoff = self.numgot < self.rarest_first_cutoff + + # whether to complete existing partials first -- do so before the + # cutoff, or if forced by complete_first, but not for seeds. + #complete_first = (complete_first or cutoff) and not haves.complete() + complete_first = (complete_first or cutoff) + + # most interesting piece + best = None + + # interest level of best piece + bestnum = 2 ** 30 + + # select piece we started to download with best interest index. + for i in self.started: +# 2fastbt_ + if haves[i] and wantfunc(i) and (self.helper is None or helper_con or not self.helper.is_ignored(i)): +# _2fastbt + if self.level_in_interests[i] < bestnum: + best = i + bestnum = self.level_in_interests[i] + + if best is not None: + # found a piece -- return it if we are completing partials first + # or if there is a cutoff + if complete_first or (cutoff and len(self.interests) > self.cutoff): + return best + + p = self.next_new(haves, wantfunc, complete_first, helper_con,willrequest=willrequest,connection=connection) + # if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PiecePickerStreaming: next_new returns",p + return p + + def check_outstanding_requests(self, downloads): + if not self.transporter: + return + + now = time.time() + cancel_requests = [] + in_high_range = self.videostatus.in_high_range + playing_mode = self.videostatus.playing and not self.videostatus.paused + piece_due = self.transporter.piece_due + + if playing_mode: + # playing mode + min_delay, max_delay, offset_delay = self.playing_delay + else: + # buffering mode + min_delay, max_delay, offset_delay = self.buffering_delay + + for download in downloads: + + total_length = 0 + download_rate = download.get_short_term_rate() + for piece_id, begin, length in download.active_requests: + # select policy for this piece + try: + time_request = self.outstanding_requests[(piece_id, begin, length)] + except KeyError: + continue + + # add the length of this chunk to the total of bytes + # that needs to be downloaded + total_length += length + + # each request must be allowed at least some + # minimal time to be handled + if now < time_request + min_delay: + continue + + # high-priority pieces are eligable for + # cancelation. Others are not. They will eventually be + # eligable as they become important for playback. 
+ if in_high_range(piece_id): + if download_rate == 0: + # we have not received anything in the last min_delay seconds + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "PiecePickerStreaming: download not started yet for piece", piece_id, "chunk", begin, "on", download.ip + cancel_requests.append((piece_id, begin, length)) + download.bad_performance_counter += 1 + + else: + if playing_mode: + time_until_deadline = min(piece_due(piece_id), time_request + max_delay - now) + else: + time_until_deadline = time_request + max_delay - now + time_until_download = total_length / download_rate + + # we have to cancel when the deadline can not be met + if time_until_deadline < time_until_download - offset_delay: + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "PiecePickerStreaming: download speed too slow for piece", piece_id, "chunk", begin, "on", download.ip, "Deadline in", time_until_deadline, "while estimated download in", time_until_download + cancel_requests.append((piece_id, begin, length)) + + # Cancel all requests that are too late + if cancel_requests: + try: + self.downloader.cancel_requests(cancel_requests) + except: + print_exc() + + if __debug__: + if DEBUG_CHUNKS: + print_chunks(self.downloader, list(self.videostatus.generate_high_range()), compact=False) + + def requested(self, *request): + self.outstanding_requests[request] = time.time() + return PiecePicker.requested(self, *request) + + def next_new(self, haves, wantfunc, complete_first, helper_con, willrequest=True, connection=None): + """ Determine which piece to download next from a peer. + + haves: set of pieces owned by that peer + wantfunc: custom piece filter + complete_first: whether to complete partial pieces first + helper_con: + willrequest: whether the returned piece will actually be requested + + """ + + vs = self.videostatus + + if vs.live_streaming: + # first, make sure we know where to start downloading + if vs.live_startpos is None: + self.transporter.calc_live_startpos( self.transporter.max_prebuf_packets, False ) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: pp determined startpos of",vs.live_startpos + + # select any interesting piece, rarest first + if connection: + # Without 'connection', we don't know who we will request from. + return rarest_first( self.peer_connections[connection]["interesting"], self.numhaves, wantfunc ) + + def pick_first( f, t ): # no shuffle + for i in vs.generate_range((f,t)): + # Is there a piece in the range the peer has? + # Is there a piece in the range we don't have? + if not haves[i] or self.has[i]: + continue + + if not wantfunc(i): # Is there a piece in the range we want? + continue + + if self.helper is None or helper_con or not self.helper.is_ignored(i): + return i + + return None + + def pick_rarest_loop_over_small_range(f,t,shuffle=True): + # Arno: pick_rarest is way expensive for the midrange thing, + # therefore loop over the list of pieces we want and see + # if it's avail, rather than looping over the list of all + # pieces to see if one falls in the (f,t) range. 
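
A side note on the rarest_first() helper defined at the top of this file: its docstring promises a uniform tie-break by replacing the running choice with probability 1/n, but comparing the float returned by random.uniform(0, n) against 0 makes a replacement vanishingly unlikely, so ties effectively resolve to the first piece seen; random.uniform(0, n) < 1 would give the documented behaviour. A self-contained sketch of the intended single-pass selection follows (pick_min_uniform is an illustrative name, not part of Tribler).

# Illustrative sketch, not part of the patch.
import random

def pick_min_uniform(items, score):
    # Single pass; among the items with minimal score, each ends up chosen
    # with probability 1/n (reservoir sampling with a reservoir of one).
    choice, best, n = None, None, 0
    for item in items:
        s = score(item)
        if best is None or s < best:
            best, n, choice = s, 1, item
        elif s == best:
            n += 1
            if random.uniform(0, n) < 1:   # replace the running choice w.p. 1/n
                choice = item
    return choice

rarity = {0: 3, 1: 1, 2: 1, 3: 2}
print pick_min_uniform(rarity.keys(), rarity.get)   # prints 1 or 2
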
+ # + xr = vs.generate_range((f,t)) + r = None + if shuffle: + # xr is an xrange generator, need real values to shuffle + r = [] + r.extend(xr) + random.shuffle(r) + else: + r = xr + for i in r: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","H", + if not haves[i] or self.has[i]: + continue + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","W", + if not wantfunc(i): + continue + + if self.helper is None or helper_con or not self.helper.is_ignored(i): + return i + + return None + + def pick_rarest_small_range(f,t): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","choice small",f,t + d = vs.dist_range(f,t) + + for level in xrange(len(self.interests)): + piecelist = self.interests[level] + + if len(piecelist) > d: + #if level+1 == len(self.interests): + # Arno: Lowest level priorities / long piecelist. + # This avoids doing a scan that goes over the entire list + # of pieces when we already have the hi and/or mid ranges. + + # Arno, 2008-05-21: Apparently, the big list is not always + # at the lowest level, hacked distance metric to determine + # whether to use slow or fast method. + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","choice QUICK" + return pick_rarest_loop_over_small_range(f,t) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","choice Q",diffstr,"l",level,"s",len(piecelist) + else: + # Higher priorities / short lists + for i in piecelist: + if not vs.in_range( f, t, i ): + continue + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","H", + if not haves[i] or self.has[i]: + continue + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","W", + if not wantfunc(i): + continue + + if self.helper is None or helper_con or not self.helper.is_ignored(i): + return i + + return None + + def pick_rarest(f,t): #BitTorrent already shuffles the self.interests for us + for piecelist in self.interests: + for i in piecelist: + if not vs.in_range( f, t, i ): + continue + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","H", + if not haves[i] or self.has[i]: + continue + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","W", + if not wantfunc(i): + continue + + if self.helper is None or helper_con or not self.helper.is_ignored(i): + return i + + return None + + first, last = vs.download_range() + priority_first, priority_last = vs.get_high_range() + if priority_first != priority_last: + first = priority_first + highprob_cutoff = vs.normalize(priority_last + 1) + midprob_cutoff = vs.normalize(first + self.MU * vs.get_range_length(first, last)) + else: + highprob_cutoff = last + midprob_cutoff = vs.normalize(first + self.MU * vs.high_prob_min_pieces) + # h = vs.time_to_pieces( self.HIGH_PROB_SETSIZE ) + # highprob_cutoff = vs.normalize(first + max(h, self.HIGH_PROB_MIN_PIECES)) + # midprob_cutoff = vs.normalize(first + max(self.MU * h, self.HIGH_PROB_MIN_PIECES)) + + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Prio %s:%s:%s" % (first, highprob_cutoff, midprob_cutoff), highprob_cutoff - first, midprob_cutoff - highprob_cutoff + + # first,last = vs.download_range() + # if vs.wraparound: + # max_lookahead = vs.wraparound_delta + # else: + # max_lookahead = vs.last_piece - vs.playback_pos + + # highprob_cutoff = vs.normalize( first + min( h, max_lookahead ) ) + # midprob_cutoff = vs.normalize( first + min( h + self.MU * h, max_lookahead ) ) + + if vs.live_streaming: + # for live playback 
consider peers to be bad when they miss the deadline 5 times + allow_based_on_performance = connection.download.bad_performance_counter < 5 + else: + # for VOD playback consider peers to be bad when they miss the deadline 1 time + allow_based_on_performance = connection.download.bad_performance_counter < 1 + + if vs.prebuffering: + f = first + t = vs.normalize( first + self.transporter.max_prebuf_packets ) + choice = pick_rarest_small_range(f,t) + type = "high" + else: + choice = None + + if choice is None: + if vs.live_streaming: + choice = pick_rarest_small_range( first, highprob_cutoff ) + else: + choice = pick_first( first, highprob_cutoff ) + type = "high" + + # it is possible that the performance of this peer prohibits + # us from selecting this piece... + if not allow_based_on_performance: + high_priority_choice = choice + choice = None + + if choice is None: + choice = pick_rarest_small_range( highprob_cutoff, midprob_cutoff ) + type = "mid" + + if choice is None: + if vs.live_streaming: + # Want: loop over what peer has avail, respecting piece priorities + # (could ignore those for live). + # + # Attempt 1: loop over range (which is 25% of window (see + # VideoStatus), ignoring priorities, no shuffle. + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: choice low RANGE",midprob_cutoff,last + #choice = pick_rarest_loop_over_small_range(midprob_cutoff,last,shuffle=False) + pass + else: + choice = pick_rarest( midprob_cutoff, last ) + type = "low" + + if choice and willrequest: + self.stats[type] += 1 + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: picked piece %s [type=%s] [%d,%d,%d,%d]" % (`choice`,type,first,highprob_cutoff,midprob_cutoff,last) + + # 12/05/09, boudewijn: (1) The bad_performance_counter is + # incremented whenever a piece download failed and decremented + # whenever is succeeds. (2) A peer with a positive + # bad_performance_counter is only allowd to pick low-priority + # pieces. (Conclusion) When all low-priority pieces are + # downloaded the client hangs when one or more high-priority + # pieces are required and if all peers have a positive + # bad_performance_counter. + if choice is None and not allow_based_on_performance: + # ensure that there is another known peer with a + # non-positive bad_performance_counter that has the piece + # that we would pick from the high-priority set for this + # connection. + + if high_priority_choice: + availability = 0 + for download in self.downloader.downloads: + if download.have[high_priority_choice] and not download.bad_performance_counter: + availability += 1 + + if not availability: + # no other connection has it... then ignore the + # bad_performance_counter advice and attempt to + # download it from this connection anyway + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "vod: the bad_performance_counter says this is a bad peer... but we have nothing better... requesting piece", high_priority_choice, "regardless." 
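
Stepping back, next_new() above tries the priority windows in a fixed order: the prebuffer window first, then the high-priority window (in playback order for VOD, rarest-first for live), then the mid window, then the low window (skipped for live). The sketch below restates that order only; it leaves out the wrap-around normalisation done by vs.normalize() and the bad_performance_counter handling, and pick_streaming_piece and its stand-in callables are illustrative, not part of Tribler.

# Illustrative sketch, not part of the patch. Each window is a (begin, end)
# pair; the pick_* callables stand in for the closures defined in next_new()
# and return a piece index or None.
def pick_streaming_piece(pick_in_order, pick_rarest, prebuf, high, mid, low,
                         prebuffering, live):
    if prebuffering:
        choice = pick_rarest(*prebuf)          # fill the prebuffer as fast as possible
        if choice is not None:
            return choice, "high"
    choice = pick_rarest(*high) if live else pick_in_order(*high)
    if choice is not None:
        return choice, "high"
    choice = pick_rarest(*mid)
    if choice is not None:
        return choice, "mid"
    if not live:                               # live streaming leaves the low window alone
        choice = pick_rarest(*low)
        if choice is not None:
            return choice, "low"
    return None, None

# Trivial stand-ins: in-order picking returns the start of the window.
print pick_streaming_piece(lambda f, t: f, lambda f, t: None, (0, 4), (0, 10),
                           (10, 40), (40, 100), prebuffering=False, live=False)
# -> (0, 'high')
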
+ choice = high_priority_choice + + return choice + + def is_valid_piece(self,piece): + return self.videostatus.in_valid_range(piece) + + def get_valid_range_iterator(self): + if self.videostatus.live_streaming and self.videostatus.get_live_startpos() is None: + # Not hooked in, so cannot provide a sensible download range + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PiecePickerStreaming: Not hooked in, valid range set to total" + return PiecePicker.get_valid_range_iterator(self) + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PiecePickerStreaming: Live hooked in, or VOD, valid range set to subset" + first,last = self.videostatus.download_range() + return self.videostatus.generate_range((first,last)) + +PiecePickerVOD = PiecePickerStreaming diff --git a/tribler-mod/Tribler/Core/Video/PiecePickerStreaming.py.bak b/tribler-mod/Tribler/Core/Video/PiecePickerStreaming.py.bak new file mode 100644 index 0000000..3efeaf2 --- /dev/null +++ b/tribler-mod/Tribler/Core/Video/PiecePickerStreaming.py.bak @@ -0,0 +1,648 @@ + +# wRIsten by Jan David Mol, Arno Bakker +# see LICENSE.txt for license information + +import sys +import time +import random +from traceback import print_exc + +from Tribler.Core.BitTornado.BT1.PiecePicker import PiecePicker + +if __debug__: + from Tribler.Core.BitTornado.BT1.Downloader import print_chunks + +# percent piece loss to emulate -- we just don't request this percentage of the pieces +# only implemented for live streaming +PIECELOSS = 0 + +DEBUG = True #False +DEBUG_CHUNKS = True +DEBUGPP = False + +def rarest_first( has_dict, rarity_list, filter = lambda x: True ): + """ Select the rarest of pieces in has_dict, according + to the rarities in rarity_list. Breaks ties uniformly + at random. Additionally, `filter' is applied to select + the pieces we can return. """ + + """ Strategy: + - `choice' is the choice so far + - `n' is the number of pieces we could choose from so far + - `rarity' is the rarity of the choice so far + + Every time we see a rarer piece, we reset our choice. + Every time we see a piece of the same rarity we're looking for, + we select it (overriding the previous choice) with probability 1/n. + This leads to a uniformly selected piece in one pass, be it that + we need more random numbers than when doing two passes. """ + + choice = None + rarity = None + n = 0 + + for k in (x for x in has_dict if filter(x)): + r = rarity_list[k] + + if rarity is None or r < rarity: + rarity = r + n = 1 + choice = k + elif r == rarity: + n += 1 + if random.uniform(0,n) == 0: # uniform selects from [0,n) + choice = k + + return choice + +class PiecePickerStreaming(PiecePicker): + """ Implements piece picking for streaming video. Keeps track of playback + point and avoids requesting obsolete pieces. 
""" + + # order of initialisation and important function calls + # PiecePicker.__init__ (by BitTornado.BT1Download.__init__) + # PiecePicker.complete (by hash checker, for pieces on disk) + # MovieSelector.__init__ + # PiecePicker.set_download_range (indirectly by MovieSelector.__init__) + # MovieOnDemandTransporter.__init__ (by BitTornado.BT1Download.startEngine) + # PiecePicker.set_bitrate (by MovieOnDemandTransporter) + # PiecePicker.set_transporter (by MovieOnDemandTransporter) + # + # PiecePicker._next (once connections are set up) + # + # PiecePicker.complete (by hash checker, for pieces received) + + # relative size of mid-priority set + MU = 4 + + def __init__(self, numpieces, + rarest_first_cutoff = 1, rarest_first_priority_cutoff = 3, + priority_step = 20, helper = None, rate_predictor = None, piecesize = 0): + PiecePicker.__init__( self, numpieces, rarest_first_cutoff, rarest_first_priority_cutoff, + priority_step, helper, rate_predictor ) + + # maximum existing piece number, to avoid scanning beyond it in next() + self.maxhave = 0 + + # some statistics + self.stats = {} + self.stats["high"] = 0 + self.stats["mid"] = 0 + self.stats["low"] = 0 + + # playback module + self.transporter = None + + # self.outstanding_requests contains (piece-id, begin, + # length):timestamp pairs for each outstanding request. + self.outstanding_requests = {} + + # The playing_delay and buffering_delay give three values + # (min, max, offeset) in seconds. + # + # The min tells how long before the cancel policy is allowed + # to kick in. We can not expect to receive a piece instantly, + # so we have to wait this time before having a download speed + # estimation. + # + # The max tells how long before we cancel the request. The + # request may also be canceled because the chunk will not be + # completed given the current download speed. + # + # The offset gives a grace period that is taken into account + # when choosing to cancel a request. For instance, when the + # peer download speed is to low to receive the chunk within 10 + # seconds, a grace offset of 15 would ensure that the chunk is + # NOT canceled (usefull while buffering) + self.playing_delay = (5, 20, -0.5) + self.buffering_delay = (7.5, 30, 10) + + def set_transporter(self, transporter): + self.transporter = transporter + + # update its information -- pieces read from disk + if not self.videostatus.live_streaming: + for i in xrange(self.videostatus.first_piece,self.videostatus.last_piece+1): + if self.has[i]: + self.transporter.complete( i, downloaded=False ) + + def set_videostatus(self,videostatus): + """ Download in a wrap-around fashion between pieces [0,numpieces). + Look at most delta pieces ahead from download_range[0]. 
+ """ + self.videostatus = videostatus + videostatus.add_playback_pos_observer( self.change_playback_pos ) + + def is_interesting(self,piece): + if PIECELOSS and piece % 100 < PIECELOSS: + return False + + if self.has[piece]: + return False + + if not self.videostatus or self.videostatus.in_download_range( piece ): + return True + + return False + + def change_playback_pos(self, oldpos, newpos): + if oldpos is None: + # (re)initialise + valid = self.is_interesting + + for d in self.peer_connections.values(): + interesting = {} + has = d["connection"].download.have + for i in xrange(self.videostatus.first_piece,self.videostatus.last_piece+1): + if has[i] and valid(i): + interesting[i] = 1 + + d["interesting"] = interesting + else: + # playback position incremented -- remove timed out piece + for d in self.peer_connections.values(): + d["interesting"].pop(oldpos,0) + + def got_have(self, piece, connection=None): + # if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PiecePickerStreaming: got_have:",piece + self.maxhave = max(self.maxhave,piece) + PiecePicker.got_have( self, piece, connection ) + if self.transporter: + self.transporter.got_have( piece ) + + if self.is_interesting(piece): + self.peer_connections[connection]["interesting"][piece] = 1 + + def got_seed(self): + self.maxhave = self.numpieces + PiecePicker.got_seed( self ) + + def lost_have(self, piece): + PiecePicker.lost_have( self, piece ) + + def got_peer(self, connection): + PiecePicker.got_peer( self, connection ) + + self.peer_connections[connection]["interesting"] = {} + + def lost_peer(self, connection): + PiecePicker.lost_peer( self, connection ) + + def got_piece(self, *request): + if request in self.outstanding_requests: + del self.outstanding_requests[request] + if self.transporter: + self.transporter.got_piece(*request) + + def complete(self, piece): + # if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PiecePickerStreaming: complete:",piece + PiecePicker.complete( self, piece ) + if self.transporter: + self.transporter.complete( piece ) + + for request in self.outstanding_requests.keys(): + if request[0] == piece: + del self.outstanding_requests[request] + + # don't consider this piece anymore + for d in self.peer_connections.itervalues(): + d["interesting"].pop(piece,0) + + def num_nonempty_neighbours(self): + # return #neighbours who have something + return len( [c for c in self.peer_connections if c.download.have.numfalse < c.download.have.length] ) + + def pos_is_sustainable(self,fudge=2): + """ + Returns whether we have enough data around us to support the current playback position. + If not, playback should pause, stall or reinitialised when pieces are lost. + """ + vs = self.videostatus + + # only holds for live streaming for now. theoretically, vod can have the same problem + # since data can be seeded in a 'live' fashion + if not vs.live_streaming: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "PiecePickerStreaming: pos is sustainable: not streaming live" + return True + + # We assume the maximum piece number that is available at at least half of the neighbours + # to be sustainable. Although we only need a fixed number of neighbours with enough bandwidth, + # such neighbours may depart, hence we choose a relative trade-off. 
+ + # this means that our current playback position is sustainable if any future piece + # is owned by at least half of the peers + + # ignore peers which have nothing + numconn = self.num_nonempty_neighbours() + + if not numconn: + # not sustainable, but nothing we can do. Return True to avoid pausing + # and getting out of sync. + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "PiecePickerStreaming: pos is sustainable: no neighbours with pieces" + return True + + half = max( 1, numconn/2 ) + skip = fudge # ignore the first 'fudge' pieces + + for x in vs.generate_range( vs.download_range() ): + if skip > 0: + skip -= 1 + elif self.numhaves[x] >= half: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "PiecePickerStreaming: pos is sustainable: piece %s @ %s>%s peers (fudge=%s)" % (x,self.numhaves[x],half,fudge) + return True + else: + pass + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "PiecePickerStreaming: pos is NOT sustainable playpos=%s fudge=%s numconn=%s half=%s numpeers=%s %s" % (vs.playback_pos,fudge,numconn,half,len(self.peer_connections),[x.get_ip() for x in self.peer_connections]) + + # too few neighbours own the future pieces. it's wise to pause and let neighbours catch up + # with us + return False + + + # next: selects next piece to download. adjusts wantfunc with filter for streaming; calls + # _next: selects next piece to download. completes partial downloads first, if needed, otherwise calls + # next_new: selects next piece to download. override this with the piece picking policy + + def next(self, haves, wantfunc, sdownload, complete_first = False, helper_con = False, slowpieces=[], willrequest=True,connection=None): + def newwantfunc( piece ): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","S",self.streaming_piece_filter( piece ),"!sP",not (piece in slowpieces),"w",wantfunc( piece ) + return not (piece in slowpieces) and wantfunc( piece ) + + # fallback: original piece picker + p = PiecePicker.next(self, haves, newwantfunc, sdownload, complete_first, helper_con, slowpieces=slowpieces, willrequest=willrequest,connection=connection) + if DEBUGPP and self.videostatus.prebuffering: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PiecePickerStreaming: original PP.next returns",p + if p is None and not self.videostatus.live_streaming: + # When the file we selected from a multi-file torrent is complete, + # we won't request anymore pieces, so the normal way of detecting + # we're done is not working and we won't tell the video player + # we're playable. Do it here instead. + self.transporter.notify_playable() + return p + + def _next(self, haves, wantfunc, complete_first, helper_con, willrequest=True, connection=None): + """ First, complete any partials if needed. Otherwise, select a new piece. """ + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PiecePickerStreaming: complete_first is",complete_first,"started",self.started + + # cutoff = True: random mode + # False: rarest-first mode + cutoff = self.numgot < self.rarest_first_cutoff + + # whether to complete existing partials first -- do so before the + # cutoff, or if forced by complete_first, but not for seeds. 
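The `newwantfunc` closure in next() above follows a simple pattern: the streaming picker narrows the caller's piece filter rather than replacing it. A minimal sketch of that composition, with hypothetical names that are not taken from this patch:

def exclude_slow(wantfunc, slowpieces):
    """Narrow an existing piece filter: pieces listed in slowpieces are
    never picked here, everything else defers to the original filter."""
    slow = set(slowpieces)
    return lambda piece: piece not in slow and wantfunc(piece)

# usage: the base filter wants whatever we do not have yet
have = {0: True, 1: False, 2: False}
want = exclude_slow(lambda p: not have[p], slowpieces=[2])
assert want(1) and not want(0) and not want(2)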
+ #complete_first = (complete_first or cutoff) and not haves.complete() + complete_first = (complete_first or cutoff) + + # most interesting piece + best = None + + # interest level of best piece + bestnum = 2 ** 30 + + # select piece we started to download with best interest index. + for i in self.started: +# 2fastbt_ + if haves[i] and wantfunc(i) and (self.helper is None or helper_con or not self.helper.is_ignored(i)): +# _2fastbt + if self.level_in_interests[i] < bestnum: + best = i + bestnum = self.level_in_interests[i] + + if best is not None: + # found a piece -- return it if we are completing partials first + # or if there is a cutoff + if complete_first or (cutoff and len(self.interests) > self.cutoff): + return best + + p = self.next_new(haves, wantfunc, complete_first, helper_con,willrequest=willrequest,connection=connection) + # if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PiecePickerStreaming: next_new returns",p + return p + + def check_outstanding_requests(self, downloads): + if not self.transporter: + return + + now = time.time() + cancel_requests = [] + in_high_range = self.videostatus.in_high_range + playing_mode = self.videostatus.playing and not self.videostatus.paused + piece_due = self.transporter.piece_due + + if playing_mode: + # playing mode + min_delay, max_delay, offset_delay = self.playing_delay + else: + # buffering mode + min_delay, max_delay, offset_delay = self.buffering_delay + + for download in downloads: + + total_length = 0 + download_rate = download.get_short_term_rate() + for piece_id, begin, length in download.active_requests: + # select policy for this piece + try: + time_request = self.outstanding_requests[(piece_id, begin, length)] + except KeyError: + continue + + # add the length of this chunk to the total of bytes + # that needs to be downloaded + total_length += length + + # each request must be allowed at least some + # minimal time to be handled + if now < time_request + min_delay: + continue + + # high-priority pieces are eligable for + # cancelation. Others are not. They will eventually be + # eligable as they become important for playback. 
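Condensed into a single predicate, the policy implemented by the branches below looks roughly like this. It is a sketch with hypothetical names; `time_until_piece_due` stands in for `piece_due(piece_id)` while playing and can be passed as a very large value while buffering:

def should_cancel(now, req_time, download_rate, bytes_outstanding,
                  time_until_piece_due, delays, in_high_range):
    """Decide whether one outstanding chunk request should be cancelled."""
    min_delay, max_delay, offset_delay = delays
    if now < req_time + min_delay:
        return False              # every request gets a minimal handling time
    if not in_high_range:
        return False              # only high-priority chunks are eligible
    if download_rate == 0:
        return True               # nothing received since min_delay expired
    time_until_deadline = min(time_until_piece_due, req_time + max_delay - now)
    time_until_download = bytes_outstanding / float(download_rate)
    return time_until_deadline < time_until_download - offset_delay

# e.g. a 16 KB chunk requested 8 s ago from a peer doing 1 KB/s, piece due in 5 s:
# should_cancel(108.0, 100.0, 1024.0, 16384, 5.0, (5, 20, -0.5), True) -> True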
+ if in_high_range(piece_id): + if download_rate == 0: + # we have not received anything in the last min_delay seconds + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "PiecePickerStreaming: download not started yet for piece", piece_id, "chunk", begin, "on", download.ip + cancel_requests.append((piece_id, begin, length)) + download.bad_performance_counter += 1 + + else: + if playing_mode: + time_until_deadline = min(piece_due(piece_id), time_request + max_delay - now) + else: + time_until_deadline = time_request + max_delay - now + time_until_download = total_length / download_rate + + # we have to cancel when the deadline can not be met + if time_until_deadline < time_until_download - offset_delay: + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "PiecePickerStreaming: download speed too slow for piece", piece_id, "chunk", begin, "on", download.ip, "Deadline in", time_until_deadline, "while estimated download in", time_until_download + cancel_requests.append((piece_id, begin, length)) + + # Cancel all requests that are too late + if cancel_requests: + try: + self.downloader.cancel_requests(cancel_requests) + except: + print_exc() + + if __debug__: + if DEBUG_CHUNKS: + print_chunks(self.downloader, list(self.videostatus.generate_high_range()), compact=False) + + def requested(self, *request): + self.outstanding_requests[request] = time.time() + return PiecePicker.requested(self, *request) + + def next_new(self, haves, wantfunc, complete_first, helper_con, willrequest=True, connection=None): + """ Determine which piece to download next from a peer. + + haves: set of pieces owned by that peer + wantfunc: custom piece filter + complete_first: whether to complete partial pieces first + helper_con: + willrequest: whether the returned piece will actually be requested + + """ + + vs = self.videostatus + + if vs.live_streaming: + # first, make sure we know where to start downloading + if vs.live_startpos is None: + self.transporter.calc_live_startpos( self.transporter.max_prebuf_packets, False ) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: pp determined startpos of",vs.live_startpos + + # select any interesting piece, rarest first + if connection: + # Without 'connection', we don't know who we will request from. + return rarest_first( self.peer_connections[connection]["interesting"], self.numhaves, wantfunc ) + + def pick_first( f, t ): # no shuffle + for i in vs.generate_range((f,t)): + # Is there a piece in the range the peer has? + # Is there a piece in the range we don't have? + if not haves[i] or self.has[i]: + continue + + if not wantfunc(i): # Is there a piece in the range we want? + continue + + if self.helper is None or helper_con or not self.helper.is_ignored(i): + return i + + return None + + def pick_rarest_loop_over_small_range(f,t,shuffle=True): + # Arno: pick_rarest is way expensive for the midrange thing, + # therefore loop over the list of pieces we want and see + # if it's avail, rather than looping over the list of all + # pieces to see if one falls in the (f,t) range. 
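The `rarest_first(...)` call a few lines up (used for live streaming when the requesting connection is known) relies on a helper defined elsewhere in this module and not shown in this hunk; a plausible minimal sketch of such a helper, offered as an assumption rather than the patch's actual implementation:

def rarest_first(interesting, numhaves, wantfunc):
    """Among the pieces this peer has and we still find interesting,
    return one with the lowest global availability, or None."""
    best = None
    for piece in interesting:
        if not wantfunc(piece):
            continue
        if best is None or numhaves[piece] < numhaves[best]:
            best = piece
    return best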
+ # + xr = vs.generate_range((f,t)) + r = None + if shuffle: + # xr is an xrange generator, need real values to shuffle + r = [] + r.extend(xr) + random.shuffle(r) + else: + r = xr + for i in r: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","H", + if not haves[i] or self.has[i]: + continue + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","W", + if not wantfunc(i): + continue + + if self.helper is None or helper_con or not self.helper.is_ignored(i): + return i + + return None + + def pick_rarest_small_range(f,t): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","choice small",f,t + d = vs.dist_range(f,t) + + for level in xrange(len(self.interests)): + piecelist = self.interests[level] + + if len(piecelist) > d: + #if level+1 == len(self.interests): + # Arno: Lowest level priorities / long piecelist. + # This avoids doing a scan that goes over the entire list + # of pieces when we already have the hi and/or mid ranges. + + # Arno, 2008-05-21: Apparently, the big list is not always + # at the lowest level, hacked distance metric to determine + # whether to use slow or fast method. + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","choice QUICK" + return pick_rarest_loop_over_small_range(f,t) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","choice Q",diffstr,"l",level,"s",len(piecelist) + else: + # Higher priorities / short lists + for i in piecelist: + if not vs.in_range( f, t, i ): + continue + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","H", + if not haves[i] or self.has[i]: + continue + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","W", + if not wantfunc(i): + continue + + if self.helper is None or helper_con or not self.helper.is_ignored(i): + return i + + return None + + def pick_rarest(f,t): #BitTorrent already shuffles the self.interests for us + for piecelist in self.interests: + for i in piecelist: + if not vs.in_range( f, t, i ): + continue + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","H", + if not haves[i] or self.has[i]: + continue + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","W", + if not wantfunc(i): + continue + + if self.helper is None or helper_con or not self.helper.is_ignored(i): + return i + + return None + + first, last = vs.download_range() + priority_first, priority_last = vs.get_high_range() + if priority_first != priority_last: + first = priority_first + highprob_cutoff = vs.normalize(priority_last + 1) + midprob_cutoff = vs.normalize(first + self.MU * vs.get_range_length(first, last)) + else: + highprob_cutoff = last + midprob_cutoff = vs.normalize(first + self.MU * vs.high_prob_min_pieces) + # h = vs.time_to_pieces( self.HIGH_PROB_SETSIZE ) + # highprob_cutoff = vs.normalize(first + max(h, self.HIGH_PROB_MIN_PIECES)) + # midprob_cutoff = vs.normalize(first + max(self.MU * h, self.HIGH_PROB_MIN_PIECES)) + + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Prio %s:%s:%s" % (first, highprob_cutoff, midprob_cutoff), highprob_cutoff - first, midprob_cutoff - highprob_cutoff + + # first,last = vs.download_range() + # if vs.wraparound: + # max_lookahead = vs.wraparound_delta + # else: + # max_lookahead = vs.last_piece - vs.playback_pos + + # highprob_cutoff = vs.normalize( first + min( h, max_lookahead ) ) + # midprob_cutoff = vs.normalize( first + min( h + self.MU * h, max_lookahead ) ) + + if vs.live_streaming: + # for live playback 
consider peers to be bad when they miss the deadline 5 times + allow_based_on_performance = connection.download.bad_performance_counter < 5 + else: + # for VOD playback consider peers to be bad when they miss the deadline 1 time + allow_based_on_performance = connection.download.bad_performance_counter < 1 + + if vs.prebuffering: + f = first + t = vs.normalize( first + self.transporter.max_prebuf_packets ) + choice = pick_rarest_small_range(f,t) + type = "high" + else: + choice = None + + if choice is None: + if vs.live_streaming: + choice = pick_rarest_small_range( first, highprob_cutoff ) + else: + choice = pick_first( first, highprob_cutoff ) + type = "high" + + # it is possible that the performance of this peer prohibits + # us from selecting this piece... + if not allow_based_on_performance: + high_priority_choice = choice + choice = None + + if choice is None: + choice = pick_rarest_small_range( highprob_cutoff, midprob_cutoff ) + type = "mid" + + if choice is None: + if vs.live_streaming: + # Want: loop over what peer has avail, respecting piece priorities + # (could ignore those for live). + # + # Attempt 1: loop over range (which is 25% of window (see + # VideoStatus), ignoring priorities, no shuffle. + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: choice low RANGE",midprob_cutoff,last + #choice = pick_rarest_loop_over_small_range(midprob_cutoff,last,shuffle=False) + pass + else: + choice = pick_rarest( midprob_cutoff, last ) + type = "low" + + if choice and willrequest: + self.stats[type] += 1 + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: picked piece %s [type=%s] [%d,%d,%d,%d]" % (`choice`,type,first,highprob_cutoff,midprob_cutoff,last) + + # 12/05/09, boudewijn: (1) The bad_performance_counter is + # incremented whenever a piece download failed and decremented + # whenever is succeeds. (2) A peer with a positive + # bad_performance_counter is only allowd to pick low-priority + # pieces. (Conclusion) When all low-priority pieces are + # downloaded the client hangs when one or more high-priority + # pieces are required and if all peers have a positive + # bad_performance_counter. + if choice is None and not allow_based_on_performance: + # ensure that there is another known peer with a + # non-positive bad_performance_counter that has the piece + # that we would pick from the high-priority set for this + # connection. + + if high_priority_choice: + availability = 0 + for download in self.downloader.downloads: + if download.have[high_priority_choice] and not download.bad_performance_counter: + availability += 1 + + if not availability: + # no other connection has it... then ignore the + # bad_performance_counter advice and attempt to + # download it from this connection anyway + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "vod: the bad_performance_counter says this is a bad peer... but we have nothing better... requesting piece", high_priority_choice, "regardless." 
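Reduced to a predicate, the check performed in this fallback branch amounts to the following sketch (hypothetical helper; the `downloads` objects expose `have` and `bad_performance_counter` as in the loop above):

def must_use_bad_peer(piece, downloads):
    """True when no well-performing peer has the piece, so the
    bad_performance_counter advice is ignored to avoid stalling."""
    for d in downloads:
        if d.have[piece] and not d.bad_performance_counter:
            return False      # a better-behaved peer can still supply it
    return True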
+ choice = high_priority_choice + + return choice + + def is_valid_piece(self,piece): + return self.videostatus.in_valid_range(piece) + + def get_valid_range_iterator(self): + if self.videostatus.live_streaming and self.videostatus.get_live_startpos() is None: + # Not hooked in, so cannot provide a sensible download range + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PiecePickerStreaming: Not hooked in, valid range set to total" + return PiecePicker.get_valid_range_iterator(self) + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PiecePickerStreaming: Live hooked in, or VOD, valid range set to subset" + first,last = self.videostatus.download_range() + return self.videostatus.generate_range((first,last)) + +PiecePickerVOD = PiecePickerStreaming diff --git a/tribler-mod/Tribler/Core/Video/VideoOnDemand.py b/tribler-mod/Tribler/Core/Video/VideoOnDemand.py new file mode 100644 index 0000000..2d75757 --- /dev/null +++ b/tribler-mod/Tribler/Core/Video/VideoOnDemand.py @@ -0,0 +1,1573 @@ +from time import localtime, strftime + +# Written by Jan David Mol, Arno Bakker +# see LICENSE.txt for license information + +import sys +from math import ceil +from threading import Condition,currentThread +from traceback import print_exc +from tempfile import mkstemp +import time +import collections +import os +import base64 + +if sys.version.startswith("2.4"): + os.SEEK_SET = 0 + os.SEEK_CUR = 1 + os.SEEK_END = 2 + +import os,sys,time +import re +from Tribler.Core.BitTornado.CurrentRateMeasure import Measure +from Tribler.Core.Video.MovieTransport import MovieTransport,MovieTransportStreamWrapper +from Tribler.Core.simpledefs import * +from Tribler.Core.Video.LiveSourceAuth import ECDSAAuthenticator,AuthStreamWrapper,VariableReadAuthStreamWrapper +from Tribler.Core.CacheDB.SqliteVideoPlaybackStatsCacheDB import VideoPlaybackEventDBHandler, VideoPlaybackInfoDBHandler + +# pull all video data as if a video player was attached +FAKEPLAYBACK = False + +DEBUG = True #False +DEBUGPP = False + +class PieceStats: + """ Keeps track of statistics for each piece as it flows through the system. """ + + def __init__(self): + self.pieces = {} + self.completed = {} + + def set(self,piece,stat,value,firstonly=True): + if piece not in self.pieces: + self.pieces[piece] = {} + + if firstonly and stat in self.pieces[piece]: + return + + self.pieces[piece][stat] = value + + def complete(self,piece): + self.completed[piece] = 1 + + def reset(self): + for x in self.completed: + self.pieces.pop(x,0) + + self.completed = {} + + def pop_completed(self): + completed = {} + + for x in self.completed: + completed[x] = self.pieces.pop(x,{}) + + self.completed = {} + return completed + +class MovieOnDemandTransporter(MovieTransport): + """ Takes care of providing a bytestream interface based on the available pieces. """ + + # seconds to prebuffer if bitrate is known + PREBUF_SEC_LIVE = 10 + PREBUF_SEC_VOD = 10 + + # max number of seconds in queue to player + # Arno: < 2008-07-15: St*pid vlc apparently can't handle lots of data pushed to it + # Arno: 2008-07-15: 0.8.6h apparently can + BUFFER_TIME = 5.0 + + # polling interval to refill buffer + #REFILL_INTERVAL = BUFFER_TIME * 0.75 + # Arno: there's is no guarantee we got enough (=BUFFER_TIME secs worth) to write to output bug! 
+ REFILL_INTERVAL = 0.1 + + # amount of time (seconds) to push a packet into + # the player queue ahead of schedule + VLC_BUFFER_SIZE = 0 + PIECE_DUE_SKEW = 0.1 + VLC_BUFFER_SIZE + + # Arno: If we don't know playtime and FFMPEG gave no decent bitrate, this is the minimum + # bitrate (in KByte/s) that the playback birate-estimator must have to make us + # set the bitrate in movieselector. + MINPLAYBACKRATE = 32*1024 + + # maximum delay between pops before we force a restart (seconds) + MAX_POP_TIME = 60 + + def __init__(self,bt1download,videostatus,videoinfo,videoanalyserpath,vodeventfunc): + + # dirty hack to get the Tribler Session + from Tribler.Core.Session import Session + session = Session.get_instance() + + if session.get_overlay(): + # there is an overlay + + self._playback_info_db = VideoPlaybackInfoDBHandler.get_instance() + self._playback_event_db = VideoPlaybackEventDBHandler.get_instance() + + # add an event to indicate that the user wants playback to + # start + def set_nat(nat): + self._playback_info_db.set_nat(self._playback_key, nat) + self._playback_key = base64.b64encode(os.urandom(20)) + self._playback_info_db.create_entry(self._playback_key, piece_size=videostatus.piecelen, num_pieces=videostatus.movie_numpieces, bitrate=videostatus.bitrate, nat=session.get_nat_type(callback=set_nat)) + self._playback_event_db.add_event(self._playback_key, "play", "init") + + else: + self._playback_info_db = None + self._playback_event_db = None + + self._complete = False + self.videoinfo = videoinfo + self.bt1download = bt1download + self.piecepicker = bt1download.picker + self.rawserver = bt1download.rawserver + self.storagewrapper = bt1download.storagewrapper + self.fileselector = bt1download.fileselector + + self.vodeventfunc = vodeventfunc + self.videostatus = vs = videostatus + + # Add quotes around path, as that's what os.popen() wants on win32 + if sys.platform == "win32" and videoanalyserpath is not None and videoanalyserpath.find(' ') != -1: + self.video_analyser_path='"'+videoanalyserpath+'"' + else: + self.video_analyser_path=videoanalyserpath + + # counter for the sustainable() call. Every X calls the + # buffer-percentage is updated. + self.sustainable_counter = sys.maxint + + # boudewijn: because we now update the downloadrate for each + # received chunk instead of each piece we do not need to + # average the measurement over a 'long' period of time. Also, + # we only update the downloadrate for pieces that are in the + # high priority range giving us a better estimation on how + # likely the pieces will be available on time. + self.overall_rate = Measure(10) + self.high_range_rate = Measure(2) + + # boudewijn: increase the initial minimum buffer size + if not vs.live_streaming: + vs.increase_high_range() + + # buffer: a link to the piecepicker buffer + self.has = self.piecepicker.has + + # number of pieces in buffer + self.pieces_in_buffer = 0 + + self.data_ready = Condition() + + # Arno: Call FFMPEG only if the torrent did not provide the + # bitrate and video dimensions. This is becasue FFMPEG + # sometimes hangs e.g. 
Ivaylo's Xvid Finland AVI, for unknown + # reasons + + # Arno: 2007-01-06: Since we use VideoLan player, videodimensions not important + if vs.bitrate_set: + self.doing_ffmpeg_analysis = False + self.doing_bitrate_est = False + self.videodim = None #self.movieselector.videodim + else: + self.doing_ffmpeg_analysis = True + self.doing_bitrate_est = True + self.videodim = None + + self.player_opened_with_width_height = False + self.ffmpeg_est_bitrate = None + + # number of packets required to preparse the video + # I say we need 128 KB to sniff size and bitrate + + # Arno: 2007-01-04: Changed to 1MB. It appears ffplay works better with some + # decent prebuffering. We should replace this with a timing based thing, + + if not self.doing_bitrate_est: + if vs.live_streaming: + prebufsecs = self.PREBUF_SEC_LIVE + else: + prebufsecs = self.PREBUF_SEC_VOD + + # assumes first piece is whole (first_piecelen == piecelen) + piecesneeded = vs.time_to_pieces( prebufsecs ) + bytesneeded = piecesneeded * vs.piecelen + else: + # Arno, 2007-01-08: for very high bitrate files e.g. + # 850 kilobyte/s (500 MB for 10 min 20 secs) this is too small + # and we'll have packet loss because we start too soon. + bytesneeded = 1024 * 1024 + piecesneeded = 1 + int(ceil((bytesneeded - vs.first_piecelen) / float(vs.piecelen))) + + if vs.wraparound: + self.max_prebuf_packets = min(vs.wraparound_delta, piecesneeded) + else: + self.max_prebuf_packets = min(vs.movie_numpieces, piecesneeded) + + if self.doing_ffmpeg_analysis and DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: Want",self.max_prebuf_packets,"pieces for FFMPEG analysis, piecesize",vs.piecelen + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: Want",self.max_prebuf_packets,"pieces for prebuffering" + + self.nreceived = 0 + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: Setting MIME type to",self.videoinfo['mimetype'] + + self.set_mimetype(self.videoinfo['mimetype']) + + # some statistics + self.stat_playedpieces = 0 # number of pieces played successfully + self.stat_latepieces = 0 # number of pieces that arrived too late + self.stat_droppedpieces = 0 # number of pieces dropped + self.stat_stalltime = 0.0 # total amount of time the video was stalled + self.stat_prebuffertime = 0.0 # amount of prebuffer time used + self.stat_pieces = PieceStats() # information about each piece + + # start periodic tasks + self.curpiece = "" + self.curpiece_pos = 0 + self.outbuf = [] + #self.last_pop = None # time of last pop + self.reset_bitrate_prediction() + + self.lasttime=0 + # For DownloadState + self.prebufprogress = 0.0 + self.prebufstart = time.time() + self.playable = False + self.usernotified = False + + self.outbuflen = None + + # LIVESOURCEAUTH + if vs.live_streaming and vs.authparams['authmethod'] == LIVE_AUTHMETHOD_ECDSA: + self.authenticator = ECDSAAuthenticator(vs.first_piecelen,vs.movie_numpieces,pubkeypem=vs.authparams['pubkey']) + vs.sigsize = vs.piecelen - self.authenticator.get_content_blocksize() + else: + self.authenticator = None + + self.refill_rawserv_tasker() + self.tick_second() + + # link to others (last thing to do) + self.piecepicker.set_transporter( self ) + #self.start() + + if FAKEPLAYBACK: + import threading + + class FakeReader(threading.Thread): + def __init__(self,movie): + threading.Thread.__init__(self) + self.movie = movie + + def run(self): + self.movie.start() + while not self.movie.done(): + self.movie.read() + + t = 
FakeReader(self) + t.start() + + #self.rawserver.add_task( fakereader, 0.0 ) + + if self.videostatus.live_streaming: + self.live_streaming_timer() + + def calc_live_startpos(self,prebufsize=2,have=False): + """ If watching a live stream, determine where to 'hook in'. Adjusts self.download_range[0] + accordingly, never decreasing it. If 'have' is true, we need to have the data + ourself. If 'have' is false, we look at availability at our neighbours. + + Return True if succesful, False if more data has to be collected. """ + + # ----- determine highest known piece number + if have: + numseeds = 0 + numhaves = self.piecepicker.has + totalhaves = self.piecepicker.numgot + + threshold = 1 + else: + numseeds = self.piecepicker.seeds_connected + numhaves = self.piecepicker.numhaves # excludes seeds + totalhaves = self.piecepicker.totalcount # excludes seeds + + numconns = self.piecepicker.num_nonempty_neighbours() + threshold = max( 1, numconns/2 ) + + # FUDGE: number of pieces we subtract from maximum known/have, + # to start playback with some buffer present. We need enough + # pieces to do pass the prebuffering phase. when still + # requesting pieces, FUDGE can probably be a bit low lower, + # since by the time they arrive, we will have later pieces anyway. + # NB: all live torrents have the bitrate set. + FUDGE = prebufsize #self.max_prebuf_packets + + if numseeds == 0 and totalhaves == 0: + # optimisation: without seeds or pieces, just wait + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: calc_live_offset: no pieces" + return False + + # pieces are known, so we can determine where to start playing + vs = self.videostatus + + bpiece = vs.first_piece + epiece = vs.last_piece + + if numseeds > 0 or (not vs.wraparound and numhaves[epiece] > 0): + # special: if full video is available, do nothing and enter VoD mode + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: calc_live_offset: vod mode" + vs.set_live_startpos( 0 ) + return True + + # maxnum = highest existing piece number owned by more than half of the neighbours + maxnum = None + for i in xrange(epiece,bpiece-1,-1): + #if DEBUG: + # if 0 < numhaves[i] < threshold: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: calc_live_offset: discarding piece %d as it is owned by only %d<%d neighbours" % (i,numhaves[i],threshold) + + if numhaves[i] >= threshold: + maxnum = i + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: calc_live_offset: chosing piece %d as it is owned by %d>=%d neighbours" % (i,numhaves[i],threshold) + break + + if maxnum is None: + return False + + # if there is wraparound, newest piece may actually have wrapped + if vs.wraparound and maxnum > epiece - vs.wraparound_delta: + delta_left = vs.wraparound_delta - (epiece-maxnum) + + for i in xrange( vs.first_piece+delta_left-1, vs.first_piece-1, -1 ): + if numhaves[i] >= threshold: + maxnum = i + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: calc_live_offset: chosing piece %d as it is owned by %d>=%d neighbours" % (i,numhaves[i],threshold) + break + + # start watching from maximum piece number, adjusted by fudge. + if vs.wraparound: + maxnum = vs.normalize( maxnum - FUDGE ) + #f = bpiece + (maxnum - bpiece - FUDGE) % (epiece-bpiece) + #t = bpiece + (f - bpiece + vs.wraparound_delta) % (epiece-bpiece) + + # start at a piece known to exist to avoid waiting for something that won't appear + # for another round. 
guaranteed to succeed since we would have bailed if noone had anything + while not numhaves[maxnum]: + maxnum = vs.normalize( maxnum + 1 ) + else: + maxnum = max( bpiece, maxnum - FUDGE ) + + if maxnum == bpiece: + # video has just started -- watch from beginning + return True + + # If we're connected to the source, and already hooked in, + # don't change the hooking point unless it is really far + oldstartpos = vs.get_live_startpos() + if not have and threshold == 1 and oldstartpos is not None: + diff = vs.dist_range(oldstartpos,maxnum) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: calc_live_offset: m o",maxnum,oldstartpos,"diff",diff + if diff < 8: + return True + + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: === HOOKING IN AT PIECE %d (based on have: %s) ===" % (maxnum,have) + toinvalidateset = vs.set_live_startpos( maxnum ) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: invalidateset is",`toinvalidateset` + for piece in toinvalidateset: + self.live_invalidate_piece_globally(piece) + + return True + + def live_streaming_timer(self): + """ Background 'thread' to check where to hook in if live streaming. """ + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: live_streaming_timer: Finding hookin" + if self.videostatus.playing: + # Stop adjusting the download range + return + + # JD:keep checking correct playback pos since it can change if we switch neighbours + # due to faulty peers etc + + #if not (self.videostatus.live_startpos is None): + # # Adjust it only once on what we see around us + # return + + if self.calc_live_startpos( self.max_prebuf_packets, False ): + # Adjust it only once on what we see around us + #return + pass + + self.rawserver.add_task( self.live_streaming_timer, 1 ) + + def parse_video(self): + """ Feeds the first max_prebuf_packets to ffmpeg to determine video bitrate. """ + + vs = self.videostatus + width = None + height = None + + # Start ffmpeg, let it write to a temporary file to prevent + # blocking problems on Win32 when FFMPEG outputs lots of + # (error) messages. + # + [loghandle,logfilename] = mkstemp() + os.close(loghandle) + if sys.platform == "win32": + # Not "Nul:" but "nul" is /dev/null on Win32 + sink = 'nul' + else: + sink = '/dev/null' + # DON'T FORGET 'b' OTHERWISE WE'RE WRITING BINARY DATA IN TEXT MODE! + (child_out,child_in) = os.popen2( "%s -y -i - -vcodec copy -acodec copy -f avi %s > %s 2>&1" % (self.video_analyser_path, sink, logfilename), 'b' ) + """ + # If the path is "C:\Program Files\bla\bla" (escaping left out) and that file does not exist + # the output will say something cryptic like "vod: trans: FFMPEG said C:\Program" suggesting an + # error with the double quotes around the command, but that's not it. Be warned! + cmd = self.video_analyser_path+' -y -i - -vcodec copy -acodec copy -f avi '+sink+' > '+logfilename+' 2>&1' + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: Video analyser command is",cmd + (child_out,child_in) = os.popen2(cmd,'b') # DON'T FORGET 'b' OTHERWISE THINGS GO WRONG! 
+ """ + + # feed all the pieces + first,last = vs.download_range() + for i in xrange(first,last): + piece = self.get_piece( i ) + + if piece is None: + break + + # remove any signatures etc + if self.authenticator is not None: + piece = self.authenticator.get_content( piece ) + + try: + child_out.write( piece ) + except IOError: + print_exc(file=sys.stderr) + break + + child_out.close() + child_in.close() + + logfile = open(logfilename, 'r') + + # find the bitrate in the output + bitrate = None + + r = re.compile( "bitrate= *([0-9.]+)kbits/s" ) + r2 = re.compile( "Video:.* ([0-9]+x[0-9]+)," ) # video dimensions WIDTHxHEIGHT + + founddim = False + for x in logfile.readlines(): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: FFMPEG said:",x + occ = r.findall( x ) + if occ: + # use the latest mentioning of bitrate + bitrate = float( occ[-1] ) * 1024 / 8 + if DEBUG: + if bitrate is not None: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: Bitrate according to FFMPEG: %.2f KByte/s" % (bitrate/1024) + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: Bitrate could not be determined by FFMPEG" + occ = r2.findall( x ) + if occ and not founddim: + # use first occurence + dim = occ[0] + idx = dim.find('x') + width = int(dim[:idx]) + height = int(dim[idx+1:]) + founddim = True + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: width",width,"heigth",height + logfile.close() + try: + os.remove(logfilename) + except: + pass + + return [bitrate,width,height] + + def update_prebuffering(self,received_piece=None): + """ Update prebuffering process. 'received_piece' is a hint that we just received this piece; + keep at 'None' for an update in general. 
""" + + vs = self.videostatus + + if not vs.prebuffering: + return + + if vs.live_streaming and vs.live_startpos is None: + # first determine where to hook in + return + + if received_piece: + self.nreceived += 1 + + high_range = vs.generate_high_range() + high_range_length = vs.get_high_range_length() + missing_pieces = filter(lambda i: not self.have_piece(i), high_range) + gotall = not missing_pieces + if high_range_length: + self.prebufprogress = min(1, float(high_range_length - len(missing_pieces)) / max(1, high_range_length)) + else: + self.prebufprogress = 1.0 + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: Already got",(self.prebufprogress*100.0),"% of prebuffer" + + if not gotall and DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: Still need pieces",missing_pieces,"for prebuffering/FFMPEG analysis" + + if vs.dropping: + if not self.doing_ffmpeg_analysis and not gotall and not (0 in missing_pieces) and self.nreceived > self.max_prebuf_packets: + perc = float(self.max_prebuf_packets)/10.0 + if float(len(missing_pieces)) < perc or self.nreceived > (2*len(missing_pieces)): + # If less then 10% of packets missing, or we got 2 times the packets we need already, + # force start of playback + gotall = True + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: Forcing stop of prebuffering, less than",perc,"missing, or got 2N packets already" + + if gotall and self.doing_ffmpeg_analysis: + [bitrate,width,height] = self.parse_video() + self.doing_ffmpeg_analysis = False + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: after parse",bitrate,self.doing_bitrate_est + if bitrate is None or round(bitrate)== 0: + if self.doing_bitrate_est: + # Errr... there was no playtime info in the torrent + # and FFMPEG can't tell us... + bitrate = (1*1024*1024/8) # 1mbps + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: No bitrate info avail, wild guess: %.2f KByte/s" % (bitrate/1024) + + vs.set_bitrate(bitrate) + if self._playback_info_db: self._playback_info_db.set_bitrate(self._playback_key, bitrate) + if self._playback_event_db: self._playback_event_db.add_event(self._playback_key, "ffmpeg", "bitrate %d" % bitrate) + else: + if self.doing_bitrate_est: + # There was no playtime info in torrent, use what FFMPEG tells us + self.ffmpeg_est_bitrate = bitrate + bitrate *= 1.1 # Make FFMPEG estimation 10% higher + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: Estimated bitrate: %.2f KByte/s" % (bitrate/1024) + + vs.set_bitrate(bitrate) + if self._playback_info_db: self._playback_info_db.set_bitrate(self._playback_key, bitrate) + if self._playback_event_db: self._playback_event_db.add_event(self._playback_key, "ffmpeg", "bitrate %d" % bitrate) + + if width is not None and height is not None: + diff = False + if self.videodim is None: + self.videodim = (width,height) + self.height = height + elif self.videodim[0] != width or self.videodim[1] != height: + diff = True + if not self.player_opened_with_width_height or diff: + #self.user_setsize(self.videodim) + pass + + # # 10/03/09 boudewijn: For VOD we will wait for the entire + # # buffer to fill (gotall) before we start playback. For live + # # this is unlikely to happen and we will therefore only wait + # # until we estimate that we have enough_buffer. 
+ # if (gotall or vs.live_streaming) and self.enough_buffer(): + if gotall and self.enough_buffer(): + # enough buffer and could estimated bitrate - start streaming + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: Prebuffering done",currentThread().getName() + self.data_ready.acquire() + vs.prebuffering = False + self.stat_prebuffertime = time.time() - self.prebufstart + self.notify_playable() + self.data_ready.notify() + self.data_ready.release() + + elif DEBUG: + if self.doing_ffmpeg_analysis: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: Prebuffering: waiting to obtain the first %d packets" % (self.max_prebuf_packets) + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: Prebuffering: %.2f seconds left" % (self.expected_buffering_time()) + + def got_have(self,piece): + vs = self.videostatus + + # update stats + self.stat_pieces.set( piece, "known", time.time() ) + """ + if vs.playing and vs.wraparound: + # check whether we've slipped back too far + d = vs.wraparound_delta + n = max(1,self.piecepicker.num_nonempty_neighbours()/2) + if self.piecepicker.numhaves[piece] > n and d/2 < (piece - vs.playback_pos) % vs.movie_numpieces < d: + # have is confirmed by more than half of the neighours and is in second half of future window + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: Forcing restart. Am at playback position %d but saw %d at %d>%d peers." % (vs.playback_pos,piece,self.piecepicker.numhaves[piece],n) + + self.start(force=True) + """ + + def got_piece(self, piece_id, begin, length): + """ + Called when a chunk has been downloaded. This information can + be used to estimate download speed. + """ + if self.videostatus.in_high_range(piece_id): + self.high_range_rate.update_rate(length) + # if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "vod: high priority rate:", self.high_range_rate.get_rate() + + def complete(self,piece,downloaded=True): + """ Called when a movie piece has been downloaded or was available from the start (disk). """ + + if not self._complete and self.piecepicker.am_I_complete(): + self._complete = True + if self._playback_event_db: self._playback_event_db.add_event(self._playback_key, "complete", "system") + + vs = self.videostatus + + if vs.wraparound: + assert downloaded + + self.stat_pieces.set( piece, "complete", time.time() ) + + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: Completed",piece + + if downloaded: + self.overall_rate.update_rate( vs.real_piecelen( piece ) ) + + if vs.in_download_range( piece ): + self.pieces_in_buffer += 1 + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: piece %d too late [pos=%d]" % (piece,vs.playback_pos) + self.stat_latepieces += 1 + + if vs.playing and vs.playback_pos == piece: + # we were delaying for this piece + self.refill_buffer() + + self.update_prebuffering( piece ) + + def set_pos(self,pos): + """ Update the playback position. Called when playback is started (depending + on requested offset). 
""" + + vs = self.videostatus + + oldpos = vs.playback_pos + vs.playback_pos = pos + + if vs.wraparound: + # recalculate + self.pieces_in_buffer = 0 + for i in vs.generate_range( vs.download_range() ): + if self.has[i]: + self.pieces_in_buffer += 1 + else: + # fast forward + for i in xrange(oldpos,pos+1): + if self.has[i]: + self.pieces_in_buffer -= 1 + + # fast rewind + for i in xrange(pos,oldpos+1): + if self.has[i]: + self.pieces_in_buffer += 1 + + def inc_pos(self): + vs = self.videostatus + + if self.has[vs.playback_pos]: + self.pieces_in_buffer -= 1 + + vs.inc_playback_pos() + + if vs.live_streaming: + self.live_invalidate_piece_globally(vs.live_piece_to_invalidate()) + +# def buffered_time_period(self): +# """Length of period of Buffered pieces""" +# if self.movieselector.bitrate is None or self.movieselector.bitrate == 0.0: +# return 0 +# else: +# return self.pieces_in_buffer * self.movieselector.piece_length / self.movieselector.bitrate +# +# def playback_time_position(self): +# """Time of playback_pos and total duration +# Return playback_time in seconds +# """ +# if self.movieselector.bitrate is None or self.movieselector.bitrate == 0.0: +# return 0 +# else: +# return self.playback_pos * self.movieselector.piece_length / self.movieselector.bitrate + + def expected_download_time(self): + """ Expected download time left. """ + vs = self.videostatus + if vs.wraparound: + return float(2 ** 31) + + pieces_left = vs.last_piece - vs.playback_pos - self.pieces_in_buffer + if pieces_left <= 0: + return 0.0 + + # list all pieces from the high priority set that have not + # been completed + uncompleted_pieces = filter(self.storagewrapper.do_I_have, vs.generate_high_range()) + + # when all pieces in the high-range have been downloaded, + # we have an expected download time of zero + if not uncompleted_pieces: + return 0.0 + + # the download time estimator is very inacurate when we only + # have a few chunks left. therefore, we will put more emphesis + # on the overall_rate as the number of uncompleted_pieces does + # down. + total_length = vs.get_high_range_length() + uncompleted_length = len(uncompleted_pieces) + expected_download_speed = self.high_range_rate.get_rate() * (1 - float(uncompleted_length) / total_length) + \ + self.overall_rate.get_rate() * uncompleted_length / total_length + if expected_download_speed < 0.1: + return float(2 ** 31) + + return pieces_left * vs.piecelen / expected_download_speed + + def expected_playback_time(self): + """ Expected playback time left. """ + + vs = self.videostatus + + if vs.wraparound: + return float(2 ** 31) + + pieces_to_play = vs.last_piece - vs.playback_pos + 1 + + if pieces_to_play <= 0: + return 0.0 + + if not vs.bitrate: + return float(2 ** 31) + + return pieces_to_play * vs.piecelen / vs.bitrate + + def expected_buffering_time(self): + """ Expected time required for buffering. """ + download_time = self.expected_download_time() + playback_time = self.expected_playback_time() + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","EXPECT",self.expected_download_time(),self.expected_playback_time() + # Infinite minus infinite is still infinite + if download_time > float(2 ** 30) and playback_time > float(2 ** 30): + return float(2 ** 31) + return abs(download_time - playback_time) + + def enough_buffer(self): + """ Returns True if we can safely start playback without expecting to run out of + buffer. 
""" + + if self.videostatus.wraparound: + # Wrapped streaming has no (known) limited duration, so we cannot predict + # whether we have enough download speed. The only way is just to hope + # for the best, since any buffer will be emptied if the download speed + # is too low. + return True + + return max(0.0, self.expected_download_time() - self.expected_playback_time()) == 0.0 + + def tick_second(self): + self.rawserver.add_task( self.tick_second, 1.0 ) + + vs = self.videostatus + + # Adjust estimate every second, but don't display every second + display = (int(time.time()) % 5) == 0 + if display: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: Estimated download time: %5.1fs [priority: %7.2f Kbyte/s] [overall: %7.2f Kbyte/s]" % (self.expected_download_time(), self.high_range_rate.get_rate()/1024, self.overall_rate.get_rate()/1024) + + if vs.playing and round(self.playbackrate.rate) > self.MINPLAYBACKRATE and not vs.prebuffering: + if self.doing_bitrate_est: + if display: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: Estimated playback time: %5ds [%7.2f Kbyte/s], doing estimate=%d" % (self.expected_playback_time(),self.playbackrate.rate/1024, self.ffmpeg_est_bitrate is None) + if self.ffmpeg_est_bitrate is None: + vs.set_bitrate( self.playbackrate.rate ) + + if display: + sys.stderr.flush() + + # + # MovieTransport interface + # + # WARNING: these methods will be called by other threads than NetworkThread! + # + def size( self ): + if self.videostatus.get_wraparound(): + return None + else: + return self.videostatus.selected_movie["size"] + + def read(self,numbytes=None): + """ Read at most numbytes from the stream. If numbytes is not given, + pieces are returned. The bytes read will be returned, or None in + case of an error or end-of-stream. """ + if not self.curpiece: + # curpiece_pos could be set to something other than 0! + # for instance, a seek request sets curpiece_pos but does not + # set curpiece. + + x = self.pop() + if x is None: + return None + + piecenr,self.curpiece = x + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: %d: popped piece to transport to player" % piecenr + + curpos = self.curpiece_pos + left = len(self.curpiece) - curpos + + if numbytes is None: + # default on one piece per read + numbytes = left + + if left > numbytes: + # piece contains enough -- return what was requested + data = self.curpiece[curpos:curpos+numbytes] + + self.curpiece_pos += numbytes + else: + # return remainder of the piece, could be less than numbytes + data = self.curpiece[curpos:] + + self.curpiece = "" + self.curpiece_pos = 0 + + return data + + def start( self, bytepos = 0, force = False ): + """ Initialise to start playing at position `bytepos'. """ + if self._playback_event_db: self._playback_event_db.add_event(self._playback_key, "play", "system") + + # ARNOTODO: we don't use start(bytepos != 0) at the moment. See if we + # should. Also see if we need the read numbytes here, or that it + # is better handled at a higher layer. For live it is currently + # done at a higher level, see VariableReadAuthStreamWrapper because + # we have to strip the signature. Hence the self.curpiece buffer here + # is superfluous. Get rid off it or check if + # + # curpiece[0:piecelen] + # + # returns curpiece if piecelen has length piecelen == optimize for + # piecesized case. + # + # For VOD seeking we may use the numbytes facility to seek to byte offsets + # not just piece offsets. 
+ # + vs = self.videostatus + + if vs.playing and not force: + return + + # lock before changing startpos or any other playing variable + self.data_ready.acquire() + try: + if vs.live_streaming: + # Determine where to start playing. There may be several seconds + # between starting the download and starting playback, which we'll + # want to skip. + self.calc_live_startpos( self.max_prebuf_packets, True ) + + # override any position request by VLC, we only have live data + piece = vs.playback_pos + offset = 0 + else: + # Determine piece number and offset + if bytepos < vs.first_piecelen: + piece = vs.first_piece + offset = bytepos + else: + newbytepos = bytepos - vs.first_piecelen + + piece = vs.first_piece + newbytepos / vs.piecelen + 1 + offset = newbytepos % vs.piecelen + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: === START at offset %d (piece %d) (forced: %s) ===" % (bytepos,piece,force) + + # Initialise all playing variables + self.curpiece = "" # piece currently being popped + self.curpiece_pos = offset + self.set_pos( piece ) + self.outbuf = [] + #self.last_pop = time.time() + self.reset_bitrate_prediction() + vs.playing = True + self.playbackrate = Measure( 60 ) + + # boudewijn: decrease the initial minimum buffer size + if not vs.live_streaming: + vs.decrease_high_range() + + finally: + self.data_ready.release() + + # ARNOTODO: start is called by non-NetworkThreads, these following methods + # are usually called by NetworkThread. + # + # We now know that this won't be called until notify_playable() so + # perhaps this can be removed? + # + # CAREFUL: if we use start() for seeking... that's OK. User won't be + # able to seek before he got his hands on the stream, so after + # notify_playable() + + # See what we can do right now + self.update_prebuffering() + self.refill_buffer() + + def stop( self ): + """ Playback is stopped. """ + if self._playback_event_db: self._playback_event_db.add_event(self._playback_key, "stop", "system") + + vs = self.videostatus + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: === STOP = player closed conn === " + if not vs.playing: + return + vs.playing = False + + # clear buffer and notify possible readers + self.data_ready.acquire() + self.outbuf = [] + #self.last_pop = None + vs.prebuffering = False + self.data_ready.notify() + self.data_ready.release() + + def pause( self, autoresume = False ): + """ Pause playback. If `autoresume' is set, playback is expected to be + resumed automatically once enough data has arrived. """ + if self._playback_event_db: self._playback_event_db.add_event(self._playback_key, "pause", "system") + + vs = self.videostatus + + if not vs.playing or not vs.pausable: + return + + if vs.paused: + vs.autoresume = autoresume + return + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: paused (autoresume: %s)" % (autoresume,) + + vs.paused = True + vs.autoresume = autoresume + self.paused_at = time.time() + #self.reset_bitrate_prediction() + self.videoinfo["usercallback"](VODEVENT_PAUSE,{ "autoresume": autoresume }) + + def resume( self ): + """ Resume paused playback. 
""" + if self._playback_event_db: self._playback_event_db.add_event(self._playback_key, "resume", "system") + + vs = self.videostatus + + if not vs.playing or not vs.paused or not vs.pausable: + return + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: resumed" + + vs.paused = False + vs.autoresume = False + self.stat_stalltime += time.time() - self.paused_at + self.addtime_bitrate_prediction( time.time() - self.paused_at ) + self.videoinfo["usercallback"](VODEVENT_RESUME,{}) + + self.update_prebuffering() + self.refill_buffer() + + def autoresume( self, testfunc = lambda: True ): + """ Resumes if testfunc returns True. If not, will test every second. """ + + vs = self.videostatus + + if not vs.playing or not vs.paused or not vs.autoresume: + return + + if not testfunc(): + self.rawserver.add_task( lambda: self.autoresume( testfunc ), 1.0 ) + return + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: Resuming, since we can maintain this playback position" + self.resume() + + def done( self ): + vs = self.videostatus + + if not vs.playing: + return True + + if vs.wraparound: + return False + + return vs.playback_pos == vs.last_piece+1 and self.curpiece_pos >= len(self.curpiece) + + def seek(self,pos,whence=None): + """ Seek to the given position, a number in bytes relative to both + the "whence" reference point and the file being played. + + We currently actually seek at byte level, via the start() method. + We support all forms of seeking, including seeking past the current + playback pos. Note this may imply needing to prebuffer again or + being paused. + + vs.playback_pos in NetworkThread domain. Does data_ready lock cover + that? Nope. However, this doesn't appear to be respected in any + of the MovieTransport methods, check all. + + Check + * When seeking reset other buffering, e.g. read()'s self.curpiece + and higher layers. + + """ + vs = self.videostatus + length = self.size() + + # lock before changing startpos or any other playing variable + self.data_ready.acquire() + try: + if vs.live_streaming: + if pos == 0 and whence == os.SEEK_SET: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: seek: Ignoring seek in live" + else: + raise ValueError("seeking not possible for live") + if whence == os.SEEK_SET: + abspos = pos + elif whence == os.SEEK_END: + if pos > 0: + raise ValueError("seeking beyond end of stream") + else: + abspos = size+pos + else: # SEEK_CUR + raise ValueError("seeking does not currently support SEEK_CUR") + + self.stop() + self.start(pos) + finally: + self.data_ready.release() + + + + def get_mimetype(self): + return self.mimetype + + def set_mimetype(self,mimetype): + self.mimetype = mimetype + # + # End of MovieTransport interface + # + + def have_piece(self,piece): + return self.piecepicker.has[piece] + + def get_piece(self,piece): + """ Returns the data of a certain piece, or None. 
""" + + vs = self.videostatus + + if not self.have_piece( piece ): + return None + + begin = 0 + length = vs.piecelen + + if piece == vs.first_piece: + begin = vs.movie_range[0][1] + length -= begin + + if piece == vs.last_piece: + cutoff = vs.piecelen - (vs.movie_range[1][1] + 1) + length -= cutoff + + data = self.storagewrapper.do_get_piece(piece, begin, length) + if data is None: + return None + return data.tostring() + + def reset_bitrate_prediction(self): + self.start_playback = None + self.last_playback = None + self.history_playback = collections.deque() + + def addtime_bitrate_prediction(self,seconds): + if self.start_playback is not None: + self.start_playback["local_ts"] += seconds + + def valid_piece_data(self,i,piece): + if not piece: + return False + + if not self.start_playback or self.authenticator is None: + # no check possible + return True + + s = self.start_playback + + seqnum = self.authenticator.get_seqnum( piece ) + source_ts = self.authenticator.get_rtstamp( piece ) + + if seqnum < s["absnr"] or source_ts < s["source_ts"]: + # old packet??? + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: **** INVALID PIECE #%s **** seqnum=%d but we started at seqnum=%d" % (i,seqnum,s["absnr"]) + return False + + return True + + + def update_bitrate_prediction(self,i,piece): + """ Update the rate prediction given that piece i has just been pushed to the buffer. """ + + if self.authenticator is not None: + seqnum = self.authenticator.get_seqnum( piece ) + source_ts = self.authenticator.get_rtstamp( piece ) + else: + seqnum = i + source_ts = 0 + + d = { + "nr": i, + "absnr": seqnum, + "local_ts": time.time(), + "source_ts": source_ts, + } + + # record + if self.start_playback is None: + self.start_playback = d + + if self.last_playback and self.last_playback["absnr"] > d["absnr"]: + # called out of order + return + + self.last_playback = d + + # keep a recent history + MAX_HIST_LEN = 10*60 # seconds + + self.history_playback.append( d ) + + # of at most 10 entries (or minutes if we keep receiving pieces) + while source_ts - self.history_playback[0]["source_ts"] > MAX_HIST_LEN: + self.history_playback.popleft() + + if DEBUG: + vs = self.videostatus + first, last = self.history_playback[0], self.history_playback[-1] + + if first["source_ts"] and first != last: + bitrate = "%.2f kbps" % (8.0 / 1024 * (vs.piecelen - vs.sigsize) * (last["absnr"] - first["absnr"]) / (last["source_ts"] - first["source_ts"]),) + else: + bitrate = "%.2f kbps (external info)" % (8.0 / 1024 * vs.bitrate) + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: %i: pushed at t=%.2f, age is t=%.2f, bitrate = %s" % (i,d["local_ts"]-self.start_playback["local_ts"],d["source_ts"]-self.start_playback["source_ts"],bitrate) + + def piece_due(self,i): + """ Return the time when we expect to have to send a certain piece to the player. For + wraparound, future pieces are assumed. """ + + if self.start_playback is None: + return float(2 ** 31) # end of time + + s = self.start_playback + l = self.last_playback + vs = self.videostatus + + if not vs.wraparound and i < l["nr"]: + # should already have arrived! + return time.time() + + # assume at most one wrap-around between l and i + piecedist = (i - l["nr"]) % vs.movie_numpieces + + if s["source_ts"]: + # ----- we have timing information from the source + first, last = self.history_playback[0], self.history_playback[-1] + + if first != last: + # we have at least two recent pieces, so can calculate average bitrate. 
use the recent history + # do *not* adjust for sigsize since we don't want the actual video speed but the piece rate + bitrate = 1.0 * vs.piecelen * (last["absnr"] - first["absnr"]) / (last["source_ts"] - first["source_ts"]) + else: + # fall-back to bitrate predicted from torrent / ffmpeg + bitrate = vs.bitrate + + # extrapolate with the average bitrate so far + return s["local_ts"] + l["source_ts"] - s["source_ts"] + piecedist * vs.piecelen / bitrate - self.PIECE_DUE_SKEW + else: + # ----- no timing information from pieces, so do old-fashioned methods + if vs.live_streaming: + # Arno, 2008-11-20: old-fashioned method is well bad, + # ignore. + return time.time() + 60.0 + else: + i = piecedist + (l["absnr"] - s["absnr"]) + + if s["nr"] == vs.first_piece: + bytepos = vs.first_piecelen + (i-1) * vs.piecelen + else: + bytepos = i * vs.piecelen + + return s["local_ts"] + bytepos / vs.bitrate - self.PIECE_DUE_SKEW + + + def max_buffer_size( self ): + vs = self.videostatus + if vs.dropping: + # live + # Arno: 1/2 MB or based on bitrate if that is above 5 Mbps + return max( 0*512*1024, self.BUFFER_TIME * vs.bitrate ) + else: + # VOD + # boudewijn: 1/4 MB, bitrate, or 2 pieces (wichever is higher) + return max(256*1024, vs.piecelen * 2, self.BUFFER_TIME * vs.bitrate) + + + def refill_buffer( self ): + """ Push pieces into the player FIFO when needed and able. This counts as playing + the pieces as far as playback_pos is concerned.""" + + self.data_ready.acquire() + + vs = self.videostatus + + if vs.prebuffering or not vs.playing: + self.data_ready.release() + return + + #if self.last_pop is not None and time.time() - self.last_pop > self.MAX_POP_TIME: + # # last pop too long ago, restart + # self.data_ready.release() + # self.stop() + # self.start(force=True) + # return + + if vs.paused: + self.data_ready.release() + return + + mx = self.max_buffer_size() + self.outbuflen = sum( [len(d) for (p,d) in self.outbuf] ) + now = time.time() + + def buffer_underrun(): + return self.outbuflen == 0 and self.start_playback and now - self.start_playback["local_ts"] > 1.0 + + if buffer_underrun(): + + if vs.dropping: # live + def sustainable(): + # buffer underrun -- check for available pieces + num_future_pieces = 0 + for piece in vs.generate_range( vs.download_range() ): + if self.has[piece]: + num_future_pieces += 1 + + goal = mx / 2 + # progress + self.prebufprogress = min(1.0,float(num_future_pieces * vs.piecelen) / float(goal)) + + # enough future data to fill the buffer + return num_future_pieces * vs.piecelen >= goal + else: # vod + def sustainable(): + # num_immediate_packets = 0 + # for piece in vs.generate_range( vs.download_range() ): + # if self.has[piece]: + # num_immediate_packets += 1 + # else: + # break + # else: + # # progress + # self.prebufprogress = 1.0 + # # completed loop without breaking, so we have everything we need + # return True + # + # # progress + # self.prebufprogress = min(1.0,float(num_immediate_packets) / float(self.max_prebuf_packets)) + # + # return num_immediate_packets >= self.max_prebuf_packets + + self.sustainable_counter += 1 + if self.sustainable_counter > 10: + self.sustainable_counter = 0 + + high_range_length = vs.get_high_range_length() + have_length = len(filter(lambda n:self.has[n], vs.generate_high_range())) + + # progress + self.prebufprogress = min(1.0, float(have_length) / max(1, high_range_length)) + + return have_length >= high_range_length + + else: + num_immediate_packets = 0 + high_range_length = vs.get_high_range_length() + # for piece in 
vs.generate_range(vs.download_range()): + for piece in vs.generate_high_range(): + if self.has[piece]: + num_immediate_packets += 1 + if num_immediate_packets >= high_range_length: + break + else: + break + else: + # progress + self.prebufprogress = 1.0 + # completed loop without breaking, so we have everything we need + return True + + return num_immediate_packets >= high_range_length + + sus = sustainable() + if vs.pausable and not sus: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: BUFFER UNDERRUN -- PAUSING" + self.pause( autoresume = True ) + self.autoresume( sustainable ) + + # boudewijn: increase the minimum buffer size + vs.increase_high_range() + + self.data_ready.release() + return + elif sus: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: BUFFER UNDERRUN -- IGNORING, rate is sustainable" + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: BUFFER UNDERRUN -- STALLING, cannot pause player to fall back some, so just wait for more pieces" + self.data_ready.release() + return + + def push( i, data ): + # force buffer underrun: + #if self.start_playback and time.time()-self.start_playback["local_ts"] > 60: + # # hack: dont push after 1 minute + # return + + # push packet into queue + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: %d: pushed l=%d" % (vs.playback_pos,piece) + + # update predictions based on this piece + self.update_bitrate_prediction( i, data ) + + self.stat_playedpieces += 1 + self.stat_pieces.set( i, "tobuffer", time.time() ) + + self.outbuf.append( (vs.playback_pos,data) ) + self.outbuflen += len(data) + + self.data_ready.notify() + self.inc_pos() + + def drop( i ): + # drop packet + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: %d: dropped pos=%d; deadline expired %.2f sec ago !!!!!!!!!!!!!!!!!!!!!!" % (piece,vs.playback_pos,time.time()-self.piece_due(i)) + + self.stat_droppedpieces += 1 + self.stat_pieces.complete( i ) + self.inc_pos() + + for piece in vs.generate_range( vs.download_range() ): + ihavepiece = self.has[piece] + forcedrop = False + + # check whether we have room to store it + if self.outbuflen > mx: + # buffer full + break + + # final check for piece validity + if ihavepiece: + data = self.get_piece( piece ) + if not self.valid_piece_data( piece, data ): + # I should have the piece, but I don't: WAAAAHH! + forcedrop = True + ihavepiece = False + + if ihavepiece: + # have piece - push it into buffer + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: BUFFER STATUS (max %.0f): %.0f kbyte" % (mx/1024.0,self.outbuflen/1024.0) + + # piece found -- add it to the queue + push( piece, data ) + else: + # don't have piece, or forced to drop + if not vs.dropping and forcedrop: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: DROPPING INVALID PIECE #%s, even though we shouldn't drop anything." 
% piece + if vs.dropping or forcedrop: + if time.time() >= self.piece_due( piece ) or buffer_underrun() or forcedrop: + # piece is too late or we have an empty buffer (and future data to play, otherwise we would have paused) -- drop packet + drop( piece ) + else: + # we have time to wait for the piece and still have data in our buffer -- wait for packet + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: %d: due in %.2fs pos=%d" % (piece,self.piece_due(piece)-time.time(),vs.playback_pos) + break + else: # not dropping + if self.outbuflen == 0: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: SHOULD NOT HAPPEN: missing piece but not dropping. should have paused. pausable=",vs.pausable + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: prebuffering done, but could not fill buffer." + break + + self.data_ready.release() + + def refill_rawserv_tasker( self ): + self.refill_buffer() + + self.rawserver.add_task( self.refill_rawserv_tasker, self.REFILL_INTERVAL ) + + def pop( self ): + self.data_ready.acquire() + vs = self.videostatus + + while vs.prebuffering and not self.done(): + # wait until done prebuffering + self.data_ready.wait() + + while not self.outbuf and not self.done(): + # wait until a piece is available + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: Player waiting for data" + self.data_ready.wait() + + if not self.outbuf: + piece = None + else: + piece = self.outbuf.pop( 0 ) # nr,data pair + self.playbackrate.update_rate( len(piece[1]) ) + + #self.last_pop = time.time() + + self.data_ready.release() + + if piece: + self.stat_pieces.set( piece[0], "toplayer", time.time() ) + self.stat_pieces.complete( piece[0] ) + + return piece + + def notify_playable(self): + """ Tell user he can play the media, + cf. Tribler.Core.DownloadConfig.set_vod_event_callback() + """ + #if self.bufferinfo: + # self.bufferinfo.set_playable() + #self.progressinf.bufferinfo_updated_callback() + + # triblerAPI + if self.usernotified: + return + self.usernotified = True + self.prebufprogress = 1.0 + self.playable = True + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: notify_playable: Calling usercallback to tell it we're ready to play",self.videoinfo['usercallback'] + + # MIME type determined normally in LaunchManyCore.network_vod_event_callback + # However, allow for recognition by videoanalyser + mimetype = self.get_mimetype() + complete = self.piecepicker.am_I_complete() + if complete: + stream = None + filename = self.videoinfo["outpath"] + else: + stream = MovieTransportStreamWrapper(self) + if self.videostatus.live_streaming and self.videostatus.authparams['authmethod'] != LIVE_AUTHMETHOD_NONE: + intermedstream = AuthStreamWrapper(stream,self.authenticator) + endstream = VariableReadAuthStreamWrapper(intermedstream,self.authenticator.get_piece_length()) + else: + endstream = stream + filename = None + + # Call user callback + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: notify_playable: calling:",self.vodeventfunc + self.vodeventfunc( self.videoinfo, VODEVENT_START, { + "complete": complete, + "filename": filename, + "mimetype": mimetype, + "stream": endstream, + "length": self.size(), + } ) + + + # + # Methods for DownloadState to extract status info of VOD mode. + # + def get_stats(self): + """ Returns accumulated statistics. 
The piece data is cleared after this call to save memory. """ + """ Called by network thread """ + s = { "played": self.stat_playedpieces, + "late": self.stat_latepieces, + "dropped": self.stat_droppedpieces, + "stall": self.stat_stalltime, + "pos": self.videostatus.playback_pos, + "prebuf": self.stat_prebuffertime, + "pp": self.piecepicker.stats, + "pieces": self.stat_pieces.pop_completed(), } + return s + + def get_prebuffering_progress(self): + """ Called by network thread """ + return self.prebufprogress + + def is_playable(self): + """ Called by network thread """ + if not self.playable or self.videostatus.prebuffering: + self.playable = (self.prebufprogress == 1.0 and self.enough_buffer()) + return self.playable + + def get_playable_after(self): + """ Called by network thread """ + return self.expected_buffering_time() + + def get_duration(self): + return 1.0 * self.videostatus.selected_movie["size"] / self.videostatus.bitrate + + # + # Live streaming + # + def live_invalidate_piece_globally(self, piece): + """ Make piece disappear from this peer's view of BT world """ + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: live_invalidate",piece + + self.piecepicker.invalidate_piece(piece) + self.piecepicker.downloader.live_invalidate(piece) + + # LIVESOURCEAUTH + def piece_from_live_source(self,index,data): + if self.authenticator is not None: + return self.authenticator.verify(data,index=index) + else: + return True + diff --git a/tribler-mod/Tribler/Core/Video/VideoOnDemand.py.bak b/tribler-mod/Tribler/Core/Video/VideoOnDemand.py.bak new file mode 100644 index 0000000..5751a09 --- /dev/null +++ b/tribler-mod/Tribler/Core/Video/VideoOnDemand.py.bak @@ -0,0 +1,1572 @@ + +# Written by Jan David Mol, Arno Bakker +# see LICENSE.txt for license information + +import sys +from math import ceil +from threading import Condition,currentThread +from traceback import print_exc +from tempfile import mkstemp +import time +import collections +import os +import base64 + +if sys.version.startswith("2.4"): + os.SEEK_SET = 0 + os.SEEK_CUR = 1 + os.SEEK_END = 2 + +import os,sys,time +import re +from Tribler.Core.BitTornado.CurrentRateMeasure import Measure +from Tribler.Core.Video.MovieTransport import MovieTransport,MovieTransportStreamWrapper +from Tribler.Core.simpledefs import * +from Tribler.Core.Video.LiveSourceAuth import ECDSAAuthenticator,AuthStreamWrapper,VariableReadAuthStreamWrapper +from Tribler.Core.CacheDB.SqliteVideoPlaybackStatsCacheDB import VideoPlaybackEventDBHandler, VideoPlaybackInfoDBHandler + +# pull all video data as if a video player was attached +FAKEPLAYBACK = False + +DEBUG = True #False +DEBUGPP = False + +class PieceStats: + """ Keeps track of statistics for each piece as it flows through the system. """ + + def __init__(self): + self.pieces = {} + self.completed = {} + + def set(self,piece,stat,value,firstonly=True): + if piece not in self.pieces: + self.pieces[piece] = {} + + if firstonly and stat in self.pieces[piece]: + return + + self.pieces[piece][stat] = value + + def complete(self,piece): + self.completed[piece] = 1 + + def reset(self): + for x in self.completed: + self.pieces.pop(x,0) + + self.completed = {} + + def pop_completed(self): + completed = {} + + for x in self.completed: + completed[x] = self.pieces.pop(x,{}) + + self.completed = {} + return completed + +class MovieOnDemandTransporter(MovieTransport): + """ Takes care of providing a bytestream interface based on the available pieces. 
""" + + # seconds to prebuffer if bitrate is known + PREBUF_SEC_LIVE = 10 + PREBUF_SEC_VOD = 10 + + # max number of seconds in queue to player + # Arno: < 2008-07-15: St*pid vlc apparently can't handle lots of data pushed to it + # Arno: 2008-07-15: 0.8.6h apparently can + BUFFER_TIME = 5.0 + + # polling interval to refill buffer + #REFILL_INTERVAL = BUFFER_TIME * 0.75 + # Arno: there's is no guarantee we got enough (=BUFFER_TIME secs worth) to write to output bug! + REFILL_INTERVAL = 0.1 + + # amount of time (seconds) to push a packet into + # the player queue ahead of schedule + VLC_BUFFER_SIZE = 0 + PIECE_DUE_SKEW = 0.1 + VLC_BUFFER_SIZE + + # Arno: If we don't know playtime and FFMPEG gave no decent bitrate, this is the minimum + # bitrate (in KByte/s) that the playback birate-estimator must have to make us + # set the bitrate in movieselector. + MINPLAYBACKRATE = 32*1024 + + # maximum delay between pops before we force a restart (seconds) + MAX_POP_TIME = 60 + + def __init__(self,bt1download,videostatus,videoinfo,videoanalyserpath,vodeventfunc): + + # dirty hack to get the Tribler Session + from Tribler.Core.Session import Session + session = Session.get_instance() + + if session.get_overlay(): + # there is an overlay + + self._playback_info_db = VideoPlaybackInfoDBHandler.get_instance() + self._playback_event_db = VideoPlaybackEventDBHandler.get_instance() + + # add an event to indicate that the user wants playback to + # start + def set_nat(nat): + self._playback_info_db.set_nat(self._playback_key, nat) + self._playback_key = base64.b64encode(os.urandom(20)) + self._playback_info_db.create_entry(self._playback_key, piece_size=videostatus.piecelen, num_pieces=videostatus.movie_numpieces, bitrate=videostatus.bitrate, nat=session.get_nat_type(callback=set_nat)) + self._playback_event_db.add_event(self._playback_key, "play", "init") + + else: + self._playback_info_db = None + self._playback_event_db = None + + self._complete = False + self.videoinfo = videoinfo + self.bt1download = bt1download + self.piecepicker = bt1download.picker + self.rawserver = bt1download.rawserver + self.storagewrapper = bt1download.storagewrapper + self.fileselector = bt1download.fileselector + + self.vodeventfunc = vodeventfunc + self.videostatus = vs = videostatus + + # Add quotes around path, as that's what os.popen() wants on win32 + if sys.platform == "win32" and videoanalyserpath is not None and videoanalyserpath.find(' ') != -1: + self.video_analyser_path='"'+videoanalyserpath+'"' + else: + self.video_analyser_path=videoanalyserpath + + # counter for the sustainable() call. Every X calls the + # buffer-percentage is updated. + self.sustainable_counter = sys.maxint + + # boudewijn: because we now update the downloadrate for each + # received chunk instead of each piece we do not need to + # average the measurement over a 'long' period of time. Also, + # we only update the downloadrate for pieces that are in the + # high priority range giving us a better estimation on how + # likely the pieces will be available on time. + self.overall_rate = Measure(10) + self.high_range_rate = Measure(2) + + # boudewijn: increase the initial minimum buffer size + if not vs.live_streaming: + vs.increase_high_range() + + # buffer: a link to the piecepicker buffer + self.has = self.piecepicker.has + + # number of pieces in buffer + self.pieces_in_buffer = 0 + + self.data_ready = Condition() + + # Arno: Call FFMPEG only if the torrent did not provide the + # bitrate and video dimensions. 
This is becasue FFMPEG + # sometimes hangs e.g. Ivaylo's Xvid Finland AVI, for unknown + # reasons + + # Arno: 2007-01-06: Since we use VideoLan player, videodimensions not important + if vs.bitrate_set: + self.doing_ffmpeg_analysis = False + self.doing_bitrate_est = False + self.videodim = None #self.movieselector.videodim + else: + self.doing_ffmpeg_analysis = True + self.doing_bitrate_est = True + self.videodim = None + + self.player_opened_with_width_height = False + self.ffmpeg_est_bitrate = None + + # number of packets required to preparse the video + # I say we need 128 KB to sniff size and bitrate + + # Arno: 2007-01-04: Changed to 1MB. It appears ffplay works better with some + # decent prebuffering. We should replace this with a timing based thing, + + if not self.doing_bitrate_est: + if vs.live_streaming: + prebufsecs = self.PREBUF_SEC_LIVE + else: + prebufsecs = self.PREBUF_SEC_VOD + + # assumes first piece is whole (first_piecelen == piecelen) + piecesneeded = vs.time_to_pieces( prebufsecs ) + bytesneeded = piecesneeded * vs.piecelen + else: + # Arno, 2007-01-08: for very high bitrate files e.g. + # 850 kilobyte/s (500 MB for 10 min 20 secs) this is too small + # and we'll have packet loss because we start too soon. + bytesneeded = 1024 * 1024 + piecesneeded = 1 + int(ceil((bytesneeded - vs.first_piecelen) / float(vs.piecelen))) + + if vs.wraparound: + self.max_prebuf_packets = min(vs.wraparound_delta, piecesneeded) + else: + self.max_prebuf_packets = min(vs.movie_numpieces, piecesneeded) + + if self.doing_ffmpeg_analysis and DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: Want",self.max_prebuf_packets,"pieces for FFMPEG analysis, piecesize",vs.piecelen + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: Want",self.max_prebuf_packets,"pieces for prebuffering" + + self.nreceived = 0 + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: Setting MIME type to",self.videoinfo['mimetype'] + + self.set_mimetype(self.videoinfo['mimetype']) + + # some statistics + self.stat_playedpieces = 0 # number of pieces played successfully + self.stat_latepieces = 0 # number of pieces that arrived too late + self.stat_droppedpieces = 0 # number of pieces dropped + self.stat_stalltime = 0.0 # total amount of time the video was stalled + self.stat_prebuffertime = 0.0 # amount of prebuffer time used + self.stat_pieces = PieceStats() # information about each piece + + # start periodic tasks + self.curpiece = "" + self.curpiece_pos = 0 + self.outbuf = [] + #self.last_pop = None # time of last pop + self.reset_bitrate_prediction() + + self.lasttime=0 + # For DownloadState + self.prebufprogress = 0.0 + self.prebufstart = time.time() + self.playable = False + self.usernotified = False + + self.outbuflen = None + + # LIVESOURCEAUTH + if vs.live_streaming and vs.authparams['authmethod'] == LIVE_AUTHMETHOD_ECDSA: + self.authenticator = ECDSAAuthenticator(vs.first_piecelen,vs.movie_numpieces,pubkeypem=vs.authparams['pubkey']) + vs.sigsize = vs.piecelen - self.authenticator.get_content_blocksize() + else: + self.authenticator = None + + self.refill_rawserv_tasker() + self.tick_second() + + # link to others (last thing to do) + self.piecepicker.set_transporter( self ) + #self.start() + + if FAKEPLAYBACK: + import threading + + class FakeReader(threading.Thread): + def __init__(self,movie): + threading.Thread.__init__(self) + self.movie = movie + + def run(self): + self.movie.start() + while not 
self.movie.done(): + self.movie.read() + + t = FakeReader(self) + t.start() + + #self.rawserver.add_task( fakereader, 0.0 ) + + if self.videostatus.live_streaming: + self.live_streaming_timer() + + def calc_live_startpos(self,prebufsize=2,have=False): + """ If watching a live stream, determine where to 'hook in'. Adjusts self.download_range[0] + accordingly, never decreasing it. If 'have' is true, we need to have the data + ourself. If 'have' is false, we look at availability at our neighbours. + + Return True if succesful, False if more data has to be collected. """ + + # ----- determine highest known piece number + if have: + numseeds = 0 + numhaves = self.piecepicker.has + totalhaves = self.piecepicker.numgot + + threshold = 1 + else: + numseeds = self.piecepicker.seeds_connected + numhaves = self.piecepicker.numhaves # excludes seeds + totalhaves = self.piecepicker.totalcount # excludes seeds + + numconns = self.piecepicker.num_nonempty_neighbours() + threshold = max( 1, numconns/2 ) + + # FUDGE: number of pieces we subtract from maximum known/have, + # to start playback with some buffer present. We need enough + # pieces to do pass the prebuffering phase. when still + # requesting pieces, FUDGE can probably be a bit low lower, + # since by the time they arrive, we will have later pieces anyway. + # NB: all live torrents have the bitrate set. + FUDGE = prebufsize #self.max_prebuf_packets + + if numseeds == 0 and totalhaves == 0: + # optimisation: without seeds or pieces, just wait + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: calc_live_offset: no pieces" + return False + + # pieces are known, so we can determine where to start playing + vs = self.videostatus + + bpiece = vs.first_piece + epiece = vs.last_piece + + if numseeds > 0 or (not vs.wraparound and numhaves[epiece] > 0): + # special: if full video is available, do nothing and enter VoD mode + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: calc_live_offset: vod mode" + vs.set_live_startpos( 0 ) + return True + + # maxnum = highest existing piece number owned by more than half of the neighbours + maxnum = None + for i in xrange(epiece,bpiece-1,-1): + #if DEBUG: + # if 0 < numhaves[i] < threshold: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: calc_live_offset: discarding piece %d as it is owned by only %d<%d neighbours" % (i,numhaves[i],threshold) + + if numhaves[i] >= threshold: + maxnum = i + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: calc_live_offset: chosing piece %d as it is owned by %d>=%d neighbours" % (i,numhaves[i],threshold) + break + + if maxnum is None: + return False + + # if there is wraparound, newest piece may actually have wrapped + if vs.wraparound and maxnum > epiece - vs.wraparound_delta: + delta_left = vs.wraparound_delta - (epiece-maxnum) + + for i in xrange( vs.first_piece+delta_left-1, vs.first_piece-1, -1 ): + if numhaves[i] >= threshold: + maxnum = i + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: calc_live_offset: chosing piece %d as it is owned by %d>=%d neighbours" % (i,numhaves[i],threshold) + break + + # start watching from maximum piece number, adjusted by fudge. 
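The hook-in scan above reduces to: walk from the newest piece down to the oldest, take the first piece that at least half of the non-empty neighbours report having, and then step back a fixed number of pieces (FUDGE) so prebuffering has material to draw from. A minimal standalone sketch of that selection, assuming a plain list of have-counts and ignoring wraparound (an illustration, not code from this patch):

def pick_live_hookin(numhaves, first_piece, last_piece, numconns, fudge):
    # Illustrative only: newest piece seen by at least half the neighbours, minus a fudge.
    threshold = max(1, numconns / 2)
    for i in xrange(last_piece, first_piece - 1, -1):    # scan newest -> oldest
        if numhaves[i] >= threshold:
            return max(first_piece, i - fudge)           # leave room to prebuffer
    return None                                          # nothing is widely available yet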
+ if vs.wraparound: + maxnum = vs.normalize( maxnum - FUDGE ) + #f = bpiece + (maxnum - bpiece - FUDGE) % (epiece-bpiece) + #t = bpiece + (f - bpiece + vs.wraparound_delta) % (epiece-bpiece) + + # start at a piece known to exist to avoid waiting for something that won't appear + # for another round. guaranteed to succeed since we would have bailed if noone had anything + while not numhaves[maxnum]: + maxnum = vs.normalize( maxnum + 1 ) + else: + maxnum = max( bpiece, maxnum - FUDGE ) + + if maxnum == bpiece: + # video has just started -- watch from beginning + return True + + # If we're connected to the source, and already hooked in, + # don't change the hooking point unless it is really far + oldstartpos = vs.get_live_startpos() + if not have and threshold == 1 and oldstartpos is not None: + diff = vs.dist_range(oldstartpos,maxnum) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: calc_live_offset: m o",maxnum,oldstartpos,"diff",diff + if diff < 8: + return True + + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: === HOOKING IN AT PIECE %d (based on have: %s) ===" % (maxnum,have) + toinvalidateset = vs.set_live_startpos( maxnum ) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: invalidateset is",`toinvalidateset` + for piece in toinvalidateset: + self.live_invalidate_piece_globally(piece) + + return True + + def live_streaming_timer(self): + """ Background 'thread' to check where to hook in if live streaming. """ + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: live_streaming_timer: Finding hookin" + if self.videostatus.playing: + # Stop adjusting the download range + return + + # JD:keep checking correct playback pos since it can change if we switch neighbours + # due to faulty peers etc + + #if not (self.videostatus.live_startpos is None): + # # Adjust it only once on what we see around us + # return + + if self.calc_live_startpos( self.max_prebuf_packets, False ): + # Adjust it only once on what we see around us + #return + pass + + self.rawserver.add_task( self.live_streaming_timer, 1 ) + + def parse_video(self): + """ Feeds the first max_prebuf_packets to ffmpeg to determine video bitrate. """ + + vs = self.videostatus + width = None + height = None + + # Start ffmpeg, let it write to a temporary file to prevent + # blocking problems on Win32 when FFMPEG outputs lots of + # (error) messages. + # + [loghandle,logfilename] = mkstemp() + os.close(loghandle) + if sys.platform == "win32": + # Not "Nul:" but "nul" is /dev/null on Win32 + sink = 'nul' + else: + sink = '/dev/null' + # DON'T FORGET 'b' OTHERWISE WE'RE WRITING BINARY DATA IN TEXT MODE! + (child_out,child_in) = os.popen2( "%s -y -i - -vcodec copy -acodec copy -f avi %s > %s 2>&1" % (self.video_analyser_path, sink, logfilename), 'b' ) + """ + # If the path is "C:\Program Files\bla\bla" (escaping left out) and that file does not exist + # the output will say something cryptic like "vod: trans: FFMPEG said C:\Program" suggesting an + # error with the double quotes around the command, but that's not it. Be warned! + cmd = self.video_analyser_path+' -y -i - -vcodec copy -acodec copy -f avi '+sink+' > '+logfilename+' 2>&1' + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: Video analyser command is",cmd + (child_out,child_in) = os.popen2(cmd,'b') # DON'T FORGET 'b' OTHERWISE THINGS GO WRONG! 
+ """ + + # feed all the pieces + first,last = vs.download_range() + for i in xrange(first,last): + piece = self.get_piece( i ) + + if piece is None: + break + + # remove any signatures etc + if self.authenticator is not None: + piece = self.authenticator.get_content( piece ) + + try: + child_out.write( piece ) + except IOError: + print_exc(file=sys.stderr) + break + + child_out.close() + child_in.close() + + logfile = open(logfilename, 'r') + + # find the bitrate in the output + bitrate = None + + r = re.compile( "bitrate= *([0-9.]+)kbits/s" ) + r2 = re.compile( "Video:.* ([0-9]+x[0-9]+)," ) # video dimensions WIDTHxHEIGHT + + founddim = False + for x in logfile.readlines(): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: FFMPEG said:",x + occ = r.findall( x ) + if occ: + # use the latest mentioning of bitrate + bitrate = float( occ[-1] ) * 1024 / 8 + if DEBUG: + if bitrate is not None: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: Bitrate according to FFMPEG: %.2f KByte/s" % (bitrate/1024) + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: Bitrate could not be determined by FFMPEG" + occ = r2.findall( x ) + if occ and not founddim: + # use first occurence + dim = occ[0] + idx = dim.find('x') + width = int(dim[:idx]) + height = int(dim[idx+1:]) + founddim = True + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: width",width,"heigth",height + logfile.close() + try: + os.remove(logfilename) + except: + pass + + return [bitrate,width,height] + + def update_prebuffering(self,received_piece=None): + """ Update prebuffering process. 'received_piece' is a hint that we just received this piece; + keep at 'None' for an update in general. 
""" + + vs = self.videostatus + + if not vs.prebuffering: + return + + if vs.live_streaming and vs.live_startpos is None: + # first determine where to hook in + return + + if received_piece: + self.nreceived += 1 + + high_range = vs.generate_high_range() + high_range_length = vs.get_high_range_length() + missing_pieces = filter(lambda i: not self.have_piece(i), high_range) + gotall = not missing_pieces + if high_range_length: + self.prebufprogress = min(1, float(high_range_length - len(missing_pieces)) / max(1, high_range_length)) + else: + self.prebufprogress = 1.0 + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: Already got",(self.prebufprogress*100.0),"% of prebuffer" + + if not gotall and DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: Still need pieces",missing_pieces,"for prebuffering/FFMPEG analysis" + + if vs.dropping: + if not self.doing_ffmpeg_analysis and not gotall and not (0 in missing_pieces) and self.nreceived > self.max_prebuf_packets: + perc = float(self.max_prebuf_packets)/10.0 + if float(len(missing_pieces)) < perc or self.nreceived > (2*len(missing_pieces)): + # If less then 10% of packets missing, or we got 2 times the packets we need already, + # force start of playback + gotall = True + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: Forcing stop of prebuffering, less than",perc,"missing, or got 2N packets already" + + if gotall and self.doing_ffmpeg_analysis: + [bitrate,width,height] = self.parse_video() + self.doing_ffmpeg_analysis = False + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: after parse",bitrate,self.doing_bitrate_est + if bitrate is None or round(bitrate)== 0: + if self.doing_bitrate_est: + # Errr... there was no playtime info in the torrent + # and FFMPEG can't tell us... + bitrate = (1*1024*1024/8) # 1mbps + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: No bitrate info avail, wild guess: %.2f KByte/s" % (bitrate/1024) + + vs.set_bitrate(bitrate) + if self._playback_info_db: self._playback_info_db.set_bitrate(self._playback_key, bitrate) + if self._playback_event_db: self._playback_event_db.add_event(self._playback_key, "ffmpeg", "bitrate %d" % bitrate) + else: + if self.doing_bitrate_est: + # There was no playtime info in torrent, use what FFMPEG tells us + self.ffmpeg_est_bitrate = bitrate + bitrate *= 1.1 # Make FFMPEG estimation 10% higher + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: Estimated bitrate: %.2f KByte/s" % (bitrate/1024) + + vs.set_bitrate(bitrate) + if self._playback_info_db: self._playback_info_db.set_bitrate(self._playback_key, bitrate) + if self._playback_event_db: self._playback_event_db.add_event(self._playback_key, "ffmpeg", "bitrate %d" % bitrate) + + if width is not None and height is not None: + diff = False + if self.videodim is None: + self.videodim = (width,height) + self.height = height + elif self.videodim[0] != width or self.videodim[1] != height: + diff = True + if not self.player_opened_with_width_height or diff: + #self.user_setsize(self.videodim) + pass + + # # 10/03/09 boudewijn: For VOD we will wait for the entire + # # buffer to fill (gotall) before we start playback. For live + # # this is unlikely to happen and we will therefore only wait + # # until we estimate that we have enough_buffer. 
+ # if (gotall or vs.live_streaming) and self.enough_buffer(): + if gotall and self.enough_buffer(): + # enough buffer and could estimated bitrate - start streaming + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: Prebuffering done",currentThread().getName() + self.data_ready.acquire() + vs.prebuffering = False + self.stat_prebuffertime = time.time() - self.prebufstart + self.notify_playable() + self.data_ready.notify() + self.data_ready.release() + + elif DEBUG: + if self.doing_ffmpeg_analysis: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: Prebuffering: waiting to obtain the first %d packets" % (self.max_prebuf_packets) + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: Prebuffering: %.2f seconds left" % (self.expected_buffering_time()) + + def got_have(self,piece): + vs = self.videostatus + + # update stats + self.stat_pieces.set( piece, "known", time.time() ) + """ + if vs.playing and vs.wraparound: + # check whether we've slipped back too far + d = vs.wraparound_delta + n = max(1,self.piecepicker.num_nonempty_neighbours()/2) + if self.piecepicker.numhaves[piece] > n and d/2 < (piece - vs.playback_pos) % vs.movie_numpieces < d: + # have is confirmed by more than half of the neighours and is in second half of future window + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: Forcing restart. Am at playback position %d but saw %d at %d>%d peers." % (vs.playback_pos,piece,self.piecepicker.numhaves[piece],n) + + self.start(force=True) + """ + + def got_piece(self, piece_id, begin, length): + """ + Called when a chunk has been downloaded. This information can + be used to estimate download speed. + """ + if self.videostatus.in_high_range(piece_id): + self.high_range_rate.update_rate(length) + # if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "vod: high priority rate:", self.high_range_rate.get_rate() + + def complete(self,piece,downloaded=True): + """ Called when a movie piece has been downloaded or was available from the start (disk). """ + + if not self._complete and self.piecepicker.am_I_complete(): + self._complete = True + if self._playback_event_db: self._playback_event_db.add_event(self._playback_key, "complete", "system") + + vs = self.videostatus + + if vs.wraparound: + assert downloaded + + self.stat_pieces.set( piece, "complete", time.time() ) + + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: Completed",piece + + if downloaded: + self.overall_rate.update_rate( vs.real_piecelen( piece ) ) + + if vs.in_download_range( piece ): + self.pieces_in_buffer += 1 + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: piece %d too late [pos=%d]" % (piece,vs.playback_pos) + self.stat_latepieces += 1 + + if vs.playing and vs.playback_pos == piece: + # we were delaying for this piece + self.refill_buffer() + + self.update_prebuffering( piece ) + + def set_pos(self,pos): + """ Update the playback position. Called when playback is started (depending + on requested offset). 
""" + + vs = self.videostatus + + oldpos = vs.playback_pos + vs.playback_pos = pos + + if vs.wraparound: + # recalculate + self.pieces_in_buffer = 0 + for i in vs.generate_range( vs.download_range() ): + if self.has[i]: + self.pieces_in_buffer += 1 + else: + # fast forward + for i in xrange(oldpos,pos+1): + if self.has[i]: + self.pieces_in_buffer -= 1 + + # fast rewind + for i in xrange(pos,oldpos+1): + if self.has[i]: + self.pieces_in_buffer += 1 + + def inc_pos(self): + vs = self.videostatus + + if self.has[vs.playback_pos]: + self.pieces_in_buffer -= 1 + + vs.inc_playback_pos() + + if vs.live_streaming: + self.live_invalidate_piece_globally(vs.live_piece_to_invalidate()) + +# def buffered_time_period(self): +# """Length of period of Buffered pieces""" +# if self.movieselector.bitrate is None or self.movieselector.bitrate == 0.0: +# return 0 +# else: +# return self.pieces_in_buffer * self.movieselector.piece_length / self.movieselector.bitrate +# +# def playback_time_position(self): +# """Time of playback_pos and total duration +# Return playback_time in seconds +# """ +# if self.movieselector.bitrate is None or self.movieselector.bitrate == 0.0: +# return 0 +# else: +# return self.playback_pos * self.movieselector.piece_length / self.movieselector.bitrate + + def expected_download_time(self): + """ Expected download time left. """ + vs = self.videostatus + if vs.wraparound: + return float(2 ** 31) + + pieces_left = vs.last_piece - vs.playback_pos - self.pieces_in_buffer + if pieces_left <= 0: + return 0.0 + + # list all pieces from the high priority set that have not + # been completed + uncompleted_pieces = filter(self.storagewrapper.do_I_have, vs.generate_high_range()) + + # when all pieces in the high-range have been downloaded, + # we have an expected download time of zero + if not uncompleted_pieces: + return 0.0 + + # the download time estimator is very inacurate when we only + # have a few chunks left. therefore, we will put more emphesis + # on the overall_rate as the number of uncompleted_pieces does + # down. + total_length = vs.get_high_range_length() + uncompleted_length = len(uncompleted_pieces) + expected_download_speed = self.high_range_rate.get_rate() * (1 - float(uncompleted_length) / total_length) + \ + self.overall_rate.get_rate() * uncompleted_length / total_length + if expected_download_speed < 0.1: + return float(2 ** 31) + + return pieces_left * vs.piecelen / expected_download_speed + + def expected_playback_time(self): + """ Expected playback time left. """ + + vs = self.videostatus + + if vs.wraparound: + return float(2 ** 31) + + pieces_to_play = vs.last_piece - vs.playback_pos + 1 + + if pieces_to_play <= 0: + return 0.0 + + if not vs.bitrate: + return float(2 ** 31) + + return pieces_to_play * vs.piecelen / vs.bitrate + + def expected_buffering_time(self): + """ Expected time required for buffering. """ + download_time = self.expected_download_time() + playback_time = self.expected_playback_time() + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","EXPECT",self.expected_download_time(),self.expected_playback_time() + # Infinite minus infinite is still infinite + if download_time > float(2 ** 30) and playback_time > float(2 ** 30): + return float(2 ** 31) + return abs(download_time - playback_time) + + def enough_buffer(self): + """ Returns True if we can safely start playback without expecting to run out of + buffer. 
""" + + if self.videostatus.wraparound: + # Wrapped streaming has no (known) limited duration, so we cannot predict + # whether we have enough download speed. The only way is just to hope + # for the best, since any buffer will be emptied if the download speed + # is too low. + return True + + return max(0.0, self.expected_download_time() - self.expected_playback_time()) == 0.0 + + def tick_second(self): + self.rawserver.add_task( self.tick_second, 1.0 ) + + vs = self.videostatus + + # Adjust estimate every second, but don't display every second + display = (int(time.time()) % 5) == 0 + if display: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: Estimated download time: %5.1fs [priority: %7.2f Kbyte/s] [overall: %7.2f Kbyte/s]" % (self.expected_download_time(), self.high_range_rate.get_rate()/1024, self.overall_rate.get_rate()/1024) + + if vs.playing and round(self.playbackrate.rate) > self.MINPLAYBACKRATE and not vs.prebuffering: + if self.doing_bitrate_est: + if display: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: Estimated playback time: %5ds [%7.2f Kbyte/s], doing estimate=%d" % (self.expected_playback_time(),self.playbackrate.rate/1024, self.ffmpeg_est_bitrate is None) + if self.ffmpeg_est_bitrate is None: + vs.set_bitrate( self.playbackrate.rate ) + + if display: + sys.stderr.flush() + + # + # MovieTransport interface + # + # WARNING: these methods will be called by other threads than NetworkThread! + # + def size( self ): + if self.videostatus.get_wraparound(): + return None + else: + return self.videostatus.selected_movie["size"] + + def read(self,numbytes=None): + """ Read at most numbytes from the stream. If numbytes is not given, + pieces are returned. The bytes read will be returned, or None in + case of an error or end-of-stream. """ + if not self.curpiece: + # curpiece_pos could be set to something other than 0! + # for instance, a seek request sets curpiece_pos but does not + # set curpiece. + + x = self.pop() + if x is None: + return None + + piecenr,self.curpiece = x + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: %d: popped piece to transport to player" % piecenr + + curpos = self.curpiece_pos + left = len(self.curpiece) - curpos + + if numbytes is None: + # default on one piece per read + numbytes = left + + if left > numbytes: + # piece contains enough -- return what was requested + data = self.curpiece[curpos:curpos+numbytes] + + self.curpiece_pos += numbytes + else: + # return remainder of the piece, could be less than numbytes + data = self.curpiece[curpos:] + + self.curpiece = "" + self.curpiece_pos = 0 + + return data + + def start( self, bytepos = 0, force = False ): + """ Initialise to start playing at position `bytepos'. """ + if self._playback_event_db: self._playback_event_db.add_event(self._playback_key, "play", "system") + + # ARNOTODO: we don't use start(bytepos != 0) at the moment. See if we + # should. Also see if we need the read numbytes here, or that it + # is better handled at a higher layer. For live it is currently + # done at a higher level, see VariableReadAuthStreamWrapper because + # we have to strip the signature. Hence the self.curpiece buffer here + # is superfluous. Get rid off it or check if + # + # curpiece[0:piecelen] + # + # returns curpiece if piecelen has length piecelen == optimize for + # piecesized case. + # + # For VOD seeking we may use the numbytes facility to seek to byte offsets + # not just piece offsets. 
+ # + vs = self.videostatus + + if vs.playing and not force: + return + + # lock before changing startpos or any other playing variable + self.data_ready.acquire() + try: + if vs.live_streaming: + # Determine where to start playing. There may be several seconds + # between starting the download and starting playback, which we'll + # want to skip. + self.calc_live_startpos( self.max_prebuf_packets, True ) + + # override any position request by VLC, we only have live data + piece = vs.playback_pos + offset = 0 + else: + # Determine piece number and offset + if bytepos < vs.first_piecelen: + piece = vs.first_piece + offset = bytepos + else: + newbytepos = bytepos - vs.first_piecelen + + piece = vs.first_piece + newbytepos / vs.piecelen + 1 + offset = newbytepos % vs.piecelen + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: === START at offset %d (piece %d) (forced: %s) ===" % (bytepos,piece,force) + + # Initialise all playing variables + self.curpiece = "" # piece currently being popped + self.curpiece_pos = offset + self.set_pos( piece ) + self.outbuf = [] + #self.last_pop = time.time() + self.reset_bitrate_prediction() + vs.playing = True + self.playbackrate = Measure( 60 ) + + # boudewijn: decrease the initial minimum buffer size + if not vs.live_streaming: + vs.decrease_high_range() + + finally: + self.data_ready.release() + + # ARNOTODO: start is called by non-NetworkThreads, these following methods + # are usually called by NetworkThread. + # + # We now know that this won't be called until notify_playable() so + # perhaps this can be removed? + # + # CAREFUL: if we use start() for seeking... that's OK. User won't be + # able to seek before he got his hands on the stream, so after + # notify_playable() + + # See what we can do right now + self.update_prebuffering() + self.refill_buffer() + + def stop( self ): + """ Playback is stopped. """ + if self._playback_event_db: self._playback_event_db.add_event(self._playback_key, "stop", "system") + + vs = self.videostatus + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: === STOP = player closed conn === " + if not vs.playing: + return + vs.playing = False + + # clear buffer and notify possible readers + self.data_ready.acquire() + self.outbuf = [] + #self.last_pop = None + vs.prebuffering = False + self.data_ready.notify() + self.data_ready.release() + + def pause( self, autoresume = False ): + """ Pause playback. If `autoresume' is set, playback is expected to be + resumed automatically once enough data has arrived. """ + if self._playback_event_db: self._playback_event_db.add_event(self._playback_key, "pause", "system") + + vs = self.videostatus + + if not vs.playing or not vs.pausable: + return + + if vs.paused: + vs.autoresume = autoresume + return + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: paused (autoresume: %s)" % (autoresume,) + + vs.paused = True + vs.autoresume = autoresume + self.paused_at = time.time() + #self.reset_bitrate_prediction() + self.videoinfo["usercallback"](VODEVENT_PAUSE,{ "autoresume": autoresume }) + + def resume( self ): + """ Resume paused playback. 
""" + if self._playback_event_db: self._playback_event_db.add_event(self._playback_key, "resume", "system") + + vs = self.videostatus + + if not vs.playing or not vs.paused or not vs.pausable: + return + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: resumed" + + vs.paused = False + vs.autoresume = False + self.stat_stalltime += time.time() - self.paused_at + self.addtime_bitrate_prediction( time.time() - self.paused_at ) + self.videoinfo["usercallback"](VODEVENT_RESUME,{}) + + self.update_prebuffering() + self.refill_buffer() + + def autoresume( self, testfunc = lambda: True ): + """ Resumes if testfunc returns True. If not, will test every second. """ + + vs = self.videostatus + + if not vs.playing or not vs.paused or not vs.autoresume: + return + + if not testfunc(): + self.rawserver.add_task( lambda: self.autoresume( testfunc ), 1.0 ) + return + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: Resuming, since we can maintain this playback position" + self.resume() + + def done( self ): + vs = self.videostatus + + if not vs.playing: + return True + + if vs.wraparound: + return False + + return vs.playback_pos == vs.last_piece+1 and self.curpiece_pos >= len(self.curpiece) + + def seek(self,pos,whence=None): + """ Seek to the given position, a number in bytes relative to both + the "whence" reference point and the file being played. + + We currently actually seek at byte level, via the start() method. + We support all forms of seeking, including seeking past the current + playback pos. Note this may imply needing to prebuffer again or + being paused. + + vs.playback_pos in NetworkThread domain. Does data_ready lock cover + that? Nope. However, this doesn't appear to be respected in any + of the MovieTransport methods, check all. + + Check + * When seeking reset other buffering, e.g. read()'s self.curpiece + and higher layers. + + """ + vs = self.videostatus + length = self.size() + + # lock before changing startpos or any other playing variable + self.data_ready.acquire() + try: + if vs.live_streaming: + if pos == 0 and whence == os.SEEK_SET: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: seek: Ignoring seek in live" + else: + raise ValueError("seeking not possible for live") + if whence == os.SEEK_SET: + abspos = pos + elif whence == os.SEEK_END: + if pos > 0: + raise ValueError("seeking beyond end of stream") + else: + abspos = size+pos + else: # SEEK_CUR + raise ValueError("seeking does not currently support SEEK_CUR") + + self.stop() + self.start(pos) + finally: + self.data_ready.release() + + + + def get_mimetype(self): + return self.mimetype + + def set_mimetype(self,mimetype): + self.mimetype = mimetype + # + # End of MovieTransport interface + # + + def have_piece(self,piece): + return self.piecepicker.has[piece] + + def get_piece(self,piece): + """ Returns the data of a certain piece, or None. 
""" + + vs = self.videostatus + + if not self.have_piece( piece ): + return None + + begin = 0 + length = vs.piecelen + + if piece == vs.first_piece: + begin = vs.movie_range[0][1] + length -= begin + + if piece == vs.last_piece: + cutoff = vs.piecelen - (vs.movie_range[1][1] + 1) + length -= cutoff + + data = self.storagewrapper.do_get_piece(piece, begin, length) + if data is None: + return None + return data.tostring() + + def reset_bitrate_prediction(self): + self.start_playback = None + self.last_playback = None + self.history_playback = collections.deque() + + def addtime_bitrate_prediction(self,seconds): + if self.start_playback is not None: + self.start_playback["local_ts"] += seconds + + def valid_piece_data(self,i,piece): + if not piece: + return False + + if not self.start_playback or self.authenticator is None: + # no check possible + return True + + s = self.start_playback + + seqnum = self.authenticator.get_seqnum( piece ) + source_ts = self.authenticator.get_rtstamp( piece ) + + if seqnum < s["absnr"] or source_ts < s["source_ts"]: + # old packet??? + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: **** INVALID PIECE #%s **** seqnum=%d but we started at seqnum=%d" % (i,seqnum,s["absnr"]) + return False + + return True + + + def update_bitrate_prediction(self,i,piece): + """ Update the rate prediction given that piece i has just been pushed to the buffer. """ + + if self.authenticator is not None: + seqnum = self.authenticator.get_seqnum( piece ) + source_ts = self.authenticator.get_rtstamp( piece ) + else: + seqnum = i + source_ts = 0 + + d = { + "nr": i, + "absnr": seqnum, + "local_ts": time.time(), + "source_ts": source_ts, + } + + # record + if self.start_playback is None: + self.start_playback = d + + if self.last_playback and self.last_playback["absnr"] > d["absnr"]: + # called out of order + return + + self.last_playback = d + + # keep a recent history + MAX_HIST_LEN = 10*60 # seconds + + self.history_playback.append( d ) + + # of at most 10 entries (or minutes if we keep receiving pieces) + while source_ts - self.history_playback[0]["source_ts"] > MAX_HIST_LEN: + self.history_playback.popleft() + + if DEBUG: + vs = self.videostatus + first, last = self.history_playback[0], self.history_playback[-1] + + if first["source_ts"] and first != last: + bitrate = "%.2f kbps" % (8.0 / 1024 * (vs.piecelen - vs.sigsize) * (last["absnr"] - first["absnr"]) / (last["source_ts"] - first["source_ts"]),) + else: + bitrate = "%.2f kbps (external info)" % (8.0 / 1024 * vs.bitrate) + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: %i: pushed at t=%.2f, age is t=%.2f, bitrate = %s" % (i,d["local_ts"]-self.start_playback["local_ts"],d["source_ts"]-self.start_playback["source_ts"],bitrate) + + def piece_due(self,i): + """ Return the time when we expect to have to send a certain piece to the player. For + wraparound, future pieces are assumed. """ + + if self.start_playback is None: + return float(2 ** 31) # end of time + + s = self.start_playback + l = self.last_playback + vs = self.videostatus + + if not vs.wraparound and i < l["nr"]: + # should already have arrived! + return time.time() + + # assume at most one wrap-around between l and i + piecedist = (i - l["nr"]) % vs.movie_numpieces + + if s["source_ts"]: + # ----- we have timing information from the source + first, last = self.history_playback[0], self.history_playback[-1] + + if first != last: + # we have at least two recent pieces, so can calculate average bitrate. 
use the recent history + # do *not* adjust for sigsize since we don't want the actual video speed but the piece rate + bitrate = 1.0 * vs.piecelen * (last["absnr"] - first["absnr"]) / (last["source_ts"] - first["source_ts"]) + else: + # fall-back to bitrate predicted from torrent / ffmpeg + bitrate = vs.bitrate + + # extrapolate with the average bitrate so far + return s["local_ts"] + l["source_ts"] - s["source_ts"] + piecedist * vs.piecelen / bitrate - self.PIECE_DUE_SKEW + else: + # ----- no timing information from pieces, so do old-fashioned methods + if vs.live_streaming: + # Arno, 2008-11-20: old-fashioned method is well bad, + # ignore. + return time.time() + 60.0 + else: + i = piecedist + (l["absnr"] - s["absnr"]) + + if s["nr"] == vs.first_piece: + bytepos = vs.first_piecelen + (i-1) * vs.piecelen + else: + bytepos = i * vs.piecelen + + return s["local_ts"] + bytepos / vs.bitrate - self.PIECE_DUE_SKEW + + + def max_buffer_size( self ): + vs = self.videostatus + if vs.dropping: + # live + # Arno: 1/2 MB or based on bitrate if that is above 5 Mbps + return max( 0*512*1024, self.BUFFER_TIME * vs.bitrate ) + else: + # VOD + # boudewijn: 1/4 MB, bitrate, or 2 pieces (wichever is higher) + return max(256*1024, vs.piecelen * 2, self.BUFFER_TIME * vs.bitrate) + + + def refill_buffer( self ): + """ Push pieces into the player FIFO when needed and able. This counts as playing + the pieces as far as playback_pos is concerned.""" + + self.data_ready.acquire() + + vs = self.videostatus + + if vs.prebuffering or not vs.playing: + self.data_ready.release() + return + + #if self.last_pop is not None and time.time() - self.last_pop > self.MAX_POP_TIME: + # # last pop too long ago, restart + # self.data_ready.release() + # self.stop() + # self.start(force=True) + # return + + if vs.paused: + self.data_ready.release() + return + + mx = self.max_buffer_size() + self.outbuflen = sum( [len(d) for (p,d) in self.outbuf] ) + now = time.time() + + def buffer_underrun(): + return self.outbuflen == 0 and self.start_playback and now - self.start_playback["local_ts"] > 1.0 + + if buffer_underrun(): + + if vs.dropping: # live + def sustainable(): + # buffer underrun -- check for available pieces + num_future_pieces = 0 + for piece in vs.generate_range( vs.download_range() ): + if self.has[piece]: + num_future_pieces += 1 + + goal = mx / 2 + # progress + self.prebufprogress = min(1.0,float(num_future_pieces * vs.piecelen) / float(goal)) + + # enough future data to fill the buffer + return num_future_pieces * vs.piecelen >= goal + else: # vod + def sustainable(): + # num_immediate_packets = 0 + # for piece in vs.generate_range( vs.download_range() ): + # if self.has[piece]: + # num_immediate_packets += 1 + # else: + # break + # else: + # # progress + # self.prebufprogress = 1.0 + # # completed loop without breaking, so we have everything we need + # return True + # + # # progress + # self.prebufprogress = min(1.0,float(num_immediate_packets) / float(self.max_prebuf_packets)) + # + # return num_immediate_packets >= self.max_prebuf_packets + + self.sustainable_counter += 1 + if self.sustainable_counter > 10: + self.sustainable_counter = 0 + + high_range_length = vs.get_high_range_length() + have_length = len(filter(lambda n:self.has[n], vs.generate_high_range())) + + # progress + self.prebufprogress = min(1.0, float(have_length) / max(1, high_range_length)) + + return have_length >= high_range_length + + else: + num_immediate_packets = 0 + high_range_length = vs.get_high_range_length() + # for piece in 
vs.generate_range(vs.download_range()): + for piece in vs.generate_high_range(): + if self.has[piece]: + num_immediate_packets += 1 + if num_immediate_packets >= high_range_length: + break + else: + break + else: + # progress + self.prebufprogress = 1.0 + # completed loop without breaking, so we have everything we need + return True + + return num_immediate_packets >= high_range_length + + sus = sustainable() + if vs.pausable and not sus: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: BUFFER UNDERRUN -- PAUSING" + self.pause( autoresume = True ) + self.autoresume( sustainable ) + + # boudewijn: increase the minimum buffer size + vs.increase_high_range() + + self.data_ready.release() + return + elif sus: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: BUFFER UNDERRUN -- IGNORING, rate is sustainable" + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: BUFFER UNDERRUN -- STALLING, cannot pause player to fall back some, so just wait for more pieces" + self.data_ready.release() + return + + def push( i, data ): + # force buffer underrun: + #if self.start_playback and time.time()-self.start_playback["local_ts"] > 60: + # # hack: dont push after 1 minute + # return + + # push packet into queue + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: %d: pushed l=%d" % (vs.playback_pos,piece) + + # update predictions based on this piece + self.update_bitrate_prediction( i, data ) + + self.stat_playedpieces += 1 + self.stat_pieces.set( i, "tobuffer", time.time() ) + + self.outbuf.append( (vs.playback_pos,data) ) + self.outbuflen += len(data) + + self.data_ready.notify() + self.inc_pos() + + def drop( i ): + # drop packet + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: %d: dropped pos=%d; deadline expired %.2f sec ago !!!!!!!!!!!!!!!!!!!!!!" % (piece,vs.playback_pos,time.time()-self.piece_due(i)) + + self.stat_droppedpieces += 1 + self.stat_pieces.complete( i ) + self.inc_pos() + + for piece in vs.generate_range( vs.download_range() ): + ihavepiece = self.has[piece] + forcedrop = False + + # check whether we have room to store it + if self.outbuflen > mx: + # buffer full + break + + # final check for piece validity + if ihavepiece: + data = self.get_piece( piece ) + if not self.valid_piece_data( piece, data ): + # I should have the piece, but I don't: WAAAAHH! + forcedrop = True + ihavepiece = False + + if ihavepiece: + # have piece - push it into buffer + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: BUFFER STATUS (max %.0f): %.0f kbyte" % (mx/1024.0,self.outbuflen/1024.0) + + # piece found -- add it to the queue + push( piece, data ) + else: + # don't have piece, or forced to drop + if not vs.dropping and forcedrop: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: DROPPING INVALID PIECE #%s, even though we shouldn't drop anything." 
% piece + if vs.dropping or forcedrop: + if time.time() >= self.piece_due( piece ) or buffer_underrun() or forcedrop: + # piece is too late or we have an empty buffer (and future data to play, otherwise we would have paused) -- drop packet + drop( piece ) + else: + # we have time to wait for the piece and still have data in our buffer -- wait for packet + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: %d: due in %.2fs pos=%d" % (piece,self.piece_due(piece)-time.time(),vs.playback_pos) + break + else: # not dropping + if self.outbuflen == 0: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: SHOULD NOT HAPPEN: missing piece but not dropping. should have paused. pausable=",vs.pausable + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: prebuffering done, but could not fill buffer." + break + + self.data_ready.release() + + def refill_rawserv_tasker( self ): + self.refill_buffer() + + self.rawserver.add_task( self.refill_rawserv_tasker, self.REFILL_INTERVAL ) + + def pop( self ): + self.data_ready.acquire() + vs = self.videostatus + + while vs.prebuffering and not self.done(): + # wait until done prebuffering + self.data_ready.wait() + + while not self.outbuf and not self.done(): + # wait until a piece is available + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: Player waiting for data" + self.data_ready.wait() + + if not self.outbuf: + piece = None + else: + piece = self.outbuf.pop( 0 ) # nr,data pair + self.playbackrate.update_rate( len(piece[1]) ) + + #self.last_pop = time.time() + + self.data_ready.release() + + if piece: + self.stat_pieces.set( piece[0], "toplayer", time.time() ) + self.stat_pieces.complete( piece[0] ) + + return piece + + def notify_playable(self): + """ Tell user he can play the media, + cf. Tribler.Core.DownloadConfig.set_vod_event_callback() + """ + #if self.bufferinfo: + # self.bufferinfo.set_playable() + #self.progressinf.bufferinfo_updated_callback() + + # triblerAPI + if self.usernotified: + return + self.usernotified = True + self.prebufprogress = 1.0 + self.playable = True + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: notify_playable: Calling usercallback to tell it we're ready to play",self.videoinfo['usercallback'] + + # MIME type determined normally in LaunchManyCore.network_vod_event_callback + # However, allow for recognition by videoanalyser + mimetype = self.get_mimetype() + complete = self.piecepicker.am_I_complete() + if complete: + stream = None + filename = self.videoinfo["outpath"] + else: + stream = MovieTransportStreamWrapper(self) + if self.videostatus.live_streaming and self.videostatus.authparams['authmethod'] != LIVE_AUTHMETHOD_NONE: + intermedstream = AuthStreamWrapper(stream,self.authenticator) + endstream = VariableReadAuthStreamWrapper(intermedstream,self.authenticator.get_piece_length()) + else: + endstream = stream + filename = None + + # Call user callback + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: notify_playable: calling:",self.vodeventfunc + self.vodeventfunc( self.videoinfo, VODEVENT_START, { + "complete": complete, + "filename": filename, + "mimetype": mimetype, + "stream": endstream, + "length": self.size(), + } ) + + + # + # Methods for DownloadState to extract status info of VOD mode. + # + def get_stats(self): + """ Returns accumulated statistics. 
The piece data is cleared after this call to save memory. """ + """ Called by network thread """ + s = { "played": self.stat_playedpieces, + "late": self.stat_latepieces, + "dropped": self.stat_droppedpieces, + "stall": self.stat_stalltime, + "pos": self.videostatus.playback_pos, + "prebuf": self.stat_prebuffertime, + "pp": self.piecepicker.stats, + "pieces": self.stat_pieces.pop_completed(), } + return s + + def get_prebuffering_progress(self): + """ Called by network thread """ + return self.prebufprogress + + def is_playable(self): + """ Called by network thread """ + if not self.playable or self.videostatus.prebuffering: + self.playable = (self.prebufprogress == 1.0 and self.enough_buffer()) + return self.playable + + def get_playable_after(self): + """ Called by network thread """ + return self.expected_buffering_time() + + def get_duration(self): + return 1.0 * self.videostatus.selected_movie["size"] / self.videostatus.bitrate + + # + # Live streaming + # + def live_invalidate_piece_globally(self, piece): + """ Make piece disappear from this peer's view of BT world """ + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: trans: live_invalidate",piece + + self.piecepicker.invalidate_piece(piece) + self.piecepicker.downloader.live_invalidate(piece) + + # LIVESOURCEAUTH + def piece_from_live_source(self,index,data): + if self.authenticator is not None: + return self.authenticator.verify(data,index=index) + else: + return True + diff --git a/tribler-mod/Tribler/Core/Video/VideoSource.py b/tribler-mod/Tribler/Core/Video/VideoSource.py new file mode 100644 index 0000000..13332fe --- /dev/null +++ b/tribler-mod/Tribler/Core/Video/VideoSource.py @@ -0,0 +1,248 @@ +from time import localtime, strftime +# written by Jan David Mol +# see LICENSE.txt for license information +# +# Represent a source of video (other than a BitTorrent swarm), which can inject +# pieces into the downloading engine. + +# We assume we are the sole originator of these pieces, i.e. none of the pieces +# injected are already obtained from another source or requested from some peer. + +import sys +from threading import RLock,Thread +from traceback import print_exc +from time import sleep +from Tribler.Core.BitTornado.BT1.PiecePicker import PiecePicker +from Tribler.Core.simpledefs import * +from Tribler.Core.Video.LiveSourceAuth import NullAuthenticator,ECDSAAuthenticator + +DEBUG = True + +class SimpleThread(Thread): + """ Wraps a thread around a single function. """ + + def __init__(self,runfunc): + Thread.__init__(self) + self.setDaemon(True) + self.setName("VideoSourceSimple"+self.getName()) + self.runfunc = runfunc + + def run(self): + self.runfunc() + + +class VideoSourceTransporter: + """ Reads data from an external source and turns it into BitTorrent chunks. 
""" + + def __init__(self, stream, bt1download, authconfig): + self.stream = stream + self.bt1download = bt1download + self.exiting = False + + # shortcuts to the parts we use + self.storagewrapper = bt1download.storagewrapper + self.picker = bt1download.picker + self.rawserver = bt1download.rawserver + self.connecter = bt1download.connecter + self.fileselector = bt1download.fileselector + + # generic video information + self.videostatus = bt1download.videostatus + + # buffer to accumulate video data + self.buffer = [] + self.buflen = 0 + self.bufferlock = RLock() + self.handling_pieces = False + + # LIVESOURCEAUTH + if authconfig.get_method() == LIVE_AUTHMETHOD_ECDSA: + self.authenticator = ECDSAAuthenticator(self.videostatus.piecelen,self.bt1download.len_pieces,keypair=authconfig.get_keypair()) + else: + self.authenticator = NullAuthenticator(self.videostatus.piecelen,self.bt1download.len_pieces) + + + def start(self): + """ Start transporting data. """ + + self.input_thread_handle = SimpleThread(self.input_thread) + self.input_thread_handle.start() + + def _read(self,length): + """ Called by input_thread. """ + return self.stream.read(length) + + def input_thread(self): + """ A thread reading the stream and buffering it. """ + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","VideoSource: started input thread" + + # we can't set the playback position from this thread, so + # we assume all pieces are vs.piecelen in size. + + contentbs = self.authenticator.get_content_blocksize() + try: + while not self.exiting: + data = self._read(contentbs) + if not data: + break + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","VideoSource: read %d bytes" % len(data) + + self.process_data(data) + except IOError: + if DEBUG: + print_exc() + + self.shutdown() + + def shutdown(self): + """ Stop transporting data. """ + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","VideoSource: shutting down" + + if self.exiting: + return + + self.exiting = True + + try: + self.stream.close() + except IOError: + # error on closing, nothing we can do + pass + + def process_data(self,data): + """ Turn data into pieces and queue them for insertion. """ + """ Called by input thread. """ + + vs = self.videostatus + + self.bufferlock.acquire() + try: + # add data to buffer + self.buffer.append( data ) + self.buflen += len( data ) + + if not self.handling_pieces: + # signal to network thread that data has arrived + self.rawserver.add_task( self.create_pieces ) + self.handling_pieces = True + finally: + self.bufferlock.release() + + def create_pieces(self): + """ Process the buffer and create pieces when possible. 
+ Called by network thread """ + + def handle_one_piece(): + vs = self.videostatus + + # LIVESOURCEAUTH + # Arno: make room for source auth info + contentbs = self.authenticator.get_content_blocksize() + + if self.buflen < contentbs: + return False + + if len(self.buffer[0]) == contentbs: + content = self.buffer[0] + del self.buffer[0] + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","VideoSource: JOIN ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^" + buffer = "".join(self.buffer) + self.buffer = [buffer[contentbs:]] + content = buffer[:contentbs] + self.buflen -= contentbs + + datas = self.authenticator.sign(content) + + piece = "".join(datas) + + # add new piece + self.add_piece(vs.playback_pos,piece) + + # invalidate old piece + self.del_piece( vs.live_piece_to_invalidate() ) + + # advance pointer + vs.inc_playback_pos() + + return True + + self.bufferlock.acquire() + try: + while handle_one_piece(): + pass + + self.handling_pieces = False + finally: + self.bufferlock.release() + + def add_piece(self,index,piece): + """ Push one piece into the BitTorrent system. """ + + # Modelled after BitTornado.BT1.Downloader.got_piece + # We don't need most of that function, since this piece + # was never requested from another peer. + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","VideoSource: created piece #%d" % index + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","VideoSource: sig",`piece[-64:]` + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","VideoSource: dig",sha(piece[:-64]).hexdigest() + + # act as if the piece was requested and just came in + # do this in chunks, as StorageWrapper expects to handle + # a request for each chunk + chunk_size = self.storagewrapper.request_size + length = min( len(piece), self.storagewrapper._piecelen(index) ) + x = 0 + while x < length: + self.storagewrapper.new_request( index ) + self.storagewrapper.piece_came_in( index, x, [], piece[x:x+chunk_size], min(chunk_size,length-x) ) + x += chunk_size + + # also notify the piecepicker + self.picker.complete( index ) + + # notify our neighbours + self.connecter.got_piece( index ) + + def del_piece(self,piece): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","VideoSource: del_piece",piece + # See Tribler/Core/Video/VideoOnDemand.py, live_invalidate_piece_globally + self.picker.invalidate_piece(piece) + self.picker.downloader.live_invalidate(piece) + + +class RateLimitedVideoSourceTransporter(VideoSourceTransporter): + """ Reads from the stream at a certain byte rate. + + Useful for creating live streams from file. """ + + def __init__( self, ratelimit, *args, **kwargs ): + """@param ratelimit: maximum rate in bps""" + VideoSourceTransporter.__init__( self, *args, **kwargs ) + + self.ratelimit = int(ratelimit) + + def _read(self,length): + # assumes reads and processing data is instant, so + # we know how long to sleep + sleep(1.0 * length / self.ratelimit) + return VideoSourceTransporter._read(self,length) + + +class PiecePickerSource(PiecePicker): + """ A special piece picker for the source, which never + picks any pieces. Used to prevent the injection + of corrupted pieces at the source. 
""" + + def next(self,*args,**kwargs): + # never pick any pieces + return None + + diff --git a/tribler-mod/Tribler/Core/Video/VideoSource.py.bak b/tribler-mod/Tribler/Core/Video/VideoSource.py.bak new file mode 100644 index 0000000..74c986a --- /dev/null +++ b/tribler-mod/Tribler/Core/Video/VideoSource.py.bak @@ -0,0 +1,247 @@ +# written by Jan David Mol +# see LICENSE.txt for license information +# +# Represent a source of video (other than a BitTorrent swarm), which can inject +# pieces into the downloading engine. + +# We assume we are the sole originator of these pieces, i.e. none of the pieces +# injected are already obtained from another source or requested from some peer. + +import sys +from threading import RLock,Thread +from traceback import print_exc +from time import sleep +from Tribler.Core.BitTornado.BT1.PiecePicker import PiecePicker +from Tribler.Core.simpledefs import * +from Tribler.Core.Video.LiveSourceAuth import NullAuthenticator,ECDSAAuthenticator + +DEBUG = True + +class SimpleThread(Thread): + """ Wraps a thread around a single function. """ + + def __init__(self,runfunc): + Thread.__init__(self) + self.setDaemon(True) + self.setName("VideoSourceSimple"+self.getName()) + self.runfunc = runfunc + + def run(self): + self.runfunc() + + +class VideoSourceTransporter: + """ Reads data from an external source and turns it into BitTorrent chunks. """ + + def __init__(self, stream, bt1download, authconfig): + self.stream = stream + self.bt1download = bt1download + self.exiting = False + + # shortcuts to the parts we use + self.storagewrapper = bt1download.storagewrapper + self.picker = bt1download.picker + self.rawserver = bt1download.rawserver + self.connecter = bt1download.connecter + self.fileselector = bt1download.fileselector + + # generic video information + self.videostatus = bt1download.videostatus + + # buffer to accumulate video data + self.buffer = [] + self.buflen = 0 + self.bufferlock = RLock() + self.handling_pieces = False + + # LIVESOURCEAUTH + if authconfig.get_method() == LIVE_AUTHMETHOD_ECDSA: + self.authenticator = ECDSAAuthenticator(self.videostatus.piecelen,self.bt1download.len_pieces,keypair=authconfig.get_keypair()) + else: + self.authenticator = NullAuthenticator(self.videostatus.piecelen,self.bt1download.len_pieces) + + + def start(self): + """ Start transporting data. """ + + self.input_thread_handle = SimpleThread(self.input_thread) + self.input_thread_handle.start() + + def _read(self,length): + """ Called by input_thread. """ + return self.stream.read(length) + + def input_thread(self): + """ A thread reading the stream and buffering it. """ + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","VideoSource: started input thread" + + # we can't set the playback position from this thread, so + # we assume all pieces are vs.piecelen in size. + + contentbs = self.authenticator.get_content_blocksize() + try: + while not self.exiting: + data = self._read(contentbs) + if not data: + break + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","VideoSource: read %d bytes" % len(data) + + self.process_data(data) + except IOError: + if DEBUG: + print_exc() + + self.shutdown() + + def shutdown(self): + """ Stop transporting data. 
""" + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","VideoSource: shutting down" + + if self.exiting: + return + + self.exiting = True + + try: + self.stream.close() + except IOError: + # error on closing, nothing we can do + pass + + def process_data(self,data): + """ Turn data into pieces and queue them for insertion. """ + """ Called by input thread. """ + + vs = self.videostatus + + self.bufferlock.acquire() + try: + # add data to buffer + self.buffer.append( data ) + self.buflen += len( data ) + + if not self.handling_pieces: + # signal to network thread that data has arrived + self.rawserver.add_task( self.create_pieces ) + self.handling_pieces = True + finally: + self.bufferlock.release() + + def create_pieces(self): + """ Process the buffer and create pieces when possible. + Called by network thread """ + + def handle_one_piece(): + vs = self.videostatus + + # LIVESOURCEAUTH + # Arno: make room for source auth info + contentbs = self.authenticator.get_content_blocksize() + + if self.buflen < contentbs: + return False + + if len(self.buffer[0]) == contentbs: + content = self.buffer[0] + del self.buffer[0] + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","VideoSource: JOIN ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^" + buffer = "".join(self.buffer) + self.buffer = [buffer[contentbs:]] + content = buffer[:contentbs] + self.buflen -= contentbs + + datas = self.authenticator.sign(content) + + piece = "".join(datas) + + # add new piece + self.add_piece(vs.playback_pos,piece) + + # invalidate old piece + self.del_piece( vs.live_piece_to_invalidate() ) + + # advance pointer + vs.inc_playback_pos() + + return True + + self.bufferlock.acquire() + try: + while handle_one_piece(): + pass + + self.handling_pieces = False + finally: + self.bufferlock.release() + + def add_piece(self,index,piece): + """ Push one piece into the BitTorrent system. """ + + # Modelled after BitTornado.BT1.Downloader.got_piece + # We don't need most of that function, since this piece + # was never requested from another peer. + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","VideoSource: created piece #%d" % index + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","VideoSource: sig",`piece[-64:]` + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","VideoSource: dig",sha(piece[:-64]).hexdigest() + + # act as if the piece was requested and just came in + # do this in chunks, as StorageWrapper expects to handle + # a request for each chunk + chunk_size = self.storagewrapper.request_size + length = min( len(piece), self.storagewrapper._piecelen(index) ) + x = 0 + while x < length: + self.storagewrapper.new_request( index ) + self.storagewrapper.piece_came_in( index, x, [], piece[x:x+chunk_size], min(chunk_size,length-x) ) + x += chunk_size + + # also notify the piecepicker + self.picker.complete( index ) + + # notify our neighbours + self.connecter.got_piece( index ) + + def del_piece(self,piece): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","VideoSource: del_piece",piece + # See Tribler/Core/Video/VideoOnDemand.py, live_invalidate_piece_globally + self.picker.invalidate_piece(piece) + self.picker.downloader.live_invalidate(piece) + + +class RateLimitedVideoSourceTransporter(VideoSourceTransporter): + """ Reads from the stream at a certain byte rate. + + Useful for creating live streams from file. 
""" + + def __init__( self, ratelimit, *args, **kwargs ): + """@param ratelimit: maximum rate in bps""" + VideoSourceTransporter.__init__( self, *args, **kwargs ) + + self.ratelimit = int(ratelimit) + + def _read(self,length): + # assumes reads and processing data is instant, so + # we know how long to sleep + sleep(1.0 * length / self.ratelimit) + return VideoSourceTransporter._read(self,length) + + +class PiecePickerSource(PiecePicker): + """ A special piece picker for the source, which never + picks any pieces. Used to prevent the injection + of corrupted pieces at the source. """ + + def next(self,*args,**kwargs): + # never pick any pieces + return None + + diff --git a/tribler-mod/Tribler/Core/Video/VideoStatus.py b/tribler-mod/Tribler/Core/Video/VideoStatus.py new file mode 100644 index 0000000..25afea3 --- /dev/null +++ b/tribler-mod/Tribler/Core/Video/VideoStatus.py @@ -0,0 +1,343 @@ +from time import localtime, strftime + +# Written by Jan David Mol, Arno Bakker +# see LICENSE.txt for license information + +import sys +from math import ceil +from sets import Set + +from Tribler.Core.simpledefs import * + +# live streaming means wrapping around +LIVE_WRAPAROUND = True + +DEBUG = True #False + +class VideoStatus: + """ Info about the selected video and status of the playback. """ + + # TODO: thread safety? PiecePicker, MovieSelector and MovieOnDemandTransporter all interface this + + def __init__(self,piecelen,fileinfo,videoinfo,authparams): + """ + piecelen = length of BitTorrent pieces + fileinfo = list of (name,length) pairs for all files in the torrent, + in their recorded order + videoinfo = videoinfo object from download engine + """ + self.piecelen = piecelen # including signature, if any + self.sigsize = 0 + self.fileinfo = fileinfo + self.videoinfo = videoinfo + self.authparams = authparams + + # size of high probability set, in seconds (piecepicker varies + # between the minmax values depending on network performance) + self.high_prob_min_time = 10 + self.high_prob_min_time_limit = (10, 180) + + # minimal size of high probability set, in pieces (piecepicker + # varies between the minmax values depending on network + # performance) + self.high_prob_min_pieces = 5 + self.high_prob_min_pieces_limit = (5, 50) + + # ----- locate selected movie in fileinfo + index = self.videoinfo['index'] + if index == -1: + index = 0 + + movie_offset = sum( (filesize for (_,filesize) in fileinfo[:index] if filesize) ) + movie_name = fileinfo[index][0] + movie_size = fileinfo[index][1] + + self.selected_movie = { + "offset": movie_offset, + "name": movie_name, + "size": movie_size, + } + + # ----- derive generic movie parameters + movie_begin = movie_offset + movie_end = movie_offset + movie_size - 1 + + # movie_range = (bpiece,offset),(epiece,offset), inclusive + self.movie_range = ( (movie_begin/piecelen, movie_begin%piecelen), + (movie_end/piecelen, movie_end%piecelen) ) + self.first_piecelen = piecelen - self.movie_range[0][1] + self.last_piecelen = self.movie_range[1][1] + self.first_piece = self.movie_range[0][0] + self.last_piece = self.movie_range[1][0] + self.movie_numpieces = self.last_piece - self.first_piece + 1 + + # ----- live streaming settings + self.live_streaming = videoinfo['live'] + self.live_startpos = None + self.playback_pos_observers = [] + self.wraparound = self.live_streaming and LIVE_WRAPAROUND + # /8 means -12.5 % ... 
+ 12.5 % = 25 % window + self.wraparound_delta = max(4,self.movie_numpieces/8) + + # ----- generic streaming settings + # whether to drop packets that come in too late + if self.live_streaming: + self.dropping = True # drop, but we will autopause as well + else: + self.dropping = False # just wait and produce flawless playback + + if videoinfo['bitrate']: + self.set_bitrate( videoinfo['bitrate'] ) + else: + self.set_bitrate( 512*1024/8 ) # default to 512 Kbit/s + self.bitrate_set = False + + # ----- set defaults for dynamic positions + self.playing = False # video has started playback + self.paused = False # video is paused + self.autoresume = False # video is paused but will resume automatically + self.prebuffering = True # video is prebuffering + self.playback_pos = self.first_piece + + self.pausable = (VODEVENT_PAUSE in videoinfo["userevents"]) and (VODEVENT_RESUME in videoinfo["userevents"]) + + def add_playback_pos_observer( self, observer ): + """ Add a function to be called when the playback position changes. Is called as follows: + observer( oldpos, newpos ). In case of initialisation: observer( None, startpos ). """ + self.playback_pos_observers.append( observer ) + + def real_piecelen( self, x ): + if x == self.first_piece: + return self.first_piecelen + elif x == self.last_piece: + return self.last_piecelen + else: + return self.piecelen + + def set_bitrate( self, bitrate ): + self.bitrate_set = True + self.bitrate = bitrate + self.sec_per_piece = 1.0 * bitrate / self.piecelen + + def set_live_startpos( self, pos ): + if self.wraparound: + if self.live_startpos is None: + oldrange = self.first_piece,self.last_piece + else: + oldrange = self.live_get_valid_range() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vodstatus: set_live_pos: old",oldrange + self.live_startpos = pos + self.playback_pos = pos + for o in self.playback_pos_observers: + o( None, pos ) + + if self.wraparound: + newrange = self.live_get_valid_range() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vodstatus: set_live_pos: new",newrange + return self.get_range_diff(oldrange,newrange) + else: + return Set() + + def get_live_startpos(self): + return self.live_startpos + + # the following functions work with absolute piece numbers, + # so they all function within the range [first_piece,last_piece] + + # the range of pieces to download is + # [playback_pos,numpieces) for normal downloads and + # [playback_pos,playback_pos+delta) for wraparound + + def generate_range( self, (f, t) ): + if self.wraparound and f > t: + for x in xrange( f, self.last_piece+1 ): + yield x + for x in xrange( self.first_piece, t ): + yield x + else: + for x in xrange( f, t ): + yield x + + def dist_range(self, f, t): + """ Returns the distance between f and t """ + if f > t: + return self.last_piece-f + t-self.first_piece + else: + return t - f + + def in_range( self, f, t, x ): + if self.wraparound and f > t: + return self.first_piece <= x < t or f <= x <= self.last_piece + else: + return f <= x < t + + def inc_playback_pos( self ): + oldpos = self.playback_pos + self.playback_pos += 1 + + if self.playback_pos > self.last_piece: + if self.wraparound: + self.playback_pos = self.first_piece + else: + self.playback_pos = self.last_piece + + for o in self.playback_pos_observers: + o( oldpos, self.playback_pos ) + + def in_download_range( self, x ): + if self.wraparound: + wraplen = self.playback_pos + self.wraparound_delta - self.last_piece + if wraplen > 0: + return self.first_piece <= x < 
self.first_piece + wraplen or self.playback_pos <= x <= self.last_piece + + return self.playback_pos <= x < self.playback_pos + self.wraparound_delta + else: + return self.first_piece <= x <= self.last_piece + + def in_valid_range(self,piece): + if self.live_streaming: + if self.live_startpos is None: + # Haven't hooked in yet + return True + else: + (begin,end) = self.live_get_valid_range() + ret = self.in_range(begin,end,piece) + if ret == False: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: status: NOT in_valid_range:",begin,"<",piece,"<",end + return ret + else: + return self.first_piece <= piece <= self.last_piece + + def live_get_valid_range(self): + begin = self.normalize(self.playback_pos - self.wraparound_delta) + end = self.normalize(self.playback_pos + self.wraparound_delta) + return (begin,end) + + def live_piece_to_invalidate(self): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: live_piece_to_inval:",self.playback_pos,self.wraparound_delta,self.movie_numpieces + return self.normalize(self.playback_pos - self.wraparound_delta) + + def get_range_diff(self,oldrange,newrange): + """ Returns the diff between oldrange and newrange as a Set. + """ + oldset = range2set(oldrange,self.movie_numpieces) + newset = range2set(newrange,self.movie_numpieces) + return oldset - newset + + def normalize( self, x ): + """ Caps or wraps a piece number. """ + + if self.first_piece <= x <= self.last_piece: + return x + + if self.wraparound: + # in Python, -1 % 3 == 2, so modulo will do our work for us if x < first_piece + return (x - self.first_piece) % self.movie_numpieces + self.first_piece + else: + return max( self.first_piece, min( x, self.last_piece ) ) + + def time_to_pieces( self, sec ): + """ Returns the piece number that contains data for a few seconds down the road. """ + + # TODO: take first and last piece into account, as they can have a different size + return int(ceil(sec * self.sec_per_piece)) + + def download_range( self ): + """ Returns the range [first,last) of pieces we like to download. 
""" + + first = self.playback_pos + + if self.wraparound: + wraplen = first + self.wraparound_delta + 1 - self.last_piece + if wraplen > 0: + last = self.first_piece + wraplen + else: + last = first + self.wraparound_delta + 1 + else: + last = self.last_piece + 1 + + return (first,last) + + def get_wraparound(self): + return self.wraparound + + def increase_high_range(self, factor=1): + """ + Increase the high priority range (effectively enlarging the buffer size) + """ + assert factor > 0 + self.high_prob_min_time += factor * self.high_prob_min_time_limit[0] + if self.high_prob_min_time > self.high_prob_min_time_limit[1]: + self.high_prob_min_time = self.high_prob_min_time_limit[1] + + self.high_prob_min_pieces += int(factor * self.high_prob_min_pieces_limit[0]) + if self.high_prob_min_pieces > self.high_prob_min_pieces_limit[1]: + self.high_prob_min_pieces = self.high_prob_min_pieces_limit[1] + + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "VideoStatus:increase_high_range", self.high_prob_min_time, "seconds or", self.high_prob_min_pieces, "pieces" + + def decrease_high_range(self, factor=1): + """ + Decrease the high priority range (effectively reducing the buffer size) + """ + assert factor > 0 + self.high_prob_min_time -= factor * self.high_prob_min_time_limit[0] + if self.high_prob_min_time < self.high_prob_min_time_limit[0]: + self.high_prob_min_time = self.high_prob_min_time_limit[0] + + self.high_prob_min_pieces -= int(factor * self.high_prob_min_pieces_limit[0]) + if self.high_prob_min_pieces < self.high_prob_min_pieces_limit[0]: + self.high_prob_min_pieces = self.high_prob_min_pieces_limit[0] + + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "VideoStatus:decrease_high_range", self.high_prob_min_time, "seconds or", self.high_prob_min_pieces, "pieces" + + def set_high_range(self, seconds=None, pieces=None): + """ + Set the minimum size of the high priority range. Can be given + in seconds of pieces. + """ + if seconds: self.high_prob_min_time = seconds + if pieces: self.high_prob_min_pieces = pieces + + def get_high_range(self): + """ + Returns (first, last) tuple + """ + first, _ = self.download_range() + number_of_pieces = self.time_to_pieces(self.high_prob_min_time) + last = min(self.last_piece, # last piece + 1 + first + max(number_of_pieces, self.high_prob_min_pieces), # based on time OR pieces + 1 + first + self.high_prob_min_pieces_limit[1]) # hard-coded buffer maximum + return first, last + + def in_high_range(self, piece): + """ + Returns True when PIECE is in the high priority range. 
+ """ + first, last = self.get_high_range() + return self.in_range(first, last, piece) + + def get_range_length(self, first, last): + if self.wraparound and first > last: + return self.last_piece - first + \ + last - self.first_piece + else: + return last - first + + def get_high_range_length(self): + first, last = self.get_high_range() + return self.get_range_length(first, last) + + def generate_high_range(self): + """ + Returns the high current high priority range in piece_ids + """ + first, last = self.get_high_range() + return self.generate_range((first, last)) + +def range2set(range,maxrange): + if range[0] <= range[1]: + set = Set(xrange(range[0],range[1])) + else: + set = Set(xrange(range[0],maxrange)) | Set(xrange(0,range[1])) + return set diff --git a/tribler-mod/Tribler/Core/Video/VideoStatus.py.bak b/tribler-mod/Tribler/Core/Video/VideoStatus.py.bak new file mode 100644 index 0000000..366da06 --- /dev/null +++ b/tribler-mod/Tribler/Core/Video/VideoStatus.py.bak @@ -0,0 +1,342 @@ + +# Written by Jan David Mol, Arno Bakker +# see LICENSE.txt for license information + +import sys +from math import ceil +from sets import Set + +from Tribler.Core.simpledefs import * + +# live streaming means wrapping around +LIVE_WRAPAROUND = True + +DEBUG = True #False + +class VideoStatus: + """ Info about the selected video and status of the playback. """ + + # TODO: thread safety? PiecePicker, MovieSelector and MovieOnDemandTransporter all interface this + + def __init__(self,piecelen,fileinfo,videoinfo,authparams): + """ + piecelen = length of BitTorrent pieces + fileinfo = list of (name,length) pairs for all files in the torrent, + in their recorded order + videoinfo = videoinfo object from download engine + """ + self.piecelen = piecelen # including signature, if any + self.sigsize = 0 + self.fileinfo = fileinfo + self.videoinfo = videoinfo + self.authparams = authparams + + # size of high probability set, in seconds (piecepicker varies + # between the minmax values depending on network performance) + self.high_prob_min_time = 10 + self.high_prob_min_time_limit = (10, 180) + + # minimal size of high probability set, in pieces (piecepicker + # varies between the minmax values depending on network + # performance) + self.high_prob_min_pieces = 5 + self.high_prob_min_pieces_limit = (5, 50) + + # ----- locate selected movie in fileinfo + index = self.videoinfo['index'] + if index == -1: + index = 0 + + movie_offset = sum( (filesize for (_,filesize) in fileinfo[:index] if filesize) ) + movie_name = fileinfo[index][0] + movie_size = fileinfo[index][1] + + self.selected_movie = { + "offset": movie_offset, + "name": movie_name, + "size": movie_size, + } + + # ----- derive generic movie parameters + movie_begin = movie_offset + movie_end = movie_offset + movie_size - 1 + + # movie_range = (bpiece,offset),(epiece,offset), inclusive + self.movie_range = ( (movie_begin/piecelen, movie_begin%piecelen), + (movie_end/piecelen, movie_end%piecelen) ) + self.first_piecelen = piecelen - self.movie_range[0][1] + self.last_piecelen = self.movie_range[1][1] + self.first_piece = self.movie_range[0][0] + self.last_piece = self.movie_range[1][0] + self.movie_numpieces = self.last_piece - self.first_piece + 1 + + # ----- live streaming settings + self.live_streaming = videoinfo['live'] + self.live_startpos = None + self.playback_pos_observers = [] + self.wraparound = self.live_streaming and LIVE_WRAPAROUND + # /8 means -12.5 % ... 
+ 12.5 % = 25 % window + self.wraparound_delta = max(4,self.movie_numpieces/8) + + # ----- generic streaming settings + # whether to drop packets that come in too late + if self.live_streaming: + self.dropping = True # drop, but we will autopause as well + else: + self.dropping = False # just wait and produce flawless playback + + if videoinfo['bitrate']: + self.set_bitrate( videoinfo['bitrate'] ) + else: + self.set_bitrate( 512*1024/8 ) # default to 512 Kbit/s + self.bitrate_set = False + + # ----- set defaults for dynamic positions + self.playing = False # video has started playback + self.paused = False # video is paused + self.autoresume = False # video is paused but will resume automatically + self.prebuffering = True # video is prebuffering + self.playback_pos = self.first_piece + + self.pausable = (VODEVENT_PAUSE in videoinfo["userevents"]) and (VODEVENT_RESUME in videoinfo["userevents"]) + + def add_playback_pos_observer( self, observer ): + """ Add a function to be called when the playback position changes. Is called as follows: + observer( oldpos, newpos ). In case of initialisation: observer( None, startpos ). """ + self.playback_pos_observers.append( observer ) + + def real_piecelen( self, x ): + if x == self.first_piece: + return self.first_piecelen + elif x == self.last_piece: + return self.last_piecelen + else: + return self.piecelen + + def set_bitrate( self, bitrate ): + self.bitrate_set = True + self.bitrate = bitrate + self.sec_per_piece = 1.0 * bitrate / self.piecelen + + def set_live_startpos( self, pos ): + if self.wraparound: + if self.live_startpos is None: + oldrange = self.first_piece,self.last_piece + else: + oldrange = self.live_get_valid_range() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vodstatus: set_live_pos: old",oldrange + self.live_startpos = pos + self.playback_pos = pos + for o in self.playback_pos_observers: + o( None, pos ) + + if self.wraparound: + newrange = self.live_get_valid_range() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vodstatus: set_live_pos: new",newrange + return self.get_range_diff(oldrange,newrange) + else: + return Set() + + def get_live_startpos(self): + return self.live_startpos + + # the following functions work with absolute piece numbers, + # so they all function within the range [first_piece,last_piece] + + # the range of pieces to download is + # [playback_pos,numpieces) for normal downloads and + # [playback_pos,playback_pos+delta) for wraparound + + def generate_range( self, (f, t) ): + if self.wraparound and f > t: + for x in xrange( f, self.last_piece+1 ): + yield x + for x in xrange( self.first_piece, t ): + yield x + else: + for x in xrange( f, t ): + yield x + + def dist_range(self, f, t): + """ Returns the distance between f and t """ + if f > t: + return self.last_piece-f + t-self.first_piece + else: + return t - f + + def in_range( self, f, t, x ): + if self.wraparound and f > t: + return self.first_piece <= x < t or f <= x <= self.last_piece + else: + return f <= x < t + + def inc_playback_pos( self ): + oldpos = self.playback_pos + self.playback_pos += 1 + + if self.playback_pos > self.last_piece: + if self.wraparound: + self.playback_pos = self.first_piece + else: + self.playback_pos = self.last_piece + + for o in self.playback_pos_observers: + o( oldpos, self.playback_pos ) + + def in_download_range( self, x ): + if self.wraparound: + wraplen = self.playback_pos + self.wraparound_delta - self.last_piece + if wraplen > 0: + return self.first_piece <= x < 
self.first_piece + wraplen or self.playback_pos <= x <= self.last_piece + + return self.playback_pos <= x < self.playback_pos + self.wraparound_delta + else: + return self.first_piece <= x <= self.last_piece + + def in_valid_range(self,piece): + if self.live_streaming: + if self.live_startpos is None: + # Haven't hooked in yet + return True + else: + (begin,end) = self.live_get_valid_range() + ret = self.in_range(begin,end,piece) + if ret == False: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: status: NOT in_valid_range:",begin,"<",piece,"<",end + return ret + else: + return self.first_piece <= piece <= self.last_piece + + def live_get_valid_range(self): + begin = self.normalize(self.playback_pos - self.wraparound_delta) + end = self.normalize(self.playback_pos + self.wraparound_delta) + return (begin,end) + + def live_piece_to_invalidate(self): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","vod: live_piece_to_inval:",self.playback_pos,self.wraparound_delta,self.movie_numpieces + return self.normalize(self.playback_pos - self.wraparound_delta) + + def get_range_diff(self,oldrange,newrange): + """ Returns the diff between oldrange and newrange as a Set. + """ + oldset = range2set(oldrange,self.movie_numpieces) + newset = range2set(newrange,self.movie_numpieces) + return oldset - newset + + def normalize( self, x ): + """ Caps or wraps a piece number. """ + + if self.first_piece <= x <= self.last_piece: + return x + + if self.wraparound: + # in Python, -1 % 3 == 2, so modulo will do our work for us if x < first_piece + return (x - self.first_piece) % self.movie_numpieces + self.first_piece + else: + return max( self.first_piece, min( x, self.last_piece ) ) + + def time_to_pieces( self, sec ): + """ Returns the piece number that contains data for a few seconds down the road. """ + + # TODO: take first and last piece into account, as they can have a different size + return int(ceil(sec * self.sec_per_piece)) + + def download_range( self ): + """ Returns the range [first,last) of pieces we like to download. 
""" + + first = self.playback_pos + + if self.wraparound: + wraplen = first + self.wraparound_delta + 1 - self.last_piece + if wraplen > 0: + last = self.first_piece + wraplen + else: + last = first + self.wraparound_delta + 1 + else: + last = self.last_piece + 1 + + return (first,last) + + def get_wraparound(self): + return self.wraparound + + def increase_high_range(self, factor=1): + """ + Increase the high priority range (effectively enlarging the buffer size) + """ + assert factor > 0 + self.high_prob_min_time += factor * self.high_prob_min_time_limit[0] + if self.high_prob_min_time > self.high_prob_min_time_limit[1]: + self.high_prob_min_time = self.high_prob_min_time_limit[1] + + self.high_prob_min_pieces += int(factor * self.high_prob_min_pieces_limit[0]) + if self.high_prob_min_pieces > self.high_prob_min_pieces_limit[1]: + self.high_prob_min_pieces = self.high_prob_min_pieces_limit[1] + + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "VideoStatus:increase_high_range", self.high_prob_min_time, "seconds or", self.high_prob_min_pieces, "pieces" + + def decrease_high_range(self, factor=1): + """ + Decrease the high priority range (effectively reducing the buffer size) + """ + assert factor > 0 + self.high_prob_min_time -= factor * self.high_prob_min_time_limit[0] + if self.high_prob_min_time < self.high_prob_min_time_limit[0]: + self.high_prob_min_time = self.high_prob_min_time_limit[0] + + self.high_prob_min_pieces -= int(factor * self.high_prob_min_pieces_limit[0]) + if self.high_prob_min_pieces < self.high_prob_min_pieces_limit[0]: + self.high_prob_min_pieces = self.high_prob_min_pieces_limit[0] + + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "VideoStatus:decrease_high_range", self.high_prob_min_time, "seconds or", self.high_prob_min_pieces, "pieces" + + def set_high_range(self, seconds=None, pieces=None): + """ + Set the minimum size of the high priority range. Can be given + in seconds of pieces. + """ + if seconds: self.high_prob_min_time = seconds + if pieces: self.high_prob_min_pieces = pieces + + def get_high_range(self): + """ + Returns (first, last) tuple + """ + first, _ = self.download_range() + number_of_pieces = self.time_to_pieces(self.high_prob_min_time) + last = min(self.last_piece, # last piece + 1 + first + max(number_of_pieces, self.high_prob_min_pieces), # based on time OR pieces + 1 + first + self.high_prob_min_pieces_limit[1]) # hard-coded buffer maximum + return first, last + + def in_high_range(self, piece): + """ + Returns True when PIECE is in the high priority range. 
+ """ + first, last = self.get_high_range() + return self.in_range(first, last, piece) + + def get_range_length(self, first, last): + if self.wraparound and first > last: + return self.last_piece - first + \ + last - self.first_piece + else: + return last - first + + def get_high_range_length(self): + first, last = self.get_high_range() + return self.get_range_length(first, last) + + def generate_high_range(self): + """ + Returns the high current high priority range in piece_ids + """ + first, last = self.get_high_range() + return self.generate_range((first, last)) + +def range2set(range,maxrange): + if range[0] <= range[1]: + set = Set(xrange(range[0],range[1])) + else: + set = Set(xrange(range[0],maxrange)) | Set(xrange(0,range[1])) + return set diff --git a/tribler-mod/Tribler/Core/Video/__init__.py b/tribler-mod/Tribler/Core/Video/__init__.py new file mode 100644 index 0000000..b7ef832 --- /dev/null +++ b/tribler-mod/Tribler/Core/Video/__init__.py @@ -0,0 +1,3 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information diff --git a/tribler-mod/Tribler/Core/Video/__init__.py.bak b/tribler-mod/Tribler/Core/Video/__init__.py.bak new file mode 100644 index 0000000..395f8fb --- /dev/null +++ b/tribler-mod/Tribler/Core/Video/__init__.py.bak @@ -0,0 +1,2 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information diff --git a/tribler-mod/Tribler/Core/__init__.py b/tribler-mod/Tribler/Core/__init__.py new file mode 100644 index 0000000..b7ef832 --- /dev/null +++ b/tribler-mod/Tribler/Core/__init__.py @@ -0,0 +1,3 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information diff --git a/tribler-mod/Tribler/Core/__init__.py.bak b/tribler-mod/Tribler/Core/__init__.py.bak new file mode 100644 index 0000000..395f8fb --- /dev/null +++ b/tribler-mod/Tribler/Core/__init__.py.bak @@ -0,0 +1,2 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information diff --git a/tribler-mod/Tribler/Core/defaults.py b/tribler-mod/Tribler/Core/defaults.py new file mode 100644 index 0000000..1851676 --- /dev/null +++ b/tribler-mod/Tribler/Core/defaults.py @@ -0,0 +1,227 @@ +from time import localtime, strftime +# Written by Arno Bakker and Bram Cohen +# see LICENSE.txt for license information +""" Default values for all configurarable parameters of the Core""" +# +# For an explanation of each parameter, see SessionConfig/DownloadConfig.py +# +# defaults with comments behind them are not user-setable via the +# *ConfigInterface classes, because they are not currently implemented (IPv6) +# or we only use them internally. +# +# WARNING: +# As we have release Tribler 4.5.0 you must now take into account that +# people have stored versions of these params on their disk. Make sure +# you change the version number of the structure and provide upgrade code +# such that your code won't barf because we loaded an older version from +# disk that does not have your new fields. +# + +from simpledefs import * + +DEFAULTPORT=7760 + +# +# Session opts +# +# History: +# Version 2: as released in Tribler 4.5.0 +# +SESSDEFAULTS_VERSION = 2 +sessdefaults = {} +sessdefaults['version'] = SESSDEFAULTS_VERSION +sessdefaults['state_dir'] = None +sessdefaults['install_dir'] = u'.' 
+sessdefaults['ip'] = '' +sessdefaults['minport'] = DEFAULTPORT +sessdefaults['maxport'] = DEFAULTPORT +sessdefaults['random_port'] = 1 +sessdefaults['bind'] = [] +sessdefaults['ipv6_enabled'] = 0 # allow the client to connect to peers via IPv6 (currently not supported) +sessdefaults['ipv6_binds_v4'] = None # set if an IPv6 server socket won't also field IPv4 connections (default = set automatically) +sessdefaults['upnp_nat_access'] = UPNPMODE_UNIVERSAL_DIRECT +sessdefaults['timeout'] = 300.0 +sessdefaults['timeout_check_interval'] = 60.0 +sessdefaults['eckeypairfilename'] = None +sessdefaults['megacache'] = True +sessdefaults['overlay'] = True +sessdefaults['crawler'] = True +sessdefaults['buddycast'] = True +sessdefaults['start_recommender'] = True +sessdefaults['download_help'] = True +sessdefaults['torrent_collecting'] = True +sessdefaults['superpeer'] = False +sessdefaults['overlay_log'] = None +sessdefaults['buddycast_interval'] = 15 +sessdefaults['buddycast_max_peers'] = 2500 # max number of peers to use for recommender. +sessdefaults['torrent_collecting_max_torrents'] = 5000 +sessdefaults['torrent_collecting_dir'] = None +sessdefaults['torrent_collecting_rate'] = 5 +sessdefaults['torrent_checking'] = 1 +sessdefaults['torrent_checking_period'] = 31 #will be changed to min(max(86400/ntorrents, 15), 300) at runtime +sessdefaults['dialback'] = True +sessdefaults['dialback_active'] = True # do active discovery (needed to disable for testing only) (0 = disabled) +sessdefaults['dialback_trust_superpeers'] = True # trust superpeer replies (needed to disable for testing only) (0 = disabled) +sessdefaults['socnet'] = True +sessdefaults['rquery'] = True +sessdefaults['stop_collecting_threshold'] = 200 +sessdefaults['internaltracker'] = True +sessdefaults['nickname'] = '__default_name__' # is replaced with hostname in LaunchManyCore.py +sessdefaults['mugshot'] = None +sessdefaults['videoanalyserpath'] = None +sessdefaults['overlay_max_message_length'] = 2 ** 23 +sessdefaults['download_help_dir'] = None +sessdefaults['bartercast'] = True +sessdefaults['superpeer_file'] = None +sessdefaults['crawler_file'] = None +sessdefaults['buddycast_collecting_solution'] = BCCOLPOLICY_SIMPLE +sessdefaults['peer_icon_path'] = None +sessdefaults['stop_collecting_threshold'] = 200 +sessdefaults['coopdlconfig'] = None +sessdefaults['family_filter'] = True +sessdefaults['nat_detect'] = True +sessdefaults['puncturing_internal_port'] = 6700 +sessdefaults['stun_servers'] = [('stun1.tribler.org',6701),('stun2.tribler.org',6702)] +sessdefaults['pingback_servers'] = [('pingback.tribler.org',6703),('pingback2.tribler.org',6703)] +sessdefaults['live_aux_seeders'] = [] +sessdefaults['mainline_dht'] = True +sessdefaults['rss_reload_frequency'] = 600 # reload a rss source every n seconds +sessdefaults['rss_check_frequency'] = 15 # test a potential .torrent in a rss source every n seconds +sessdefaults['moderationcast_recent_own_moderations_per_have'] = 13 +sessdefaults['moderationcast_random_own_moderations_per_have'] = 12 +sessdefaults['moderationcast_recent_forward_moderations_per_have'] = 13 +sessdefaults['moderationcast_random_forward_moderations_per_have'] = 12 +sessdefaults['moderationcast_upload_bandwidth_limit'] = 5*1024 +sessdefaults['moderationcast_download_bandwidth_limit'] = 20*1024 + +trackerdefaults = {} +trackerdefaults['tracker_url'] = None +trackerdefaults['tracker_dfile'] = None +trackerdefaults['tracker_dfile_format'] = ITRACKDBFORMAT_PICKLE +trackerdefaults['tracker_socket_timeout'] = 15 
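The comment on 'torrent_checking_period' above says the stored value of 31 is replaced at runtime by min(max(86400/ntorrents, 15), 300). A quick editorial illustration of that clamp (the function name is invented for the example):

def runtime_checking_period(ntorrents):
    # one full pass over all torrents is spread over roughly a day (86400 s),
    # but each individual check interval is clamped to [15 s, 300 s]
    return min(max(86400 // ntorrents, 15), 300)

# runtime_checking_period(100)   -> 300  (few torrents: capped at 5 minutes)
# runtime_checking_period(5000)  -> 17   (86400 // 5000 == 17)
# runtime_checking_period(10000) -> 15   (many torrents: floored at 15 seconds)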
+trackerdefaults['tracker_save_dfile_interval'] = 300 +trackerdefaults['tracker_timeout_downloaders_interval'] = 2700 +trackerdefaults['tracker_reannounce_interval'] = 1800 +trackerdefaults['tracker_response_size'] = 50 +trackerdefaults['tracker_timeout_check_interval'] = 5 +trackerdefaults['tracker_nat_check'] = 3 +trackerdefaults['tracker_log_nat_checks'] = 0 +trackerdefaults['tracker_min_time_between_log_flushes'] = 3.0 +trackerdefaults['tracker_min_time_between_cache_refreshes'] = 600.0 +trackerdefaults['tracker_allowed_dir'] = None +trackerdefaults['tracker_allowed_list'] = '' +trackerdefaults['tracker_allowed_controls'] = 0 +trackerdefaults['tracker_multitracker_enabled'] = 0 +trackerdefaults['tracker_multitracker_allowed'] = ITRACKMULTI_ALLOW_AUTODETECT +trackerdefaults['tracker_multitracker_reannounce_interval'] = 120 +trackerdefaults['tracker_multitracker_maxpeers'] = 20 +trackerdefaults['tracker_aggregate_forward'] = [None,None] +trackerdefaults['tracker_aggregator'] = 0 +trackerdefaults['tracker_hupmonitor'] = 0 +trackerdefaults['tracker_multitracker_http_timeout'] = 60 +trackerdefaults['tracker_parse_dir_interval'] = 60 +trackerdefaults['tracker_show_infopage'] = 1 +trackerdefaults['tracker_infopage_redirect'] = None +trackerdefaults['tracker_show_names'] = 1 +trackerdefaults['tracker_favicon'] = None +trackerdefaults['tracker_allowed_ips'] = [] +trackerdefaults['tracker_banned_ips'] = [] +trackerdefaults['tracker_only_local_override_ip'] = ITRACK_IGNORE_ANNOUNCEIP_IFNONATCHECK + +trackerdefaults['tracker_logfile'] = None +trackerdefaults['tracker_allow_get'] = 1 +trackerdefaults['tracker_keep_dead'] = 0 +trackerdefaults['tracker_scrape_allowed'] = ITRACKSCRAPE_ALLOW_FULL + +sessdefaults.update(trackerdefaults) + +# +# BT per download opts +# +# History: +# Version 2: as released in Tribler 4.5.0 +# Version 3: +DLDEFAULTS_VERSION = 3 +dldefaults = {} +dldefaults['version'] = DLDEFAULTS_VERSION +dldefaults['max_uploads'] = 7 +dldefaults['keepalive_interval'] = 120.0 +dldefaults['download_slice_size'] = 2 ** 14 +dldefaults['upload_unit_size'] = 1460 +dldefaults['request_backlog'] = 10 +dldefaults['max_message_length'] = 2 ** 23 +dldefaults['selector_enabled'] = 1 # whether to enable the file selector and fast resume function. Arno, 2009-02-9: Must be on for checkpoints to work. 
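For readability, the powers of two used in the per-download defaults just above work out as follows; the asserts are an editorial aside and not part of the original file (the 16 KiB figure matches the block size BitTorrent clients commonly request).

assert dldefaults['download_slice_size'] == 16 * 1024         # 2 ** 14 bytes = 16 KiB per request/slice
assert dldefaults['max_message_length'] == 8 * 1024 * 1024    # 2 ** 23 bytes = 8 MiB cap on one wire message
assert dldefaults['upload_unit_size'] == 1460                 # roughly one TCP segment payload on a 1500-byte MTU link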
+dldefaults['expire_cache_data'] = 10 # the number of days after which you wish to expire old cache data (0 = disabled) +dldefaults['priority'] = [] # a list of file priorities separated by commas, must be one per file, 0 = highest, 1 = normal, 2 = lowest, -1 = download disabled' +dldefaults['saveas'] = None # Set to get_default_destdir() +dldefaults['max_slice_length'] = 2 ** 17 +dldefaults['max_rate_period'] = 20.0 +dldefaults['upload_rate_fudge'] = 5.0 +dldefaults['tcp_ack_fudge'] = 0.03 +dldefaults['rerequest_interval'] = 300 +dldefaults['min_peers'] = 20 +dldefaults['http_timeout'] = 60 +dldefaults['max_initiate'] = 40 +dldefaults['check_hashes'] = 1 +dldefaults['max_upload_rate'] = 0 +dldefaults['max_download_rate'] = 0 +dldefaults['alloc_type'] = DISKALLOC_NORMAL +dldefaults['alloc_rate'] = 2.0 +dldefaults['buffer_reads'] = 1 +dldefaults['write_buffer_size'] = 4 +dldefaults['breakup_seed_bitfield'] = 1 +dldefaults['snub_time'] = 30.0 +dldefaults['rarest_first_cutoff'] = 2 +dldefaults['rarest_first_priority_cutoff'] = 5 +dldefaults['min_uploads'] = 4 +dldefaults['max_files_open'] = 50 +dldefaults['round_robin_period'] = 30 +dldefaults['super_seeder'] = 0 +dldefaults['security'] = 1 +dldefaults['max_connections'] = 0 +dldefaults['auto_kick'] = 1 +dldefaults['double_check'] = 0 +dldefaults['triple_check'] = 0 +dldefaults['lock_files'] = 0 +dldefaults['lock_while_reading'] = 0 +dldefaults['auto_flush'] = 0 +# +# Tribler per-download opts +# +dldefaults['coopdl_role'] = COOPDL_ROLE_COORDINATOR +dldefaults['coopdl_coordinator_permid'] = '' +dldefaults['exclude_ips'] = '' +dldefaults['mode'] = 0 +dldefaults['vod_usercallback'] = None +dldefaults['vod_userevents'] = [] +dldefaults['video_source'] = None +dldefaults['video_ratelimit'] = 0 +dldefaults['video_source_authconfig'] = None +dldefaults['selected_files'] = [] +dldefaults['ut_pex_max_addrs_from_peer'] = 16 +# Version 3: +dldefaults['same_nat_try_internal'] = 0 +dldefaults['unchoke_bias_for_internal'] = 0 + +tdefdictdefaults = {} +tdefdictdefaults['comment'] = None +tdefdictdefaults['created by'] = None +tdefdictdefaults['announce'] = None +tdefdictdefaults['announce-list'] = None +tdefdictdefaults['nodes'] = None # mainline DHT +tdefdictdefaults['httpseeds'] = None +tdefdictdefaults['encoding'] = None + +tdefmetadefaults = {} +tdefmetadefaults['version'] = 1 +tdefmetadefaults['piece length'] = 0 +tdefmetadefaults['makehash_md5'] = 0 +tdefmetadefaults['makehash_crc32'] = 0 +tdefmetadefaults['makehash_sha1'] = 0 +tdefmetadefaults['createmerkletorrent'] = 0 +tdefmetadefaults['torrentsigkeypairfilename'] = None +tdefmetadefaults['thumb'] = None # JPEG data + +tdefdefaults = {} +tdefdefaults.update(tdefdictdefaults) +tdefdefaults.update(tdefmetadefaults) diff --git a/tribler-mod/Tribler/Core/defaults.py.bak b/tribler-mod/Tribler/Core/defaults.py.bak new file mode 100644 index 0000000..c5262c8 --- /dev/null +++ b/tribler-mod/Tribler/Core/defaults.py.bak @@ -0,0 +1,226 @@ +# Written by Arno Bakker and Bram Cohen +# see LICENSE.txt for license information +""" Default values for all configurarable parameters of the Core""" +# +# For an explanation of each parameter, see SessionConfig/DownloadConfig.py +# +# defaults with comments behind them are not user-setable via the +# *ConfigInterface classes, because they are not currently implemented (IPv6) +# or we only use them internally. +# +# WARNING: +# As we have release Tribler 4.5.0 you must now take into account that +# people have stored versions of these params on their disk. 
Make sure +# you change the version number of the structure and provide upgrade code +# such that your code won't barf because we loaded an older version from +# disk that does not have your new fields. +# + +from simpledefs import * + +DEFAULTPORT=7760 + +# +# Session opts +# +# History: +# Version 2: as released in Tribler 4.5.0 +# +SESSDEFAULTS_VERSION = 2 +sessdefaults = {} +sessdefaults['version'] = SESSDEFAULTS_VERSION +sessdefaults['state_dir'] = None +sessdefaults['install_dir'] = u'.' +sessdefaults['ip'] = '' +sessdefaults['minport'] = DEFAULTPORT +sessdefaults['maxport'] = DEFAULTPORT +sessdefaults['random_port'] = 1 +sessdefaults['bind'] = [] +sessdefaults['ipv6_enabled'] = 0 # allow the client to connect to peers via IPv6 (currently not supported) +sessdefaults['ipv6_binds_v4'] = None # set if an IPv6 server socket won't also field IPv4 connections (default = set automatically) +sessdefaults['upnp_nat_access'] = UPNPMODE_UNIVERSAL_DIRECT +sessdefaults['timeout'] = 300.0 +sessdefaults['timeout_check_interval'] = 60.0 +sessdefaults['eckeypairfilename'] = None +sessdefaults['megacache'] = True +sessdefaults['overlay'] = True +sessdefaults['crawler'] = True +sessdefaults['buddycast'] = True +sessdefaults['start_recommender'] = True +sessdefaults['download_help'] = True +sessdefaults['torrent_collecting'] = True +sessdefaults['superpeer'] = False +sessdefaults['overlay_log'] = None +sessdefaults['buddycast_interval'] = 15 +sessdefaults['buddycast_max_peers'] = 2500 # max number of peers to use for recommender. +sessdefaults['torrent_collecting_max_torrents'] = 5000 +sessdefaults['torrent_collecting_dir'] = None +sessdefaults['torrent_collecting_rate'] = 5 +sessdefaults['torrent_checking'] = 1 +sessdefaults['torrent_checking_period'] = 31 #will be changed to min(max(86400/ntorrents, 15), 300) at runtime +sessdefaults['dialback'] = True +sessdefaults['dialback_active'] = True # do active discovery (needed to disable for testing only) (0 = disabled) +sessdefaults['dialback_trust_superpeers'] = True # trust superpeer replies (needed to disable for testing only) (0 = disabled) +sessdefaults['socnet'] = True +sessdefaults['rquery'] = True +sessdefaults['stop_collecting_threshold'] = 200 +sessdefaults['internaltracker'] = True +sessdefaults['nickname'] = '__default_name__' # is replaced with hostname in LaunchManyCore.py +sessdefaults['mugshot'] = None +sessdefaults['videoanalyserpath'] = None +sessdefaults['overlay_max_message_length'] = 2 ** 23 +sessdefaults['download_help_dir'] = None +sessdefaults['bartercast'] = True +sessdefaults['superpeer_file'] = None +sessdefaults['crawler_file'] = None +sessdefaults['buddycast_collecting_solution'] = BCCOLPOLICY_SIMPLE +sessdefaults['peer_icon_path'] = None +sessdefaults['stop_collecting_threshold'] = 200 +sessdefaults['coopdlconfig'] = None +sessdefaults['family_filter'] = True +sessdefaults['nat_detect'] = True +sessdefaults['puncturing_internal_port'] = 6700 +sessdefaults['stun_servers'] = [('stun1.tribler.org',6701),('stun2.tribler.org',6702)] +sessdefaults['pingback_servers'] = [('pingback.tribler.org',6703),('pingback2.tribler.org',6703)] +sessdefaults['live_aux_seeders'] = [] +sessdefaults['mainline_dht'] = True +sessdefaults['rss_reload_frequency'] = 600 # reload a rss source every n seconds +sessdefaults['rss_check_frequency'] = 15 # test a potential .torrent in a rss source every n seconds +sessdefaults['moderationcast_recent_own_moderations_per_have'] = 13 +sessdefaults['moderationcast_random_own_moderations_per_have'] = 12 
+sessdefaults['moderationcast_recent_forward_moderations_per_have'] = 13 +sessdefaults['moderationcast_random_forward_moderations_per_have'] = 12 +sessdefaults['moderationcast_upload_bandwidth_limit'] = 5*1024 +sessdefaults['moderationcast_download_bandwidth_limit'] = 20*1024 + +trackerdefaults = {} +trackerdefaults['tracker_url'] = None +trackerdefaults['tracker_dfile'] = None +trackerdefaults['tracker_dfile_format'] = ITRACKDBFORMAT_PICKLE +trackerdefaults['tracker_socket_timeout'] = 15 +trackerdefaults['tracker_save_dfile_interval'] = 300 +trackerdefaults['tracker_timeout_downloaders_interval'] = 2700 +trackerdefaults['tracker_reannounce_interval'] = 1800 +trackerdefaults['tracker_response_size'] = 50 +trackerdefaults['tracker_timeout_check_interval'] = 5 +trackerdefaults['tracker_nat_check'] = 3 +trackerdefaults['tracker_log_nat_checks'] = 0 +trackerdefaults['tracker_min_time_between_log_flushes'] = 3.0 +trackerdefaults['tracker_min_time_between_cache_refreshes'] = 600.0 +trackerdefaults['tracker_allowed_dir'] = None +trackerdefaults['tracker_allowed_list'] = '' +trackerdefaults['tracker_allowed_controls'] = 0 +trackerdefaults['tracker_multitracker_enabled'] = 0 +trackerdefaults['tracker_multitracker_allowed'] = ITRACKMULTI_ALLOW_AUTODETECT +trackerdefaults['tracker_multitracker_reannounce_interval'] = 120 +trackerdefaults['tracker_multitracker_maxpeers'] = 20 +trackerdefaults['tracker_aggregate_forward'] = [None,None] +trackerdefaults['tracker_aggregator'] = 0 +trackerdefaults['tracker_hupmonitor'] = 0 +trackerdefaults['tracker_multitracker_http_timeout'] = 60 +trackerdefaults['tracker_parse_dir_interval'] = 60 +trackerdefaults['tracker_show_infopage'] = 1 +trackerdefaults['tracker_infopage_redirect'] = None +trackerdefaults['tracker_show_names'] = 1 +trackerdefaults['tracker_favicon'] = None +trackerdefaults['tracker_allowed_ips'] = [] +trackerdefaults['tracker_banned_ips'] = [] +trackerdefaults['tracker_only_local_override_ip'] = ITRACK_IGNORE_ANNOUNCEIP_IFNONATCHECK + +trackerdefaults['tracker_logfile'] = None +trackerdefaults['tracker_allow_get'] = 1 +trackerdefaults['tracker_keep_dead'] = 0 +trackerdefaults['tracker_scrape_allowed'] = ITRACKSCRAPE_ALLOW_FULL + +sessdefaults.update(trackerdefaults) + +# +# BT per download opts +# +# History: +# Version 2: as released in Tribler 4.5.0 +# Version 3: +DLDEFAULTS_VERSION = 3 +dldefaults = {} +dldefaults['version'] = DLDEFAULTS_VERSION +dldefaults['max_uploads'] = 7 +dldefaults['keepalive_interval'] = 120.0 +dldefaults['download_slice_size'] = 2 ** 14 +dldefaults['upload_unit_size'] = 1460 +dldefaults['request_backlog'] = 10 +dldefaults['max_message_length'] = 2 ** 23 +dldefaults['selector_enabled'] = 1 # whether to enable the file selector and fast resume function. Arno, 2009-02-9: Must be on for checkpoints to work. 
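# Illustrative sketch of how these per-download defaults are meant to be consumed: a
# download configuration typically starts from a copy of this dict (see DownloadConfig.py,
# referenced in the header) and overrides a handful of fields.  The names and values below
# are examples only; some of the keys are assigned a few lines further down in this dict.
example_dlconfig = dict(dldefaults)
example_dlconfig['max_uploads'] = 4
example_dlconfig['max_upload_rate'] = 64                 # override the shipped default of 0
example_dlconfig['saveas'] = '/tmp/tribler-downloads'    # hypothetical destination directory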
+dldefaults['expire_cache_data'] = 10 # the number of days after which you wish to expire old cache data (0 = disabled) +dldefaults['priority'] = [] # a list of file priorities separated by commas, must be one per file, 0 = highest, 1 = normal, 2 = lowest, -1 = download disabled' +dldefaults['saveas'] = None # Set to get_default_destdir() +dldefaults['max_slice_length'] = 2 ** 17 +dldefaults['max_rate_period'] = 20.0 +dldefaults['upload_rate_fudge'] = 5.0 +dldefaults['tcp_ack_fudge'] = 0.03 +dldefaults['rerequest_interval'] = 300 +dldefaults['min_peers'] = 20 +dldefaults['http_timeout'] = 60 +dldefaults['max_initiate'] = 40 +dldefaults['check_hashes'] = 1 +dldefaults['max_upload_rate'] = 0 +dldefaults['max_download_rate'] = 0 +dldefaults['alloc_type'] = DISKALLOC_NORMAL +dldefaults['alloc_rate'] = 2.0 +dldefaults['buffer_reads'] = 1 +dldefaults['write_buffer_size'] = 4 +dldefaults['breakup_seed_bitfield'] = 1 +dldefaults['snub_time'] = 30.0 +dldefaults['rarest_first_cutoff'] = 2 +dldefaults['rarest_first_priority_cutoff'] = 5 +dldefaults['min_uploads'] = 4 +dldefaults['max_files_open'] = 50 +dldefaults['round_robin_period'] = 30 +dldefaults['super_seeder'] = 0 +dldefaults['security'] = 1 +dldefaults['max_connections'] = 0 +dldefaults['auto_kick'] = 1 +dldefaults['double_check'] = 0 +dldefaults['triple_check'] = 0 +dldefaults['lock_files'] = 0 +dldefaults['lock_while_reading'] = 0 +dldefaults['auto_flush'] = 0 +# +# Tribler per-download opts +# +dldefaults['coopdl_role'] = COOPDL_ROLE_COORDINATOR +dldefaults['coopdl_coordinator_permid'] = '' +dldefaults['exclude_ips'] = '' +dldefaults['mode'] = 0 +dldefaults['vod_usercallback'] = None +dldefaults['vod_userevents'] = [] +dldefaults['video_source'] = None +dldefaults['video_ratelimit'] = 0 +dldefaults['video_source_authconfig'] = None +dldefaults['selected_files'] = [] +dldefaults['ut_pex_max_addrs_from_peer'] = 16 +# Version 3: +dldefaults['same_nat_try_internal'] = 0 +dldefaults['unchoke_bias_for_internal'] = 0 + +tdefdictdefaults = {} +tdefdictdefaults['comment'] = None +tdefdictdefaults['created by'] = None +tdefdictdefaults['announce'] = None +tdefdictdefaults['announce-list'] = None +tdefdictdefaults['nodes'] = None # mainline DHT +tdefdictdefaults['httpseeds'] = None +tdefdictdefaults['encoding'] = None + +tdefmetadefaults = {} +tdefmetadefaults['version'] = 1 +tdefmetadefaults['piece length'] = 0 +tdefmetadefaults['makehash_md5'] = 0 +tdefmetadefaults['makehash_crc32'] = 0 +tdefmetadefaults['makehash_sha1'] = 0 +tdefmetadefaults['createmerkletorrent'] = 0 +tdefmetadefaults['torrentsigkeypairfilename'] = None +tdefmetadefaults['thumb'] = None # JPEG data + +tdefdefaults = {} +tdefdefaults.update(tdefdictdefaults) +tdefdefaults.update(tdefmetadefaults) diff --git a/tribler-mod/Tribler/Core/exceptions.py b/tribler-mod/Tribler/Core/exceptions.py new file mode 100644 index 0000000..360340c --- /dev/null +++ b/tribler-mod/Tribler/Core/exceptions.py @@ -0,0 +1,83 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information +""" The Tribler-specifc Exceptions the Core may throw. """ + +# +# Exceptions +# +class TriblerException(Exception): + """ Super class for all Tribler-specific Exceptions the Tribler Core + throws. 
+ """ + def __init__(self,msg=None): + Exception.__init__(self,msg) + + def __str__(self): + return str(self.__class__)+': '+Exception.__str__(self) + + +class OperationNotPossibleAtRuntimeException(TriblerException): + """ The requested operation is not possible after the Session or Download + has been started. + """ + def __init__(self,msg=None): + TriblerException.__init__(self,msg) + +class OperationNotPossibleWhenStoppedException(TriblerException): + """ The requested operation is not possible when the Download + has been stopped. + """ + def __init__(self,msg=None): + TriblerException.__init__(self,msg) + +class OperationNotEnabledByConfigurationException(TriblerException): + """ The requested operation is not possible with the current + Session/Download configuration. + """ + def __init__(self,msg=None): + TriblerException.__init__(self,msg) + + +class NotYetImplementedException(TriblerException): + """ The requested operation is not yet fully implemented. """ + def __init__(self,msg=None): + TriblerException.__init__(self,msg) + + +class DuplicateDownloadException(TriblerException): + """ The Download already exists in the Session, i.e., a Download for + a torrent with the same infohash already exists. """ + def __init__(self,msg=None): + TriblerException.__init__(self,msg) + +class VODNoFileSelectedInMultifileTorrentException(TriblerException): + """ Attempt to download a torrent in Video-On-Demand mode that contains + multiple video files, but without specifying which one to play. """ + def __init__(self,msg=None): + TriblerException.__init__(self,msg) + +class LiveTorrentRequiresUsercallbackException(TriblerException): + """ Attempt to download a live-stream torrent without specifying a + callback function to call when the stream is ready to play. + Use set_video_event_callback(usercallback) to correct this problem. """ + def __init__(self,msg=None): + TriblerException.__init__(self,msg) + +class TorrentDefNotFinalizedException(TriblerException): + """ Attempt to start downloading a torrent from a torrent definition + that was not finalized. """ + def __init__(self,msg=None): + TriblerException.__init__(self,msg) + + +class TriblerLegacyException(TriblerException): + """ Wrapper around fatal errors that happen in the download engine, + but which are not reported as Exception objects for legacy reasons, + just as text (often containing a stringified Exception). + Will be phased out. + """ + + def __init__(self,msg=None): + TriblerException.__init__(self,msg) + diff --git a/tribler-mod/Tribler/Core/exceptions.py.bak b/tribler-mod/Tribler/Core/exceptions.py.bak new file mode 100644 index 0000000..a704380 --- /dev/null +++ b/tribler-mod/Tribler/Core/exceptions.py.bak @@ -0,0 +1,82 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information +""" The Tribler-specifc Exceptions the Core may throw. """ + +# +# Exceptions +# +class TriblerException(Exception): + """ Super class for all Tribler-specific Exceptions the Tribler Core + throws. + """ + def __init__(self,msg=None): + Exception.__init__(self,msg) + + def __str__(self): + return str(self.__class__)+': '+Exception.__str__(self) + + +class OperationNotPossibleAtRuntimeException(TriblerException): + """ The requested operation is not possible after the Session or Download + has been started. + """ + def __init__(self,msg=None): + TriblerException.__init__(self,msg) + +class OperationNotPossibleWhenStoppedException(TriblerException): + """ The requested operation is not possible when the Download + has been stopped. 
+ """ + def __init__(self,msg=None): + TriblerException.__init__(self,msg) + +class OperationNotEnabledByConfigurationException(TriblerException): + """ The requested operation is not possible with the current + Session/Download configuration. + """ + def __init__(self,msg=None): + TriblerException.__init__(self,msg) + + +class NotYetImplementedException(TriblerException): + """ The requested operation is not yet fully implemented. """ + def __init__(self,msg=None): + TriblerException.__init__(self,msg) + + +class DuplicateDownloadException(TriblerException): + """ The Download already exists in the Session, i.e., a Download for + a torrent with the same infohash already exists. """ + def __init__(self,msg=None): + TriblerException.__init__(self,msg) + +class VODNoFileSelectedInMultifileTorrentException(TriblerException): + """ Attempt to download a torrent in Video-On-Demand mode that contains + multiple video files, but without specifying which one to play. """ + def __init__(self,msg=None): + TriblerException.__init__(self,msg) + +class LiveTorrentRequiresUsercallbackException(TriblerException): + """ Attempt to download a live-stream torrent without specifying a + callback function to call when the stream is ready to play. + Use set_video_event_callback(usercallback) to correct this problem. """ + def __init__(self,msg=None): + TriblerException.__init__(self,msg) + +class TorrentDefNotFinalizedException(TriblerException): + """ Attempt to start downloading a torrent from a torrent definition + that was not finalized. """ + def __init__(self,msg=None): + TriblerException.__init__(self,msg) + + +class TriblerLegacyException(TriblerException): + """ Wrapper around fatal errors that happen in the download engine, + but which are not reported as Exception objects for legacy reasons, + just as text (often containing a stringified Exception). + Will be phased out. + """ + + def __init__(self,msg=None): + TriblerException.__init__(self,msg) + diff --git a/tribler-mod/Tribler/Core/osutils.py b/tribler-mod/Tribler/Core/osutils.py new file mode 100644 index 0000000..1ebed0b --- /dev/null +++ b/tribler-mod/Tribler/Core/osutils.py @@ -0,0 +1,169 @@ +from time import localtime, strftime +# Written by Arno Bakker, ABC authors +# see LICENSE.txt for license information +""" OS-independent utility functions """ + +# +# Multiple methods for getting free diskspace +# +import sys +import os +import binascii + +try: + # Unix + from os import statvfs + import statvfs + def getfreespace(path): + s = os.statvfs(path.encode("utf-8")) + size = s[statvfs.F_BAVAIL] * long(s[statvfs.F_BSIZE]) + return size +except: + if (sys.platform == 'win32'): + try: + # Windows if win32all extensions are installed + import win32file + try: + # Win95 OSR2 and up + # Arno: this code was totally broken as the method returns + # a list of values indicating 1. free space for the user, + # 2. total space for the user and 3. total free space, so + # not a single value. + win32file.GetDiskFreeSpaceEx(".") + def getfreespace(path): + # Boudewijn: the win32file module is NOT unicode + # safe! We will try directories further up the + # directory tree in the hopes of getting a path on + # the same disk without the unicode... 
+ while True: + try: + return win32file.GetDiskFreeSpaceEx(path)[0] + except: + path = os.path.split(path)[0] + if not path: + raise + except: + # Original Win95 + # (2GB limit on partition size, so this should be + # accurate except for mapped network drives) + # Arno: see http://aspn.activestate.com/ASPN/docs/ActivePython/2.4/pywin32/win32file__GetDiskFreeSpace_meth.html + def getfreespace(path): + [spc, bps, nfc, tnc] = win32file.GetDiskFreeSpace(path) + return long(nfc) * long(spc) * long(bps) + + except ImportError: + # Windows if win32all extensions aren't installed + # (parse the output from the dir command) + def getfreespace(path): + try: + mystdin, mystdout = os.popen2(u"dir " + u"\"" + path + u"\"") + + sizestring = "0" + + for line in mystdout: + line = line.strip() + # Arno: FIXME: this won't work on non-English Windows, as reported by the IRT + index = line.rfind("bytes free") + if index > -1 and line[index:] == "bytes free": + parts = line.split(" ") + if len(parts) > 3: + part = parts[-3] + part = part.replace(",", "") + sizestring = part + break + + size = long(sizestring) + + if size == 0L: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","getfreespace: can't determine freespace of ",path + for line in mystdout: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",line + + size = 2**80L + except: + # If in doubt, just return something really large + # (1 yottabyte) + size = 2**80L + + return size + else: + # Any other cases + # TODO: support for Mac? (will statvfs work with OS X?) + def getfreespace(path): + # If in doubt, just return something really large + # (1 yottabyte) + return 2**80L + + +invalidwinfilenamechars = '' +for i in range(32): + invalidwinfilenamechars += chr(i) +invalidwinfilenamechars += '"*/:<>?\\|' +invalidlinuxfilenamechars = '/' + +def fix_filebasename(name, unit=False, maxlen=255): + """ Check if str is a valid Windows file name (or unit name if unit is true) + * If the filename isn't valid: returns a corrected name + * If the filename is valid: returns the filename + """ + if unit and (len(name) != 2 or name[1] != ':'): + return 'c:' + if not name or name == '.' 
or name == '..': + return '_' + + if unit: + name = name[0] + fixed = False + if len(name) > maxlen: + name = name[:maxlen] + fixed = True + + fixedname = '' + spaces = 0 + for c in name: + if sys.platform.startswith('win'): + invalidchars = invalidwinfilenamechars + else: + invalidchars = invalidlinuxfilenamechars + + if c in invalidchars: + fixedname += '_' + fixed = True + else: + fixedname += c + if c == ' ': + spaces += 1 + + file_dir, basename = os.path.split(fixedname) + while file_dir != '': + fixedname = basename + file_dir, basename = os.path.split(fixedname) + fixed = True + + if fixedname == '': + fixedname = '_' + fixed = True + + if fixed: + return last_minute_filename_clean(fixedname) + elif spaces == len(name): + # contains only spaces + return '_' + else: + return last_minute_filename_clean(name) + +def last_minute_filename_clean(name): + s = name.strip() # Arno: remove initial or ending space + if sys.platform == 'win32' and s.endswith('..'): + s = s[:-2] + return s + + +def get_readable_torrent_name(infohash, raw_filename): + # return name__infohash.torrent + hex_infohash = binascii.hexlify(infohash) + suffix = '__' + hex_infohash + '.torrent' + save_name = ' ' + fix_filebasename(raw_filename, maxlen=254-len(suffix)) + suffix + # use a space ahead to distinguish from previous collected torrents + return save_name + diff --git a/tribler-mod/Tribler/Core/osutils.py.bak b/tribler-mod/Tribler/Core/osutils.py.bak new file mode 100644 index 0000000..d949eca --- /dev/null +++ b/tribler-mod/Tribler/Core/osutils.py.bak @@ -0,0 +1,168 @@ +# Written by Arno Bakker, ABC authors +# see LICENSE.txt for license information +""" OS-independent utility functions """ + +# +# Multiple methods for getting free diskspace +# +import sys +import os +import binascii + +try: + # Unix + from os import statvfs + import statvfs + def getfreespace(path): + s = os.statvfs(path.encode("utf-8")) + size = s[statvfs.F_BAVAIL] * long(s[statvfs.F_BSIZE]) + return size +except: + if (sys.platform == 'win32'): + try: + # Windows if win32all extensions are installed + import win32file + try: + # Win95 OSR2 and up + # Arno: this code was totally broken as the method returns + # a list of values indicating 1. free space for the user, + # 2. total space for the user and 3. total free space, so + # not a single value. + win32file.GetDiskFreeSpaceEx(".") + def getfreespace(path): + # Boudewijn: the win32file module is NOT unicode + # safe! We will try directories further up the + # directory tree in the hopes of getting a path on + # the same disk without the unicode... 
+ while True: + try: + return win32file.GetDiskFreeSpaceEx(path)[0] + except: + path = os.path.split(path)[0] + if not path: + raise + except: + # Original Win95 + # (2GB limit on partition size, so this should be + # accurate except for mapped network drives) + # Arno: see http://aspn.activestate.com/ASPN/docs/ActivePython/2.4/pywin32/win32file__GetDiskFreeSpace_meth.html + def getfreespace(path): + [spc, bps, nfc, tnc] = win32file.GetDiskFreeSpace(path) + return long(nfc) * long(spc) * long(bps) + + except ImportError: + # Windows if win32all extensions aren't installed + # (parse the output from the dir command) + def getfreespace(path): + try: + mystdin, mystdout = os.popen2(u"dir " + u"\"" + path + u"\"") + + sizestring = "0" + + for line in mystdout: + line = line.strip() + # Arno: FIXME: this won't work on non-English Windows, as reported by the IRT + index = line.rfind("bytes free") + if index > -1 and line[index:] == "bytes free": + parts = line.split(" ") + if len(parts) > 3: + part = parts[-3] + part = part.replace(",", "") + sizestring = part + break + + size = long(sizestring) + + if size == 0L: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","getfreespace: can't determine freespace of ",path + for line in mystdout: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",line + + size = 2**80L + except: + # If in doubt, just return something really large + # (1 yottabyte) + size = 2**80L + + return size + else: + # Any other cases + # TODO: support for Mac? (will statvfs work with OS X?) + def getfreespace(path): + # If in doubt, just return something really large + # (1 yottabyte) + return 2**80L + + +invalidwinfilenamechars = '' +for i in range(32): + invalidwinfilenamechars += chr(i) +invalidwinfilenamechars += '"*/:<>?\\|' +invalidlinuxfilenamechars = '/' + +def fix_filebasename(name, unit=False, maxlen=255): + """ Check if str is a valid Windows file name (or unit name if unit is true) + * If the filename isn't valid: returns a corrected name + * If the filename is valid: returns the filename + """ + if unit and (len(name) != 2 or name[1] != ':'): + return 'c:' + if not name or name == '.' 
or name == '..': + return '_' + + if unit: + name = name[0] + fixed = False + if len(name) > maxlen: + name = name[:maxlen] + fixed = True + + fixedname = '' + spaces = 0 + for c in name: + if sys.platform.startswith('win'): + invalidchars = invalidwinfilenamechars + else: + invalidchars = invalidlinuxfilenamechars + + if c in invalidchars: + fixedname += '_' + fixed = True + else: + fixedname += c + if c == ' ': + spaces += 1 + + file_dir, basename = os.path.split(fixedname) + while file_dir != '': + fixedname = basename + file_dir, basename = os.path.split(fixedname) + fixed = True + + if fixedname == '': + fixedname = '_' + fixed = True + + if fixed: + return last_minute_filename_clean(fixedname) + elif spaces == len(name): + # contains only spaces + return '_' + else: + return last_minute_filename_clean(name) + +def last_minute_filename_clean(name): + s = name.strip() # Arno: remove initial or ending space + if sys.platform == 'win32' and s.endswith('..'): + s = s[:-2] + return s + + +def get_readable_torrent_name(infohash, raw_filename): + # return name__infohash.torrent + hex_infohash = binascii.hexlify(infohash) + suffix = '__' + hex_infohash + '.torrent' + save_name = ' ' + fix_filebasename(raw_filename, maxlen=254-len(suffix)) + suffix + # use a space ahead to distinguish from previous collected torrents + return save_name + diff --git a/tribler-mod/Tribler/Core/simpledefs.py b/tribler-mod/Tribler/Core/simpledefs.py new file mode 100644 index 0000000..103ff9f --- /dev/null +++ b/tribler-mod/Tribler/Core/simpledefs.py @@ -0,0 +1,140 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information +""" Simple definitions for the Tribler Core. """ +import os + +DLSTATUS_ALLOCATING_DISKSPACE = 0 # TODO: make sure this get set when in this alloc mode +DLSTATUS_WAITING4HASHCHECK = 1 +DLSTATUS_HASHCHECKING = 2 +DLSTATUS_DOWNLOADING = 3 +DLSTATUS_SEEDING = 4 +DLSTATUS_STOPPED = 5 +DLSTATUS_STOPPED_ON_ERROR = 6 + +dlstatus_strings = ['DLSTATUS_ALLOCATING_DISKSPACE', +'DLSTATUS_WAITING4HASHCHECK', +'DLSTATUS_HASHCHECKING', +'DLSTATUS_DOWNLOADING', +'DLSTATUS_SEEDING', +'DLSTATUS_STOPPED', +'DLSTATUS_STOPPED_ON_ERROR'] + +UPLOAD = 'up' +DOWNLOAD = 'down' + +DLMODE_NORMAL = 0 +DLMODE_VOD = 1 + +PERSISTENTSTATE_CURRENTVERSION = 2 + +STATEDIR_ITRACKER_DIR = 'itracker' +STATEDIR_DLPSTATE_DIR = 'dlcheckpoints' +STATEDIR_PEERICON_DIR = 'icons' +STATEDIR_TORRENTCOLL_DIR = 'collected_torrent_files' +STATEDIR_SESSCONFIG = 'sessconfig.pickle' +DESTDIR_COOPDOWNLOAD = 'downloadhelp' + +TRIBLER_TORRENT_EXT = ".tribe" + +# For observer/callback mechanism, see Session.add_observer() + +# subjects +NTFY_PEERS = 'peers' +NTFY_TORRENTS = 'torrents' +NTFY_PREFERENCES = 'preferences' +NTFY_SUPERPEERS = 'superpeers' # use NTFY_PEERS !! +NTFY_FRIENDS = 'friends' # use NTFY_PEERS !! 
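# Illustrative sketch of how these NTFY_* subject constants combine with the observer
# mechanism referred to above ("see Session.add_observer()").  The callback shape and the
# registration call are assumptions; Session.add_observer() defines the authoritative
# signature, and the changeType constants used below are defined further down in this file.
import sys

def example_peers_observer(subject, changeType, objectID, *args):
    print >> sys.stderr, "notification:", subject, changeType, objectID

# Registration would look roughly like:
#   session.add_observer(example_peers_observer, NTFY_PEERS, [NTFY_INSERT, NTFY_UPDATE, NTFY_DELETE])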
+NTFY_MYPREFERENCES = 'mypreferences' # currently not observable +NTFY_BARTERCAST = 'bartercast' # currently not observable +NTFY_MYINFO = 'myinfo' +NTFY_SEEDINGSTATS = 'seedingstats' +NTFY_SEEDINGSTATSSETTINGS = 'seedingstatssettings' +NTFY_MODERATIONCAST = 'moderationcast' +NTFY_VOTECAST = 'votecast' +NTFY_SEARCH = 'clicklogsearch' # BuddyCast 4 +NTFY_TERM= 'clicklogterm' + + +# non data handler subjects +NTFY_ACTIVITIES = 'activities' # an activity was set (peer met/dns resolved) +NTFY_REACHABLE = 'reachable' # the Session is reachable from the Internet + +# changeTypes +NTFY_UPDATE = 'update' # data is updated +NTFY_INSERT = 'insert' # new data is inserted +NTFY_DELETE = 'delete' # data is deleted +NTFY_SEARCH_RESULT = 'search_result' # new search result +NTFY_CONNECTION = 'connection' # connection made or broken + +# object IDs for NTFY_ACTIVITIES subject +NTFY_ACT_NONE = 0 +NTFY_ACT_UPNP = 1 +NTFY_ACT_REACHABLE = 2 +NTFY_ACT_GET_EXT_IP_FROM_PEERS = 3 +NTFY_ACT_MEET = 4 +NTFY_ACT_GOT_METADATA = 5 +NTFY_ACT_RECOMMEND = 6 +NTFY_ACT_DISK_FULL = 7 +NTFY_ACT_NEW_VERSION = 8 +NTFY_ACT_ACTIVE = 9 + +# Disk-allocation policies for download, see DownloadConfig.set_alloc_type +DISKALLOC_NORMAL = 'normal' +DISKALLOC_BACKGROUND = 'background' +DISKALLOC_PREALLOCATE = 'pre-allocate' +DISKALLOC_SPARSE = 'sparse' + +# UPnP modes, see SessionConfig.set_upnp_mode +UPNPMODE_DISABLED = 0 +UPNPMODE_WIN32_HNetCfg_NATUPnP = 1 +UPNPMODE_WIN32_UPnP_UPnPDeviceFinder = 2 +UPNPMODE_UNIVERSAL_DIRECT = 3 + +# Buddycast Collecting Policy parameters +BCCOLPOLICY_SIMPLE = 1 +# BCCOLPOLICY_T4T = 2 # Future work + +# Internal tracker scrape +ITRACKSCRAPE_ALLOW_NONE = 'none' +ITRACKSCRAPE_ALLOW_SPECIFIC = 'specific' +ITRACKSCRAPE_ALLOW_FULL = 'full' + +ITRACKDBFORMAT_BENCODE = 'bencode' +ITRACKDBFORMAT_PICKLE= 'pickle' + +ITRACKMULTI_ALLOW_NONE = 'none' +ITRACKMULTI_ALLOW_AUTODETECT = 'autodetect' +ITRACKMULTI_ALLOW_ALL = 'all' + +ITRACK_IGNORE_ANNOUNCEIP_NEVER = 0 +ITRACK_IGNORE_ANNOUNCEIP_ALWAYS = 1 +ITRACK_IGNORE_ANNOUNCEIP_IFNONATCHECK = 2 + +# Cooperative download +COOPDL_ROLE_COORDINATOR = 'coordinator' +COOPDL_ROLE_HELPER = 'helper' + +# Methods for authentication of the source in live streaming +LIVE_AUTHMETHOD_NONE = "None" # None +LIVE_AUTHMETHOD_ECDSA = "ECDSA" # Elliptic Curve DSA signatures + +# Video-On-Demand / live events +VODEVENT_START = "start" +VODEVENT_PAUSE = "pause" +VODEVENT_RESUME = "resume" + + +# Friendship messages +F_REQUEST_MSG = "REQ" +F_RESPONSE_MSG = "RESP" +F_FORWARD_MSG = "FWD" # Can forward any type of other friendship message + + +# States for a friend +FS_NOFRIEND = 0 +FS_MUTUAL = 1 +FS_I_INVITED = 2 +FS_HE_INVITED = 3 +FS_I_DENIED = 4 +FS_HE_DENIED = 5 diff --git a/tribler-mod/Tribler/Core/simpledefs.py.bak b/tribler-mod/Tribler/Core/simpledefs.py.bak new file mode 100644 index 0000000..94e9bcf --- /dev/null +++ b/tribler-mod/Tribler/Core/simpledefs.py.bak @@ -0,0 +1,139 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information +""" Simple definitions for the Tribler Core. 
""" +import os + +DLSTATUS_ALLOCATING_DISKSPACE = 0 # TODO: make sure this get set when in this alloc mode +DLSTATUS_WAITING4HASHCHECK = 1 +DLSTATUS_HASHCHECKING = 2 +DLSTATUS_DOWNLOADING = 3 +DLSTATUS_SEEDING = 4 +DLSTATUS_STOPPED = 5 +DLSTATUS_STOPPED_ON_ERROR = 6 + +dlstatus_strings = ['DLSTATUS_ALLOCATING_DISKSPACE', +'DLSTATUS_WAITING4HASHCHECK', +'DLSTATUS_HASHCHECKING', +'DLSTATUS_DOWNLOADING', +'DLSTATUS_SEEDING', +'DLSTATUS_STOPPED', +'DLSTATUS_STOPPED_ON_ERROR'] + +UPLOAD = 'up' +DOWNLOAD = 'down' + +DLMODE_NORMAL = 0 +DLMODE_VOD = 1 + +PERSISTENTSTATE_CURRENTVERSION = 2 + +STATEDIR_ITRACKER_DIR = 'itracker' +STATEDIR_DLPSTATE_DIR = 'dlcheckpoints' +STATEDIR_PEERICON_DIR = 'icons' +STATEDIR_TORRENTCOLL_DIR = 'collected_torrent_files' +STATEDIR_SESSCONFIG = 'sessconfig.pickle' +DESTDIR_COOPDOWNLOAD = 'downloadhelp' + +TRIBLER_TORRENT_EXT = ".tribe" + +# For observer/callback mechanism, see Session.add_observer() + +# subjects +NTFY_PEERS = 'peers' +NTFY_TORRENTS = 'torrents' +NTFY_PREFERENCES = 'preferences' +NTFY_SUPERPEERS = 'superpeers' # use NTFY_PEERS !! +NTFY_FRIENDS = 'friends' # use NTFY_PEERS !! +NTFY_MYPREFERENCES = 'mypreferences' # currently not observable +NTFY_BARTERCAST = 'bartercast' # currently not observable +NTFY_MYINFO = 'myinfo' +NTFY_SEEDINGSTATS = 'seedingstats' +NTFY_SEEDINGSTATSSETTINGS = 'seedingstatssettings' +NTFY_MODERATIONCAST = 'moderationcast' +NTFY_VOTECAST = 'votecast' +NTFY_SEARCH = 'clicklogsearch' # BuddyCast 4 +NTFY_TERM= 'clicklogterm' + + +# non data handler subjects +NTFY_ACTIVITIES = 'activities' # an activity was set (peer met/dns resolved) +NTFY_REACHABLE = 'reachable' # the Session is reachable from the Internet + +# changeTypes +NTFY_UPDATE = 'update' # data is updated +NTFY_INSERT = 'insert' # new data is inserted +NTFY_DELETE = 'delete' # data is deleted +NTFY_SEARCH_RESULT = 'search_result' # new search result +NTFY_CONNECTION = 'connection' # connection made or broken + +# object IDs for NTFY_ACTIVITIES subject +NTFY_ACT_NONE = 0 +NTFY_ACT_UPNP = 1 +NTFY_ACT_REACHABLE = 2 +NTFY_ACT_GET_EXT_IP_FROM_PEERS = 3 +NTFY_ACT_MEET = 4 +NTFY_ACT_GOT_METADATA = 5 +NTFY_ACT_RECOMMEND = 6 +NTFY_ACT_DISK_FULL = 7 +NTFY_ACT_NEW_VERSION = 8 +NTFY_ACT_ACTIVE = 9 + +# Disk-allocation policies for download, see DownloadConfig.set_alloc_type +DISKALLOC_NORMAL = 'normal' +DISKALLOC_BACKGROUND = 'background' +DISKALLOC_PREALLOCATE = 'pre-allocate' +DISKALLOC_SPARSE = 'sparse' + +# UPnP modes, see SessionConfig.set_upnp_mode +UPNPMODE_DISABLED = 0 +UPNPMODE_WIN32_HNetCfg_NATUPnP = 1 +UPNPMODE_WIN32_UPnP_UPnPDeviceFinder = 2 +UPNPMODE_UNIVERSAL_DIRECT = 3 + +# Buddycast Collecting Policy parameters +BCCOLPOLICY_SIMPLE = 1 +# BCCOLPOLICY_T4T = 2 # Future work + +# Internal tracker scrape +ITRACKSCRAPE_ALLOW_NONE = 'none' +ITRACKSCRAPE_ALLOW_SPECIFIC = 'specific' +ITRACKSCRAPE_ALLOW_FULL = 'full' + +ITRACKDBFORMAT_BENCODE = 'bencode' +ITRACKDBFORMAT_PICKLE= 'pickle' + +ITRACKMULTI_ALLOW_NONE = 'none' +ITRACKMULTI_ALLOW_AUTODETECT = 'autodetect' +ITRACKMULTI_ALLOW_ALL = 'all' + +ITRACK_IGNORE_ANNOUNCEIP_NEVER = 0 +ITRACK_IGNORE_ANNOUNCEIP_ALWAYS = 1 +ITRACK_IGNORE_ANNOUNCEIP_IFNONATCHECK = 2 + +# Cooperative download +COOPDL_ROLE_COORDINATOR = 'coordinator' +COOPDL_ROLE_HELPER = 'helper' + +# Methods for authentication of the source in live streaming +LIVE_AUTHMETHOD_NONE = "None" # None +LIVE_AUTHMETHOD_ECDSA = "ECDSA" # Elliptic Curve DSA signatures + +# Video-On-Demand / live events +VODEVENT_START = "start" +VODEVENT_PAUSE = "pause" +VODEVENT_RESUME = 
"resume" + + +# Friendship messages +F_REQUEST_MSG = "REQ" +F_RESPONSE_MSG = "RESP" +F_FORWARD_MSG = "FWD" # Can forward any type of other friendship message + + +# States for a friend +FS_NOFRIEND = 0 +FS_MUTUAL = 1 +FS_I_INVITED = 2 +FS_HE_INVITED = 3 +FS_I_DENIED = 4 +FS_HE_DENIED = 5 diff --git a/tribler-mod/Tribler/Core/superpeer.txt b/tribler-mod/Tribler/Core/superpeer.txt new file mode 100644 index 0000000..8f9410d --- /dev/null +++ b/tribler-mod/Tribler/Core/superpeer.txt @@ -0,0 +1,9 @@ +#ip, port, permid, [name] +superpeer1.das2.ewi.tudelft.nl, 7001, MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAL2I5yVc1+dWVEx3nbriRKJmOSlQePZ9LU7yYQoGABMvU1uGHvqnT9t+53eaCGziV12MZ1g2p0GLmZP9, SuperPeer1@Tribler +superpeer2.cs.vu.nl, 7002, MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZNX5NBOuGH4j2kumv/9WkPLrJPVkOr5oVImhcp8AC7w7ww9eZwUF7S/Q96If4UmVX+L6HMKSOTLPoPk, SuperPeer2@Tribler +superpeer3.tribler.org, 7003, MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQaLGR940aKktbAJNm6vYOTSN2P8z1P9EiQ48kJNAdrDl7oBkyrERZOq+IMMKIpu4ocsz5hxZHMTy2Fh, SuperPeer3@Tribler +superpeer4.das2.ewi.tudelft.nl, 7004, MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAduAK0/ZTjdg/NPd8CD9Q17J10CXqpFyHN5M8m6fAFXBQflBZT/YdH1fYwizR/hnQE4hIKCQTfvKz1pA, SuperPeer4@Tribler +superpeer5.das2.ewi.tudelft.nl, 7005, MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGZomjLNDu6i/5c/ATpsatWiL0P7huV/ixgzdvwlAU8AEYHp7ppyumydUg2MnoneHJ74H58yB+pUPSdu, SuperPeer5@Tribler +superpeer6.das2.ewi.tudelft.nl, 7006, MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfizxRUl7S3Fec2cl1+uML6tORwnUIod5mh3soWVANxmu+flFNp1yayiLPjB+dWQ6Va77FXbHDkw5smd, SuperPeer6@Tribler +superpeer7.das2.ewi.tudelft.nl, 7007, MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAFzfWHb/WPL+luFfXfUbtJGRpUnwbmyB0kH7t3UpAVSpKilym4Fzt2rS7HJTZyQ7yCI3c+xTRtMLZ0sc, SuperPeer7@Tribler +superpeer8.das2.ewi.tudelft.nl, 7008, MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAIV8h+eS+vQ+0uqZNv3MYYTLo5s0JP+cmkvJ7U4JAHhfRv1wCqZSKIuY7Q+3ESezhRnnmmX4pbOVhKTU, SuperPeer8@Tribler diff --git a/tribler-mod/Tribler/Debug/__init__.py b/tribler-mod/Tribler/Debug/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tribler-mod/Tribler/Debug/__init__.py.bak b/tribler-mod/Tribler/Debug/__init__.py.bak new file mode 100644 index 0000000..e69de29 diff --git a/tribler-mod/Tribler/Debug/console.py b/tribler-mod/Tribler/Debug/console.py new file mode 100644 index 0000000..ff2ae9d --- /dev/null +++ b/tribler-mod/Tribler/Debug/console.py @@ -0,0 +1,35 @@ +from time import localtime, strftime +""" +Alternate stdout and stderr with much more protection +""" + +import sys + +class SafePrintStream: + def __init__(self, stream): + self._stream = stream + + def write(self, arg): + self._stream.write(arg.encode("ASCII", "backslashreplace")) + + def flush(self): + self._stream.flush() + +class SafeLinePrintStream: + def __init__(self, stream): + self._stream = stream + self._parts = [] + + def write(self, arg): + self._parts.append(arg.encode("ASCII", "backslashreplace")) + if arg == "\n": + self._stream.write("".join(self._parts)) + self._parts = [] + + def flush(self): + self._stream.write("".join(self._parts)) + self._parts = [] + self._stream.flush() + +sys.stderr = SafeLinePrintStream(sys.stderr) +sys.stdout = sys.stderr diff --git a/tribler-mod/Tribler/Debug/console.py.bak b/tribler-mod/Tribler/Debug/console.py.bak new file mode 100644 index 0000000..1471d69 --- /dev/null +++ b/tribler-mod/Tribler/Debug/console.py.bak @@ -0,0 +1,34 @@ +""" +Alternate stdout and stderr with much more protection +""" + +import sys + +class SafePrintStream: + def __init__(self, stream): + self._stream = stream + + def write(self, arg): + 
self._stream.write(arg.encode("ASCII", "backslashreplace"))
+
+    def flush(self):
+        self._stream.flush()
+
+class SafeLinePrintStream:
+    def __init__(self, stream):
+        self._stream = stream
+        self._parts = []
+
+    def write(self, arg):
+        self._parts.append(arg.encode("ASCII", "backslashreplace"))
+        if arg == "\n":
+            self._stream.write("".join(self._parts))
+            self._parts = []
+
+    def flush(self):
+        self._stream.write("".join(self._parts))
+        self._parts = []
+        self._stream.flush()
+
+sys.stderr = SafeLinePrintStream(sys.stderr)
+sys.stdout = sys.stderr
diff --git a/tribler-mod/Tribler/Images/SwarmPlayerIcon.ico b/tribler-mod/Tribler/Images/SwarmPlayerIcon.ico
new file mode 100644
index 0000000000000000000000000000000000000000..31286164eeec246dbeb7678048b19ef46bf9dc38
GIT binary patch
literal 13094
[GIT binary patch data omitted. This and the following diffs add new binary image assets
(mode 100644) under tribler-mod/Tribler/Images/: SwarmPlayerIcon.ico, SwarmPluginIcon.ico,
background.png, fullScreen.png, mute.png, mute_hover.png, pause.png, pause_hover.png,
save.png, saveDisabled_hover.png, save_hover.png, sliderDot.png, plus further player
artwork (hover/disabled variants and similar).]
z=GijgU{)XaWw;9CE!+zH1slP*IH#&xm}PrRRK;;UKFa1nwmBORRX^sKk!oLE=quv z_-AWT+M)m#WKav#1M0hCgFsW^E#7S8(wT)~?=vLrgf;RRYXa@trWSeDpA6@`1W4$~tlX?M@jh;58!t;rFR@O@ zi`0{$bk(^8U|tF&OLIkv*Hq~>CT5mfd!W`l&>~g6!0nA(;wZ#0EbXYwpAiAJ-)D`a zZ>czor_z~l7{Gon#a{xFBQA8Gt&fA~sbAN5s&p~SkB{)(ds~Z^H5fKGour4?9&%`^ zvaIAq>6~L@xj(k22x66>0s_cdFF>$eKV5G*-7j?9z)kt7VlJ= z7iv1R&09I5ie~yVi>9}N+Ka5CI1?GH&olopkPF1*1@k0K`s1DEx-J1mkPG(cNA~Nq ze=96&lHWGGnH3#dWxVMjoM+2HgHSCVM9;Rk=f`pn4b1PQpjO8V=rtrnipCr}Fuj5K z>C&PK^G#&kK*O)rR3=8_iIvYKt$-s8vVH z>^P+zWdmZ~PRUqo3oY$&yK}y)-_xm5o0d}%d-kt34=YvWWlJ^!7T7XjKk=X4op%ZTztP=CGgTWn3+MQ_ zV%C<$onEDszYumvl(o_Q^b2<37cJcRu+-nns$LUdo;!VUJy*Ce)V6 zv}`l&xjEs?o9q-fwMJGFlxD`4#wWgGid{Rzb73I6iEuYPJ#8n7-yWNDP!E|(FUAL~ z!d=O=iqKIO3jYN3hsW*7ZrnGsw2n5n^o4bcTusxTX&6?Na|%t|@99iosS=TnQZ?Q= zfSq6?Clek=&Vf`_wOa-TaAoZaC0Q0;xpXNTa|c>F_q@jTYk#35Y|iPOz2zm|XDssk>2Vdc!tzo*?b0|w%+11IuC?b?1cOCj0&`iV4Ba@1ECZ>Mf?CTWaFDhq6oHJ=$j?#~eZKq86#RsK^#zpLYFvIRRumg~EKaE3yiSiluimD^Au?gHXYh?VTce@s2{=VkrmPRJwktMCW+SmUG z1%LPZ`olTC#czipif5m!F9Fu|>~~8D9?wen=JcvV{b#FB_BiOn?gSD<_$V1}s&W@0h2o0Ku+%%!r>xm?@pmI2HlP~Y z6`^`@F+X^l)4nu1$owlX2aAmm~|OD;8meG)UTg+%PiR;SfcF zCafD?LXj4)H6Ed$%O>BWJU4Bd7?~iAiL7aMBS=AVr8vvX=@%!gep~m@TIB3MnJh?$ zV4|YnQO-|5A`uxvj;>q~475uyJEW+QOM}kw94$5>C+l2wKo*vhyW2J&-OIGLQ=t_+ z&o094=r~+H-wPL)6ZB+#-1iQB=yM}UbT-#s^Zm;P-e&MCo9@Q^IOuNp^>pWf?|@qSBj|G7oD=HU%0MMMFGbmZ zKv8KjVbeDc!-L`8X|vv%gbqMgO@yhSgr_(aj%+=7`<=F&S3PE`3WaHZ91MV-?ht~MmvUhIo{ ze^81)mCsy!!Y$)IYok>w-LUiv=k(JA zM<-?CIbzRSq!BcDQkMzWm|)tCnq2+uZ^*j-MMMq=VCi0NBII%~0Z$Q$ldyiQQ^Q-O znb{&UF7MY^MO6b+(B0kVi2M(bQDy2OWvmF6B4Pn|uy!mMu?j_;I;mI7Fjah(zK-W* zXw@AYB)zjtS381ZVtHof^Ic`|L_RFY(sQR++mxUT)3uBYD3{A5Fil2gbrNJSx}S>{I=uQceXr92&Z~elM*v zJRI`fuGcnM*ELL#K{tve7;WS!YIcmH>-$MR=J4{LS19;g82*V zU6eU|`^U1~mJ<_#wRMZt#^(E(xDDUOso>OLpoyu7f^|`bH;WA*TjADSyDqx=l0@9=oM!BC6CBfv7Tkq0d0sCi32q_F7v8;dlbGVbn19&GvBs-%tFLsJ|3 z>Ny)eENLw<1=^*<6^M$;xJ|UfzM159Q;SM3)Glb{{BzE=v|m7`TM3FpId%M0W zM3jco)IdaKK~qIm1VVYP@Do1+$22>pp z^+q>JMSG}lZ|~`YK9}eO8z=DbG%T4MD%BN>c8?D!z_IQQFjZ*tHe45|bp^Mt7J68F zg(|BS5VWCHnfJ%#W@Dq!aMjln2dE7AS@xq8qy$mJ3!7TjXebEsGC>jB`!<-@j-Sx* znRLH6C2J|F+$&^#J|AwJ6UMr)89Q|3?#1BQS|{qTJ=TskmQwc^vUYEZO)8y73!#Z5 zCyj9i@C?OSRT2JyP4+8A<=4aJ%njN{U|BUzWlI_!m~p)vJ;)lm6g7?F6i)RXQ6wXI zeyVgB2WfhG%GUmVhG8Pn%l?*>d~Wm_jR2t>Unsgp7$`Xkj@B&=_)wcHSg6A&g8_iM zau@8qBzD~(>XpppxU!d3BD)s*tK0rO4D`g0AFl=9qgz##fzJ9G=M0Rj<$^Q>|2{W7)+E_P z&+4%^@6}CK!}QDgj=M+rmaS!fVUe-7<@`n?CjyDXhT&<+6j+XLU8e4i1-I;0lmIvbePdAuq z^`=v`9CxZ@e5YH=0To*n!|0H#1F7tt8?=-3`N_LnrCI-Ty) zij}8L1a}GusEH&<2WJKvDzjL-nQFPXR`vr64y6pMgE5gf!a%UyZ1+9guL9y>&$g_I z+ivKl_y_E@@cfiKAO7K^4F(2o_>W^YI_JI}PC-aSI9DH|sz{k}&lq9gBCl31RNqVh zDkFH0$UL$vGV)|Q$7#UsxtvKS^-nb!yXF~x>JeS6S}kb> zuat_g*DcOB31Ws_NxI5ljUhakTg!i^zCu@HNTvsx&)wel(9#%Lx<09jL{*dtWBsB& zAes#oaz6AZG)sW(tvl5Ug<}jF1O(ogdllrzN|RXn{1zY~9u6ge=vXEh#%8oiU90KG zg6xypJdS7XbwoOp zi57$)C}$~OPp|#0^$ld=TzK%P2o3{zC6peXV;@9;3?|%|3x))WJlw$gQtyO*)y|@8 z%2LHj`kc*I%SIbkm|<%D>ir!-?ifp5`Uf33_LEgeF9o5tof4M-X798j9VkN% zo8rThEKNg-^KiMm6R#M)2yB$vMYKpxU|6|OKP+mu_AFXHe6SfHz+P43)Sj~6LmZyhP>R-!o7 zY!vn(QOYUlIIeanw@Fm{6o|8iwzmry-zt02%T#P{hu}}V#qw1Vigjj)JOt`^Uk0N#aL_H2e@##Tm(0tIOwH^H9?#?5T? 
zCy7o#n0!9%U{V=pZAac#jem$Ys5<%5mj=^+r2Yuwzx`Z5urCFhf&}6Xd1pPgne+5| zzY<3DW)qU-7#^dWAJ~hph=4mO!Zas6Nhm}Hs+XQ#%UZUICjrvo?v?{vhj)nY5y2HD zjUqlE=D7(+uPEVfz=mMkp~qIQ@G#lM>sHHYq;vwXz~RR_)X*kUT*zk6Q|A^$zoo-9 zbt5-v)wnL;Y4_KrZ*4tD%>YC&Uh752n^t{JPK9VgLFyCH6l`iyTYGIMPAaNwxqRZK zS(%Dp(`HT8ex!(0vfTp0I5KTfSwbtJOjdX~*T8Bl+uzg!sxk;xkQg5in_fSUnZLiy zi@HdZmE8T;ZK06gzuOj){V#0`CC9p>%1jk2v%lIs`QR60Z#2Lqd(UgS1#VwdY}!ZW zt10%lBl4EqV3DqMJnj@-mQdRS>Ca%!(ERM266|fEW##`wNv9r!`jUAlfRIgkziHs% zK{cKqf#Sn@Z`oVfr`1VsDS@WjHfG$i8@h^dYd*rb(REzaq&1P)g~oLf6OBD8w+y?A z(em`ULG8hdz;4&lrGhzAdd6U6ewKDmHaxbJCtSA=ORcNxp{zU;5s^kuFOAHPCs|CO zJv;07VS-P=Yrvk^KCrVR*)o&F;HJkb7aUIrYg?5~E9nhh%4#mbD-n~6Qi!YGSUPjVjfc=C^zqX;Xwbznlr(<0sI8$7}; zCaKJF{DH43aifJE@omrrD*GAM_HUKOCExp$DUpnA6hAs3i;^n82u6nLxvB!(_Xb=Z zq!d*u6C^s4>C1VqlMeR7oomF(Hub>~(5wu!pmjbR-f%}?&O$$juA;{hL@3LSRnZJQcS@(>z9Z~3p3A0oskm~t==oB=`Y4!ZIaj}!Qqaj{s-?0GNQ6Ja-hFuw%enT z;mqaguLtdO2ob0u@rT8$K^-U9j-2EDAl3j6A808o)CzI!(T~$Vtakkw=4~7y$oG{$ z`!f4$1mYGoR7~b^frHDWOXgaO#H#$BQ%Sx%y_>U~dXD9L605$ewuY_x z4HxWNOKKF!Yax)uYSCr|)S6!2viLcs6S3rvB*(wT9vYh_-kQb4^Uynm5+XkG|t?8%9BTBSz?I$b3;j`Z^q%E}j?aCBq(@ z1CdpOV2JdltQU__5#w;D&SP7hbkBQx)#|P}%@3eGqt=vkV)|8Moho`D-PSgx6ouGM=D{21{tm-bC8;V!Xn zU(C9w=;JdMF;q*qSf%0YRslYdhtO^=A*X54I>L|kjv`srcd3j&%D|=fcTX@w7hS(Z zd4Bm)qGUgrSK^jd+sah$r6Bh62-!$I#j5=ELjaJdb%K)v`tcW9Z6~Y{#bex(@QVt@ zw3*zxf)x?c!(W!)_=h-}{E61QVJ3 zbV;A&RFu4_jkz55SE5x@)QaAS-if(d_4-Yi@S*YkjDWo*%H_sW<=2sP1?^X1&gB^m z&oWOZJ6Zx`fE58km~LTMZdenJ2cH3hx6q&_>axj?&?4w#lMxhHQ&;#`mB2|NSRF`L zhuK$pjGl|VsZzg$Y7nS4wjq~Ns<55jwRYD*W4Ru7e>CH@HLAFoi|yGXt-Yx12#J5?MwXiH?hr>=ok%^c&(^o1lvL0>eKh}-dnJ(2xa21Nvm01hNCv+7}l1-o^T~<(wDu?ARowX4Q zC}5m=8Hali#dI2rGQ#KN1QmpwvZ70TKNFi9oflp(j6_G!1?ULVQgd1TZWGXG);21> z3R0MA`N_-t+-j_@EzUnzsde-22@K9WshC`l>dK7jP;f(Dj7*cr%bU_T?wTDIC>zqTJcc}v9xc7S^Pwm63*n6oy_&-_SSmnP2yk6K&JHM&8Z`ar@bF>yWB>Q9? 
z!xr=Jw7g7hb=#x61Q7Br0Z$ia%XCbZ8mnqNpRa8#lK;Ax*eJ`~r*dZM{jGlqFibsb z`v0{W$$KU0R1WJTUdcpLi+o+x?~7$aKA{7>u#oHb!1;Q}5v27`P^|6RvHwU-F;dCr zDJhj#*0$>^m>N5pdd~rya@h5Wf1?6EvR>QKD8Hk0PCW7Aj8LuL7&z4RdQgOvRmrDo zJB}RZBqiP&&M9euR3KY4CtHZwD*% z)4CZx<-qj*#{;uO3X-m0PjZ^sy3{!2h)t zxQ2FmyU(Nii_}mZ;qZ)orw7`Et8OTol}sR|;JyABdLfC}7;~gx&5A)N(FQ{PJW#Ul zEOs=T$n8|;uzatLZWJbf46rnXuWytyEN(J_I%ZR{?bq>Itv#>Q%u_f?(&4qocUxKI z6D;F8$7u70w%eq6G2^Gt3*8PnpdJqH?^K8jGg+?WpUn#`(yIAdZ#A&Uj=z}o!C9l` zxbLbgc{{(hKh9OR8SL$$WGmZiWGkKtN@!uL<=y_lY%et?K)Lr;mGu23C?^M;DEJVf!kXK;!YteN;cO$rA*H$Nt+rGzf}zBL5N$py z$10hL#9LN-`jM1kkXx}wvjd5lXO2XUu*w6iUwr9TKeGllGzYUr<9vf~dRVVav083bw?;XquCY8ot61UbadD*5wkG{aKz+g~ z-;8X?zFgA#Dm<*XoO>pk)=10NRk>W{;iG*Hk$QSxtsrovUHM#4D8IYEK?1ZQ z*?1Xhb@-U%O+ZjL?9o7JHT%%-SV}QMHjv@hOc3=rFpn7i zgjkYF!&m(vtH+r9*Cb6FTSR?sYqXfT%&x?NO=lXVhk8vG(b#Rh=eq2su+(=@`<}DG z8y%i%hfp%k<;rW*Ynob>>#Df+d07yhqMV`(Z*)J}i@P)H^d7qP48!;aIfjqAd-ZoF z=Dr={vT~P+c__{Efm(*7=$FTZyx3Mv&LNovT<_V-N0y5e0j<53n3ibubl}QiH4ON` z=?6^3)_3}+rarfBvSMt**q;vBo_0>^J8}hH$~~q$vpKVds~ggmD?DZ@PaS-ELa&Y= zCVMeN+7f+HZC1XM&TG8tc`|DuQEP=Fkxv3pbUxc=i}r!$f?gbQ$s!GVDxO^kms)~W%ttzKf4_Y~j_LA!^_i;h4_0O0SR}8G zae~yOKa9;cO)5X#C}2fC?+WKwzKIzM{5;@^wrJsp*>5Vnb*QxE4zMTPCAI_zGhsQE zo<;9v7()}E$vr0j6zjaq}H$TPw9E2qQt@)>IQf<7L0>e|LmCpkjJdwy17~RvaWa%0{Cb0pLY1K zo#5K-b`RBixuaTfKArm|4{}b+fEGe7_@+l;(0p`0M=Qz0$+0xh48D@i`m1*@V%3heJ1+Gnq| z_F52GQC|uTwIh)VdlNm5tDBqr{9_LyhB7-l`PtWs=xSd=d#O3?8%Q*;pJ=Z<+YlWY zF51h_g}sOtdZ^la5q;>RYTut|%_LQOYoeiZiF^SrUa=V2icr`|a)~}cv02YhX!<)e zdG2Ghw)6lwMW3Z!K@Aj`(@GinS}MEyMOwS9jsEm<8@>IWmfAFoIOm=8+HsX_L5V84 z85+(~m6nujG?W_^my{&yD@@9j5hFtRrfuz&>hg;}S}fnhSv!EL{j;361E|VtIoBCF zQk9>>kbI=CP?eXXTy*rsS^QCz-ywHYTFZ_am6wQ)o+#NQy++l(WGBl1UV`p`ub~{M zqa|-Cd6sjns-KE&C8c@#l2$;e&pGocwrsc6IFRJ{2xIYxd? z9*Lh*uVCykQ<^BVppDAzJ4_oM(9o9chw0C+Yv}0v8ftCpaG$ZVSCV)?QocSdksf>L zAM=+>&%N^tK9q}=DbH(|`q?`F9RFG>kn64UFLJHXGd{ap^UFWVm+O1wui^kW$F}O9 z^J{qv@`v(spN;tkxweqAjmmS$#&XV9ah(F+r=;XA&L84!?ayVLf5)5KpG(CcU+v)8 zrlLfu|5~ZHsi-i0t`P95`ict|zWwZ#jq-S)z4(eDZIH&J4<2{y+ws{p{c~x>j#_pR zJBLzVQYtn9-e!0%IVeA`li^F`=Wi+u&uIViClwW2l<_VptJR5@>YocrO1Fu1u7A!i z05amEBem}r5fLjsBPUKA!n1f2BigJbN^1G<=gor8?U3D&^?ZIe?)6F3i7e8R^)+M{ zdI7wBlOl7@(#X^{3Y*$QiHkX97hI!t+gj;`DlHv5t)a$d!QVuuPMtEAr+P*pCF3Ku z16Ma(&%G#LpKTty7tg)sX%;n!>m2^k+NXeX=?Z3$t|gU~%SCzV@mC+u zLAyISKe8BbEBU8Si0jM)B_($gS*K;#+uKRF{JRg)@6E`$R@#b5Bvi)Kio+{S=K%X(E^81{#vg$T5|X(>NUs zAJ51+ozbWaMx)bilGlWrJklD;b3zk&XEagpq#NX$)kOZ;&E%eicG;Y~CNT<} z+(N!nIECc2kY5g-r!oqj&M0!)4T_rCNbz%SQtX@til5s^Nedb&EVqRsrnl0V8LbpH zlTpkp+|OZX(?;?0G?Xx3LoxFh#m#3FzmS1n8KusAk1`j0Kq(8GD0yKUvX;^oH&WK} z2AZ<6fl~5xl(s}m;Ya40q?Ts{J zt%hdb&M2?6kqS1q(vrIv72+y%jMeux(XuU^R%~gbRrhMBXsd?Gb~e-c?QPf_T50_b zMw=hg(9UO@>7K{6bl;P$bnjCfR~>DCs)e@i(bB`ux6(t;G1`UeqkG%vvAwOd`$a~T z`!)2;t1YzWbxzM6Y@-(rwbDy(w$gJ|oc14Sqy5#KUaQv9TkmP;@OxZZpYMOFq0^sf z=!4I7^wD`vpM0sI&%V;or(bF5i?4O`2O{3++7;vQ}bcjmn9POk!dWpFGVd7!$63_jJ z_`37NcYjN~>I(7mb%yUmAcfFG%ApmsmbRgXz3AZs^zc1;sI%vEBaBl+E~ln-oSJuY zYOUg=InNF7BLN=-_$0tj1^iOLZv=b=;4AGpy&T5rtz1s;uj6!XH>Yo_I9)s64sQea zfq-`e{7AsN1Ku0(fq+l6=QKNv)4E(v53WOByE)ZVar)=^cKFV|4Y#cBeutsMkw%E* zqNGtsetwRYzE1Wd-P}ftr~Vz5+B><9eE#(5)9=hylOY56n74O! 
za&jt1^V6q~k2fM*f-rCI;XcyoEvfzKDt{vYfmBa3{QbQ>oCP`}RZ2rOdbC%Nem2b8 z2L||iM;(>hpPnlZ?sM-xl^{cyw~ver@b_Q$z6@~4T>xai2kIY`hSt3Om@xw41?2^I ziP?BUdU!=3W!^qI3J`(g-<1L080qTjaYDL(FAGcpV~|NojENSn_>=O2Sx&BN<=dyk z{F%2;2LK=@C@ zAOun}nF>~?{|q(+r<=FWTckidq;D!+A8;Ni0L1*e<0U%zI#3{hd!zDV$`hqHP6;@cI7?MChAMMeq;^Y$Az0Als>WqCV{ z+RvBy*4k|RA?*VKNEzd^O5)o{7%ENDMi}D6E7oi}T5U+j-prPm%0m0%4SEP5r3}@i zY>7p{=@2YU()!zrH$duF$j}4GED{q9L#atBSOEyULXffHcJYeU4?cXS%p&QD=Ixi{ z7p_<@kSg7C_??rl?cV(ONzB>H3uOpflip!3U9zM=K$N_0G@j20Hq6R2Z$D$^;wAY7 zh3lV{nj83`I!^#hf??Mr(xyz^ds;x8xL2MHWy0gk+J}V$B1J%K+jqF? z#f=zS0im$SShMyap<&^%vB?S<`q|L4NW5A5;9x)qq+~MW*#J|?e7n*@``{4i9SVdp zf0%JF?#A29+W*!9E~T^#!}V@qx56_z=&4+-{|)M z@>6n&{fvf;IZva;ekIwv283K9@~)M2QvX`A396+5(Y0h3ewiG`T&Cf%SI8ydD%r)= z($J*qXj_NF99dWG16`G%t5RUIq^A<}RDzz`I~00r=uJsawF=eX;73Qjqd4`A;WS_@ zhknW>{d7poP3WjM$RVx)dg=|8p32EP<#X~+`+|bf&r#UKztET|f1#M%^AtbhOL87p zOQTX6p|f)6tDIcYIrLRdZYlpEue2Y@J-wEECf1T)#!nQS{WAqksip9oYZN}cmZGNr zLb0=crKEYkQsVqtavRr3?grhqk^Hmjq1!e~x~*Flmv!68yj{1=ZItxefLxSK<sn}TaT_fxX`%eP znrP`J4J|I`v~kN}D!uoxWXs%i|6$r(fxP_)t+`)IB@Z^!x(W>y?`Wmshd7n))KU2( zt+eS;F4;WquVnPVo)+5mTnqhauZA9ZiPIDNTIreBp{rK4(O&4Pue`0Hy@$25Pw1;h zwDjsRt)#OaJgLLEoQ95{(a?uq$olDLf7Q}?=%;`ErhBlw9k zQe&cLRHNpo(=+({D$r2(pFL+LTD*#ynvJjH&vu}p7QH6t%|(kRMg41nj@gcGZtD7+ zg1m)uXH|%Lkxn={Iy$QBb5|9j#eH&ZP0ich9zz|~qUO}nHLD8qx9aPw!ULQ<)S}#m zyF`O+`uaB_!UF=-qS76AZ$=A4{XvXIxY~5EVat}y<#%r0f8fCWeXqRo(w{IIk?Q(s z<#%sE7k91^Xh^iC#;8^C+;y8oi~FB=e}maVMR*VfcLvZ^3ILB*e$6Xq{k`naL~ z;GMuTQB@CFY~K8}Vt#85KmF{}TgvY&U6Z7$ca0sFHh$g%M`~&g-Lt-YGe+aCJCjxQ zqe4Q_V#0!Z?q0F7NTzl3?W+3WLBSzmq6N@c0b0dq6sziY+t}DVi1g4yJ9j?(@WZ=y z?Rx4drCNv!|9{RMI+s(#xr1%M1+opiKn{@?$YtzBvJL)Tk~ez7{%sp}g~&&iC)#C+ zChCndI3Y_646ldmprt_xoCZh9_H4(5CK?`JPfqc$NvC{F@v}arq&a6{$8Mxy;~)z_ z4)99*oQXM*oU;lv zI6GvN3A^*0Vn&NfTdAgf0fZS>(e4V^j1 z=!?Gz+b{OkiyV8au=DCjqh*pkH(?hM#6_BOj;H`bUdruh2M?n?JeS_!>*y5UO<(dV zy3AqQRs3kYLm-dl5Y1gnRJ@C5=OLo~fPcS^=se)R0sMD>zXbT70ACCECKY^$H&IRs z(b{=LyTIRvb`qW2PgHjr=VxEq6aOZR_=Q~Jm)8;hc{lO8D&kG&4et+e0K5m_LjjNV zc?f<*=g?Q(3c%lCPkcuh@n>?0A6!TLz1`@uiujfD?eH*%<-p{y7JJ(+d>;>C4?BnN zU>)`f?lzcsVi@uHxx~xX5r1Mg@gr5l|9ZY1Ua}?+gPh?GIU@kpD->)uit=y1MTbv4~9FLNVaiP2T3K$)@8G}C`U9Is65-J z7cGtLH*5M(MKp18Tv#-B*pj8b%Cm=acF~e?Mf3V8R!^_dV~dLNiYE2xA@?5?5L1Md z(O;=g2u>8wnGT8-Dmf`(V%GRHKWD|t9TJ~1b6W0#)X}aoym#Pi(SDAMIro1hlXW`Y6NW5K zRuneEUB^jJM@KCL?!%~m&i&-}-X-$6+D1M4mAydrgSJYn9`x9LC`WdzX}Lr_v$othQAdJ zn6?t~5hI^^;4iYC4*gt+tLa>HK!-FJ%6zEjB3M5c;;Nbpw}amT^GM{*)K}l+&?>rve;I6JOwR3e z^b5Pea0U<41YoMtb5P6Ruc1fV=ql!6bUMb} z_ZF>{&0{4$L2RzH04H!ZUTZ%OOi>^o(Nr#e|1Zb!R4{yhdS zE9)0p&-w<}F+qp5e=U2l%9wZ5_=|h^xTwBUpv8m^_%tz_Fu_ljcuKW4ZUO#1f&ZY0 z8w~zZ%r>N+*#y-w_(QQDuIfzDHazQ46*mWCEq$xZzqjzyVK>2o@Z(|dpXz`PtchEI zzx9yiWzc(>UE~c0U6}R9T-XNJGx#|%ZO0@q_yVz>{@5>6-&?^`zF7?naT10k*8Tb& zt*mE|YA(b&7i--0NVoZ?m&5Wh_@J>N(KneX9q@B9;9=k8T{8eOz_E5@ULaNU$9JT3s`Cd`ZoPe z(Wtk&J^nVsmX~>^H!+X#O-xOPnx|ekgvsw9!EcYjPR6VtgM12pvXtN_i0`dRHq?J_ zZwdbSWw4zw_elm}=Cgz&j$n-oV=*YRP$r4(IGWd#Oz0^tHOhcjS2L71xcgpB{ z3%+yd{`mKG$}bC?)WZBHHZz~hX6A*t7!5jb-lx#ffO%=?a9s*C;s43tx2nRp{T4ox z%zljc&bG^jhPiM#+-?4`9iCqnn$yC9CbzJFNzKepO^1r7;J4k5l*t|ZMcHF7S>}EC zjT+X6{yQb@8DcJO0sj3)2( zoeRp67HW9>d<}w#=%n@JjJm#IMxP-Z@DSz(z6AWjf&v+F3BQ7 zzBQH8tz5d#|ASl#$|fw+aro2o)Wx7(ADV?Gd5f-;2;Wo;OE%6M&=vIzFS&SVk2 zoIcR6`}p@CSx^?Yfb+yfoU7@`RP&U{+DKonmA+TX_ZGT*8^gZAk{7kH+qY{NVs#ke ze+;sSpNTA@m(%S6y05?V8&OaeHHY(Y6) z7>5rOM{Ev9d=E!F5kqVeLktl^nIQ=q1>%ArlRy@MY=}54BbiFbw}S(_kH78kf-?BB z@yOYnN6qCNzH2-lbR>a}RL~*ThFIH(L$a6z!kw5Y7~-FdWh#|?>k!<1{QCm`fT@fj z28|)Uj7837EE;qOo=Q-73b9umCc}VtaD(p^L(H1AHVpEuBBu`x?Y{lQ#;Kspdop8) 
zrDK897z>%fSokc?;j19=l%V0tPaUGqJ3!k|Ec6$|(8=y!-g~8CFo{K9Moc zNsReSVaz|5v7l+3htA+U0(7L{yQI_oe*6*N#vmKQ{@%un<@8oIGP3*j&t5|d$`A|5 zTr(N-03C>NWQcF%0aH1L-zSg9_#QoVUR@_ zWD$n%tuzB^aYfSg;|2=^}VpADnQ<+m5Vj?&96n1054U2YkI4;)|vE5cEJ!mw3uI>6YMs2Zb}% zBZ9GBpu+}q^o!+uKs@JmiJT8f1|6xKJAtQ0f)1m#0lt#IxB7iUrUUWR5*>)8W+~vQ zG!;(?eiD2o)`ow|@$R0VTG8ObVhY1qxRQHD=s<^#BUYXFi`DUgV|Cm^@Fn(>~7orhiLGa{7AQ0k#22Sp{Uww`H z7ovXz8sTU3mL|(@5wFlFuNYmU@1^6b=MTd3*axqu_sMKPM2iSQk_i#C7~TRxWLgY_ zXviBruw5nekEkl|I)|8ggt%aDN5G2{{tpLeF(0l#lUmR!P#KMwrA{D+Z%E5d)oq_n?91bdkbm*?kQ_CFxxGOT?# z%tq{#M$<0)Kw=Y7Q1aCK-3-ZXL?F=fRT`IZ(u~Lq=J{Ed{o6-efh7wEwf)2D5yE5` zBHVnAC?sX-Z@gHgSYC0AICFiA!o#CK?XrKz=&Lvt#$hUKmk3?E@{^GoBZ9Yl>IO#C z19`)%_Z@{(pR`hbL;oYj{s*UQFwWu37GZLT)H3vg8xeTrU5oH9L!nm7yfP&2!g}=h zq)YrqCH;s~NgU?EUNbzt!6U7qv8f$Z7$Fb`h_Ek1|KeX?=_iHUKkl;s(c^x?DGv_Z zaAt_(+mT5PZ=EtSQX<|BVPS^;MaY_dQpo*mm;L*s|BREEfb3?R>)~h;77qlj8G6GF z;?8iaZYU99eOQn3E8ITnvj4z|*I=E;fe+3XV6wvrC<5yYy_wvAf#F&LFC}ToBc`zH z{==sH0<9H?88}yh=>R862;?*A*6@hEYgW|8cW^h1LXPI`lrMuF$>_e5meIeGh*Wd8NRK zrC&&WZ_lpBCvo0&XarEspbJ1PgN8hBLp!&qJI6h~docfsl@fguHh5d#uK6c!b}giK z{2;GGb%GX~v0VFFwGLs6Z=Ar57IBORj%xQE(IUVR@saXMffEJ%Kn4VK-GAitT3E+2 z3t%4;Y7V4QD9g~z#Bk{+1xG(J4`ZdkO#nPBR!U-neMr~qD|kw6!XjZzgk2YAMDdnF zHg$~-A)G~9wFn%A9*z)DL}nszGk#tROoZ}Efqm04yzB3S@5EYInd0UPn5x6e zI}xUelS_G}h!Ezkl5=V;G&uz6hRomy#YIGG=V|qQTC5ZtkYZBY6T05Nh9=fR^M&FI z{T-?zqGvlvUwkdhB;a983Ra32LYuovcCoe4vZ2&LFZGXo)j4AFexxygNI?XPFy6+)f4i?;RjaAm?`JG?4BHo z$gvndtD(g`orc5=@HbPGkK@@JtUBr|0L6e@yy~#FtHVvFs+@7Zw(;$7G zf%~cJsi#9I+KP}G0i%b-%a+cCq{tzSa%cw}G9m)zn>oa31mLwH7*E3?u5e@;E02hcpC9^ z3N8)YDo~FPaPhlj5#Gl6-g0bL9I<*5y!hB5-j55K5P!!JkH-;<$jRq>i|_;7?kmT3 z!LeU(#N`!}DJj;XNWM%3;fs&e{ahM!&lN@oB95Iy~agrQynbO#} zB>dYVyuD9FIbubnvlhf!nkB?uO8y%@2UL{*A;#E0fLOEhXr87jtno4KTus!m)nO9BWoRP~uran)x$6RXeor&gZ{O|L!^ki7pSR*v{IDC>7mN^+0imx=bI F{15Pbq-6jA literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Images/tribler.ico b/tribler-mod/Tribler/Images/tribler.ico new file mode 100644 index 0000000000000000000000000000000000000000..aaea20ee79ffd3be2643be5eb710de444014fa21 GIT binary patch literal 24190 zcmeHPd0bW1_FpI};sgqc3Npw%Pcq3Y${;E@&x!+}si>h48D@i`m1*@V%3heJ1+Gnq| z_F52GQC|uTwIh)VdlNm5tDBqr{9_LyhB7-l`PtWs=xSd=d#O3?8%Q*;pJ=Z<+YlWY zF51h_g}sOtdZ^la5q;>RYTut|%_LQOYoeiZiF^SrUa=V2icr`|a)~}cv02YhX!<)e zdG2Ghw)6lwMW3Z!K@Aj`(@GinS}MEyMOwS9jsEm<8@>IWmfAFoIOm=8+HsX_L5V84 z85+(~m6nujG?W_^my{&yD@@9j5hFtRrfuz&>hg;}S}fnhSv!EL{j;361E|VtIoBCF zQk9>>kbI=CP?eXXTy*rsS^QCz-ywHYTFZ_am6wQ)o+#NQy++l(WGBl1UV`p`ub~{M zqa|-Cd6sjns-KE&C8c@#l2$;e&pGocwrsc6IFRJ{2xIYxd? 
z9*Lh*uVCykQ<^BVppDAzJ4_oM(9o9chw0C+Yv}0v8ftCpaG$ZVSCV)?QocSdksf>L zAM=+>&%N^tK9q}=DbH(|`q?`F9RFG>kn64UFLJHXGd{ap^UFWVm+O1wui^kW$F}O9 z^J{qv@`v(spN;tkxweqAjmmS$#&XV9ah(F+r=;XA&L84!?ayVLf5)5KpG(CcU+v)8 zrlLfu|5~ZHsi-i0t`P95`ict|zWwZ#jq-S)z4(eDZIH&J4<2{y+ws{p{c~x>j#_pR zJBLzVQYtn9-e!0%IVeA`li^F`=Wi+u&uIViClwW2l<_VptJR5@>YocrO1Fu1u7A!i z05amEBem}r5fLjsBPUKA!n1f2BigJbN^1G<=gor8?U3D&^?ZIe?)6F3i7e8R^)+M{ zdI7wBlOl7@(#X^{3Y*$QiHkX97hI!t+gj;`DlHv5t)a$d!QVuuPMtEAr+P*pCF3Ku z16Ma(&%G#LpKTty7tg)sX%;n!>m2^k+NXeX=?Z3$t|gU~%SCzV@mC+u zLAyISKe8BbEBU8Si0jM)B_($gS*K;#+uKRF{JRg)@6E`$R@#b5Bvi)Kio+{S=K%X(E^81{#vg$T5|X(>NUs zAJ51+ozbWaMx)bilGlWrJklD;b3zk&XEagpq#NX$)kOZ;&E%eicG;Y~CNT<} z+(N!nIECc2kY5g-r!oqj&M0!)4T_rCNbz%SQtX@til5s^Nedb&EVqRsrnl0V8LbpH zlTpkp+|OZX(?;?0G?Xx3LoxFh#m#3FzmS1n8KusAk1`j0Kq(8GD0yKUvX;^oH&WK} z2AZ<6fl~5xl(s}m;Ya40q?Ts{J zt%hdb&M2?6kqS1q(vrIv72+y%jMeux(XuU^R%~gbRrhMBXsd?Gb~e-c?QPf_T50_b zMw=hg(9UO@>7K{6bl;P$bnjCfR~>DCs)e@i(bB`ux6(t;G1`UeqkG%vvAwOd`$a~T z`!)2;t1YzWbxzM6Y@-(rwbDy(w$gJ|oc14Sqy5#KUaQv9TkmP;@OxZZpYMOFq0^sf z=!4I7^wD`vpM0sI&%V;or(bF5i?4O`2O{3++7;vQ}bcjmn9POk!dWpFGVd7!$63_jJ z_`37NcYjN~>I(7mb%yUmAcfFG%ApmsmbRgXz3AZs^zc1;sI%vEBaBl+E~ln-oSJuY zYOUg=InNF7BLN=-_$0tj1^iOLZv=b=;4AGpy&T5rtz1s;uj6!XH>Yo_I9)s64sQea zfq-`e{7AsN1Ku0(fq+l6=QKNv)4E(v53WOByE)ZVar)=^cKFV|4Y#cBeutsMkw%E* zqNGtsetwRYzE1Wd-P}ftr~Vz5+B><9eE#(5)9=hylOY56n74O! za&jt1^V6q~k2fM*f-rCI;XcyoEvfzKDt{vYfmBa3{QbQ>oCP`}RZ2rOdbC%Nem2b8 z2L||iM;(>hpPnlZ?sM-xl^{cyw~ver@b_Q$z6@~4T>xai2kIY`hSt3Om@xw41?2^I ziP?BUdU!=3W!^qI3J`(g-<1L080qTjaYDL(FAGcpV~|NojENSn_>=O2Sx&BN<=dyk z{F%2;2LK=@C@ zAOun}nF>~?{|q(+r<=FWTckidq;D!+A8;Ni0L1*e<0U%zI#3{hd!zDV$`hqHP6;@cI7?MChAMMeq;^Y$Az0Als>WqCV{ z+RvBy*4k|RA?*VKNEzd^O5)o{7%ENDMi}D6E7oi}T5U+j-prPm%0m0%4SEP5r3}@i zY>7p{=@2YU()!zrH$duF$j}4GED{q9L#atBSOEyULXffHcJYeU4?cXS%p&QD=Ixi{ z7p_<@kSg7C_??rl?cV(ONzB>H3uOpflip!3U9zM=K$N_0G@j20Hq6R2Z$D$^;wAY7 zh3lV{nj83`I!^#hf??Mr(xyz^ds;x8xL2MHWy0gk+J}V$B1J%K+jqF? 
z#f=zS0im$SShMyap<&^%vB?S<`q|L4NW5A5;9x)qq+~MW*#J|?e7n*@``{4i9SVdp zf0%JF?#A29+W*!9E~T^#!}V@qx56_z=&4+-{|)M z@>6n&{fvf;IZva;ekIwv283K9@~)M2QvX`A396+5(Y0h3ewiG`T&Cf%SI8ydD%r)= z($J*qXj_NF99dWG16`G%t5RUIq^A<}RDzz`I~00r=uJsawF=eX;73Qjqd4`A;WS_@ zhknW>{d7poP3WjM$RVx)dg=|8p32EP<#X~+`+|bf&r#UKztET|f1#M%^AtbhOL87p zOQTX6p|f)6tDIcYIrLRdZYlpEue2Y@J-wEECf1T)#!nQS{WAqksip9oYZN}cmZGNr zLb0=crKEYkQsVqtavRr3?grhqk^Hmjq1!e~x~*Flmv!68yj{1=ZItxefLxSK<sn}TaT_fxX`%eP znrP`J4J|I`v~kN}D!uoxWXs%i|6$r(fxP_)t+`)IB@Z^!x(W>y?`Wmshd7n))KU2( zt+eS;F4;WquVnPVo)+5mTnqhauZA9ZiPIDNTIreBp{rK4(O&4Pue`0Hy@$25Pw1;h zwDjsRt)#OaJgLLEoQ95{(a?uq$olDLf7Q}?=%;`ErhBlw9k zQe&cLRHNpo(=+({D$r2(pFL+LTD*#ynvJjH&vu}p7QH6t%|(kRMg41nj@gcGZtD7+ zg1m)uXH|%Lkxn={Iy$QBb5|9j#eH&ZP0ich9zz|~qUO}nHLD8qx9aPw!ULQ<)S}#m zyF`O+`uaB_!UF=-qS76AZ$=A4{XvXIxY~5EVat}y<#%r0f8fCWeXqRo(w{IIk?Q(s z<#%sE7k91^Xh^iC#;8^C+;y8oi~FB=e}maVMR*VfcLvZ^3ILB*e$6Xq{k`naL~ z;GMuTQB@CFY~K8}Vt#85KmF{}TgvY&U6Z7$ca0sFHh$g%M`~&g-Lt-YGe+aCJCjxQ zqe4Q_V#0!Z?q0F7NTzl3?W+3WLBSzmq6N@c0b0dq6sziY+t}DVi1g4yJ9j?(@WZ=y z?Rx4drCNv!|9{RMI+s(#xr1%M1+opiKn{@?$YtzBvJL)Tk~ez7{%sp}g~&&iC)#C+ zChCndI3Y_646ldmprt_xoCZh9_H4(5CK?`JPfqc$NvC{F@v}arq&a6{$8Mxy;~)z_ z4)99*oQXM*oU;lv zI6GvN3A^*0Vn&NfTdAgf0fZS>(e4V^j1 z=!?Gz+b{OkiyV8au=DCjqh*pkH(?hM#6_BOj;H`bUdruh2M?n?JeS_!>*y5UO<(dV zy3AqQRs3kYLm-dl5Y1gnRJ@C5=OLo~fPcS^=se)R0sMD>zXbT70ACCECKY^$H&IRs z(b{=LyTIRvb`qW2PgHjr=VxEq6aOZR_=Q~Jm)8;hc{lO8D&kG&4et+e0K5m_LjjNV zc?f<*=g?Q(3c%lCPkcuh@n>?0A6!TLz1`@uiujfD?eH*%<-p{y7JJ(+d>;>C4?BnN zU>)`f?lzcsVi@uHxx~xX5r1Mg@gr5l|9ZY1Ua}?+gPh?GIU@kpD->)uit=y1MTbv4~9FLNVaiP2T3K$)@8G}C`U9Is65-J z7cGtLH*5M(MKp18Tv#-B*pj8b%Cm=acF~e?Mf3V8R!^_dV~dLNiYE2xA@?5?5L1Md z(O;=g2u>8wnGT8-Dmf`(V%GRHKWD|t9TJ~1b6W0#)X}aoym#Pi(SDAMIro1hlXW`Y6NW5K zRuneEUB^jJM@KCL?!%~m&i&-}-X-$6+D1M4mAydrgSJYn9`x9LC`WdzX}Lr_v$othQAdJ zn6?t~5hI^^;4iYC4*gt+tLa>HK!-FJ%6zEjB3M5c;;Nbpw}amT^GM{*)K}l+&?>rve;I6JOwR3e z^b5Pea0U<41YoMtb5P6Ruc1fV=ql!6bUMb} z_ZF>{&0{4$L2RzH04H!ZUTZ%OOi>^o(Nr#e|1Zb!R4{yhdS zE9)0p&-w<}F+qp5e=U2l%9wZ5_=|h^xTwBUpv8m^_%tz_Fu_ljcuKW4ZUO#1f&ZY0 z8w~zZ%r>N+*#y-w_(QQDuIfzDHazQ46*mWCEq$xZzqjzyVK>2o@Z(|dpXz`PtchEI zzx9yiWzc(>UE~c0U6}R9T-XNJGx#|%ZO0@q_yVz>{@5>6-&?^`zF7?naT10k*8Tb& zt*mE|YA(b&7i--0NVoZ?m&5Wh_@J>N(KneX9q@B9;9=k8T{8eOz_E5@ULaNU$9JT3s`Cd`ZoPe z(Wtk&J^nVsmX~>^H!+X#O-xOPnx|ekgvsw9!EcYjPR6VtgM12pvXtN_i0`dRHq?J_ zZwdbSWw4zw_elm}=Cgz&j$n-oV=*YRP$r4(IGWd#Oz0^tHOhcjS2L71xcgpB{ z3%+yd{`mKG$}bC?)WZBHHZz~hX6A*t7!5jb-lx#ffO%=?a9s*C;s43tx2nRp{T4ox z%zljc&bG^jhPiM#+-?4`9iCqnn$yC9CbzJFNzKepO^1r7;J4k5l*t|ZMcHF7S>}EC zjT+X6{yQb@8DcJO0sj3)2( zoeRp67HW9>d<}w#=%n@JjJm#IMxP-Z@DSz(z6AWjf&v+F3BQ7 zzBQH8tz5d#|ASl#$|fw+aro2o)Wx7(ADV?Gd5f-;2;Wo;OE%6M&=vIzFS&SVk2 zoIcR6`}p@CSx^?Yfb+yfoU7@`RP&U{+DKonmA+TX_ZGT*8^gZAk{7kH+qY{NVs#ke ze+;sSpNTA@m(%S6y05?V8&OaeHHY(Y6) z7>5rOM{Ev9d=E!F5kqVeLktl^nIQ=q1>%ArlRy@MY=}54BbiFbw}S(_kH78kf-?BB z@yOYnN6qCNzH2-lbR>a}RL~*ThFIH(L$a6z!kw5Y7~-FdWh#|?>k!<1{QCm`fT@fj z28|)Uj7837EE;qOo=Q-73b9umCc}VtaD(p^L(H1AHVpEuBBu`x?Y{lQ#;Kspdop8) zrDK897z>%fSokc?;j19=l%V0tPaUGqJ3!k|Ec6$|(8=y!-g~8CFo{K9Moc zNsReSVaz|5v7l+3htA+U0(7L{yQI_oe*6*N#vmKQ{@%un<@8oIGP3*j&t5|d$`A|5 zTr(N-03C>NWQcF%0aH1L-zSg9_#QoVUR@_ zWD$n%tuzB^aYfSg;|2=^}VpADnQ<+m5Vj?&96n1054U2YkI4;)|vE5cEJ!mw3uI>6YMs2Zb}% zBZ9GBpu+}q^o!+uKs@JmiJT8f1|6xKJAtQ0f)1m#0lt#IxB7iUrUUWR5*>)8W+~vQ zG!;(?eiD2o)`ow|@$R0VTG8ObVhY1qxRQHD=s<^#BUYXFi`DUgV|Cm^@Fn(>~7orhiLGa{7AQ0k#22Sp{Uww`H z7ovXz8sTU3mL|(@5wFlFuNYmU@1^6b=MTd3*axqu_sMKPM2iSQk_i#C7~TRxWLgY_ zXviBruw5nekEkl|I)|8ggt%aDN5G2{{tpLeF(0l#lUmR!P#KMwrA{D+Z%E5d)oq_n?91bdkbm*?kQ_CFxxGOT?# z%tq{#M$<0)Kw=Y7Q1aCK-3-ZXL?F=fRT`IZ(u~Lq=J{Ed{o6-efh7wEwf)2D5yE5` 
zBHVnAC?sX-Z@gHgSYC0AICFiA!o#CK?XrKz=&Lvt#$hUKmk3?E@{^GoBZ9Yl>IO#C z19`)%_Z@{(pR`hbL;oYj{s*UQFwWu37GZLT)H3vg8xeTrU5oH9L!nm7yfP&2!g}=h zq)YrqCH;s~NgU?EUNbzt!6U7qv8f$Z7$Fb`h_Ek1|KeX?=_iHUKkl;s(c^x?DGv_Z zaAt_(+mT5PZ=EtSQX<|BVPS^;MaY_dQpo*mm;L*s|BREEfb3?R>)~h;77qlj8G6GF z;?8iaZYU99eOQn3E8ITnvj4z|*I=E;fe+3XV6wvrC<5yYy_wvAf#F&LFC}ToBc`zH z{==sH0<9H?88}yh=>R862;?*A*6@hEYgW|8cW^h1LXPI`lrMuF$>_e5meIeGh*Wd8NRK zrC&&WZ_lpBCvo0&XarEspbJ1PgN8hBLp!&qJI6h~docfsl@fguHh5d#uK6c!b}giK z{2;GGb%GX~v0VFFwGLs6Z=Ar57IBORj%xQE(IUVR@saXMffEJ%Kn4VK-GAitT3E+2 z3t%4;Y7V4QD9g~z#Bk{+1xG(J4`ZdkO#nPBR!U-neMr~qD|kw6!XjZzgk2YAMDdnF zHg$~-A)G~9wFn%A9*z)DL}nszGk#tROoZ}Efqm04yzB3S@5EYInd0UPn5x6e zI}xUelS_G}h!Ezkl5=V;G&uz6hRomy#YIGG=V|qQTC5ZtkYZBY6T05Nh9=fR^M&FI z{T-?zqGvlvUwkdhB;a983Ra32LYuovcCoe4vZ2&LFZGXo)j4AFexxygNI?XPFy6+)f4i?;RjaAm?`JG?4BHo z$gvndtD(g`orc5=@HbPGkK@@JtUBr|0L6e@yy~#FtHVvFs+@7Zw(;$7G zf%~cJsi#9I+KP}G0i%b-%a+cCq{tzSa%cw}G9m)zn>oa31mLwH7*E3?u5e@;E02hcpC9^ z3N8)YDo~FPaPhlj5#Gl6-g0bL9I<*5y!hB5-j55K5P!!JkH-;<$jRq>i|_;7?kmT3 z!LeU(#N`!}DJj;XNWM%3;fs&e{ahM!&lN@oB95Iy~agrQynbO#} zB>dYVyuD9FIbubnvlhf!nkB?uO8y%@2UL{*A;#E0fLOEhXr87jtno4KTus!m)nO9BWoRP~uran)x$6RXeor&gZ{O|L!^ki7pSR*v{IDC>7mN^+0imx=bI F{15Pbq-6jA literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Images/volume.png b/tribler-mod/Tribler/Images/volume.png new file mode 100644 index 0000000000000000000000000000000000000000..edbc2f77fef9d9373ae84853d48d9e42bf6dfae7 GIT binary patch literal 517 zcmV+g0{Z=lP)X0ssI3rF8oj00004XF*Lt006JZ zHwB960000PbVXQnQ*UN;cVTj606}DLVr3vnZDD6+Qe|Oed2z{QJOBUzmq|oHRCwBA zY-ng;Kmtqj5%Lft z00M{+sBH7*&B#Vzu^vdWva$jhzkmORXoZNgz;!?*&;URHF`{`6i$@8_!5so;fGmeN zJv%!aSrbqY5jaz)PDKtS6sIDK5_Stf073nL8Ab#FK2zZV1NR-WEDJ8xa4$Dt0H{vn zxW=>=9-C++7CYgg1PMyG2#}mLYZms9#bpF85r6;!g{`Ed<4GVrJ;mKd1lmHA{c>KbX4lq8^$`rKJg6`U>U;yU=V;`rT_!AJx zu>v;&IqBiE6Hi(rE!Wd3^#KGJqC^D74bU^BmML^~EZKmC>L#LX0ssI3rF8oj00004XF*Lt006JZ zHwB960000PbVXQnQ*UN;cVTj606}DLVr3vnZDD6+Qe|Oed2z{QJOBU!S4l)cRCwBA zY-ng;KmtqkYr_L1u}mB{tYD4($XN}ED+^7A0IPd z0{{WUh~_yg9wi_LcLr2oD&z?~r9#aH)oSxd8(}bwc6=2!7qa ziD@lF5R*i)lkd-WAlVTe0O3}EgAynR5&@GH3ztLK=sHDzenhryZipi|pg*FBAaMZ# z2o$!Gl9JD#KW88jFfot|h}DUibeV{WOtkpGrxQ&ZRy8d6orEWUfeQ{vNqBmIC!MKN zr@{pxSsPBGxK`xnXCQ)TmR44b}AEc%ss7~JBBQ4krzFHByl4RW z5>%Q$xJjf=WR=9Qar%<^|35r_LiRL7Wy`{qHSysL|Nj9QaB*ZdVJq4H{)V%gV}j6R zYnQBpy8xmRq~`B$pg2Mc%midMJ}Wu@{=yTOJ&`^L4w6cQ_}^b8%eO#9a2SPdCHLQ- r6z0q6VXk1ie-c|1@cj8f>(U1R?PztD77&h^00000NkvXXu0mjf1ePK6 literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/LICENSE.txt b/tribler-mod/Tribler/LICENSE.txt new file mode 100644 index 0000000..930592d --- /dev/null +++ b/tribler-mod/Tribler/LICENSE.txt @@ -0,0 +1,630 @@ +Unless otherwise noted, all files are released under the MIT +license, exceptions contain licensing information in them. + +Copyright (C) 2001-2002 Bram Cohen + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation files +(the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of the Software, +and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. 
+ +The Software is provided "AS IS", without warranty of any kind, +express or implied, including but not limited to the warranties of +merchantability, fitness for a particular purpose and +noninfringement. In no event shall the authors or copyright holders +be liable for any claim, damages or other liability, whether in an +action of contract, tort or otherwise, arising from, out of or in +connection with the Software or the use or other dealings in the +Software. + +------------------------------------------------------------------------------ + +All code written by Jie Yang, Pawel Garbacki, Jun Wang, Arno Bakker, +Jan David Mol, Qin Chen, Yuan Yuan, Jelle Roozenburg, Freek Zindel, +Fabian van der Werf, Lucian Musat, Michel Meulpolder, Maarten ten Brinke, +Ali Abbas, Boxun Zhang, Lucia d' Acunto, Rameez Rahman, Boudewijn Schoon, +Richard Gwin, Diego Rabaioli, Riccardo Petrocco has the following license: + + TRIBLER file-sharing library. + + Copyright (c) 2005-2009, Delft University of Technology and Vrije + Universiteit Amsterdam; All rights reserved. + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + + Delft University of Technology + Postbus 5 + 2600 AA Delft + The Netherlands + + Vrije Universiteit + De Boelelaan 1105 + 1081 HV Amsterdam + The Netherlands + + +The research leading to this library has received funding from: + - BSIK Freeband Communication I-Share project (Dutch Ministry of Economic + Affairs) + - Netherlands Organisation for Scientific Research (NWO) grant 612.060.215. + - Dutch Technology Foundation STW: Veni project DTC.7299 + - European Community's Sixth Framework Programme in the P2P-FUSION project + under contract no 035249. + - The European Community's Seventh Framework Programme in the P2P-Next project + under grant agreement no 216217. + +------------------------------------------------------------------------------- + + BuddyCast4 content-recommendation library. + + The research leading to this library has received funding from the + European Community's Seventh Framework Programme [FP7/2007-2011] + in the Petamedia project under grant agreement no. 216444 + + The following library modules are Copyright (c) 2008-2009, + Delft University of Technology and Technische Universität Berlin; + All rights reserved. + + BaseLib/Core/BuddyCast/buddycast.py + + The following library modules are Copyright (c) 2008-2009, + Technische Universität Berlin; + All rights reserved. + + BaseLib/Core/Search/Reranking.py + BaseLib/Test/test_buddycast4.py + BaseLib/Test/test_buddycast4_stresstest.py + + All library modules are free software, unless stated otherwise; you can + redistribute them and/or modify them under the terms of the GNU Lesser + General Public License as published by the Free Software Foundation; in + particular, version 2.1 of the License. 
+ + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + + Delft University of Technology + Postbus 5 + 2600 AA Delft + The Netherlands + + Technische Universität Berlin + Strasse des 17. Juni 135 + 10623 Berlin + Germany + +------------------------------------------------------------------------------- + + GNU LESSER GENERAL PUBLIC LICENSE + Version 2.1, February 1999 + + Copyright (C) 1991, 1999 Free Software Foundation, Inc. + 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + +[This is the first released version of the Lesser GPL. It also counts + as the successor of the GNU Library Public License, version 2, hence + the version number 2.1.] + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +Licenses are intended to guarantee your freedom to share and change +free software--to make sure the software is free for all its users. + + This license, the Lesser General Public License, applies to some +specially designated software packages--typically libraries--of the +Free Software Foundation and other authors who decide to use it. You +can use it too, but we suggest you first think carefully about whether +this license or the ordinary General Public License is the better +strategy to use in any particular case, based on the explanations below. + + When we speak of free software, we are referring to freedom of use, +not price. Our General Public Licenses are designed to make sure that +you have the freedom to distribute copies of free software (and charge +for this service if you wish); that you receive source code or can get +it if you want it; that you can change the software and use pieces of +it in new free programs; and that you are informed that you can do +these things. + + To protect your rights, we need to make restrictions that forbid +distributors to deny you these rights or to ask you to surrender these +rights. These restrictions translate to certain responsibilities for +you if you distribute copies of the library or if you modify it. + + For example, if you distribute copies of the library, whether gratis +or for a fee, you must give the recipients all the rights that we gave +you. You must make sure that they, too, receive or can get the source +code. If you link other code with the library, you must provide +complete object files to the recipients, so that they can relink them +with the library after making changes to the library and recompiling +it. And you must show them these terms so they know their rights. + + We protect your rights with a two-step method: (1) we copyright the +library, and (2) we offer you this license, which gives you legal +permission to copy, distribute and/or modify the library. + + To protect each distributor, we want to make it very clear that +there is no warranty for the free library. 
Also, if the library is +modified by someone else and passed on, the recipients should know +that what they have is not the original version, so that the original +author's reputation will not be affected by problems that might be +introduced by others. + + Finally, software patents pose a constant threat to the existence of +any free program. We wish to make sure that a company cannot +effectively restrict the users of a free program by obtaining a +restrictive license from a patent holder. Therefore, we insist that +any patent license obtained for a version of the library must be +consistent with the full freedom of use specified in this license. + + Most GNU software, including some libraries, is covered by the +ordinary GNU General Public License. This license, the GNU Lesser +General Public License, applies to certain designated libraries, and +is quite different from the ordinary General Public License. We use +this license for certain libraries in order to permit linking those +libraries into non-free programs. + + When a program is linked with a library, whether statically or using +a shared library, the combination of the two is legally speaking a +combined work, a derivative of the original library. The ordinary +General Public License therefore permits such linking only if the +entire combination fits its criteria of freedom. The Lesser General +Public License permits more lax criteria for linking other code with +the library. + + We call this license the "Lesser" General Public License because it +does Less to protect the user's freedom than the ordinary General +Public License. It also provides other free software developers Less +of an advantage over competing non-free programs. These disadvantages +are the reason we use the ordinary General Public License for many +libraries. However, the Lesser license provides advantages in certain +special circumstances. + + For example, on rare occasions, there may be a special need to +encourage the widest possible use of a certain library, so that it becomes +a de-facto standard. To achieve this, non-free programs must be +allowed to use the library. A more frequent case is that a free +library does the same job as widely used non-free libraries. In this +case, there is little to gain by limiting the free library to free +software only, so we use the Lesser General Public License. + + In other cases, permission to use a particular library in non-free +programs enables a greater number of people to use a large body of +free software. For example, permission to use the GNU C Library in +non-free programs enables many more people to use the whole GNU +operating system, as well as its variant, the GNU/Linux operating +system. + + Although the Lesser General Public License is Less protective of the +users' freedom, it does ensure that the user of a program that is +linked with the Library has the freedom and the wherewithal to run +that program using a modified version of the Library. + + The precise terms and conditions for copying, distribution and +modification follow. Pay close attention to the difference between a +"work based on the library" and a "work that uses the library". The +former contains code derived from the library, whereas the latter must +be combined with the library in order to run. + + GNU LESSER GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. 
This License Agreement applies to any software library or other +program which contains a notice placed by the copyright holder or +other authorized party saying it may be distributed under the terms of +this Lesser General Public License (also called "this License"). +Each licensee is addressed as "you". + + A "library" means a collection of software functions and/or data +prepared so as to be conveniently linked with application programs +(which use some of those functions and data) to form executables. + + The "Library", below, refers to any such software library or work +which has been distributed under these terms. A "work based on the +Library" means either the Library or any derivative work under +copyright law: that is to say, a work containing the Library or a +portion of it, either verbatim or with modifications and/or translated +straightforwardly into another language. (Hereinafter, translation is +included without limitation in the term "modification".) + + "Source code" for a work means the preferred form of the work for +making modifications to it. For a library, complete source code means +all the source code for all modules it contains, plus any associated +interface definition files, plus the scripts used to control compilation +and installation of the library. + + Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running a program using the Library is not restricted, and output from +such a program is covered only if its contents constitute a work based +on the Library (independent of the use of the Library in a tool for +writing it). Whether that is true depends on what the Library does +and what the program that uses the Library does. + + 1. You may copy and distribute verbatim copies of the Library's +complete source code as you receive it, in any medium, provided that +you conspicuously and appropriately publish on each copy an +appropriate copyright notice and disclaimer of warranty; keep intact +all the notices that refer to this License and to the absence of any +warranty; and distribute a copy of this License along with the +Library. + + You may charge a fee for the physical act of transferring a copy, +and you may at your option offer warranty protection in exchange for a +fee. + + 2. You may modify your copy or copies of the Library or any portion +of it, thus forming a work based on the Library, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. + + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. + + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. + + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. + + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. 
Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Library, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Library, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote +it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Library. + +In addition, mere aggregation of another work not based on the Library +with the Library (or with a work based on the Library) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may opt to apply the terms of the ordinary GNU General Public +License instead of this License to a given copy of the Library. To do +this, you must alter all the notices that refer to this License, so +that they refer to the ordinary GNU General Public License, version 2, +instead of to this License. (If a newer version than version 2 of the +ordinary GNU General Public License has appeared, then you can specify +that version instead if you wish.) Do not make any other change in +these notices. + + Once this change is made in a given copy, it is irreversible for +that copy, so the ordinary GNU General Public License applies to all +subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of +the Library into a program that is not a library. + + 4. You may copy and distribute the Library (or a portion or +derivative of it, under Section 2) in object code or executable form +under the terms of Sections 1 and 2 above provided that you accompany +it with the complete corresponding machine-readable source code, which +must be distributed under the terms of Sections 1 and 2 above on a +medium customarily used for software interchange. + + If distribution of object code is made by offering access to copy +from a designated place, then offering equivalent access to copy the +source code from the same place satisfies the requirement to +distribute the source code, even though third parties are not +compelled to copy the source along with the object code. + + 5. A program that contains no derivative of any portion of the +Library, but is designed to work with the Library by being compiled or +linked with it, is called a "work that uses the Library". Such a +work, in isolation, is not a derivative work of the Library, and +therefore falls outside the scope of this License. + + However, linking a "work that uses the Library" with the Library +creates an executable that is a derivative of the Library (because it +contains portions of the Library), rather than a "work that uses the +library". The executable is therefore covered by this License. +Section 6 states terms for distribution of such executables. 
+ + When a "work that uses the Library" uses material from a header file +that is part of the Library, the object code for the work may be a +derivative work of the Library even though the source code is not. +Whether this is true is especially significant if the work can be +linked without the Library, or if the work is itself a library. The +threshold for this to be true is not precisely defined by law. + + If such an object file uses only numerical parameters, data +structure layouts and accessors, and small macros and small inline +functions (ten lines or less in length), then the use of the object +file is unrestricted, regardless of whether it is legally a derivative +work. (Executables containing this object code plus portions of the +Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may +distribute the object code for the work under the terms of Section 6. +Any executables containing that work also fall under Section 6, +whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also combine or +link a "work that uses the Library" with the Library to produce a +work containing portions of the Library, and distribute that work +under terms of your choice, provided that the terms permit +modification of the work for the customer's own use and reverse +engineering for debugging such modifications. + + You must give prominent notice with each copy of the work that the +Library is used in it and that the Library and its use are covered by +this License. You must supply a copy of this License. If the work +during execution displays copyright notices, you must include the +copyright notice for the Library among them, as well as a reference +directing the user to the copy of this License. Also, you must do one +of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. (It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) + + b) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (1) uses at run time a + copy of the library already present on the user's computer system, + rather than copying library functions into the executable, and (2) + will operate properly with a modified version of the library, if + the user installs one, as long as the modified version is + interface-compatible with the version that the work was made with. + + c) Accompany the work with a written offer, valid for at + least three years, to give the same user the materials + specified in Subsection 6a, above, for a charge no more + than the cost of performing this distribution. + + d) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. 
+ + e) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. + + For an executable, the required form of the "work that uses the +Library" must include any data and utility programs needed for +reproducing the executable from it. However, as a special exception, +the materials to be distributed need not include anything that is +normally distributed (in either source or binary form) with the major +components (compiler, kernel, and so on) of the operating system on +which the executable runs, unless that component itself accompanies +the executable. + + It may happen that this requirement contradicts the license +restrictions of other proprietary libraries that do not normally +accompany the operating system. Such a contradiction means you cannot +use both them and the Library together in an executable that you +distribute. + + 7. You may place library facilities that are a work based on the +Library side-by-side in a single library together with other library +facilities not covered by this License, and distribute such a combined +library, provided that the separate distribution of the work based on +the Library and of the other library facilities is otherwise +permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. You may not copy, modify, sublicense, link with, or distribute +the Library except as expressly provided under this License. Any +attempt otherwise to copy, modify, sublicense, link with, or +distribute the Library is void, and will automatically terminate your +rights under this License. However, parties who have received copies, +or rights, from you under this License will not have their licenses +terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Library or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Library (or any work based on the +Library), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Library or works based on it. + + 10. Each time you redistribute the Library (or any work based on the +Library), the recipient automatically receives a license from the +original licensor to copy, distribute, link with or modify the Library +subject to these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties with +this License. + + 11. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. 
If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Library at all. For example, if a patent +license would not permit royalty-free redistribution of the Library by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Library. + +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply, +and the section as a whole is intended to apply in other circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 12. If the distribution and/or use of the Library is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Library under this License may add +an explicit geographical distribution limitation excluding those countries, +so that distribution is permitted only in or among countries not thus +excluded. In such case, this License incorporates the limitation as if +written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new +versions of the Lesser General Public License from time to time. +Such new versions will be similar in spirit to the present version, +but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Library +specifies a version number of this License which applies to it and +"any later version", you have the option of following the terms and +conditions either of that version or of any later version published by +the Free Software Foundation. If the Library does not specify a +license version number, you may choose any version ever published by +the Free Software Foundation. + + 14. If you wish to incorporate parts of the Library into other free +programs whose distribution conditions are incompatible with these, +write to the author to ask for permission. For software which is +copyrighted by the Free Software Foundation, write to the Free +Software Foundation; we sometimes make exceptions for this. Our +decision will be guided by the two goals of preserving the free status +of all derivatives of our free software and of promoting the sharing +and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO +WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR +OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY +KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE +LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME +THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY +AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU +FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR +CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE +LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING +RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A +FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF +SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Libraries + + If you develop a new library, and you want it to be of the greatest +possible use to the public, we recommend making it free software that +everyone can redistribute and change. You can do so by permitting +redistribution under these terms (or, alternatively, under the terms of the +ordinary General Public License). + + To apply these terms, attach the following notices to the library. It is +safest to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least the +"copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +Also add information on how to contact you by electronic and paper mail. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the library, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the + library `Frob' (a library for tweaking knobs) written by James Random Hacker. + + , 1 April 1990 + Ty Coon, President of Vice + +That's all there is to it! + +------------------------------------------------------------------------------- + +PRIVACY WARNING: This software will by default exchange your download +history with others. This feature can be disabled by disabling the +recommender in the Preference menu. 
See also the disclaimer on +http://www.tribler.org/ diff --git a/tribler-mod/Tribler/Lang/__init__.py b/tribler-mod/Tribler/Lang/__init__.py new file mode 100644 index 0000000..78b152e --- /dev/null +++ b/tribler-mod/Tribler/Lang/__init__.py @@ -0,0 +1,4 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information + diff --git a/tribler-mod/Tribler/Lang/__init__.py.bak b/tribler-mod/Tribler/Lang/__init__.py.bak new file mode 100644 index 0000000..84ea404 --- /dev/null +++ b/tribler-mod/Tribler/Lang/__init__.py.bak @@ -0,0 +1,3 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information + diff --git a/tribler-mod/Tribler/Lang/english.lang b/tribler-mod/Tribler/Lang/english.lang new file mode 100644 index 0000000..bbc84e8 --- /dev/null +++ b/tribler-mod/Tribler/Lang/english.lang @@ -0,0 +1,1423 @@ +################################################################## +# You can change language here, this is all Tribler using variables +# Make sure that try to keep text length as the original text +# +# Note: text strings can be written as either: +# stringname = "some string" +# or: +# stringname_line1 = "first line of a string" +# stringname_line2 = "second line of a string" +# +# (Tribler will automatically add the lines together) +# +################################################################## + +[ABC/language] + +# The name of the language defined in this file: +languagename = "English" + +# this credit will display in aboutme dialog +# translate = "Translator: " +translate = "" + +# All ABC Variables +####################### +title = "Tribler" + +superseederrornotcompleted = "Super-seed can only be enabled for completed torrents" +superseedmustruntorrentbefore = "This torrent must be running before using Super-Seed mode" + +superwarningmsg_line1 = "This option greatly reduces the torrent's efficiency." +superwarningmsg_line2 = "Super-seed should only be used for initial seeding or" +superwarningmsg_line3 = "for re-seeding." +superwarningmsg_line4 = "" +superwarningmsg_line5 = "Super-seed mode will stay in effect until the torrent" +superwarningmsg_line6 = "is stopped." + +failedinvalidtorrent = "Failed : Invalid torrent file." +failedtorrentmissing = "Failed : .torrent file does not exist or cannot be read." +removetorrent = "Do you wish to delete the torrent file?" +ok = "OK" +cancel = "Cancel" +apply = "Apply" +close = "Close" +save = "Save" +saveandapply = "Save and Apply" +done = "Done" + +choosefiletosaveas = "Choose file to save as, pick a partial download to resume" +choosedirtosaveto = "Choose a directory to save to (pick a partial download to resume)" +enterurl = "Enter the URL for the torrent you wish to add:" +confirmmsg = "Do you want to close Tribler ?" 
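+# Illustration of the multi-line convention described in the header of this
+# file (hypothetical key "samplehint", shown only as a commented example):
+#   samplehint_line1 = "first part of a hint"
+#   samplehint_line2 = "and its second part"
+# Code that asks for "samplehint" gets both parts combined into a single
+# string, just as the superwarningmsg_line1..superwarningmsg_line6 entries
+# above are combined when "superwarningmsg" is looked up.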
+confirm = "Confirm" +aboutabc = "About Tribler" +abcpreference = "Preferences" +managefriendspeers = "Manage Friends/Encountered Peer List" +managefriends = "Manage Friends" +recommendatefiles = "Recommendation" +addfriend = "Add a friend" +editfriend = "Edit a friend's info" +viewpeerlist = "View Encountered Peers" +addpeeradfriend = "Add this peer as your friend" +deletepeer = "Delete this peer" +deletepeerfriend = "Remove the peer from your friends list" +fakefile = "Fake File" +norating = "No Rating" +rankitems = "Rank Items" +assignrating = "Right click on a torrent to assign a 1--5 star rating" +showabcwindow = "Show Tribler Window" +error = "Error" +warning = "Warning" +invalidinput = "Invalid input" +cantconnectwebserver_line1 = "Could not connect to update server." +cantconnectwebserver_line2 = "It may be down or you are not connected to the Internet." +abclatestversion = "Latest Version" +nonewversion = "There is no new version available.
Please visit www.tribler.org for more information"
+hasnewversion = "There is a new version available. Please upgrade."
+globaluploadsetting = "Global Upload"
+downloadsetting = "Download Setting"
+ratelimits = "Rate Limiting"
+seedoptions = "Seeding Options"
+webinterfaceservice = "Web Interface Service"
+
+duplicatetorrent = "Duplicate Torrent"
+duplicatetorrentinlist = "This torrent (or one with the same hash value) already exists in the list"
+duplicatetorrentmsg = "This torrent is a duplicate!\nAre you sure you want to replace it?"
+choosetorrentfile = "Choose a torrent file"
+cantgettorrentfromurl = "Can't get torrent from this URL"
+localsetting = "Local Settings"
+errordeletefile = "Error while trying to delete file.\nFile cannot be found or is in use"
+filenotfound = "File not found or cannot be accessed."
+confirmdeletefile = "Are you sure you want to remove this file or folder?"
+choosenewlocation = "Choose a new location for this torrent"
+
+extracterrorduplicatemsg_line1 = "A file with the same name already exists in the destination folder."
+extracterrorduplicatemsg_line2 = "Do you want to overwrite it?"
+extracterrorduplicate = "Duplicate file name"
+
+
+extracterrorinactive = "At least one selected torrent is active. Please deactivate before extracting."
+
+
+extracterrormoving = "Can't move the torrent file."
+torrentdetail = "Torrent Details..."
+
+moveup = "Move torrent up"
+movedown = "Move torrent down"
+movetop = "Move torrent to top"
+movebottom = "Move torrent to bottom"
+clearallcompleted = "Clear all completed torrents"
+pauseall = "Pause All"
+stopall = "Stop All"
+restartall = "Restart All"
+unstopall = "Queue all stopped torrents"
+mode = "Mode Manual/Auto"
+webservice = "Web Service: "
+torrentfilenotfound = "Torrent file not found"
+clear = "Clear"
+errormovefile = "Error while moving files"
+
+totaldlspeed = "Total DL Speed:"
+totalulspeed = "Total UL Speed:"
+
+failbehavior1 = "Set status to:"
+failbehavior2 = "when a torrent fails"
+
+defaultpriority = "Default priority for new torrents:"
+
+################################
+# Menu
+################################
+menu_file = "&File"
+menuaction = "&Action"
+menutools = "&Tools"
+menuversion = "&Help"
+menuaboutabc = "&About Tribler"
+menuaboutabcmsg = "See Credits"
+menuchecklatestversion = "&Check for updates"
+menuchecklatestversionmsg = "Check Latest Version"
+menuwebinterfaceservice = "&Web Interface Service"
+menuwebinterfaceservicemsg = "Start/Stop and Config Web Interface Service"
+menucreatetorrent = "&Create Torrent"
+menucreatetorrentmsg = "Create .torrent file"
+menumanagefriends = "&Manage Friends List"
+menumyinfo = "My &Info"
+menuexit = "&Exit"
+menuexitmsg = "Close Program"
+menuglobaluploadsetting = "&Global Upload Setting"
+menuglobaluploadsettingmsg = "Set the global upload value"
+menuabcpreference = "&Preferences"
+menuabcpreferencemsg = "Set preferences"
+
+menu_addtorrent = "&Add Torrent"
+menu_addtorrentfile = "Add torrent from &file"
+menu_addtorrentnondefault = "Add torrent from file (to &non-default location)"
+menu_addtorrenturl = "Add torrent from &URL"
+
+menu_pauseall = "&Pause All"
+menu_stopall = "&Stop All"
+menu_unstopall = "&Queue all stopped torrents"
+menu_clearcompleted = "&Clear Completed"
+
+#########################
+# Library Overview
+#########################
+
+playFastDisabled = "Give high priority and play ASAP"
+playFastEnabled = "Back to normal mode"
+playerDisabled = "Please wait until first part is available \n(Tribler is currently giving first 
part high priority)" +playerEnabled = "Click to play" +boostDisabled = "Ask friends to boost your download" +boostEnabled = "Boosting" + + +######################### +# ToolBar +######################### +addtorrentfile_short = "Add Torrent File" +addtorrentfiletonondefault_short = "Add Torrent File to non-default location" +addtorrenturl_short = "Add Torrent from URL" + +tb_play_short = "Play video" +tb_resume_short = "Resume torrent" +tb_resume_long = "Resume/Launch torrent" +tb_reseedresume_short = "Reseed Resume" +tb_reseedresume_long = "Resume without hashcheck, use only for seeding/reseeding." +tb_pause_short = "Pause torrent" +tb_pause_long = "Pause active torrent(s) (without releasing resources)" +tb_stop_short = "Stop torrent" +tb_stop_long = "Stop torrent (release resources)" +tb_queue_short = "Queue torrent" +tb_queue_long = "Force torrent into queue" +tb_delete_short = "Remove torrent" +tb_delete_long = "Remove torrent only from Tribler list" +tb_spy_short = "Current Seed/Peer" +tb_spy_long = "See current number of seed/peer of torrent on the tracker" +tb_torrentdetail_short = "Torrent Details" +tb_buddy_short = "Manage Friends/Encountered Peers" +tb_file_short = "Show Download History" +tb_video_short = "Video Player" +tb_dlhelp_short = "Download Booster" + +tb_urm = "URM:" +tb_maxsim = "Active:" + +########################## +# Priority +########################## +# These are used for display in the list +highest = "Highest" +high = "High" +normal = "Normal" +low = "Low" +lowest = "Lowest" + +# These are used for menus +rhighest = "H&ighest" +rhigh = "&High" +rnormal = "&Normal" +rlow = "&Low" +rlowest = "L&owest" + +################################################### +# Seeding Setting +################################################### +uploadoptforcompletedfile = "Upload option for completed files" +unlimitedupload = "Unlimited seeding" +continueuploadfor = "Continue seeding for" +untilratio = "Seeding until UL/DL ratio = " +uploadsetting = "Upload Setting" +maxuploads = "Maximum uploads:" +maxuploadrate = "Maximum upload rate:" +maxoveralluploadrate = "Maximum overall upload rate:" +whendownload = "when downloading" +whennodownload = "when no downloading" + +maxdownloadrate = "Maximum download rate:" +maxoveralldownloadrate = "Maximum overall download rate:" + +zeroisunlimited = "(0 = Unlimited)" +zeroisauto = "(0 = Auto)" + +uploadrateintwarning = "Only integer allowed in Maximum upload rate setting" +uploadrateminwarning = "Minimum upload rate is 3kB/s or 0 for unlimited upload rate" +uploadrateminwarningauto = "Minimum upload rate is 3kB/s or 0 for auto upload rate" + +#Common option for t4t and g2g +default_setting = "default" +seed_sometime = "Seeding for" +seed_hours = "hours" +seed_mins = "minutes" +no_seeding = "No seeding" + +#Seeding option texts for tit-4-tat +tit-4-tat = "tit-4-tat: (Forgets about uploads)" +no_leeching = "Seed until UL/DL ratio > 1.0 (no Bittorrent leeching)" +unlimited_seeding = "Unlimited seeding" + +#Seeding option texts for give-2-get +give-2-get = "give-2-get: (Remembers every upload)" +seed_for_large_ratio = "Seed only to peers with UL/DL ratio >" +boost__reputation = "Unlimited seeding (Boost your reputation)" + + +############################################ +# Units +############################################ +Byte = "B" +KB = "KB" +MB = "MB" +GB = "GB" +TB = "TB" + +week = "W" +day = "D" +hour = "H" +minute = "M" +second = "S" +l_week = "w" +l_day = "d" +l_hour = "h" +l_minute = "m" +l_second = "s" + 
+############################################ +# Tribler Tweak +############################################ +up = "upload speed" +down = "download speed" +columns = "Columns" +column = "Column :" +displayname = "Column Name :" +columnwidth = "Column Width :" +eta = "Estimated time needed to complete: " + +customizetoolbar = "Customize Toolbar" + +############################################## +# Tribler Detail Frame +############################################## + +networkinfo = "Network Info" +fileinfo = "File Info" +torrentinfo = "Torrent Info" +geoinfo = "Geographic Info" +helperinfo = "Download Booster" + +dnumconnectedseed = "# Connected seed :" +dseenseed = "# Seen seed" +dnumconnectedpeer = "# Connected peer :" +dseeingcopies = "# Seeing copies :" +davgpeerprogress = "Avg peer progress :" +ddownloadedsize = "Downloaded size :" +duploadedsize = "Uploaded size : " +dtotalspeed = "Total speed : " +dportused = "Port used : " +updateseedpeer = "Update #Seed/#Peer" +manualannounce = "Manual Announce" +externalannounce = "External Announce" +finishallocation = "Finish Allocation" +spewoptunchoke = "Optimistic Unchoke" +spewIP = "IP" +spewlr = "Local/Remote" +spewinterested = "Interested" +spewchoking = "Choking" +spewinteresting = "Interesting" +spewchoecked = "Choked" +spewsnubbed = "Snubbed" +spewdownloaded = "Downloaded" +spewuploaded = "Uploaded" +spewcompleted = "Completed" +spewpeerdownloadspeed = "Peer Download Speed" +entertrackerannounceurl = "Enter tracker anounce URL:" +TOTALS = "TOTALS:" +KICKED = "KICKED" +BANNED = "BANNED" +detailline1 = "currently downloading %d pieces (%d just started), %d pieces partially retrieved" +detailline2 = "%d of %d pieces complete (%d just downloaded), %d failed hash check" + +country_name = "Country" +country_code = "Country Code" +city = "City" +latitude = "Latitude" +longitude = "Longitude" +coordinate = "Coordinate" +peer_active = "Active" +peer_inactive = "Inactive" +name = "Name" +permid = "PermID" +mypermid = "My PermID" +pasteinvitationemail = "Your friend should provide you the following information by sending you an invitation:" +ipaddress = "IP" +icon = "Icon" +#nickname_help = "Input the friend's nickname or whatever you'd like to identify him/her" +#friendsipaddr_help = "Input the friend's IP address, e.g. 202.115.39.65" +#friendsport_help = "Input the friend's listening port number" +#friendspermid_help = "Input the friend's PermID" +#friendsicon_help = "Input full path of the friend's icon" +nicknameempty_error = "Name is empty" +friendsport_error = "Port is not a number" +friendspermid_error = "PermID must be given (in BASE64, single line)" +fiendsiconnotfound_error= "Icon file does not exist" +friendsiconnot32bmp_error= "Icon file is not a 32x32 BMP" +friendsiconnotbmp_error = "Icon file is not BMP" +myinfo = "My information" +myinfo_explanation = "Copy and paste this information in an email to your friends, so they can add you to their Friends List in Tribler." +invitation_body = "Hi,\r\n\r\nI am using Tribler (http://tribler.org) and want to ask you to do the same and add me as a friend. 
To do so, start Tribler, click on Friends, then click on the Add Friends button, and paste the following information:\r\n\r\n" +invitation_subject = "Friendship invitation on Tribler" +invitationbtn = "Invite friends" +dlhelpdisabledstop = "Download Booster is disabled because the torrent is stopped" +dlhelpdisabledhelper = "Download Booster is disabled because you are a helper" +dlhelphowto1 = "You can only request mutual (two way) friends to boost your downloads." +dlhelphowto2 = "\nMore info: \nTo use the download booster you must make friends with other Tribler users, and they must make friends with you. To make friends, use the 'Add as friend' button in the Persons overview or the 'Invite Friends' and 'Add Friends' button in the Friends overview." +friends = "Friends" +helpers = "Helpers" +availcandidates = "Available Candidates" +requestdlhelp = "Request Help ->" +requestdlhelp_help = "Ask friends to help in downloading this torrent" +stopdlhelp = "<- Stop Help" +stopdlhelp_help = "Stop friends' help" +helping_friend = "Helping " +helping_stopped = "Helping was stopped remotely, please remove torrent" + +##################################################### +# Meta info frame +##################################################### +fileinfo0_text = "Filename" +fileinfo1_text = "Size" +fileinfo2_text = "Progress" +fileinfo3_text = "MD5" +fileinfo4_text = "CRC-32" +fileinfo5_text = "SHA-1" +fileinfo6_text = "ED2K" + +encoding = "Encoding :" + +filename = "File name :" +destination = "Destination :" + +directoryname = "Directory name :" +file = "File" +progress = "Progress" +infohash = "Info Hash :" +pieces = "Pieces : " +str1 = "%s (%s bytes)" +str2 = "%i (%s bytes each)" +announceurl = "Announce URL :" +announceurls = "Announce URLs" +tier = "Tier " +single = "Single:" +likelytracker = "Likely Tracker :" +comment = "Comments :" +creationdate = "Creation Date :" +filesize = "Filesize" +archivesize = "Archive Size" + +######################################################## +# ABCOptionDlg +####################################################### +networksetting = "Network" +portnumber = "Port:" +portsetting = "Ports" +minportnumber = "Minimum port : " +maxportnumber = "Maximum port :" +portrangewarning = "Minimum port cannot be greater than maximum port" +randomport = "Randomize Ports" +kickban = "Kick/Ban clients that send you bad data" +security = "Don't allow multiple connections from the same IP" +scrape = "Retrieve scrape data" +internaltrackerurl = "URL of internal tracker" + +scrape_hint_line1 = "Automatically retrieve the total number of seeds/peers" +scrape_hint_line2 = "connected to the tracker" +scrape_hint_line3 = "(rather than just the number of connected seeds/peers)" +scrape_hint_line4 = "" +scrape_hint_line5 = "Note: This can put an additional burden on trackers" +scrape_hint_line6 = " and is therefore disabled by default" + +global_uprate_hint_line1 = "Amount of bandwidth to distribute between" +global_uprate_hint_line2 = "uploading torrents" +global_uprate_hint_line3 = "" +global_uprate_hint_line4 = "Note: Each torrent will always get a minimum" +global_uprate_hint_line5 = " of 3KB/s" + +choose_language = "Language: " +recategorize = "Recategorize all torrents: " +recategorize_button = "Recategorize now" +choosevideoplayer = "Choose video player" +choosevideoanalyser = "Locate FFMPEG" + +queuesetting = "Queue" +maxnumsimul = "Maximum number of active torrents" +trignexttorrent = "Consider torrents active if they are:" +after_downloading = "Downloading" +after_seeding = 
"Downloading or Seeding" +prioritizelocal = "Don't count torrents with local settings towards global limit" +fastresume = "Fast Resume (also enables File Selector)" + +skipcheck = "Skip hashcheck for completed torrents" +skipcheck_hint_line1 = "Don't conduct a hashcheck for torrents" +skipcheck_hint_line2 = "that have already completed." + +fastresume_hint_line1 = "Automatically resume torrents that have already" +fastresume_hint_line2 = "conducted a hashcheck." +fastresume_hint_line3 = "" +fastresume_hint_line4 = "Note: This option is required in order to set" +fastresume_hint_line5 = " priorities for individual files within" +fastresume_hint_line6 = " a multi-file torrent." + + +displaysetting = "Display" +miscsetting = "Misc." +removebackuptorrent = "Remove .torrent backup file when using remove" +confirmonexit = "Confirm on exit program" +triblersetting = "Tribler" +corefuncsetting = "Core functionality" +myinfosetting = "My information" +torrentcollectsetting = "Torrent collecting" +enablerecommender = "Enable Recommender" +enabledlhelp = "Enable Download Booster" +enabledlcollecting = "Enable Torrent Collecting" +myname = "My name (as broadcast to others):" +maxntorrents = "Max number of torrents to collect:" +maxnpeers = "Max number of peers to discover:" +tc_threshold = "Stop collecting more torrents if the disk has less than:" +current_free_space = "current available space:" +torrentcollectingrate = "Maximum rate of torrent collecting (Kbps):" +myicon = "My Tribler icon (as broadcast to others):" +setdefaultfolder = "Set default download folder" +stripedlist = "Striped list" +videosetting = "Video" + +choosedefaultdownloadfolder = "Choose a default folder for download files" +maxsimdownloadwarning_line1 = "The maximum number of simultaneous downloading torrents" +maxsimdownloadwarning_line2 = "must not be greater than the number of reserved ports" + +choosemovedir = "Choose a folder to move completed files to" +movecompleted = "\"Clear Completed\" moves files to:" + +showtray = "Show in tray:" +showtray_never = "Never" +showtray_min = "When Minimized" +showtray_always = "Always" +showtray_only = "Only show in Tray" + +######################################################## +# ABCOptionDlg - Advanced Options +####################################################### + +disksettings = "Disk" +advanced = "Advanced" +advsetting = "Advanced settings" +changeownrisk = "(Under most circumstances, these settings do not need to be changed)" +localip = "Local IP: " +iptobindto = "IP to bind to: " +minnumberofpeer = "Minimum number of peers: " +diskalloctype = "Disk allocation type:" +allocrate = "Allocation rate:" +filelocking = "File locking:" +extradatachecking = "Extra data checking:" +maxfileopen = "Max files open:" +maxpeerconnection = "Max peer connections:" +reverttodefault = "Restore Defaults" +bufferdisk = "Disk Buffering" +buffer_read = "Read Cache" +buffer_write = "Write Cache" +ut_pex_maxaddrs1 = "Maximum number of addresses to accept" +ut_pex_maxaddrs2 = "via peer exchange per client" +flush_data = "Flush data to disk every" + +iphint_line1 = "The IP reported to the tracker." +iphint_line2 = "(unless the tracker is on the same intranet as this client," +iphint_line3 = " the tracker will autodetect the client's IP and ignore this" +iphint_line4 = " value)" + +bindhint_line1 = "The IP the client will bind to." +bindhint_line2 = "Only useful if your machine is directly handling multiple IPs." +bindhint_line3 = "If you don't know what this is, leave it blank." 
+
+minpeershint_line1 = "The minimum number of peers the client tries to stay connected with."
+minpeershint_line2 = ""
+minpeershint_line3 = "Do not set this higher unless you have a very fast connection and a lot of system resources."
+
+ut_pex_maxaddrs_hint_line1 = "When you meet other peers they can give you addresses of the peers they know."
+ut_pex_maxaddrs_hint_line2 = "This value sets the maximum number of gossiped addresses you accept from each peer."
+ut_pex_maxaddrs_hint_line3 = "Don't set this too high as these gossiped addresses are from an untrusted source"
+ut_pex_maxaddrs_hint_line4 = "(i.e. a random peer) and not the trustworthy tracker."
+
+alloctypehint_line1 = "How to allocate disk space:"
+alloctypehint_line2 = ""
+alloctypehint_line3 = "'Normal' allocates space as data is received"
+alloctypehint_line4 = "'background' also adds space in the background"
+alloctypehint_line5 = "'pre-allocate' reserves space up front"
+alloctypehint_line6 = "'sparse' is only for filesystems that support it by default"
+
+allocratehint_line1 = "At what rate to allocate disk space when allocating in the background."
+allocratehint_line2 = ""
+allocratehint_line3 = "Set this too high on a slow filesystem and your download will slow to a crawl."
+
+lockinghint_line1 = "File locking prevents other programs (including other instances"
+lockinghint_line2 = "of BitTorrent) from accessing files you are downloading."
+
+doublecheckhint_line1 = "How much extra checking to do to make sure no data is corrupted."
+doublecheckhint_line2 = "Double-check requires higher CPU usage"
+doublecheckhint_line3 = "Triple-check also increases disk accesses"
+
+maxfileopenhint_line1 = "The maximum number of files to keep open at the same time."
+maxfileopenhint_line2 = "Please note that if this option is in effect,"
+maxfileopenhint_line3 = "files are not guaranteed to be locked."
+
+maxconnectionhint_line1 = "Some operating systems (most notably Win9x/ME), combined"
+maxconnectionhint_line2 = "with certain network drivers, can only handle a limited"
+maxconnectionhint_line3 = "number of open ports."
+maxconnectionhint_line4 = ""
+maxconnectionhint_line5 = "If the client freezes, try setting this to 60 or below."
+
+
+
+recommendinstructions = "Double click on a torrent to start downloading; right click to delete or manually check health of the torrent"
+recommendfilter = "Don't show torrents with recommendation value less than"
+recommendfilterall = "(set to 0.0 to see all known torrents)"
+
+############################################################
+# BTMakeTorrentGUI
+############################################################
+btfilemakertitle = "Create Torrent"
+btmaketorrenttitle = "Make Torrent"
+maketorrentof = "Source :"
+dir = "Dir"
+add = "Add"
+remove = "Remove"
+announce = "Tracker"
+announcelist = "Announce list :"
+copyannouncefromtorrent = "Copy tracker from torrent"
+createdby = "Created By :"
+
+trackerinfo = "Tracker Info"
+miscinfo = "Misc. Info"
+
+selectdir = "Select a directory"
+
+multiannouncehelp_line1="(A list of announces separated by commas or whitespace."
+multiannouncehelp_line2=" Trackers on the same line will be tried randomly."
+multiannouncehelp_line3=" All the trackers on one line will be tried before the trackers on the next.)"
+
+httpseeds = "HTTP Seeds :"
+httpseedshelp = "(A list of HTTP seeds separated by commas or whitespace.)"
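+# Illustration of the announce-list format described by the
+# multiannouncehelp_line1..multiannouncehelp_line3 entries above
+# (hypothetical tracker URLs, shown only as a commented example):
+#   http://tracker-a.example/announce, http://tracker-b.example/announce
+#   http://backup.example/announce
+# tracker-a and tracker-b share the first line and are tried in random order;
+# the tracker on the second line is tried only after those on the first.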
+ +saveasdefaultconfig = "Save as default config" +maketorrent = "Make Torrent" + +choosefiletouse = "Choose file or directory to use" +choosedottorrentfiletouse = "Choose .torrent file to use" +youmustselectfileordir = "You must select a\n file or directory" + +dirnotice_line1 = "Do you want to make a separate .torrent" +dirnotice_line2 = "for every item in this directory?" +yes = "Yes" +yestoall = "Yes to All" +no = "No" +notoall = "No to All" +playtime = "Duration of video ([hh:]mm:ss)" +addthumbnail = "Thumbnail" +useinternaltracker = "Use internal tracker" +manualtrackerconfig = "Use additional trackers (you must add internal tracker URL)" + +########################################################### +# BTcompletedirgui +########################################################### +directorytomake = "Directory to build :" +select = "Select" +piecesize = "Piece size :" +make = "Make" +errormustselectdir = "You must select a directory" +btmakedirtitle = "Make Directory" +checkfilesize = "Checking file sizes" +building = "Building " + +################################################# +# Timeouts +################################################# +timeout = "Timeouts" +schedulerrulemsg = "Set timeout rules for torrents" +setrule_line1 = "Reduce a torrent's priority and force it into queue so" +setrule_line2 = "other torrents in queue won't be blocked when:" +timeout_tracker = "Torrent can't connect for:" +timeout_download = "Torrent can't download for:" +timeout_upload = "Seeding torrent doesn't upload for:" +minute_long = "Minutes" +hour_long = "Hours" +time = "Time" + +################################################################################################ +#(-Right-) Click Menu +################################################################################################ +rHashCheck = "&Hash Check" +rResume = "&Resume Torrent" +rPlay = " &Play Video" +rStop = "&Stop" +rPause = "P&ause" +rQueue = "&Queue" +rRemoveTorrent = "Remove Torrent" +rRemoveTorrentandFile = "Remove Torrent and File(s)" + +rChangeViewModusThumb= "Thumbnail view" +rChangeViewModusList= "List view" + + +############# FILE and LIBRARY +rOptions = "Options:" +rDownloadSecretly = " Download and hide this from other Tribler users" +rDownloadOpenly = " Download" +rModerate = " Change info..." +rModerateCat = " Change category" +rRecommend = " Recommend to a friend..." +rAdvancedInfo = " Advanced info..." + +# Arno: categories must be completely defined by category.conf, +# not in the code + +############# LIBRARY +rLibraryOptions = "Library options:" +rOpenfilename = " Open file" +rOpenfiledestination= " Open destination" +rRemoveFromList = " Remove from library" +rRemoveFromListAndHD= " Remove from library and harddisk" + +############# PERSONS and FRIENDS +rAddAsFriend = " Add as friend" +rRemoveAsFriend = " Remove this friend" +rChangeInfo = " Change friend info" + +############# FRIENDS +rFriendsOptions = "Friends options:" +rSendAMessage = " Send a message..." 
+ +############# SUBSCRIPTIONS +rChangeSubscrTitle = " Change title" +rRemoveSubscr = " Remove subscription" + + +################################################################################################ +# Mouse roll over +################################################################################################ + +############# FILE +rNumberOfSeeders = "Number of current uploaders (seeders) +rNumberOfLeechers = "Number of current downloaders (leechers) + + + +rcopyfilename = "&Copy Filename" +rcopypath = "Copy &Path" + +rcopyfromlist = "&Copy from list..." +rexportfromlist = "&Export torrent" +rextractfromlist = "&Extract from List..." +rclearmessage = "&Clear Message" +rtorrentdetail = "&Torrent Details..." +rcurrentseedpeer = "Current Seed/Peer" +rchangedownloaddest = "Change Download Destination..." +ropenfiledest = "Open &File..." +ropendest = "&Open Destination..." +rsuperseedmode = "Use Super-seed &Mode" +rpriosetting = "&Priority Setting" +rlocaluploadsetting = "&Local Settings..." + +openfiledest = "Open File" +opendest = "Open Destination" + +################################ +# BT status +################################ +completed = "completed" +completedseeding = "completed/sharing" +working = "downloading" +superseeding = "super-seeding" +waiting = "waiting.." +pause = "pause" +queue = "queue" +stopping = "stopping.." +stop = "stopped" +checkingdata = "checking existing data" +allocatingspace = "allocating disk space" +movingdata = "moving data" +connectingtopeers = "connecting" + +############################################## +# Web Interface Service +############################################# +cantopensocket = "Can't open socket" +socketerror = "Socket Error!" +inactive = "Webservice: Inactive" +active = "Webservice: Active" +toolbar_webservice = "Webservice" +webinterfacetitle = "Web Interface Service (version 3.0)" +webip = "IP :" +webport = "Port :" +uniquekey = "Unique Key :" +commandpermission = "Command Permissions" +webquery = "Query" +webdelete = "Delete" +webadd = "Add" +webqueue = "Queue" +webstop = "Stop" +webpause = "Pause/Unpause" +webresume = "Resume" +websetparam = "Set Parameters" +webgetparam = "Get Parameters" +priority = "Priority" +webclearallcompleted = "Clear all completed" +webautostart = "Auto start web service when launching Tribler" +startservice = "Start Service" +stopservice = "Stop Service" +warningportunder1024_line1 = "Ports below 1024 are normally used for system services" +warningportunder1024_line2 = "Do you really want to use this port?" +cantconnectabcwebinterface = "Unable to connect to Tribler web service" + +############################################## +# Scrape Dialog +############################################## +cantreadmetainfo = "Can't read metainfo" +cantgetdatafromtracker = "Can't get data from tracker" +noannouncetrackerinmeta = "No announce tracker in your metainfo" +warningscrapelessthanmin = "Please don't scrape more than once per minute." +trackernoscrape = "Tracker does not support scraping" +seed = "Seed :" +peer = "Peer :" +status = "Status :" +scraping = "Scraping..." 
+scrapingdone = "Scraping done" + +############################################## +# Upload Rate Maximizer +############################################## +autostart_threshold = "Start a new torrent if upload is more than" +autostart_delay = "below the global limit for at least" + +activetorrents = "Active Torrents" +autostart = "Auto Start" + +dynmaxuprate = "Adjust upload rate for network overhead" +dynrate = "(Dynamic Rate = Global Upload Rate - DownCalc - ConnectCalc)" +downcalc_left = "DownCalc = " +downcalc_top = "Download Rate" +downcalc_bottom = " * Download Rate + " +connectcalc_left = "ConnectCalc = " +connectcalc_top = "(Seeds + Peers)" +connectcalc_bottom = " * (Seeds + Peers) + " + +errorlanguagefile_line1 = "Your language file is missing at least one string." +errorlanguagefile_line2 = "Please check to see if an updated version is available." +restartabc = "(takes effect next time Tribler is opened)" + +messagelog = "Message Log" +clearlog = "Clear Log" +date = "Date" + +close_title = "Closing" + +noportavailable = "Couldn't find an available port to listen on" +tryotherport = "Would you like Tribler to try using another port?" + +column4_text = "Title" +column5_text = "Progress" +column6_text = "BT Status" +column7_text = "Priority" +column8_text = "ETA" +column9_text = "Size" +column10_text = "DL Speed" +column11_text = "UL Speed" +column12_text = "%U/D Size" +column13_text = "Message" +column14_text = "Seeds" +column15_text = "Peers" +column16_text = "Copies" +column17_text = "Peer Avg Progress" +column18_text = "DL Size" +column19_text = "UL Size" +column20_text = "Total Speed" +column21_text = "Torrent Name" +column22_text = "Destination" +column23_text = "Seeding Time" +column24_text = "Connections" +column25_text = "Seeding Option" + +savecolumnwidth = "Save column widths when resizing" +showearthpanel = "Show worldmap in detail window (higher CPU load)" + +errorinactivesingle_line1 = "Torrent must be inactive before proceeding" +errorinactivesingle_line2 = "Stop this torrent?" + +errorinactivemultiple_line1 = "Torrents must be inactive before proceeding" +errorinactivemultiple_line2 = "Stop torrents?" + +disabletimeout = "Disable timeouts for this torrent" + +forcenewdir = "Always create new directory for multi-file torrents" + +forcenewdir_hint_line1 = "If this is enabled, a multi-file torrent will always" +forcenewdir_hint_line2 = "be placed within its own directory." +forcenewdir_hint_line3 = "" +forcenewdir_hint_line4 = "If this is disabled, a multi-file torrent will be" +forcenewdir_hint_line5 = "placed in its own directory only if no pieces" +forcenewdir_hint_line6 = "of the file are already present to resume from." + +upnp = "UPnP" +upnp_0 = "Disabled" +upnp_1 = "Mode 1 (indirect via Windows)" +upnp_2 = "Mode 2 (indirect via Windows)" +upnp_3 = "Mode 3 (direct via network)" +tribler_warning = "Tribler Warning" +tribler_information = "Tribler Information" +tribler_startup_nonfatalerror = "A non-fatal error occured during Tribler startup, you may need to change the network Preferences: \n\n" +tribler_upnp_error_intro = "An error occured while trying to open the listen port " +tribler_upnp_error_intro_postfix= " on the firewall." +tribler_upnp_error1 = "request to the firewall failed." +tribler_upnp_error2 = "request to firewall returned: '" +tribler_upnp_error2_postfix = "'. " +tribler_upnp_error3 = "was enabled, but initialization failed." 
+tribler_upnp_error_extro = " This will hurt the performance of Tribler.\n\nTo fix this, configure your firewall/router/modem or try setting a different listen port or UPnP mode in (advanced) network Preferences." +tribler_unreachable_explanation = "Others cannot contact you over the Internet. This will hurt the performance of Tribler.\n\nTo fix this, configure your firewall/router/modem or try different UPnP settings in the advanced network preferences." +currentdiscoveredipaddress = "Your discovered IP address" + +associate = "Associate with .torrent files" +notassociated_line1 = "Tribler is not currently associated with .torrent files" +notassociated_line2 = "Do you wish to use Tribler to open .torrent files?" +errorassociating = "Error associating Tribler with .torrent files" + +savelog = "Save Log" +savelogas = "Save log file as..." +error_savelog = "Error writing log file" + +download_normal = "Download &Normally" +download_never = "Download Ne&ver" +download_later = "Download &Later" +download_first = "Download &First" +download_start = "Start downloading" +click_and_download = "Click and Download" +delete_torrent = "The associated torrent file %s is not found on disk. Do you want to delete this entry from the Tribler database?" +delete_dead_torrent = "Remove Torrent" + +### +# Abbreviations in the status bar: +### + +reachable_tooltip = "Others can reach you, i.e. you are not firewalled. This is good" +restart_tooltip = "Please restart Tribler for your changes to take place" +connecting_tooltip = "Your current firewall status is being checked ..." +unknownreach_tooltip = "Others cannot reach you. This is not good. Click to learn more." +abbrev_loaded = "L:" +abbrev_running = "R:" +abbrev_pause = "P:" +abbrev_downloading = "D:" +abbrev_seeding = "S:" +abbrev_connections = "CX:" +abbrev_down = "D:" +abbrev_up = "U:" +discover_peer = "# Peers:" +discover_file = "# Files:" + + +alloc_normal = "normal" +alloc_background = "background" +alloc_prealloc = "pre-allocate" +alloc_sparse = "sparse" + +lock_never = "no locking" +lock_writing = "lock while writing" +lock_always = "lock always" + +check_none = "no extra checking" +check_double = "double-check" +check_triple = "triple-check" + +nolimit = "no limit" + +automatic = "Automatic" +loopback = "Loop Back" + +move_up = "Move Up" +move_down = "Move Down" + +interfacemode = "Interface mode:" +mode_simple = "Simple" +mode_intermediate = "Intermediate" +mode_expert = "Expert" + +spew0_text = "Optimistic Unchoke" +spew1_text = "IP" +spew2_text = "Local/Remote" +spew3_text = "Up" +spew4_text = "Interested" +spew5_text = "Choking" +spew6_text = "Down" +spew7_text = "Interesting" +spew8_text = "Choked" +spew9_text = "Snubbed" +spew10_text = "Downloaded" +spew11_text = "Uploaded" +spew12_text = "Completed" +spew13_text = "Peer Download Speed" +spew14_text = "PermID" + +spew_direction_local = "L" +spew_direction_remote = "R" + +color_startup = "Not active" +color_disconnected = "Can't contact server" +color_noconnections = "No connections" +color_noincoming = "No incoming connections" +color_nocomplete = "No complete copies" +color_good = "All good" + +color_stripe = "Stripe color" + +torrentcolors = "Torrent Colors" + +more = "More..." 
+ +trackererror_problemconnecting = "Problem connecting to tracker" +trackererror_rejected = "Rejected by tracker" +trackererror_baddata = "Bad data from tracker" + +################### +#Rename Torrent Dlg +################### +rrenametorrent="Rename torrent" +renametorrent="Rename torrent : " +edittorname="Edit torrent name :" +usenamefrom="Use name from" +currenttorname="Current torrent name :" + +originalname="Original name :" +torrentfilename=".torrent file name :" +othername = "Other :" + +destname="Destination name :" + +copybtn="Copy" +rendestwithtor="Also rename destination" +rtwd = "Rename torrent with destination by default" + +### +####################################################### +# Change destination dialog +####################################################### +choosedowndest="Change download destination..." +downdestloc="Set download directory location" +downdirname="Set download directory name" +downfilename="Set download file name" +choosenewdestloc="Choose new download directory location" +choosenewdirname="Choose new download directory name :" +choosenewfilename="Choose new download file name :" +totalsize="total size :" +updatetorname="Rename torrent" +choosenewdest="New download destination :" +browsebtn="Browse" + +rentorwithdest="Also change title in list" + +#errors: +errorinvalidpath="Invalid syntax in the path. \nTry to add a \\" +errorinvalidwinunitname="This name cannot be used as a Windows unit name." + +suggestedname="Suggested corrected name :" +invalidwinname="This name cannot be used as a Windows file or folder name." +iconbadformat="The icon you selected is not in a supported format" + +######### +#Other +######### +warningopenfile = "Torrent is not completed yet, are you sure you want to open it?" +upgradeabc = "Your software is outdated. Would you like to visit http://tribler.org to upgrade?" +upgradeabctitle = "Update to Tribler " +mainpage = "Tribler Main Page" +sharing_reputation_information_title = "Sharing reputation information" +sharing_reputation_information_message = "This progress bar shows your sharing reputation. You will have faster video playback by sharing more. Leaving Tribler running will improve your sharing reputation." +sharing_reputation_poor = "Your current sharing reputation is low! This could affect your download speed. Please leave Tribler running to improve this." + +############# +#Make Torrent +############# +savedtofolderwithsource = "Torrent will be saved to folder containing source" +notadir="The default download directory is a file" +savetor="Torrent location" +savetordefault="Save to default folder :" +savetorsource="Save to folder containing source" +savetorask="Ask where to save to" +choosetordeffolder="Choose a default folder to save torrents" + +torrentfileswildcard = ".torrent files" +allfileswildcard = "All Files" +logfileswildcard = "Log Files" + +listfont = "List font:" +choosefont = "Choose Font..." 
+sampletext = "Sample Text, 0123456789" + +startnow = "Start seeding immediately" +makehash_md5 = "MD5" +makehash_crc32 = "CRC-32" +makehash_sha1 = "SHA-1" +makehash_optional = "Optional hashes:" +createmerkletorrent = "Create Merkle torrent (Tribler-only feature)" +createtorrentsig = "Create signature (only if PermIDs enabled)" + +diskfull = "Error: Not enough space left on the destination disk" +diskfullthreshold = "Stop torrents if destination has less than:" + +changetitle = "Change title to" + +separator = "Separator" +buttons_available = "Available toolbar buttons:" +buttons_current = "Current toolbar buttons:" +buttons_add = "Add" +buttons_remove = "Remove" +buttons_update = "Update" +buttons_edit = "Edit" + +customizecontextmenu = "Customize Context Menu" +menu_available = "Available menu items:" +menu_current = "Current menu items:" + +lowuploadstart1 = "Start next torrent if upload speed stays" +lowuploadstart2 = "below global limit for at least" + + +############# +#Torrent List +############# +torrent0_text = "Torrent Name" +torrent1_text = "Content Name" +torrent2_text = "Recommendation" +torrent3_text = "Sources" +torrent4_text = "Leechers" +torrent5_text = "Seeders" +torrent6_text = "Injected" +torrent7_text = "Size" +torrent8_text = "Files" +torrent9_text = "Tracker" +torrent10_text = "Category" + +############# +#My Preference List +############# +mypref0_text = "Torrent Name" +mypref1_text = "Content Name" +mypref2_text = "Rank" +mypref3_text = "Size" +mypref4_text = "Last Seen" + +############# +#Taste Buddy List +############# +buddy0_text = "Friend" +buddy1_text = "Name" +buddy2_text = "IP" +buddy3_text = "Similarity" +buddy4_text = "Last Seen" +buddy5_text = "Downloads" +buddy6_text = "Connnected" +buddy7_text = "Exchanged" + +############# +#Tribler UI +############# +configcolumns = "Configure Columns" +file_list_title = "Recommended Torrents" +mypref_list_title = "My Download History" +click_download = "Click and Download" +start_downloading = "Start downloading " +add_friend_notes = "Right click on a peer to add as a friend or delete it" +delete = "Delete" +download = "Download" +checkstatus = "Check health" +loading = "Loading ..." +############# +# Tribler activities +############# +act_upnp = "Opening firewall (if any) via UPnP" +act_reachable = "Seeing if not firewalled" +act_get_ext_ip_from_peers = "Asking peers for my IP address" +act_meet = "Person connected: " +act_got_metadata = "File discovered:" +act_recommend = "Discovered more persons and files from" +act_disk_full = "Disk is full to collect more torrents. 
Please change your preferences or free space on " +act_new_version = "New version of Tribler available" + +############# +#Tribler UI - ContentFrontPanel, Tribler 3.6 +############# +item = "item" +person_item = "person" +page = "page" +order_by = "Order by" +swarmsize = "Popular" +swarmsize_tool = "Order content by the number people in the swarm" +recommended = "Recommended" +recommendation = "Recommendation" +recommendation_tool = "Order the content by how it's related to your taste" +myhistory_tool = "Show the files you have recently downloaded" +categories = "Categories" +leecher = "leecher" +leecher_tool = "%d downloaders" +seeder = "seeder" +seeder_tool = "%d uploaders" +swarm_outdated_tool = "The tracker status is unknown" +swarm_unavailable_tool = "The swarm status could not be queried" +no_info = "No info" +refresh = "Refresh info" +refresh_tool = "Refresh the number of seeders and leechers in the swarm" +size = "Size" +size_tool = "Total size of content" +tracker = "Tracker" +created = "Created" +last_checked = "Last checked" +refreshing = "Refreshing" +swarm = "Swarm" +no_information = "No information" +searching_content = "Searching for Tribler content..." +delete_sure = "Are you sure you want to delete %s" +delete_mypref_sure = "Are you sure you want to remove %s from your download history" +recomm_relevance = "How much is it related to your taste" +torrent_files = "Included files(%d)" + +################# +# Tribler Video # +################# +videoplayererrortitle = "Tribler Video Error" +videoplayerstartfailure = "Problem while starting video player:" +videoplayernotfound = "Could not find video player:" +videoplayernotfoundfor = "Could not find video player for file:" +videoanalysernotfound = "Could not find video analyser:" +videoanalyserwhereset = "Set it to FFMPEG in the Preferences / Video menu" +videonotcomplete = "The video cannot yet be played as it has not been completely downloaded:" +notvideotorrent = "Nothing to play, no video files found in torrent" +videoplaycontentnotfound = "Cannot find video file on disk" +selectvideofiletitle = "Select video file" +selectvideofile = "Select which video file to play:\n" +playback_section = "Playback options" +analysis_section = "Video-analysis options" +videoplayer_default_path = "Path to external video player:" +videoanalyserpath = "Path to the FFMPEG video analyser:" +playback_mode = "Which video player to use: " +playback_external_default = "Use external player specified below" +playback_internal = "Use internal player (recommended)" +playback_external_mime = "Use default Windows player" +selectbandwidthtitle = "Enter your Internet speed" +selectdlulbwprompt = "Your download/upload bandwidth is" +selectdlulbwexplan = "For optimal performance, Tribler needs to know your Internet connection speed. Please specify it below. 'xxxx' means any, so if you have 512/256 kbps subscription, select 'xxxx/256 kbps'" +savemedia = "Save content as" +vodwarntitle = "Play As Soon As Possible" +vodwarngeneral = "Be warned that Tribler Video-On-Demand unfortunately only works if you have high upload bandwidth and/or a lot of people are offering the video for download. It also won't work for some file types (e.g. .mov) as they are meant to be played from disk and not incrementally from the network as Tribler VOD does, sorry. But please give it a spin!" 
+livewarntitle = "Play Live Stream" +livewarngeneral = "You are about to play a live video stream that probably needs all your upload bandwidth" +vodwarnbitrateunknown = "" +vodwarnbitrateinsufficient = "" +vodwarnbitrateinsufficientmeasured = "" +vodwarnmov = "" +vodwarnconclusionno = "" +vodwarnbitratesufficient = "" +vodwarnconclusionyes = "" + +vodwarntitle_old = "Experimental Feature Warning" +vodwarngeneral_old = "Tribler Video-On-Demand is a highly experimental feature that allows you to watch videos while they are downloading, given you have sufficient upload bandwidth and/or a lot of people are offering the video for download. " +vodwarnbitrateunknown_old = "The video you have selected has a unknown bitrate. " +vodwarnbitrateinsufficient_old = "The video you have selected has a bitrate of %s KB/s, and your upload bandwidth is just %s. " +vodwarnbitrateinsufficientmeasured_old = "The video you have selected has a bitrate of %s KB/s, and your best measured upload bandwidth is just %s. " +vodwarnmov_old = "The selected video is a .MOV which usually cannot be played on demand. " +vodwarnconclusionno_old = "So it's not clear whether there is enough bandwidth to watch it." +vodwarnbitratesufficient_old = "The video you have selected has a bitrate of %s KB/s, and your upload bandwidth is %s. " +vodwarnconclusionyes_old = "So you should be able to play it, but keep in mind this is highly experimental!" + +vodwhataboutothertorrents = "What to do with other downloads? \n" +vodrestartothertorrents = "Stop all others and resume them afterwards (recommended)" +vodstopothertorrents = "Stop all other downloads" +vodleaveothertorrents = "Leave other downloads running" + +vodwarnprompt = "Continue?" +vodwarnprompt_old = "Would you like to continue?" + + +unlimited = "unlimited" +bitrateprompt = "Bitrate:" +unknown = "unknown" +doesnotapply = "n/a" +videoposition = "Position:" +videoprogress = "Progress:" +playprompt = "Play" +pauseprompt = "Pause" +fullscreen = "Fullscreen" +volumeprompt = "Volume:" +backtocontentview = "Back to Content View" +vodprogress = "Progress:" +launchvideoplayer = "Launch Video Player" +videoserverservefailure = "Error serving video to player, probably the player does not understand the video format or cannot play it from the network." +videoserverservefailureadvice = "Please wait until the download is complete and try again, or select a different player in Preferences/Video." +downloading = "Active" + +############# +#Tribler UI - Profile View, Tribler 4.0.0 +############# +nothingToDo = "You are optimal here!!" +profileDetails_Overall_description = "You are a: -current level- \n- Beginner\n- Experienced\n- Top User\n- Master" +# --- Recommendation quality +profileDetails_Quality_description = "Based on the files you have downloaded over time, Tribler recommends other files that are likely to be interesting to you. \n\nSo far you have%s downloaded %s files." +profileDetails_Quality_descriptionz_onlyword = " only" +profileDetails_Quality_improve = "* Download more files to increase the quality of Tribler recommendations." +# --- Discoverd Files +profileDetails_Files_description = "So far, you have discovered %s files." +profileDetails_Files_improve = "* Stay online longer to discover more files. \n\n* You have set your maximum to %s files. If you have reached this limit please set it higher." +# --- Discoverd Persons +profileDetails_Persons_description = "So far, you have discovered %s people." 
+profileDetails_Persons_improve = "* Stay online longer and you will discover more people." +# --- Optimal Download Speed +profileDetails_Download_info = "You are not using your download speed optimally. To increase, follow the instructions." +profileDetails_Download_UpSpeed = "Your upload speed is set to %d KB/s. Limiting your upload speed also limits your download speed." +profileDetails_Download_UpSpeedMax = "Your upload speed is set to 'unlimited'. That's good." +profileDetails_Download_UpSpeed_improve = "* Increase the upload speed limit in your preferences (for -Play ASAP- mode you need at least 64 KB/s). " +profileDetails_Download_UpSpeedMax_improve = "* For an improved performance, you can also increase the number of upload slots in Preferences. " +# profileDetails_Download_UpSlots = "You set up a number of %d slots for upload." +# profileDetails_Download_UpSlotsMax = "You set up an unlimited number of slots for upload. That's good." +# profileDetails_Download_DlSpeed = "Your download speed is set to %d KB/s." +# profileDetails_Download_DlSpeedMax = "Your download speed is set to unlimited. That's good." +profileDetails_Download_Friends = "At the moment you have %d friends. If you make more friends you can help in boosting each others download speeds." +profileDetails_Download_Friends_improve = "* Invite your friends, family, and colleagues by e-mail, to start tribler too and let them add you as a friend." +profileDetails_Download_VisibleYes = "You are currently accessible by other people." +profileDetails_Download_VisibleYes_improve = "* Your friends should also be accessible. For that, please guide them to www.tribler.org for instructions." +#profileDetails_Download_VisibleNo = "Other users are not able to connect to you, because your modem/router blocks them." +profileDetails_Download_VisibleNo = "Other users are not able to connect to you, because your modem/router (%s) blocks them." +profileDetails_Download_VisibleNo_improve = "* You have to open a port on your modem/router to enable other users to connect to you. This will almost double your possible download speed. Read more on www.tribler.org for instructions." +# --- Network Reach +profileDetails_Presence_info = "If you want to increase your network reach, follow the instructions." +#profileDetails_Presence_Friends = profileDetails_Download_Friends +#profileDetails_Presence_Friends_improve = profileDetails_Download_Friends_improve +profileDetails_Presence_Sharingratio = "Your overall sharing ratio is %d. This means that you download more from others than you upload to them." +profileDetails_Presence_Sharingratio_improve = "* To reach a fair sharing ratio, you should share your files longer. " +profileDetails_Presence_VersionNewer = "You are using a newer version of Tribler (%s) than on website (%s)." +profileDetails_Presence_VersionNewer_improve = "* Check the website for news and updates at %s" +profileDetails_Presence_VersionOlder = "You are using an old version of Tribler (%s) and not taking advantage of the new features available. " +profileDetails_Presence_VersionOlder_improve = "* Update to the newest version %s at %s" +profileDetails_Presence_VersionCurrent = "You are up to date! The current version client is %s." +profileDetails_Presence_VersionCurrent_improve = "* Check the website for news and updates at %s" +profileDetails_Presence_VersionUnknown = "unknown" +profileDetails_Presence_VersionError = "Your current client version is %s." 
+profileDetails_Presence_VersionError_improve = "* Check the website for news and updates at %s" + +############### +# Tribler UI - persons.py, Tribler 3.7 +############## +peer_status_tooltip = "Status of person based on last time seen" +peer_friend_tooltip = "This person is a friend of yours. Click to remove friendship." +peer_nofriend_tooltip = "Click to make this person your friend." +peer_connected_times_tooltip = "Successful connections made to this person." +peer_buddycast_times_tooltip = "Specific Tribler messages exchanged with this person." +peer_similarity_tooltip = "Similarity between you and this person based on the download history." +commonFiles = " Common files (%d)" +alsoDownloaded = "Also downloaded (%d/%s)" +peer_common_files_tooltip = "Files that you and this person have in common." +peer_other_files_tooltip = "Other files that this person has downloaded." + +################# +# Notification # +################# +notification_download_complete = "Download Complete" +notification_finished_seeding = "Finished Seeding" + +############# +#Tribler UI - Persons View, Tribler 4.0.0 +############# +persons_view_no_data = "No people encountered yet" + +torrentcollectsleep = "Seconds between downloading torrents from RSS:" +buddycastsubscription = "Discover content via other Tribler users" +web2subscription = "Discover content from YouTube and LiveLeak" +filesdefaultsearchweb2txt = "search files, YouTube and LiveLeak" +filesdefaultsearchtxt = "search all files" +rssurldefaulttxt = "Paste your RSS link here" + +vlc_linux_start_bug_title = "No flash video streaming on Ubuntu Linux with VLC" +vlc_linux_start_bug = "The current Ubuntu version of the VLC video player cannot stream Youtube.com movies. So be warned, they will not start playing until they have been completely downloaded. We have submitted a patch to Ubuntu." +going_search = " Results: %d" +#going_search = "Searching for '%s'... (%d results)" +finished_search = "Finished search '%s'. 
(%d results)" +search_web2 = "Web movies (%d results)" +search_torrent = "Discovered files (%d results)" +search_peers = "Discovered persons (%d results)" +search_friends = "Friends (%d results)" +search_library = "Library files (%d results)" +search_remote = "Tribler network (%d results)" +# search buttons +searchStop = "stop searching" +searchStopEnabled = "stopped searching" +searchClear = "clear results and browse all discovered files" +help = "Current sharing reputation: %2.2f" + +################ +#Tribler UI - Column headers Tribler 4.1.0 +################# +# FILES +C_filename = "Name of the file" +C_filesize = "Total size" +C_popularity = "Popularity of the file" +C_creationdate = "Creation date" +C_uploaders = "Number of uploaders (seeders)" +C_downloaders = "Number of downloaders (leechers)" +C_recommfiles = "Fit to your taste (top20 of discovered files)" +C_source = "Source of file" +# PERSONS +C_personname = "Name of the person" +C_status = "Last time you connected with this person" +C_discfiles = "Number of files discovered by this person" +C_discpersons = "Number of persons discovered by this person" +C_recommpersons = "Fit to your taste (top20 of discovered persons)" +C_friends = "Friends of yours" +# LIBRARY +C_progress = "Progress of downloads" +C_downspeed = "Download speed" +C_upspeed = "Upload speed" +C_downupspeed = "Current download and upload speed" +C_message = "Status of downloads (no sorting)" +C_info = "Other info (no sorting)" +# FRIENDS +C_friendname = "Name of your friends" +C_friendstatus = "Last time you connected with your friends" +C_helping = "Whether friend is boosting your downloads (no sorting)" +C_remove = "Remove file from Library and Disk" + +# TopNList discovered peers in profile view - Tribler 4.1.0 + +totalUp = "Up: %s" +totalDown = "Down: %s" + +# Core download status +DLSTATUS_ALLOCATING_DISKSPACE = "initializing" +DLSTATUS_WAITING4HASHCHECK = "initializing" +DLSTATUS_HASHCHECKING = "checking old data" +DLSTATUS_DOWNLOADING = "downloading" +DLSTATUS_SEEDING = "completed/sharing" +DLSTATUS_STOPPED = "stopped" +DLSTATUS_STOPPED_ON_ERROR = "stopped/error" + +duplicate_download_msg = "You are already downloading this torrent; see your library." +duplicate_download_title = "Duplicate download" + +invalid_torrent_no_playable_files_msg = "You are attempting to play files from a torrent that does not contain any playable files." +invalid_torrent_no_playable_files_title = "Invalid torrent" + +# +# Friendship +# +question = 'Question' +addfriendfillin = "Do you want to add\n%s\nas your friend?" + +################ +#Tribler UI - Upload tab +################# +peer_ip = "Peer IP" +tribler_name = "Tribler name" +curr_ul_rate = "Current upload rate" +ul_amount = "Amount of MBytes uploaded" + diff --git a/tribler-mod/Tribler/Lang/lang.py b/tribler-mod/Tribler/Lang/lang.py new file mode 100644 index 0000000..934cd57 --- /dev/null +++ b/tribler-mod/Tribler/Lang/lang.py @@ -0,0 +1,210 @@ +from time import localtime, strftime +# Written by ABC authors and Arno Bakker +# see LICENSE.txt for license information +import wx +import sys +import os + +from traceback import print_exc, print_stack +from cStringIO import StringIO + +from Tribler.__init__ import LIBRARYNAME +from Tribler.Utilities.configreader import ConfigReader +from Tribler.Core.BitTornado.__init__ import version_id + +################################################################ +# +# Class: Lang +# +# Keep track of language strings. +# +# Lookups occur in the following order: +# 1. 
See if the string is in user.lang +# 2. See if the string is in the local language file +# 3. See if the string is in english.lang +# +################################################################ +class Lang: + def __init__(self, utility): + self.utility = utility + + filename = self.utility.config.Read('language_file') + + + langpath = os.path.join(self.utility.getPath(), LIBRARYNAME, "Lang") + + sys.stdout.write("Setting up languages\n") + sys.stdout.write("Language file: " + str(filename) + "\n") + + # Set up user language file (stored in user's config directory) + self.user_lang = None + user_filepath = os.path.join(self.utility.getConfigPath(), 'user.lang') + self.user_lang = ConfigReader(user_filepath, "ABC/language") + + # Set up local language file + self.local_lang_filename = None + self.local_lang = None + local_filepath = os.path.join(langpath, filename) + + if filename != 'english.lang' and existsAndIsReadable(local_filepath): + self.local_lang_filename = filename + # Modified + self.local_lang = wx.FileConfig(localFilename = local_filepath) + self.local_lang.SetPath("ABC/language") + #self.local_lang = ConfigReader(local_filepath, "ABC/language") + + # Set up english language file + self.english_lang = None + english_filepath = os.path.join(langpath, 'english.lang') + if existsAndIsReadable(english_filepath): + self.english_lang = ConfigReader(english_filepath, "ABC/language") + + self.cache = {} + + self.langwarning = False + + def flush(self): + if self.user_lang is not None: + try: + self.user_lang.DeleteEntry("dummyparam", False) + except: + pass + self.user_lang.Flush() + self.cache = {} + + # Retrieve a text string + def get(self, label, tryuser = True, trylocal = True, tryenglish = True, giveerror = True): + if tryuser and trylocal and tryenglish: + tryall = True + else: + tryall = False + + if tryall and label in self.cache: + return self.expandEnter(self.cache[label]) + + if (label == 'version'): + return version_id + if (label == 'build'): + return "Build 12034" + if (label == 'build_date'): + return "Jul 01, 2009" + # see if it exists in 'user.lang' + if tryuser: + text, found = self.getFromLanguage(label, self.user_lang) + if found: + if tryall: + self.cache[label] = text + return self.expandEnter(text) + + # see if it exists in local language + if trylocal and self.local_lang is not None: + text, found = self.getFromLanguage(label, self.local_lang, giveerror = True) + if found: + if tryall: + self.cache[label] = text + return self.expandEnter(text) + + # see if it exists in 'english.lang' + if tryenglish: + text, found = self.getFromLanguage(label, self.english_lang) + if found: + if tryall: + self.cache[label] = text + return self.expandEnter(text) + + # if we get to this point, we weren't able to read anything + if giveerror: + sys.stdout.write("Language file: Got an error finding: "+label) + self.error(label) + return "" + + def expandEnter(self, text): + text = text.replace("\\r","\n") + text = text.replace("\\n","\n") + return text + + def getFromLanguage(self, label, langfile, giveerror = False): + try: + if langfile is not None: + if langfile.Exists(label): + return self.getSingleline(label, langfile), True + if langfile.Exists(label + "_line1"): + return self.getMultiline(label, langfile), True + + if giveerror: + self.error(label, silent = True) + except: + fileused = "" + langfilenames = { "user.lang": self.user_lang, + self.local_lang_filename: self.local_lang, + "english.lang": self.english_lang } + for name in langfilenames: + if 
langfilenames[name] == langfile: + fileused = name + break + sys.stderr.write("Error reading language file: (" + fileused + "), label: (" + label + ")\n") + data = StringIO() + print_exc(file = data) + sys.stderr.write(data.getvalue()) + + return "", False + + def getSingleline(self, label, langfile): + return langfile.Read(label) + + def getMultiline(self, label, langfile): + i = 1 + text = "" + while (langfile.Exists(label + "_line" + str(i))): + if (i != 1): + text+= "\n" + text += langfile.Read(label + "_line" + str(i)) + i += 1 + if not text: + sys.stdout.write("Language file: Got an error reading multiline string\n") + self.error(label) + return text + + def writeUser(self, label, text): + change = False + + text_user = self.get(label, trylocal = False, tryenglish = False, giveerror = False) + text_nonuser = self.get(label, tryuser = False, giveerror = False) + + user_lang = self.user_lang + + # The text string is the default string + if text == text_nonuser: + # If there was already a user string, delete it + # (otherwise, do nothing) + if text_user != "": + user_lang.Write("exampleparam", "example value") + user_lang.DeleteEntry(label) + change = True + elif text != text_user: + # Only need to update if the text string differs + # from what was already stored + user_lang.Write(label, text) + change = True + + return change + + def error(self, label, silent = False): + # Display a warning once that the language file doesn't contain all the values + if (not self.langwarning): + self.langwarning = True + error_title = self.get('error') + error_text = self.get('errorlanguagefile') + if (error_text == ""): + error_text = "Your language file is missing at least one string.\nPlease check to see if an updated version is available." + # Check to see if the frame has been created yet + if not silent and hasattr(self.utility, 'frame'): + # For the moment don't do anything if we can't display the error dialog + dlg = wx.MessageDialog(None, error_text, error_title, wx.ICON_ERROR) + dlg.ShowModal() + dlg.Destroy() + sys.stderr.write("\nError reading language file!\n") + sys.stderr.write(" Cannot find value for variable: " + label + "\n") + +def existsAndIsReadable(filename): + return os.access(filename, os.F_OK) and os.access(filename, os.R_OK) diff --git a/tribler-mod/Tribler/Lang/lang.py.bak b/tribler-mod/Tribler/Lang/lang.py.bak new file mode 100644 index 0000000..4e6dcc8 --- /dev/null +++ b/tribler-mod/Tribler/Lang/lang.py.bak @@ -0,0 +1,209 @@ +# Written by ABC authors and Arno Bakker +# see LICENSE.txt for license information +import wx +import sys +import os + +from traceback import print_exc, print_stack +from cStringIO import StringIO + +from Tribler.__init__ import LIBRARYNAME +from Tribler.Utilities.configreader import ConfigReader +from Tribler.Core.BitTornado.__init__ import version_id + +################################################################ +# +# Class: Lang +# +# Keep track of language strings. +# +# Lookups occur in the following order: +# 1. See if the string is in user.lang +# 2. See if the string is in the local language file +# 3. 
See if the string is in english.lang +# +################################################################ +class Lang: + def __init__(self, utility): + self.utility = utility + + filename = self.utility.config.Read('language_file') + + + langpath = os.path.join(self.utility.getPath(), LIBRARYNAME, "Lang") + + sys.stdout.write("Setting up languages\n") + sys.stdout.write("Language file: " + str(filename) + "\n") + + # Set up user language file (stored in user's config directory) + self.user_lang = None + user_filepath = os.path.join(self.utility.getConfigPath(), 'user.lang') + self.user_lang = ConfigReader(user_filepath, "ABC/language") + + # Set up local language file + self.local_lang_filename = None + self.local_lang = None + local_filepath = os.path.join(langpath, filename) + + if filename != 'english.lang' and existsAndIsReadable(local_filepath): + self.local_lang_filename = filename + # Modified + self.local_lang = wx.FileConfig(localFilename = local_filepath) + self.local_lang.SetPath("ABC/language") + #self.local_lang = ConfigReader(local_filepath, "ABC/language") + + # Set up english language file + self.english_lang = None + english_filepath = os.path.join(langpath, 'english.lang') + if existsAndIsReadable(english_filepath): + self.english_lang = ConfigReader(english_filepath, "ABC/language") + + self.cache = {} + + self.langwarning = False + + def flush(self): + if self.user_lang is not None: + try: + self.user_lang.DeleteEntry("dummyparam", False) + except: + pass + self.user_lang.Flush() + self.cache = {} + + # Retrieve a text string + def get(self, label, tryuser = True, trylocal = True, tryenglish = True, giveerror = True): + if tryuser and trylocal and tryenglish: + tryall = True + else: + tryall = False + + if tryall and label in self.cache: + return self.expandEnter(self.cache[label]) + + if (label == 'version'): + return version_id + if (label == 'build'): + return "Build 12034" + if (label == 'build_date'): + return "Jul 01, 2009" + # see if it exists in 'user.lang' + if tryuser: + text, found = self.getFromLanguage(label, self.user_lang) + if found: + if tryall: + self.cache[label] = text + return self.expandEnter(text) + + # see if it exists in local language + if trylocal and self.local_lang is not None: + text, found = self.getFromLanguage(label, self.local_lang, giveerror = True) + if found: + if tryall: + self.cache[label] = text + return self.expandEnter(text) + + # see if it exists in 'english.lang' + if tryenglish: + text, found = self.getFromLanguage(label, self.english_lang) + if found: + if tryall: + self.cache[label] = text + return self.expandEnter(text) + + # if we get to this point, we weren't able to read anything + if giveerror: + sys.stdout.write("Language file: Got an error finding: "+label) + self.error(label) + return "" + + def expandEnter(self, text): + text = text.replace("\\r","\n") + text = text.replace("\\n","\n") + return text + + def getFromLanguage(self, label, langfile, giveerror = False): + try: + if langfile is not None: + if langfile.Exists(label): + return self.getSingleline(label, langfile), True + if langfile.Exists(label + "_line1"): + return self.getMultiline(label, langfile), True + + if giveerror: + self.error(label, silent = True) + except: + fileused = "" + langfilenames = { "user.lang": self.user_lang, + self.local_lang_filename: self.local_lang, + "english.lang": self.english_lang } + for name in langfilenames: + if langfilenames[name] == langfile: + fileused = name + break + sys.stderr.write("Error reading language file: (" 
+ fileused + "), label: (" + label + ")\n") + data = StringIO() + print_exc(file = data) + sys.stderr.write(data.getvalue()) + + return "", False + + def getSingleline(self, label, langfile): + return langfile.Read(label) + + def getMultiline(self, label, langfile): + i = 1 + text = "" + while (langfile.Exists(label + "_line" + str(i))): + if (i != 1): + text+= "\n" + text += langfile.Read(label + "_line" + str(i)) + i += 1 + if not text: + sys.stdout.write("Language file: Got an error reading multiline string\n") + self.error(label) + return text + + def writeUser(self, label, text): + change = False + + text_user = self.get(label, trylocal = False, tryenglish = False, giveerror = False) + text_nonuser = self.get(label, tryuser = False, giveerror = False) + + user_lang = self.user_lang + + # The text string is the default string + if text == text_nonuser: + # If there was already a user string, delete it + # (otherwise, do nothing) + if text_user != "": + user_lang.Write("exampleparam", "example value") + user_lang.DeleteEntry(label) + change = True + elif text != text_user: + # Only need to update if the text string differs + # from what was already stored + user_lang.Write(label, text) + change = True + + return change + + def error(self, label, silent = False): + # Display a warning once that the language file doesn't contain all the values + if (not self.langwarning): + self.langwarning = True + error_title = self.get('error') + error_text = self.get('errorlanguagefile') + if (error_text == ""): + error_text = "Your language file is missing at least one string.\nPlease check to see if an updated version is available." + # Check to see if the frame has been created yet + if not silent and hasattr(self.utility, 'frame'): + # For the moment don't do anything if we can't display the error dialog + dlg = wx.MessageDialog(None, error_text, error_title, wx.ICON_ERROR) + dlg.ShowModal() + dlg.Destroy() + sys.stderr.write("\nError reading language file!\n") + sys.stderr.write(" Cannot find value for variable: " + label + "\n") + +def existsAndIsReadable(filename): + return os.access(filename, os.F_OK) and os.access(filename, os.R_OK) diff --git a/tribler-mod/Tribler/Main/Build/Mac/Info.plist b/tribler-mod/Tribler/Main/Build/Mac/Info.plist new file mode 100644 index 0000000..2830b10 --- /dev/null +++ b/tribler-mod/Tribler/Main/Build/Mac/Info.plist @@ -0,0 +1,57 @@ + + + + + CFBundleDevelopmentRegion + English + CFBundleDocumentTypes + + + CFBundleTypeExtensions + + torrent + + CFBundleTypeIconFile + TriblerDoc + CFBundleTypeMIMETypes + + application/x-bittorrent + + CFBundleTypeName + BitTorrent Meta-Info + CFBundleTypeOSTypes + + BTMF + + CFBundleTypeRole + Viewer + NSDocumentClass + DownloadDocument + + + CFBundleTypeOSTypes + + **** + fold + disk + + CFBundleTypeRole + Viewer + + + CFBundleExecutable + Tribler + CFBundleIconFile + tribler.icns + CFBundleIdentifier + Tribler + CFBundleInfoDictionaryVersion + 6.0 + CFBundleName + Tribler + CFBundlePackageType + APPL + CFBundleSignature + ???? + + diff --git a/tribler-mod/Tribler/Main/Build/Mac/Makefile b/tribler-mod/Tribler/Main/Build/Mac/Makefile new file mode 100644 index 0000000..6d944b6 --- /dev/null +++ b/tribler-mod/Tribler/Main/Build/Mac/Makefile @@ -0,0 +1,115 @@ +# Building on Mac OS/X requires: +# * Python 2.5 +# * wxPython 2.8-unicode +# * py2app 0.3.6 +# * swig, subversion (available through MacPorts) +# * XCode 2.4+ +# +# Use lower versions at your own risk. 
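For context on the py2app requirement listed above: the build rule further down pipes setuptriblermac.py into Python to produce the Tribler.app bundle, and the rest of this Makefile only wraps the resulting dist/ output into a DMG. The real setuptriblermac.py is not part of this excerpt, so the following is only a rough sketch, under the assumption that it is a fairly standard py2app setup script; the entry-point path and option values are illustrative, not the shipped configuration.

    # Hypothetical minimal py2app setup script (a stand-in for setuptriblermac.py).
    # Paths and options below are assumptions for illustration only.
    from setuptools import setup

    setup(
        app=['Tribler/Main/tribler.py'],   # assumed GUI entry point
        setup_requires=['py2app'],
        options={'py2app': {
            'iconfile': 'Tribler/Main/Build/Mac/tribler.icns',  # matches CFBundleIconFile in the Info.plist above
            'argv_emulation': True,        # lets Finder hand .torrent files to the app via argv
        }},
    )

Invoked roughly the way the Makefile does it (python -OO - < setuptriblermac.py py2app), this leaves the bundle under dist/, which the rule then moves into the image staging directory before the hdiutil steps build the DMG.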
+ +APPNAME=Tribler +PYTHON_VER= +PWD:=${shell pwd} +ARCH:=${shell arch} + +# how to get to the directory containing Tribler/ (and macbinaries/) +SRCDIR:=../../../.. + +PYTHON=python${PYTHON_VER} + +all: clean ${APPNAME}-${ARCH}.dmg + +clean: + rm -rf build/imagecontents/ ${APPNAME}-${ARCH}.dmg + +.PHONY: all clean dirs + +APPRES=build/imagecontents/${APPNAME}.app/Contents/Resources + +build/imagecontents/: + rm -rf $@ + mkdir -p $@ + + cd ${SRCDIR} && DYLD_LIBRARY_PATH=macbinaries PYTHONPATH=macbinaries ${PYTHON} -OO - < ${PWD}/setuptriblermac.py py2app + mv ${SRCDIR}/dist/* $@ + + # Thin everything for this architecture. Some things ship Universal (Python, wxPython, ...) and + # others get a stub for the other architecture (things built by Universal Python) + for i in `find build/imagecontents`; do ./smart_lipo_thin $$i; done + + # Replace any rogue references to local ones. For instance, some libraries are accidently + # linked against /usr/local/lib/* or /opt/local/lib. Py2app puts them in the Frameworks dir, + # but fails to correct the references in the binaries. + #./process_libs build/imagecontents | bash - + + # Background + mkdir -p $@/.background + cp background.png $@/.background + + # Volume Icon + cp VolumeIcon.icns $@/.VolumeIcon.icns + + # Shortcut to /Applications + ln -s /Applications $@/Applications + + touch $@ + +${APPNAME}-${ARCH}.dmg: build/imagecontents/ SLAResources.rsrc + rm -f $@ + mkdir -p build/temp + + # create image + hdiutil create -srcfolder $< -format UDRW -scrub -volname ${APPNAME} $@ + + # open it + hdiutil attach -readwrite -noverify -noautoopen $@ -mountpoint build/temp/mnt + + # make sure root folder is opened when image is + bless --folder build/temp/mnt --openfolder build/temp/mnt + # hack: wait for completion + sleep 1 + + # position items + # oddly enough, 'set f .. 
as alias' can fail, but a reboot fixes that + osascript -e "tell application \"Finder\"" \ + -e " set f to POSIX file (\"${PWD}/build/temp/mnt\" as string) as alias" \ + -e " tell folder f" \ + -e " open" \ + -e " tell container window" \ + -e " set toolbar visible to false" \ + -e " set statusbar visible to false" \ + -e " set current view to icon view" \ + -e " delay 1 -- Sync" \ + -e " set the bounds to {50, 100, 1000, 1000} -- Big size so the finder won't do silly things" \ + -e " end tell" \ + -e " delay 1 -- Sync" \ + -e " set icon size of the icon view options of container window to 128" \ + -e " set arrangement of the icon view options of container window to not arranged" \ + -e " set background picture of the icon view options of container window to file \".background:background.png\"" \ + -e " set position of item \"${APPNAME}.app\" to {150, 140}" \ + -e " set position of item \"Applications\" to {410, 140}" \ + -e " set the bounds of the container window to {50, 100, 600, 400}" \ + -e " update without registering applications" \ + -e " delay 5 -- Sync" \ + -e " close" \ + -e " end tell" \ + -e " -- Sync" \ + -e " delay 5" \ + -e "end tell" || true + + # turn on custom volume icon + /Developer/Tools/SetFile -a C build/temp/mnt || true + + # close + hdiutil detach build/temp/mnt || true + + # make read-only + mv $@ build/temp/rw.dmg + hdiutil convert build/temp/rw.dmg -format UDZO -imagekey zlib-level=9 -o $@ + rm -f build/temp/rw.dmg + + # add EULA + hdiutil unflatten $@ + /Developer/Tools/DeRez -useDF SLAResources.rsrc > build/temp/sla.r + /Developer/Tools/Rez -a build/temp/sla.r -o $@ + hdiutil flatten $@ diff --git a/tribler-mod/Tribler/Main/Build/Mac/SLAResources.rsrc b/tribler-mod/Tribler/Main/Build/Mac/SLAResources.rsrc new file mode 100644 index 0000000000000000000000000000000000000000..451f5c5dc8e991cc816a3719825a98e2f31589e4 GIT binary patch literal 106972 zcmeFaYiy)hdfzunc4v29IYANw`H&C63j)Eg*KBsP=dwF9x>b|xZqBkvidCH1mMlm` zvPjl+v5Klob2tv-?Cb|S*3E(tCC~>OqqR1QW63M;C|X<6?kZBeT3d#MRfHgk0SAT? zWi8l|9mEivz{v0aJm;MEyj8`%%&aU&7O^``R=w}JJonpq&Qni)!&6Uv!(aRdPkqB* z_y_#*dr$rA_wyhB@wLuTd(s=1o9+JbqR{3zZ?A5(KJXEKHk+;0 zZya5W${vq}NB+a!;Go?rhn*20l#7F-qfuvE9t{Vl!c*Se#&e)-_eu-hMhsM&td zSzca#YvZWA7)(n0syy$GdFiCXi^J1VIXL2vF6-?cbo!&tQhC5{_sazvaG@Lz%J#uQ zXEd@`MuVgAd3)GdF3T|H;h^jf#y<2?dBz?`@&b$g51xAZLu;MMcyw^`jg9+n zbC1!WrwhW224BllFe6zYI1%4Rit~ABw|2^+Y%fc&Km~*7shJxS(bJQ!R6J*8b5! z??a6PaM&IeTOtkn^}`#dXT$DkcUat>w1dchGG^L6hF71NHg93*}jR*bdNb56e!ktVnRZZ+}nDCX6xASl>J8 zK#dym2jAPY)s{rMPyL@yJ^kTZ!*>4%+TGEI8?eC{JoBO5&e3Gl8HP=FhX3|@f7m(h zj>f}|HvTu*_=CY@WPHO5ZzO@46H>5JXPbO+9KEdH%>1TC?7pJ}0N zA3^ zNV(hY55V+6XFMK!Xt#URos0)hH^=SMv-UUc`fl57_D?s3T|L#++CcDO_pR=r--Shl zj)dH|!0l1HYtn^X`X(704707UK_pAr%~9OwjiD<*)vdoA7Gx{_8Y_Oaeb(-GMxEkE z{_3Co&9{H{?Z5l>&;7CV>tBESpM3bw{$D@v|GxcmZ-4DW|M~y&!~ZSoe)mWJ)TjRB zr^cT;{?z)XZhxxrgUA2H+4ueE5C7ixfA^36led5Ehkp0_f9UP6*!!P*`yamjwIBM2 zKk#SX{>s~b=aY^6owtAYQ>&l)kxzZ%Q^%kD&QE=x27mh(-~M}Vf8~>tH1p4W^1<7` z`=9+q7W6A`|J2)Gc>8m2|M(|we)8MW$iMpbk5^-7@3G=}Z$gn@dFtt6t8>^HqFxS1 z|HiBBe{(BJ`)6);&fCU%e`y0OAf=G__wNrx4ae>FxI9JaKz|STL=Q>=K4?o`z7+@& z!Ms%7N1O5H;H-T1)%LR`^6I`MF}jL(&xV8JVf*y7J)S_Q5OAVQq*RJ}4*S4>u(MK-iCL zpO$Nr^4OmfCz!r^(Czi^4^StX;NU*2_5I~S9BiRH`mtfz9*?VKiuY^_{BtC1$CE!lS}Jc2+oNvpnbkpGOxPYigC_Lfl8Ah! 
z+3r0sG<`q31A_z!$@3DBr={d0!os9uVxJ$7Xq0k=w-AqMf@uLc$mAo#_rK4iVKzl= zn6x_E%iB*q{qHrqr)NF5y>nQuo^-`hANV_e<*R@H>A&{#`sV}h{?p%l=jnI<_jiB% zou6v`>Tmz`|L0f!xjz1xFZ{+o`r=o9VvRSHAo^pMCe8&+5B( ze&$Q>e)g~ZkDq<_Z~y8)`h|b-&Zqq?A1*%Uf4%cF?|d4f_|s24{l{Cw_F;F-RCMlK-ud{>XU_ie?Vou|AOFP9f9_BIME94!`uSh` z%YWl%e(&A?`JKiu{>Fdv&))g3^xZo@`4e|ObN>0i{ki^s|Fw5k^^Ok}p9z2cxY6I)uxDyoM_GJL4`|9vwCW zPsl{0()JM&wbNfJNB7wgZ#Y+pSih<%ezQCph)^p!?Ib)xpdW{K>{(k$dbr*1PkKFU zsp>V&ZDz$^cM;HBKyEu3tl%11947|Z;fN9h|<&AdvTjBBFS}tFOPGR;iGONzs8xP)qZ0$|n zspkJJG5_zuIZ?Nk2kkx>mr)o%`G;G{eg?t!aimte|AC+V;+MYsjQ&%8;a9)>KRo^A zU;6xK|K?AB=u7|nOaB`b_MiOxZ-4Q3%Fq5EU-(~t;iq`wb3gyZ-}&M@rJnxsuYdk) zOXY%oT`1rEyTAPRKL45W-JklozgqsnSHJvAfAgom_%FWjkNM;mKKCo{eEHXZ@o)X& zFMsh%%i+CW`e)@A{?@zy-Mjx&8o*|*M)|Kl|LO1sSo(G7XlpPGtnkbRx}X$0BmtfA z!>5fO)_4Fzjb|YLedJr`^i=#{?10YUUnjEmgTNHd48xK)CB4NE3s>*=+h=D`pBUr& zD*{g{2oWmT9jBC%iu1a%xz8%Xh-uY`xo`O4e}Au!L5Aal4>UZ7-sQk_4wp(uyFBX3 zSd$$x?DRVA5pr;XuYj(@VY<7~Dx9IwlbttZq#>h5vUfi()sMtaDcTLQf2p3k^*}o{!xo9Vr zc!X~!kL}>3Jv?R+^3M7fGNDF;elghBGBkgiHJx1~-!9tBJd9;H8VxYD8LvDX986?g z`9f{)#iDk)5S^QaD>lht2f5`Nsr()}l7q=uel1qofi}BTbo&Rr$)VN|zwY4}gc&rn z0RR;@C^4e(Hd*IVc{(`k9_inXwpE->_T}0xVMJ=^{mGaoNBS`MsO;k!gfZ%XGDhfv zGxK9BuWYH%UYP~h$W3$**}Uf`Xjv^KZc>mwnhg6)2~gxpNR2KP%=kvXDPWqSgl4MFiR1M$hA2VahgeaVwHu>_xs zZ5{O5-BV}>WpymV7&@!lV_|*gCmlwX-@!tPw$ZNf6`VZePZ9kPr59e6lt4ZVyF?6* zEpUTc127UoXU`Cg6|WVLdD%So0ljg{!^}k*&$CXuD-qjMuU$JoKVLpL?=JUySK=&Nh9dKG3f9nuA21FOULfU&KNVoJI2^IH9*b+-TqP{! ziy1N_u1V>8cy?3}p6DNR&)_5{CP{lquj7*v;o#&Bt(=N2B+gCf7{ghZ&UD2H;Zp%a`!-S3C!E~V52c6#FOpJSg&ZNJ=R_ z+qZul#@KA{%dKgjDstE_w-3gHefTd(vr+rBQ(OQH_%2PGUP&{RYu#g12|j`7(YCYL zADoxpF*xg>ToagQDC;dA(E6_U?9#mXOi6qew}|B)I;s|Tnzv>KIy0VF9t@AKosEvJ zT`}&^j?I3MK)Z_gMfkz#$L)UiEgviL&*-dkfO92+y%4Mh=aE>hf-$ie1U5#gAxkFg zb7#oHhvN&Dtv%LA33`Ji&agiQlAx&0Q-^3_AZ*@(S{vw-fWtr$Zrg0jvT6<{Lv+;Q z6c;iNL+UGSQ|Pn-^H+$3LvBKV0D1=<%@S-=(&QtBQG?cCqf220MOwiLu_@SZG(i2a zj@vt%SHaC0phAkjVibTTL7?wLaObc+@rEIB+>tE=!>_C8S{#mTmi+9xxu#$3XRU*? 
z27T)19+YLj@SpATK6K|ln%Q2CU&eop=i?uj{97&gTg%0jE53^B%P%coX9vScxw3qt z{w8u`<*I}SA0z(KmlteIf2=G&7vFqA@8$#OHNE?i-bFyfm#cSQj33#*E6X;7{Vv9i zU*_-j2R7{u7M%aAELTJ2uV!41k`60=ad{Y!sIACm6%V z6pzNC;l%iBoSH{#Q$NnD5m+~tSEk-w`4e%x%ciZ~tz$JG>(TG#C@ssWchkW6cB^?s z9@UTDjV~~7{I#6_E>BmlGH-fZQa}B1*}Lfh#^BvF*5kZeUvo7x@20Qj&zrusm22L( z4$UjaNl#qzZd#*WzL7!hao&ydgm=?ekMnN6YUX{SH9xNj!;jvLV=-^~>azVxsKg(= z8(-kv^ws=#pGs7~%0D?x(aC9%ExuOj8FZmuT{&PvDO{tv|Iyf@N%Z+%#y%MrphIte9;u^ODDR+Q!E_)=RifcxdJi zN%;p2UzV*CAEd5@Lny;6Vpbsk8Pi_+S!N5WZ=cw3+>bmSTZmc7DRAjvz)T66I*+6< zn=5)ZtN|auLzAN!Nsqgj$q>U|6s*g@q+s#eV0YEnllU zPDyRjt+JL5P6qPIF*0r5I*bkT#vq>%n zc=ni-`8qf^huvXtK7kl0HS>D>- zEgP$=>&@oI*6or%w(%5qU*Bl1mwWcn&b`*1?X8EnjFT~WvUnZHyg?4Q9;5P_4lTWkQMs2Tu=rw2D+`b#hi%@x zRpdd&^bi(09M&hw*<%9_2!9L*7x>H6tYg8p?QWZO_4i9*jW#+(;|Hby$fP~ysNk?L7nace1Smhh`~zSIZ|@aMpPE< zo)Tk>3_Rt;XTt=csLYo~#+J&&+s3j00WO+>j!}SpCs`9DM~d+gBwOw*6N6t(QNP85 zD+LM&-;)2{u>U@1|9#&6OVq&B)8)kXis^w}g@{Lm!UIEG7^(mO%dn%^F09Y~!T^un zav7m|s5nieRS4%fi?xXp5OFy4@hjyRg#}5=41yNeFi?q)EGn*Wvw)4Pw&(`0gk|xu z#iWV(iPwgm!I9(boCu*wI*BOr<))`-KWKM*=K4WPK{X(1Bi<|=-bDv6Y(L^l@eT`; zTXmC-AHq426o?z0cxE zJ0d?Q{}6R808@8_c7cG(yN!>pH_NTM@aZ~f=Dil9wUBe9By1?-HzEuX{{`$t|rdC*NY~9*LpRA*8?(y8l?wSq6dNyv~X*GfI3l8Jvorb?s z-dty2jhmb6WotW1a+^y-m{LL^p&+X1(i%6mRyWoJ-A%Oi&id+xeqoiUx#s$}@9_s#f_zC*3p6M!da$K=@zre!?vJm_Yfkd}Ba$1;%NAJVBI2MJ&#c zl8X!kt-JP~*}SZ4v-d0!<|!S|rogST^%$xkaDPbHcZfB`l=fHIC$MN@!2i~_M4c&f z6q|SYhh!B@X&_-vG42tURD~P>Ge(f~WhMN6l;orBM$!#%VU=p17l86(%jHeQjn6KC zrm`pGZ72o=vW%0fD}CfLWGRhVRDz3K#=atulGYN=^Dw971RUA0Ju1T=q_I6jrtZuYb^F;G){mnb(_ZImy)aQy|8u&f}UNJ#ZP z7;w{QLs6gxk^%QHtIe3Mvf7GzmUVGKn2na_!WFRggl1eVupY^%A_LTe1>~b%G@hBZ z>#kOYU2&OV7oEVzGVGLYoom=dlVaMQm^JKbHnaIK2~32GA8Fo+c`fldZQcpqb73a0 zAh)YTED3=`(&3{3T)O=$}BRE8`dLQm;UvbhLtv!<;AT+CYB#qd||r$ zATzpPqhxe_jHKlboJqAo`T9v0E?iS~bxC7*1ww<+#>z`81h%x(#|xhkYKz=39X^}3 zzG7^_gTE11$PWq`EQ^huJrz5fvcl@MZclD>g_Y*9Gwt*bMexjH>hCLrCRkh=L-Q}6 zAc$t^Zbi~ArC##=v&Az{7Cv)Yy4=buVlpMf#OhX_QZ^_rx!*)Db~xn(niTOSThzEjyrEsAWG>rXKHYN__Fxki84=DUM@q}xJrUp$f^>t zQx%KJe^h~*SJaz!2nd;>>8O;FONr@!?3Hqs7yZ95>+wm zoo%g{^t+r1vS2X5F%Dml9HAt5tOEzba`8UJJlMbrK9UtV>~+}j3bJSU#*J$$*H;Ky znl)2_)BqVONlLGNWhGX9($pq{`T z!uq!zyfuGSCS>;<)mUQZXQ=x)lRj z@;bOgdz9fJ@ys(KpyG)SX^l|wO>VAMe1GHvTd2}cBH$@mqzn_@tK88(lFLe(6no3f zN`(u00cleps>*kr9^6Uh zB_!pH5rHCOAy$5XBpq?UHkGA&O#pf$0LJ`iAXDtHH-dm?C!>-?Bw0xLp&#|tRC838 z?X0^TL-f@%p*$ZqZC+wv+Fd5Z{UnJ26Iyee52Ibv045qYLe!`$}ihaou+|wmHAd?*?fV>;z(!x0;yjsh$v&o(6=#@uWrcWn{ zBr6%<9GZMleMz=$D%y{{ayza%_7Y_?f|cCV%o(o~#&!`T33J)$A`Zw=*o^Bz;7qil z(>Pj&WE=>$HXr0nL`5#_Lm7iV!_-wUB|2w1bZ297`030J9YM6U1V+rOgwqX{VMDOZ zM@JD4d;kWK3=2r19j*Hi9}B!0XXtE@PIz&ENDGTc*$`j}=2V-%&P~csjve{zH=dPDui*wYzs$5!skBCmCI3=7~YK?vNtB z-5`J6+Ac_BH&X%Ak|M;Ldutk1IqZhR)h zJh(KnHuVJq=EpW&2)(50#|~jW`>d5)itR#;k19cS%UI@<35!geOcna@N^5(`GDUM_ zjF@MRkco9X?!nEg;%V%zC&6b0M-=T$i=%9#N%SP2(B+NR9e-nlIfXdupIE1Ssj++S zAvHORNtW8?i&W(lHjngK-P`q=PBz@!yNRT1?X}j++uPe~R?|am&})<=G+(L9uR;RM zW)2eU6(QiJ5_2lK+SsBhc6V<_GUrNpXZv*%h^q~x&YIy4Q6%&U=iwx!s+6o;swN97 z@u%7$Y$!c1gObR6 zl+cufYBo2gMC0XuYQ!>`ha1i2-g=V{@-pCW{$@SW!ng^8Zrztql0QE!ah3Wm%@-@b3clwm%|d4 z8LNqH=Eu)u!I=@}NAlu}iqcZv*RW3+!d4Z&lut& zIe)5?)+brAhq#A%yutIOdS0VgM}eS9Fdh~QMQbPo52VqoGZw#P8_;EX2fdPzgIvvE`MxtrA!PyFfn1fZhTghsqr=plKx?$;a5og>7Y1XLj$%M>26Hszzj zUR58<1|mQNLub38WsK72m6azE-yH_sZ(2e3tm}QLRjg5BdHalEJl`&!-XO*MT1u9xA z>EDax%=_=hc-H(g{{Lf|&wBTA3;MAQXam8eHuPhd&>G;;R`er{Xblsr?!0P$+K%R_ z$F-Lp_8hZm!ue|Fz=9VR%JTRhaF;*w1Xn7l&Z-NR(B(7G-UCW2sDjVgWJ3bCDcv-PLDw*Ch9r* zS$JLsL(WCK5>y19M+faQlr~GSVSFev{Pu@B?Z>ay*Go167*aDOwS1CFS8^)*NEQTh zP?$_vyxBH&6DBtyv0U3H#B$MtkbWN0$#2Lk)>?%JeH<2~KWiqV1`5Zw 
z4&AJuS%BbmphD8h=2HI{K8p^BmAz;rFHmIS5Ulo|Q5pckegsv^%+8#e*ioqBuV-;Q zgd-4QC(Pu+Rpjg@zN&3Bj<#<80r)V0;M4&|22~RsBMbUF+g8six&_^K-p^`TRHli4 zpV*rW++e~HWQI8=x<@d5oVpfiO3@;-db^`&tLVUr`7HXX*LC{SG=abcTDV0R_JSEB zWZ5m8MS0VD(qMnmW1N9G6Yi~4<+9TOl@mZiP0X7$lv2yYId(ss83wshxnR)+pm5Xp zHL*zDcrr)_13LIyf_>)g{7Z5J--8dB2vgAE$4TQodVd<@Umed+Bo=m6$2rJ(EYGjX zr{TvL?%$`wmy}{y)?+z*rWGFH@jX)Yh6!^PouhuEFVcbmIaYgEK3g03*~ovr&Jj@o z+9-7;7Z4SLiQ4f-rz~-An024d_Q&!tkw-zq)W-FUJm0>2l@mwM5dLK7C?8MDN`Adg z3VLvkKnuw<7g~!brrLR67NldnS9~>L30ftJgpRn@j$nXhTa;o6K^PnAiuJEBoK(|N zVcN zO|K4{|LyFj8c{kt|2cm%ip|YnQJL$bCK#`I+r{#^<&~M&9_9pA-)Xwas;r0BU!CZh zhO)`AW=z9fbyOVlPlqZBl!03C_w@QY2)xkoNvyjn2#y6R!=z3TG@a=T_wkQT9Wm-m z7qJ{kC@OmI@@KshG#xGhc zPH^cWr9GuLbQ)OuX&TQccckEYZ2yFUp9g|!r{+~?EhofYSroQB=(8h%nEqH1M-%!s zQk%v=s=wGqK1l_JikN$^q#7o*53yZ^*1@qcu=Um`XA;tEAQZh^nvX_oDEX$y-2tg@{B@duq~3$472E3jUF`O`<!OO`Yzdi%NOu4|~9wAoba*Sn4K`i+$p8cRNR{iBq~G;I6ppHOw7s%HoRk_#6| z$XzuEg}7-)cwy@BUUE{P32UsJ@x8l=@-i8xs;5#hE{U0RQv-C9d`mmTLXbg0ZNMG_ z385nuV>6Oa961zL3GyW`t$m1OwU7mJhjFIZ23dOv<2-_Gs#OJsn`U5J@mxR!mSr)s zvnOxZP=oK*V&4h+5}L+KbIkw3r1FV3AaKqQ=EO-lhnQw zP-7%pN2P>}7>T?HY``vRC0A?&0W^FC-B=&YI4Q`bCv0B{?htVz(et+2?Y5Zv!JvCs z(u|*+mDQgkXQf3Ic#BL-?rljT32u%?34y9O&zh^RMRqUKGzfBfCb*idx6@T{;Zy?A@d|I#$4h2YBYS zU=Gq7?9~aRq=IG;wwfZBx));QrH{Qtn?uz)KU_xu3w|D=Cvq*3s|o3e;(eNh6+>EW zP7-pDVHG~(0OLS$`;?B7qF9MUr*VZiOrsMpj+)j&8eHqwJ(ezk*v9H{*oIS0q8GC5 z4!!$0M<^8d#+&e_Uf4Bo^UcuIy{%4kQU25=R61J#hw9i3mwwn(w3xTcH#)979yXP} zOO4g9US*xui;*kc+?+|r~&;XH_Omo5uS`bt9j$ zAT|cJbuS0w#YYfe-TTczO?va=;>T*NwKG78Fs zTP+MbEwsgubIX*e8UDO_*|5?F2Lh78lA;|F7j}6@Fogide)o`$LZ)v8%no!baQGyl z4qCbkk{;dnueh^iGUQSq0u*{K9Pn7y@edYOp9Pj4$z{?22Y%K-PpF%I*X}S8!X$?@ zsYA3ngx*7l0h9dvc|zQtN`^|O=TIfit`hC2wRS9WMB3;GT6)A7|{Sj zFXb3!Oyw^E8uR&V)eUYh;5H_nuts|>K9RS9;K^kev%sWEOX<2CxRH4b+JWHUcQ{iO zKt|?fO=C}Ttt0E_8zx0~fpu;)O*i*wO52@8Nm4c-V#Ct}krwry8)0x7>Fp6BT*j+z z;afJ~;HppEtn98aqpXp?<{f5pm}OUJz6>Qg}qHAL)U82k<$IAxKBW+NU` zFV^S*ZFqn;5>^NZ!iHQ75V{F-;(&-W18G5*1}RB-J{H{5b8HlNy1WJe%zdC6(VTOk zMFBE4j?kE56n2m`d0lQbRJj@JzLj`#Qz?+6 z0h=AT9b+ABT{46y#i&rjV{^wS4n1(T5m8iPCzKD2+5!Q}6?9ssqDqt>(&8fOABSPI zljoZk5Mqld7IvH%xU0*G&X;I|g)B2mPs=y85DA7oX!W7^pj@!6#HPF=Emccuw+r0M zg`SJ?k?b3hfyT{Z`8J9#zS=N{mS1+B2DHcncFwfkOE-WmkaL{VMvk|Y$t1%BDXIX? zzJ->x1J%alB%>-F6SR1bt zw7{^sj4x`NjJa9bvA~;=#4572p7@01k^8X;yo54W$Z~&?lZsXS9~Hw%-^!kIq%w}n zRJW(>N+Oy4?3mG%T?rMvi9!lqiA%AA<=7^gP`EWKf?v2<-I~+{wC99Y1VSqp5rZ9$ zBB+;(kdDL>UD)+LCb*Enb5=|y8WTPO(eek}4yH)1#sa=N80~r|(U$@`Ca#EQnh^sF z3iu+Q+~@|rPHcRc*=k|Mv?|IiXo5*H8=}P{vsp1+IE0?J-3i_T%xAqNg>5(b1)4K1 zwe!{hnjMeDWI5s=We_6&+JxB%VUhV$kU_L)g5(LC2&Mi4Wz_tyl=nNGGqHpyAXs-? 
zI3&lr&Lb{plJ3(-IHB7K;3 z>E@-P*oAciZ)2)Cs5Q6VFA!JcMev)JV9LY_O1IXBWDF^wXLbD1R&7&C02 zvr@ryPoRg0%~py7|=oifY50{F(zHbqAFFQZE_4@F&-DxuzKti4gHz52BrTeNEf>x zu*%P)8#B2m=9=wApeTQeg&Wch3Ddrh%8=y0;Ayx8mgobBNjm#Fk~C0eRKy3mEV^dq zT4vURJ4v7bZ9|uhz^65aapqMgo@nf7INc#3cs%G5D5=Xt8zrI{b7ejD$$U9hb4B7q z*>-w}k{VJ&;o@T1~_SWGmK*E7Q}&sJmEuG2gCR=j~RKzx_nFKEe_4dZsBBK=7!G>&VZwU=cIH44z&*h06{*ClYZ4>oSYyu{a9c>5+m+X7lw*4J z99x`2I1E0I??X_@t?r2aB9%@@_LhLO4|Af4=noHF(cf*mTS`IwBfMT+-`Pf!xlB(* zq!4$fcg7VZGHnL)hg-tX zQF)GAs^QJf0kI<8Q6`IQj{Yse`=fHjRKtzXS?6rpxOXE8VPH+?-TY0b@+$jD7l(0g zDK<0oo3NqJEZfVWY>z2h_V^U<5(|opUfYLSq$!Ea36Y{!pfYl8A~nR^98)50#fGVG z#}Mp}OL@S-3$6?qcAsY6FS3yZB9o^+?i5iQ$0_@t286G)BXSxN2EoB8fXokKxRGDX`c|F4wJp?v@ zkczf;09*nR{D>fujbyO55vCz-#)lKA?BN_aH9R0dRGY|88vfxpm?EbP{=SpT@QUpdKYfqSvCM(r8o|hr7>50}h1AdN)J;h~OkP zQ@nyX)|%M!E{BKRGfWy|ceI*@l>XAlj^-TLSS4GD+~(_b*Qg3C0zi zL;(4YHK(a`EuYMGQKIC(1)A&7L&+aiycev63cAv@x%vVeF-~TL&iV>Qx2Zil<&LXP zVW8cLr@rV3n=xvGu|;m21lzh(Qc*-oK*QRn-sXs( zSAGsA#{ylv96+E@_!7)Rc&BG-UutHfDgJDbT2&=7tEyL1*1&3*qRk*G((ADl4*$=} z)V(JRkW6c>*%Fv@2c0uwWwKu3cKW1P9K+*yQKe?4in5ssFhLb(pXEEC$2LP@x?KEAL7bWF2bphFtMhO8O=@KDIV z*(hLz)(64Xht}2Mz$uJKQaEes!GgE|KMV9~@+`iHNZB8sC&iCzSe_uua?N3lkug!$gmWQnkU#cagbn5}PRIup;3)VjviK ze3|KP&LRS@Wgf30E5vrSekJKnCS0dz^Gdha1`|)v5F4QEG&bjc!m}QJ| zQjMl`&G3c%OabNQehjix1)__Gn6EOfTtku@~xs~fr3RUO^pyFI@_~dyAvp} zh+23=Vh{wH#Nc7b8 zcY*jpRHWS$+~aD>a2DOEn$;#I&V)tUoyq>VuBwN=c6>=P6fBFzW6u$)->r;_?Ho;p zyhe>fUeQ#o55#yzD;+UC<@u-M5Jb^I4M-9xQoLDb`aTtC`qE$IZ$+3 z7sr?YIlmc;a`)ecZsip4AcPStnlb}?z!o^N?Tv6m15i)qoqB$o8lfXOgdXtYA+u%x zK+GQ~UQ;egcv}X})j-INdyA_pOEOL(6>66bN4rcw#J))UM8MRhd{u-arQfZ!twkE) zbDNN4e&-51H$9EPL&+`~1pz&q!a!Z|i0MwUpceFzy1-;ICQCu$%io0n(C}I$0V;{C zjIL5AfDx9)=!pDn_jJ;OO*@_};w`EFUe`VTDpZWl2vX537r9LiO+*|viv>lh*MjPw zK_bIrzIrp9Y*m6qq;a4GPT9jP5SLdbanu~)g7Fa40bH&_=E9*)5~8D<+) z^%_%lEDqtSS7aO!i-=7yqwZlMb^FV?9$?4n6HBoK zI*mQ*J$h#0I{Vthuf^&-aG)l?%%wprn(a?2|IIGqR zOmMTa?QnpnrOytj?O{1sFnq~J981#+G6qe(*MbFUt#N(Yy(2j+ zQk0lfFJjikV9ru`+1#zQnfNGDkS z+b6ICzZ+y4b=$VuT59Wz6%Yb23Au%X>{dYK@HNl;%B)~Z%8C^`@h>8vslDZ681rz= zpENwN70W(T6@oB|Oen`wdOOVO$0MYj54%>nz-Im6A}?ok)+HH$aWsE+nv+6w!~_qI0}M7N z-d~XLRuOeNcS`x1v_y0&s~Qb#!*sh95TECS#3HdE)Y78ZJs(N<)TW9>0FbB|ynd`U z-QJp=OqkhwmC*1m7r6Xswgdtww4%C@R#&Cb$spSK5o$_w&0kF-&g@Kv5%3DtipV9w zD%PN8++nstNR?I+0g*`@3lw5#E6NUeD3of!0GPVQ!I{-6quMDAB1G-wCs18K6pX7H zdYSK)Ck@nO8YV4}@{vh`JsR>;(_O}|943~Er_Cx%Hd5dyT12xdV#u_x2C3IZ6wWtZ2D2MFCq(*59ZARRK& z@8W`CC#aDGDzgX}TxapZQ74-F$D`877V7{X31_+w0c#dv{$=U*7egD?lHD>pw z{yPnt`qSe%^nT3OR4!Np^o4YeJOVl=R^KxUR1Xfc^Hp#^y%ro_0Tb5kbO(&}7#Z&`pNnnw+2C zq<1@)3r9=~P;^~izoknKUIV>6$u!NqyX!Xl>UPsQed|66E<&I|xw<5H9~7=<;Ns2o zoyG>>S4)>&UH8CYZ?F-h6lc+xOUyb>*y6g2rgvhT7C!KEr!q7mW!+ z)5wq46KBY$HNc&14x``mP7DSh$b)63OOAbG5daWHMV*bCK!XM`x=Y>&l!XW&BJa>i zW=SpO<0TucDV((ARQ%5R>PBPJh=jJITP==X8dSELY*|?3gV>HHG=nVE&%m0jq+2C6 zwn8s=0YNuShz8bD{d;p=w`lCHZ^10aPTZWcw+m}&ae4y+ zZSKKrbe|Ve3hkG#=upHJBY~d`HRY|w#wIQA|g=EK+@p{UsGL0~(y_h$w*EKu>WWyZ0 zQTs?+(~8sElrs9mR^KNuSOHE*ov8xTwxd+KpFr`NK@5Zk9OV&I%y>72%FD5i){pwx zP&P*1N&|Pnvi)YmFC=q->z)Zv84QRaaYLKzLKa5^4--T)S*pPID8uy#Mw4Zg?|ggQ)`yPVd`&ECBnCq!!yeWm#>OB=n3Vf_Y7+B4+r-!+ z9s*_=wlfW4o~T94d=Js)t)(%c>Of2cW4!B2-Quoc*51D*F0d5*WAuF~v#u{Q=lMCA zqRMZD2#Cz58WAkz6PLg+y`3G9^u$`GBgFxA0e#v9i?hz?Q45^tpo6GF1e?T+XENpyr znfB4Odhw;Dson_UW-5hMJqdpW2S!|oK?%>&c zA;7%__n%yx$<0F*5Yu4_ulbJaGhF!}ydC7Stz8JiNslK`I2o9=dYwy|~2u|Z=IStvmShn+y0 z-;>e#vr3Oy#USjc3Sv&GF_PD*p!tU*ie*C09+SPQ)s%PWwMjfk>hGJpkVM@n;H8$D z7Z3X1Lgx`|HR=Ve4+)QjFP5;;C_RZMyk5gn331)|v{gk}Lf)u~aL9Ewn9G0oF-^+2Fnmi8P_wYz`wenV<}NNRHj?OgK<_D6~_GfyH9Ss#UT;sP(#V-OYGG zd4rQj^eVi1>(Cf7ZdajBUc|Zy`@kca&Sr=@lz;{u#V;Q< 
zH&iOiD;qJ4$G`!;wm5b|sS8rBRtYEMfe1I2kh-v)@19lMYGZMd{<=ve6E_VDOEXXe z_~p&+xTTU(NMcbI$9;7}KefpYNleefPx!1TvUhcv1aU*F5ey42iq|}1O$#@=1(|Mo z_!~6phZGmW`wN_dsZK-dn|wl~vDWONEQMG}tHzi*I8@hhI1Z|3|5h(RBzAJ9(_EvH zyoM|W6nfwCm!Z2`h+QIy(nyO7*@}0eoV8X}&uY_*V}W3>ipw9)2fo_gCv#G}xh)R9vV1+T z*sI&k%?08tj!WgM?aZ@s^_(eiD#<6_;J$WBdGW%R>2etg&7N0RSFPju#iJ{W-EKGb z)fUBQ2G6M2$lfGPVyL!V&4zAP60yzIRYi6BsJ6Y|f zmDLKT7SB0b>4GZ7P{1*{ju>oXI706uvsJcv-o|y=)JRD$CdH+?+9i ziZ9dt%7y8N*o^VbWlN>Oy!x(rg<{N{Ow(N!H8B#F`xlVN;87-f{>^H>jKJB%#S|7@ zjLPwx+IZ3oVlY5Lx;0idgIIultX2$yHd;p@d&oN2(T1Nc=G;HlK*$T~ZWBX+UeAyz z)?r=4vZ&*xA}1DWL?1j&o)1T2v{*?lj9g@E;1O7X(%MvaNC~LFL$3H$Ig-3&71wv@#o;yOxC7Izly(Y|riYhNj z5V-tFz7_9jX|7g+zh>{m2(fuVet(@>>Deo_A?)-4(ML;@>Tc7v^;NTG$$C&R0o9LS zC06gjg*(BJQ$)A_?6@q{#;_TQLrJe@BN+ov33RFMQl$oAvT zV|H9%{#?8}E!`YNR&*0sl9p?pj7}inwbA5E#er6MZ(ucU)TVVZ`4nw$E5ja&+t15B zAw`o9en<}^KXA8~Qbk$7xK!#Gvj?C;hEGN$f^a67fDi@1IfN8Dz3#gIOg#>qcuvrNS{m*D5*WPDxga zCeu)nE<%xZK+C+GT45S1oz>+v;~)r|AmgmA3Ap7r5pK1ch+F4hvr1WQSbl^_G}cnc zGyy5Vh@lYn=WswIdE|K*8b~T8Y804}iYA}lRgD;&@eo|19F-51p-h^c%)Xc*F0Ltr zEX?}z1&cPUvzTo?d>=QNjp-j%SOc8f@?N#(01JkSCCpy*DfoIvB0UuV6wBd)WKgVz zQCzS_peDUYprY>iu|P%QaKUh3ta(9oG}QG~OhOp;SY^wqK!b0cyBn6SVPP2U*u_WP zAp{Qzgd=QPaXz%3p{SnLyNZU0rHp@|A2d5LC1Pe!O1r$>BNwj*GC|X;jYOS|V222b zmf|DkzM6KrtwUaVI(~mk-g(3md*S@AE?JJ?asrj{Ear#@OKi3ZQA=q6>YyzSkGmSr zl3DM8-r-dhZX>Gl!w#}@`d34~oO*#?6rI(9A&?&rLSEn=5g4$^w<2W7x$0G-xk)nP~kF+0x`oCI@C z@&u31$i1DL=8_Ca7Am#xJ&?el9j?g4r=C=dpmzM3WyV2J`^v9+4WGKv=HT#Ei?cs`Qk;+(V2n-=(TtCbsX zz?O$*ar$UX*^yZz+!5zxkveq7MZF5|z)=O_)yX1R4R*qdmDB;sV+n2vY)Xmi(1J6v zRZ0YZ!(^b);$_m>o~-SZQW9AZO;zcNv^{@i>!uNnT)syy+_b(qfZ4k1PLd!ty>NXt zjT6Z;(E_+34l~1$EU*p_N$t}8NApqBSFbCA_M`g4) zjwBPT4H+v80m5dcYV+{ZtkyH*hPax7Y@CWh^Lk;7)RE+bw+YjCkuYb+ST64XhR}0T zf-P4=2nwKNay&)dlf+&iODpx{1_H(szo%GJ9d?Yul7#o@w;LF=vKmC+qu5h4Mu`CL zW)Ouitm|oaOVEd)WLZ4FNP{?UM(Q<1$QvE#0}G01Bgk}g!2?JY`{}?q{0CJCAccLc z@Zdm)4G~V2Y?4-~kq$Q-m3efG?0_f_PFDBp`SSUR#Irjqm3)4W;`u!b=4$U47@)L{ z`v^()YoZ}T_&ip%P=4_~WOF8(i^rGEd+$do3zgZn{zxi|1>dJs#vCLjY(MKd=i>p? z%uFIBE$-o&EwqA1CThYfh$mHf9CkPfXWVNXXW}yR%Q;iqp_R~mjF*)3;`gALL8l%d zgVceR$(tc5MCR^+&Sy6EAC0;AcupZIPMqoBfHshbk!;|6v{DGZfM}0d@XFTDQ>iIi z(kDJ2U+s9H$$Jr5qn0Fn!%Z|*E@<{8HYbypxT>0yc*6j8BdIt2Fo6V&q5y}K5#?o0 z5gZasQXWBxknZt`)p^FX2O(-uHLzok%)%>UD6g|2>|z4MUy+p_G*8MWL~A?2Ku~-b zZ9p2r-w?GGq*pk9^no-1GA@89)GsQA)cN)xR{f9@NvEk2jd4EV{c>O zDrcpid&=5uS10YuUD_Y05$bWd7PCi~HFCz2!`7&qRxMQv`5tY48?vVInX6%bR7BK! zBHd^sWu!Z6TqMcnR)%&CNU8z^vx2SLk2^ovSC!QQwCAMD+~qdz%DFm)!z$jX&U!4AG;y)a!2fQDFEwajp4_0mTC5huU^=V4;rL@kt+;f?r)YTeNm zF3#ez6+H)`9>q+Cy=g@kG}#y?{0oIX)x&~DNXHM!t(@qD{XAA3`o_aT(xvNAg7xDZh_~n>-}B z)i5E1`S7Uf5n+Tx0wo)&?v;!e&N>L9ILaOfLl?2m#WY>S)$@yo^qH>$3k^@^cYC2> zKnArOtCrABnc9kq3r`2BGnfJt+GU*u0)1f)@l`f>#CueH74Ri;AHuU0S>)jr^Ej<@ zmpVPR6_TmbkPVs904Q-&Oi`mt7h}Hi1C2*@u40wl(J9Bw5tsO_NO8+=!4C*d?Jt=# zz@Bc@gEQ;Nf>m@nvD}wzBt4fEo1+q~!@HJ18Ce^@4-vnxUX`P-Wg@n`8&_dvU#%&a zYJ7n%(qUyGRTkbQ#61NqYr7fxS``WJWohmr+cxMywN zOb#+kbHF0)XCn_~vMmo~-c_$O%Ub$zn(*-4f4Xrf`xfmugaN1-BeY>LKE5WSWNrS< z)YH{Ng!0s2UCJOenvf{0EtBIxHRr+#i7nEaEb3TGq<6vIRF7ZI(91(|2(bUkHVqj! 
z)psPx1h>D*MH(uVHOW4WoP=nFJqiLPibpPNuHSBKF8EfWl(KlgM>wHjFTqs+ic2Tb zEkWUH87`yA5k*T@tQi{yIAD^Hb$iwBY$7&eN5iBjIct#tH{4NVE92aCA)h_!;%_j32v-7YFU0 zs(ONl<7a5Vn9lnv)D=Qj{2I3@)$)Ey%E=F%$6Tk3R&)`hG91Jq6r|FVG%u(xF6pN81a$ZjL?fN zg+pcai;jYkYe3J~JD4+o(gYI^q9WXSlA+GY%}iDWN(4@a5e~bt;UZo={&=h^F%5S( z@c^*IYG$MYc&Rq3k7!LreAf%bv?B;dm8FEOvs=(-3>$m7E;01-Bh7b@3TKW;1wc$L z@juGW4VzHIJkO@YO80scKsIAu1qztiz4a&sJo37#Tgunpw0@6e1Gyr&FE|Y05kOQO zhz&OUlzAs8n2E2w=XtpB__Wkmjzts>CmnCiVHjc)@^Eyj&xpy6z9H-C$vgI~#rj&= zFtUNuFW?uESXPIjc}%xJRgY>@Ub^iCjWv7Kbljm=XQ&RoNZA*&ZLL*z-2*Z~?2GV` zdxMZ`NHV%DR_t{ipm3)gdJZcI->iq=iMmVbO$BgtgKlv( zb5w+yf@<2kAwGZKI03;IS9(UTb1KS_+Q*l`TS<4*|hFz~JRA2L> zI7y~Lbd|`8i)HoEF3m$8(=p`0%S+i{p&cvI>>>#xk>4GW&IFzmh0B77I-btaZ=NDxj>vcckq1VmWhGD1p z)oC+=-r9uCVq5mbeAbi*fh7~`DP|bL4cDLX?8y7i@aW{_KfRf4SIMK3-NBzMittnP!u#&P&fSC0^x0SqJe@k1^R$W^~2od>VV2 zYoU$IdJ`#BMCdUWxh<)_BkU`KiLcN}3<)#0xG~DGKt8!?V!kWcAl<`$y!^~+e!M{Q zPvdKj1QoDnn3Z&5eHx85bvgxHV`IqqR$+(*VYiM?=pn^j@1Q4l42I@8nPF5@YdPXZw!CD!y-5t*=Z3ANL30jNLv$avt=?_ zoOI^2NQ9u4qDV|l#PFqH=)92$8Bk+*wUdmJ{$>Kq>J)B&Kq(2s$`}EcU@Q56x#-Ey zjpWFI^jL(K2}!gHF;+qq^-x(%;Z8=tE#3*|C=4Vy-AUT#(ZZ;xWGz;kujs|&H?=%` z+z`=NX~mhdF1BV%Q&#J(2FTu;S~_4QOz0Xv3i@mz@Xo0nlH)UraydY;RfQ)p7tuTG zFmVAmQxF(TDaZgI0<(aStekG{o|#JYMixEJMZ+g+S}6fBZ(fU$&7D??&Sz9aRVgOj zkQkKi{uQU4xvRAVw45wP$dX|d)M@ZMAvbo!4Tnnhw>^765dg}DdNeCw%l4OIzkx*b z)oilWDq!?gei$jE?L56Q%WIb(MrvaLK)tnUn@h=FTLuXg{m6XCC}*1Lt@6k`VLKUt zk`s}t4~K=$B(%C1iBU9ZG#5|tP_QQa`iiG1nE=ciDSIURi-dKOuF61~@)6{bZl2UO z*~?;UdPO4Xk!r>1#q^3ced*zyD334Q{@3et4~EkVz(7A6l#+U;3xO*wjVHb4E`3vt zFfo)^4%ux5KA~$~OR)pB#*S^&qy)4%Mv-!06cY<3vR#$FbQ1xAZyoas-Tzq(SPM&@4o0{hsMaVf*B2M9j;MrA+i%jJ|1@%8+;nhs z=(FthU09Eah-!L-AzBF{vAB`PvCUX-AHoW<9BQgAWvXoc1MYUVhL!kp4E=jz1ZL#jB~YAd%YExhiM-_b4+0l~j@U9g7Q6|VW@Dt{7A)|1nD4a^ zoDf{{rSeEBQ`sUj5uam6X{ib|gab#ky;AllKW-G;Xe&ge%6R2Ba!XK~@u5_aRVCZk zjcT=?sd=|`J|w(RMN3kmq^Qn51<%Q9vqO|J8d4^0(lC|IY4_-JV0J}yzDyNac-nLQ ze~2awNj0O#yF+;1?0TcpUa+Q(OXO|!M2&1hG!$A}FpfzZAm@yw{YU|NQwV@KxDV!m zU!`p&n(Wx3!uR(qCIGXmlUo(c)Y@$kQF2ufo3kISSyl6Pr%T#dxu`=4T8!<GgddtM1`3q_DBHWwsJfU9z>hAMnKgxSPFLJQ!j^jZPow&P+Nw@wF|rO zlRuIzX-a^B48A?)rFH!ri!Iw(L}iv6qd5kJZf^5Na*IKLDx8nCbwrsj{aXmJ#>mVAO|v;C)kpS=4@TtgsP1c?YfHp1O)@R zqB^l^SVABX3pf!OX2KB^b6bP`5InGRKLDI}UJt=~zD&)r>4Cepx7t#lWqy5)zQ1eb zo?7kFedkWqMZ4Ult1bU)>cN|h=0iTsoq>)DsOGHtnqQfy8y55Z8q4}&fYE! 
zd98c!*7nxbjjda|G#KW8%QjKHTXk#$o_DJ*=oL&q<-3io)znIO-s)!iR;Q=bSlGZW zm}lLq)ihd-KX=#FjJf5_tQXZHdu!X_;6teKFk4BDiq$N*wYxz}6=c2{+?j4}OmtT-IomRO zqTEwEq?^c;gA?92y&k25bdTg~5z@o4kVQ_I%NmOfBpow#W}Xm$MBNb;5+>Sm)4meh z?$Mc^i%*iG2^uM!?w7rY6+`C$@}?`-uTa^A0)~6{aR!eqB|r}>RG&Kw;VrXeqW&HX z+1)J!1G=AkLe$I9o<^#gPS(i=Q_pz{R;A@Lj)U@+g_ppf0Y(%S$$6{Hf8L%?~QFiSv?>atHT*a7blHxFa~qKmldT z2k=L1vGINzkKSiou6P9-B3zztkpLW}>0WcI)VevY;3m~v#lC40S^$G6v~FPMk5JMs z`wRA(!H!)stn<-RK&_(R+Q;FLt4?j$jG~=fEi{0DReY5ke`d(TyVip-lG@Y^&MyH< zqc)ev2PhQ*O!Dl*{>XTRzHlN7U}JT~W(kH1Cnd5#Qw65NAWNmb)MW^1@)Z=3&Q}3lfhkDV047onvr@D|WsoL@w+dtyx_7aFr4%Nu75y6YGjPlZ z7DuV84gvs$icm#lERvL(ZbTEs5vDT3&P5|ClYZBH!)DSFT=F?J^Lbni$WGOJ_}IH0 z8X1HZ=~V^ zXds_fsig!n_M0pqx+9v*Mih{JB=Quxxmq?4XrUh4WQZ2F5W^A{`H*Sq%&983kuI}CC$`w2yzpNyE6$(zd^JRPY z9a(RC1W}(vZ6&rzTyX#M!1hy0u65$3(Yh$=svN z+etU*q}y@i8{HzBva!0(#WibkYvpRC2b+7kzR6u0bD+YVF5#w_smpWB$HL)j;sEOs z9lgOdOI!e?A?I9~WAn&Es_x7w;-xvPOYiFh|3oe6rp0R=nnBmwSXZZ|!Z_okk8_%`bmafc3j;cuj6| zez_0R`NcX@*WL(3_sYW2a;JgkYS-Q1LgNktYW8QB{ctJI#?1}8-ieRw#+y6eOBNWF9cM@%Ey}HMpPB_hbn|9CEt=;Xr zKCP{oo4yjcMTb7z>s3tM-~_SUVd*VhDa1WL)hbzdbNlXk$xIu~wGD%4vs~LY$GiZV z+ppW+aa=d|nuIa5#Oifz5@cU`tx_ZrNNU?5R1G9_Wh3cos2Y6iYAl$SPPM`A;ldtI zRmr4TjfX-SYNIcZOPmhmXP$*6X+Y6Cn{=Ds8OStB64vRngrtVaMy%N zKazdQb1D`L<|_6O@lq76N?&O;-|N1W#7i&=T*~HgH}-N50q3TORMEiWR z5`{6&n>OGOL8Pmc;Cs0m!3;rb*uZ2MPI8s7YCo`?T^=<2qYWCMUaNk2;sPLhMa`rtjS+x9EfNB9xxpTo2cDmdFU88a# z**&O7`50M$+vh)4t}fqN-d!$lEU#Q&DHpeuZ>0 zz=|g3Jy4`4YQ=`OC^J&DD3*;mi7K>cLA55JaAV~LUR9F}S6CK%hxW8AXKu{NLqmO2A@TZaX!{lrQj zQS2%)0PP2yM1=RdJ)t7X3GG4;>{$_e=D0xu zz^z00&pM{+TODxMGNRlFz>3a~khez!1%z&yj2(%?WZ0TuBBaQlMCf!Afuy;f-$2d{ zz3~Qx0ZkfsiMDPJ28S%3p8cuyfrHvECoURTodPgpR9R?-Lo_8cxu9YOnB#atKUs}y z1x7(t9RO#vf54|dn8PV~$pT;oX8eLH;^ z#D-K-K8mozRYCB!UZWQ+zpyzq21n@;rvqoINk_upXwhz@;Rg)(4^sE6Wtc|ptJ%ti)ZP@ zZZ=n3#!M`_YJn&&uxSH~Esz6D&IJ8vR|pcxssKu)b=&9}1D=~2M&r6iqYtr}ngzC| znS6&zN;--awXrGT1dW@L$hHfsTvRi>AvZvu2|-8QBcfW+1TuhINnW`AADI_O8War@ z5C}zC8H5lI$s!HnV1%XiIrptIry`krU|U_`%L@`p`g2;8(4505$}Eiv$N_U<1)&k8 z{ROIm5^};b=$(t&Nr@(@Z;qU}}DDUsxsYwT1DO}Ev4T}eH#PKwvy7-O zP(nt#$wsi8K-}=wS&~miegu zk<}4KAwdFrwtr-pG2tW1L!JdFDts#JWTb&a;+Q;?Yb04#8h3hx2&|lLJ@jpK^oWC~ z({De&*byaWC!j?0N^8W#4dUU{45e^P!OLDeGkAF>P~s-$p`_t?Ytlo?hAko#;eD^| zJ)_T@L8BnvJU~BqniaCMQ{|P4w3Gg56kR`bCO=vpmULiHoT%#hO8d1Kzh$6SkGI;4F_EO;~Rv=A-dWEg);G0ekskn~ zgOC_83mHsrC&~*!iFbrG4(ec6WgKmEpl@qzWmQiwmB2&FRf1m=n)bB>GpETd=VSK( z>Q<`!!5}Wgk8C{vfu6?NxG;|PsB-$YQK`u(g3gBhKPfl`YAUNHN&Yxjtl0nF>M@J)wC z#1%#pU(CBhFWr&XVpn7o(%+Bsi!MtL_fsz&Tk~NBzS`}S z_i*I6y|T>>cIEB%aGx76yGtcO7fQ?YO=H*}lsDV=xfgS(com7W*2YhQF=(~??O=bE zOM|Fs;xak=53efUQSJ@~Z*}_nlOaRsUf=uWcj%~bZ%J1MLM&uIloA@2uOk-eIX*#7 z_RG5yj^k4D#my2d=Wb`xI~(+PijUf;52Bp<|4nKH=vK{^x@BXZXdm(Y!JDl3JFp^| z&ju?GpDh~)WSF>qa+kuv&RhI*f*+xWCx@N$?i)xaRgw0Wid`n8gI#(1T!%B(x*U4o z>HT(h&;uBJd%(8?a@zP&WVBK>XX;HJvvqc#DdrD#Tbo4f#Z_13IEVuLV#o{dt1_q6 zBsp}y*^TShU%1MDUSjr6?`SNK-^ws?Q4nA)+C23Ky}_{;0=zctz5xkpnDiV3TQ*J! 
zyA6rqzG9Yma0_reIzv2xFtM3tVJ&9Q=C}Ah>Q--?(=?_PdBzxTr;t|PAMz5Mi3_G00i{oT<@JhB9|CY zw$}5n{Po_tCe)!i58g>0N@3WE)KTDWUyb=Y9p7@kBQH`2MPi>K=%Dy z_Pqi@wEOq*I?E<<;TA6YNqLJ-7f6KZ+rF<~U3rcWIc14h6Jm{rGm<^BLx^r4YwZR5 z>&A=Mua}L6zi6MdAb~9lFA(oK9NA~~ALqcF`RwC0ZbBV!T)Edqn60c__iaFHN zn~{yHO~o)2&9iv);@V`)M0cqoKvkNcS;rC?oT9j$L>)TgCi=G$B6kLDtI@y93xIz3XXRI`DCe$iMC-?dK%a}3!Zd&GA zYnA-KOE2lo^@-~60B+dX_<{y!P3n^%3vAzxWt6b+WC&9%9=Let#?IASdzQhAd)NM4 zdYQGYm=SvJ`g1Sbc>X0@qfkG8HY+N4lRg z1kX4V+?*U9UaV65^gJAGM&?zBM>(h}~ zm;-MlOtOfHk}UZ9|LJGP+=o0wXuaawkm)sF7h@^7F?Vg1sN=PStZVy18zJiP^4Z1L zWiwyrl{Sq)uIbns}C^NlVyp^!G)Kh?4t0tHSGJ7}Mi-srA&72Ig;AZ}3g(S{{UT*3ne$72&fZN*#3-z>GO^;BXTtNZ5qVc435NkAva>&&=*BI|C|+XVOb4u9(T6;vuP*gP zJSgicdf~lVsO37WO-(*F3T}`JBdHAt(Lcf?GTt)r#IyUDTc@(h$@dj0mevay>{z|w zx0R|hv6Y}#=i=IaA2L$oJIw)PR&j*~-_>)jt>9x6H!)@we2Se-q8=OMZS3Hs$~IYo zm91Ds4q>kmax=VSc>2uY$l06{ z?+^*JSaH~~=ekd^->YMWQU(8CduP)dXL8)}acu9}V+S@uJITR^F=N17K++}~U=O>7 zfe|@ejZKmAaI{+8LWv`aHbu%Jxx4bow>WRktCJu=l1~uil5;*lzD9iT!G|0Y#D@UR z@Bgpre)<`Xw9X-?#4uLFndj-QuCDj0u8yN3fk`(*Jy5Dk?jZM7EEe?4SA^BMJR{kt zViTxe(S6Ke44&g_6q&IkEK*PqJ!P_5PQ;L8!`&x0Pbj`*G6PbfzJ8sTU5LS{i^zP| znjALCs!cXF8yLk_t}qI0@Oz>o;N8jn>iOIq_FY&2BMSkvbs9Qc#ke{`o(F8`BU=J& zPl*KS!dL)&PV+K97s?k3yiPTSP&C|@>a9V!?dq@_xmLY8RrOfquxb;_2@bW(1Oi1K zjdgrbn#+=(_ZR#`z($90KM-vl0YKdO6qHInhdT_2#w>Kk7(RKRq<^M>`$Zk#OnX>Q z!1p(@U`6bw$4DX)I#C7$qk%9>fiS8_I+dJt0|cF9T4a^+hjjOdY73R)QD0!?d{5VD zYZW*w+`B-=xo@WMlZYr16ABP9XCWJ&1|Qe*nqin6^4ue&v3e0fjVo6ba{NyysBrCCk1PzMnNG8LHsGoe)h+`nmSTU9s$EilV@iaI=dl}kybl32D)f7go-hXeM)RO^WvV186F%T zc3n+H@Ivr{B!W;BQ+0`Yemgk-PW%Krs;ML4QIqWXA`dLG=W|2YpAIo|P7cUiRBo^2 zi74q*MWhaL0FQS)5xsJwh2UaN!!7w}jqVvE+x)Qj@POF2GVZZ!A2nE4lfjN~{fHV5 z0gtkY3q}z-d`#^~C+7qN9%){LU@_;fqNA^I3-*s_)u{D=lKbWLK8sm~&{j~C+rk|hU1dUIG+mn~w(fgp zWrvnIH>x(ui|%Mk)V)Bv}+td3zZS7x*_=tz-ns%19JJG(u{+$2FgSPL1(kWQix1fnUwn)3R&LemFvT`l zy{7}P=a1_qr8&d^E}DL-SKwx{m%}*J3i)6G6xk?pmxe&@qC62bjZ)A$BmF4WI#9=@ z5Z}n3VnIh?M>!ex*l8!Anu{(FUdSy-o&-FLFsi(SJaARUs1;Tr?TTF+rc$Mp?BatM z>DNhX0ns@H&}FEG5g_|PSw(ij4t>W+Cj8qnfn{@-?Px0%+)pYf)MbLa`%nezv8zm| znkn`ZUeFM$tqWtJo3%5I!`vLMehWdM2Kylxi<)Ze%mymhLzT{|-^YMIR!3Lpi5--& z4pKT8R~oiKJ8XU5{?i_mdFy0CA;U@0qU!B-u`do`F%|Tia!hc;0<`krQsb7C!cMSh z$EoG8IV3hT2w05@(6ZH?B2+9YZD_!9gAzJ)+YM1ZI7O!xOKi5>nI{6!R|sHFp_)%9 z-u7e=ay+bgIY|M=u{S9CV=4$cU#=n}X9=3FIDNtHY2Kjw#1i2&9WyStipDSD0-%MDU0NE; zFP?PT6IuKo602}AJLwyPB+oRvx?(Z`MWaYo+_I6_SU7)5^&>*KL;bX7SLvb3%9a)rwR zM*HfKh#8KlKDdJXuy0YLP#^Wqj_VXW zcYHXGlq?kV-Af5Pm86G1YNaR2DrQx5v($Qgsnxg79w`2zwIL7~`LKhb-JYpP63av* z7rdDjbe>oQt(FPWjxI?N9cc5N%T^XTl&>;eBo5byl{F+FRCDz?iaNs>eIy;u8N?JG3gfpAv`#v;}m9xS5{WS zPWCA6Do8?x^dcd}3}_vn;*u|EgbI(iBet{;g~-Yq%b$Nu<1gJ0F1re4WC=PC?B8Fh<+4)zC zB*T87#g(LU?kAar* zh-3=37=Us#P@qTEtQDIzI{aO&p?t}Q9&V8C~QM)wVkCs(BEUPoFxA(TD+tW=q z0h4IwF#24|cqil5V|DNFv5v-C6dM&Lt#XJ>v;Zk$02-obt3wujk)x{!vAFbFv|F8% z8YVrf&rbE>BprVb+$y}slm|Y<1NEnB>738D#4uA;L|40oCGsRK~Pg|IUE z2?K~=e3}q4i{laMMbk2{cOn@g8S;F%5$KjsjV86S{p9cUW_0@~H*66tH$|fvUSmVn zcY+eviSjB`s_QN=_zjgeUS?)pPp+F?vx%F%rfMV1l{-}A09-Xrwrd%aT3MUh>xs*( z^c1n=FPcrDG{&@<*U4bl7&~^N!*Xwd1*9Vs4N@T>+2DRQnYgIbc4TfGBEgG5VdIVw z^9cYV?g4rMw~w`oZClAy_mlW^ME?#QtCqVCs&T{BNx~k>0em2{xSY@f4I8%*x43x< zFrij*2!Vj?&SMYZyC|C8m8VMm3NIr0(RvHd`VA_-N2B#D!j`03&lsPVALr`y?o#1B zYu2}Z2*8IK?rO(2E*;p~)`U37aO7jT~$XCZ=Y-;uOZc=kFHO%{d#A;)|Af%`#U#X{h z)Zn~<0wFzTz7wm8r>iNl%w|roDdQr;;v8Y9$mbkTd`Rxm5EbBK%AHCN)hlQV%`z@? z)j-SQ5F%%RjI8Ujq?hPN4AXXF@J4~GIEr=#OO-p@P+GJlyO=?TQnJm&8>$%QXsr5b ziCOfU1WMJPfAgcl3;k7m6(fBWc_XeB+AR@n$qSg$ zh9>nZmspSKX34P1Opnl;*9{>Qv{#7qa^oZ4WYw+K?^tORR9Yo-S6fvf`7 z!bg^0Zaw1Riim1jOR(x_PA0M}h=$A?0aSWN6re2jB0Zf8v#ENOJc>$6)uCvBNa5l! 
z#j6bOcC#%jmDFTnA~?2CWtXKwV2X45(pUONjX74-KGoAaLlUp*lhh+VQ>rZ<%I#Gx zz`**fh9eJC zM})LU6cgG6NDw3;l0*r!j$9rV>Nz$wQ!e44#84tWkqJgrAgfP zs~T-4D+3apQ-E0pqu>Gkx(QE5Ufiq^p>y@ZR3K(8@@HZYa8=--6u(GbUX}fVK05OC znXJ&c5WK(;_~jiB0$U%Sx)r$Sn2P#&@V+{2A+c&a0Q6wuY6JS;csv>r3gW`3CS6+F0quV9ma@P*eKE3 z6-U0vW-A!wsabx6Wf@I21nD7O0%PrtiR8}Z&PgU|0M}&KhaB?w_z)|s3y$)ez z)j(aHL{VfLGEgM_TcRo3pAXT>gV{&#pPis_L=Bh{oG5kTgoE&#r~EL<#zqzAVS-9Cbge~m3RN1tU503PIpG()d*E8Ke+K42ggD@hPgo;DCi)K`4G0qlEh-z^uDmgYb%eT=IJ#O z8ob-DA_I=8_c9bDGGxF9Tr$uz4+ZsoG@e5_YA0RQ8cuSq(Yl2Hx!2EQ7xj3=|j zn$-k6*WRRO!zYwACJK@)(O9Z8>_!wxE{brR+7h=CqJ$eT$@ErL9(%z=j43X!5gkuT zo2SAx9@r2U{tOw_GCo6==-D&+fw$RiSp#7+?-z5$_mXFoRzP{>BztoJyE19MPszK5 z{2S2~c9pZkVPG06U<4uBEt3Kt8K>fYqA1Ms8NDY;8lEmVe|2ye_5$KOkJu}kPdK~Y zR#hbw+JA)~@B#oO^M)zU9^xBuUBa_tObMG^P+8|rIn))T>Ro^h>y#;-TePurCK}bz zs64~&@k7;pi}bW_q}G(*f!Jy#u>LM-B`p}B`|BMto5>FM?;m4zUbt-wZAD*QzSsT)%&R1;086u z9$k>eeNvY;wXIJ%S)WQ3x~U{=6SssbqU;$|w}rgcK9#ZxC$JB3b%e-|>O=IzvtAO; zaU!B{k$G61eagYs5yShA-widH<4ZIMiBKkg6{c;LVW6x6E5qA9LW-g0AvC#xCnpG# zj6NEbAXTcEZD7oz0u)8AMFgd;z3KRb2;C`7G4C>i+H375 z=v5}I$~Kn$w!aQPGqPI3B;!7IYbR!=7vypvKZn6s0Jm70oNCWr-gv zpIJr+#oZWov0J_o_7uD2UpLnWas-=XjgB3w+A#e^&lcWP%MnU!hzjd6Kxq4hrii#x zr=GlpAOjh$J=Y!|VI;Dk8EGddI!M#{VkKVlD&YBIQ+Sfum)aav!aNu;+Lax;=mdq$(V^;7g zl8BMHMK1!gp*SZfxF=n_E1J$`(KKeHT+*2+S7+UvjQ zS9+xOGC*Y~!(?(*pL#oq3&d<;sV@izaHeh%#*gLk;sNKAvG^Zc7?qJRe7Y~%wH?G4>c?jT1&zq@-+iLH{W0FG{eh_R`iN!pC!YXHK8aY#gwjQNfWtw;)c$?U64(fn}{Zz|y9H_HH4^f%T z6sOo;*)465M#Z7Az1Vg4Bf{u&L6MK~Fa`E{7Nd0ImXoG@mv#bySx)$fKKZr_%m*-0 z+o+#vaJrc`^qzBMcO}7)PT3c~Uv5s|ON%%@k8X$}0O6(Vbqg?f8F#~U?@3#u8jKws z3i7VAe^eF31-SG>(ra;D3&p3^z8A#T&{rioE2$3{(>}PKvw9hgNI$+vU=bUoVqW&1U2h_C z2i!JeSwxk9k}W#&En7mxzRwAnA+{kst17{6uiH!XO43^h_LBGt?)n=cVO0Am*(2Y$O!6g65E@M zA!X)+VdZ@@kafKGD@O7B+uDUz&WHP!ffJL1j~Qt{ItdK$ec)kkA46C!Am`YVXOxx1INR z!Wml&`VH|d(w+1QeGyM-YiL|xE}xy&>!L%(Xzfw3&2L%1s1-H1KwQFn*|o-8ui-sa zbxPANA@pT@bZ>i~r%|)|Six^`mnjXlXd`6OQrtGospSw(nf2-p!;x+=P3sQ$Tqmp6}h1RvaYIH|Lm#iiO0GW?bAt+i#!7Bg9 zcu|=bXPg{YB^SHaqv%*I<21fnI6Sl~xi1TxvYm(?R%H>T5_LgxrB>vU3iT6Z;1tM% z=hvf5SWtx>#)R?RyjE} zC0kz?ep`2<Jk1WBls9k-qmZoL++7B8FfO0L;U@bcVf zv(lSRUe-+kK#6YILft(hl~_ukq$7P9mn2YhfZ++RReIB!Waj+>kTk5{l84Cm{j=kw zSKECrx^@!sMO{VDft4UuEv4NOYsd-c_sj%Xez>M?2QkvM&~jppt7C#jVpc-8M*)GH z$xoS#j3>Z>DpE@zluEHF9cL&2ba(vv&@i+{v6c?ec}uxWTU(mhjiLyg+<-oOLr+iq zAkm1>2vmb3fvn&v^Y@iBKgeZ{bM0=x7X_+aWrg-F{WX+kMQPMm!An25mZ$qPmrb}TCpCpd8Q&rt2~d`k39xvt}tmKE#<1RKzhtpWh-!s zDj-OziQzP&wbbA62X7dk?tg9m?nqmx07!KUfOB0MTdQzvFrbq^;*=5)0#x8rEZ>65 z4}z4QGn}bIm@78q&DWieK3u|ag4k3N;xEame#DYBrw&*_-b&l`Zf;H6Ri;}KZ#9Nt zgzQ!jzW{D!=?5IdG9G> zruFH)ZKhb_8G9)B7p}peR*9f&kZ|>}Ui>aZK*&-+rcK$AnZziVWwnt*R_^G@wa*O7nCd2fppT=ox}ZcPnbwfBo)|1_Zqb z3+obpNQor}vzI=WP?HG&Avwp-Iv|!;Yza-VL`x!Pud>0KhykbydTq+rSanD2h%7yZ z4@b(L!0t0_nXXXPn_Tg86@0`SJFGqE<lea))xkJy4qD!IVl;NXWq?PMF`I$GxmpUJBWF1Gh zc@1JyBQ6HjX`%+fRt2aH&F4|5M#{$7fj!w@nIh=G%sg0ztzg^$F4CJ(fCZ9|R?t`Gr-TnkSQg z8kg7A69$#)7KhMw+(T*mJfUOH6lxBlQ1xBy-qHkUW3fco;w5^YM>zSZaIMMX)amPo zwAnWPvmqW_&X0U`&Oabq#-Y_c21KX0E35@Rtv>cRfwY7wm?Fj&u61g|2rW2S1|QAa zO0swd4y85Vo7P@w|>f$6TsG>s{)p-Av_t8QnZ%--bBA?$|Vso$(y%sk} z0*VB2UCK3Tp!iNvND?y=pQ8+S;V0VckL;c#aY^1>FZj{(os$b_ku&r?Ne(4Og-SvZX7)Gun zEuM6@mMqrXbIL!586kHBN)x^OTy@w2GH%6I{Ci{QAd~K&sw3IKf<9679L#j&h+}bx zq{53O_mFQ~8>;Q3;EW^wU3$uRETV53Pc?ddyagt1JFM7=sSIEa_3;$ zu_*hb&VZeSnJ-zoq+@v^npT9(;w48eBO0T)$c+N#EQ9SDT(p>nQgsI2@<$tpT5xiB zxGup!1sBAeBancTt|-1{Df5KYx(?|AqT1H26`1U0zJxLhP^_M>u1pp38bK!EZ8PCT zL~&*9n0>56>M!ecsTn9@zswq_$l_);OINtSK?0*ZqSD&`0Tga)ru#;bOF~j9bZxjp 
zg*lPnYj#Q^g@`ZTU}!`4r8R+r1Ax@yVCbZ{dN!&RxI$RzkYR@Xqya8t07i|Ek-F7-X*4?nwG4I>z6QpDRVl^dOzN7ec2I%7? zg~$?!_6Va$rMZy1Fd&pKa|wBD3L`jpbVhB7h{r5QFVS?wg+&*>A;Cmi7^+=}z%R&3 zkEe=^k;?N(XHTeW&w-Tcjn@n^E{N}Ck?48LqJvW=^iQT`DAN!^ZuxRZ$14X-rJnqnUC(>nsD>HM!z=&oc;M&&rf$p z_h$6q*nP+2sRhSu`p&KS=+@rO)@0^B9k1`rhJJB$XFQuvC;Ov2v%Pn@u)epZb>EA% zKDsxZ-`cy&e2E9GZM=Jb7#Ctq_?1@MllSh-)F)(Xv^N_~Z{OLOPPS->y}P+{cgrn6 zZZg8|-kb%qFdm!l88gFtwu~-d82|QU2KsjAbcFm{kajevoqSAYyz*ygJ$IKPB)E* z+2qc6Iun*Q_hvJVvbP(Wl0qyt4}zP#D-yW7tHvv%+2mVyS?J@$GB@P@RI*=NbxMZU;EMq{>9 zBrQ6GJcMbnAk$2D3ym;$vSr=QjY@!{tMoV8*#p0fwl$uQHI)CoIbojRY_bcpfLVs$ z+`K!3we*DEfI#=}!feys*c1@}Kh1AVXIqt@_+mWV9#41fLW&7+p)ugx1BDvWw5FYk z4cXt@p2KAm!il1R(R6#be|Pg%vaV)nH|`c&adX0BKIqmNasklGf-dp%Tyjp z-@BK-YK5BchpYlOug2Gd%~^&#q6o>_u~d}(LP~>3L{D26TfYl85*ubwC)h`X@w^jxx3mP;U`hdf~ImV zhvx^6tU)>d>A>7EM}tH<;Ohl_W4aSRoQ&+Q(LMR?Lb(Kv#NeS2G=Zl zjP~v@48u}VvKX#cBV-+TsS(mf=8PLyp{H*bS`9=@iMm#;H}hhT|eYJFW> zlR3hi#xU6~NsFVqN2Vbfymp)3_^=)!Ga!7qnnlUr&!&A-@%DQ1E$>J%;0-7K)W3g6Y4mwEkv>5jay%DZuEabs9Ibw&=io^t1* zH370f4PERJVY+ig$U_bYpK>>xc=_o8o(NL;SUn6RSW^_ZW}H$Dy+#z9K;GGN9^4?n zWo^u2L?OnH%HR8E&*=}T`n~Dt!yCGqZV;Ru4!#iIjqF48Sj zo)UjGksMNNe{yzkIOHA9XlE^^-D+G)kMxK(pF)PTH&z5Yqf%MBl<`^UWtpgP+-luz z$0ldU3fyG%I5kJ7mp3@>fX)!@i*m{)^E_d;_6}mT9HPSO#@Vx@*N4Nw;A8y%?|(e_ z*x&t_4?i31e(~wx%Y(t-OCP&2`1l-0h|lXgca9(O?+v;j{>I?*gSY0B_vSqP%HaEa ztN-nOack`dKmR$OzR%|$ul-=~Nk0FO&p%oFy}=iG_ANgDbnW*CUm6U4={-LG+u9!t zzQX69^ZDP`{%~+@F!=a?@cCzJe>C{*!Qc}=;q!m3{c!O1VDQPm8SMV*VC|0wTZ6$X zzsu)Oto_O08-u~Cb3T7+?N0}@!QfNB#^=wj{n_A%&;QEj&#(RY;Cq9?r;qvkg|)u` zmcgfg%I9BS`_bS>gTe40`TV7|za0GSVDOo5@p-iNSA%~T3_g3C&tF;l>%l+q`9Jx* z!CL-#F!<$v;q$dm2oax~oE|ycM_BEG2=OcLU~V0pzM5aZx=+m*A+cA`XQvMzzdEIk zmgxGYzHz|!*j=CWEs?M8(>|A{zHsx|!S`rz0|DIO0Py9}C6D~dzA^>T8-0`58egr= zRbnPlM)Wnmdu7WLy-MeLP58N(aJ0#tEr$&F$~UP^JvjaJE`9VCM||;_0{H&U_`d<5 CUgxO* literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/Build/Mac/TriblerDoc.icns b/tribler-mod/Tribler/Main/Build/Mac/TriblerDoc.icns new file mode 100644 index 0000000000000000000000000000000000000000..e5c91b3bd4828fbefa5bf4a7e2aea00929b43ee4 GIT binary patch literal 36969 zcmeI52V4}_7w=~mb{9k}SW!U~k)=veQBj&mSHRx8#uk&PX(oT2S(YYXFEOzqmKdXn zz1JAKC|D5{P;h4mD9Pu&$0mM0X4&64=eBe2-tQbTf7arq7`Ar){AKQL z7#1-BgV!qZEw-8Z!^eoMnAf~l*h@Uq_(ev>^XJc=J$>>7f860wdV1Q!hY#-GyM6Qe zl}i`ToIHN!4(m<|==c~TD?2*N@O2ja!+XVk`7$#Tr1 zaOT9(BZrQa9!#dBaoNnQ>})&>e~rDuUotaaydW~V(N2*tE$!ih)VnwTzIN&SnUhBk z|N7hY$-8NnIXThUSPoII^v2ao=TD#bF5%xw;SCdiCO&qrdM<-hSxj zCEIP3X!RQ+2g@e1Fujb(=!znIipC>7{bB08Ti36iJ9*^S-P^WayM3|OFO+J{yDA*U zqDq$b@cx|}*Djs@1f1*_DD^+O5Siy|Zt5DO@ zQt$kI>D1x9zidpnl}o9%P^t}Yi8mOGx?xrpL`lyyJfn-QagWj;+`Do0%pd!VQhG+DNH3*Q$Dy1`%F4b$IvYgrCVu)dotnC6{=My}@C?@u~sGGt8eqfBG0jc=ha& zlr0IH$tvA?O7~OEI@9ORo;*%Zy?yQ6pZm5ZCE=t|rN4ERI>wv#9kzV->`D5A+tEo{cdUz!swQ1cN%4Gs9+tm0kH}@beT!4WkAdGdNDHYA+IaKI z$=`Nvh~Zb0CQ;I;d_0erzxrKiYmmZpdy;0g{(5oX?neZMkgOH{bubg5bv| zrCe}|rNmJn#-XdJpgX^LlamADSD6`4p}}YW*uQOKQfw5l;z#^{Emy9@dHmS;#I@_T zpPLXviK7d#0-`XsU=d{t3Yp|@7}cw{kuDP&s7dzXTYf7ZRWGId$(`hO1O#NApYiFzy8->SFc{abn)z|V`%T# zxot;s%7Nd0KXUlUkwb@mJ9yyN{reB>+jqu*wtyEW6d4ufzIt^2$o@UaNxONwcsqOT z*s*=vFI%^4+OT$YVnV{|&ASdCJALNt*)yk4o;Y^&k0Xb{&f%xDKB^cmViXrg6|oE6 zJiGsU@!S1xSa~_ykDtAQUo*XW`SQgJ=RkGz&m(_cpasz-%;J)gsA9vyob+o44w4;& zTaRD5dFRgU+qZ7~{nwSt7tftNb^O=~`ojq&oZ{l5!tB(uDF;b*`A;XV-AhfyADG;~ zd*|l$Yj6}ed-5dx$)pliQ9;)IlgazZ&J`78`Ib{R(;h!UM~Af3`*&~M`0L8Wv#05g zGD=GmN?65(+4oK+?Sg-V{4#^m(y}rhk|6uZBwj&iS!CHS7w=`@nWofE z^Atp|eg1uo)~qVS^&j5_9&l}EjRpjl?!S1MiP9S+nrBa-8TW4ApaqF#>{8nNPB4#a z2@SiVblc?znXpy2)a?^bA0q+%c}$R4&MGU;xqoWUb}AlR(^;*qDBXVL;VX3Ff~^xB zw;pp*?#R&7M^2@mLtkx 
[... remainder of base85-encoded binary payload of the preceding file omitted ...]

literal 0
HcmV?d00001

diff --git a/tribler-mod/Tribler/Main/Build/Mac/VolumeIcon.icns b/tribler-mod/Tribler/Main/Build/Mac/VolumeIcon.icns
new file mode 100644
index 0000000000000000000000000000000000000000..a45ce9b26da9f3e82239ae0d67bf88fd933b1301
GIT binary patch
literal 37447
[... base85-encoded binary payload omitted ...]

literal 0
HcmV?d00001

diff --git a/tribler-mod/Tribler/Main/Build/Mac/background.png b/tribler-mod/Tribler/Main/Build/Mac/background.png
new file mode 100644
index 0000000000000000000000000000000000000000..fcd940a5ed61f1a2be919d7a7a6026403772cca0
GIT binary patch
literal 7512
[... base85-encoded binary payload omitted ...]

literal 0
HcmV?d00001

diff --git a/tribler-mod/Tribler/Main/Build/Mac/icon_sources/default_document.png b/tribler-mod/Tribler/Main/Build/Mac/icon_sources/default_document.png
new file mode 100644
index 0000000000000000000000000000000000000000..7a14bb34910ee8dc7bddc8552acd7904deddb186
GIT binary patch
literal 5282
[... base85-encoded binary payload omitted ...]

literal 0
HcmV?d00001

diff --git a/tribler-mod/Tribler/Main/Build/Mac/icon_sources/default_volumeicon.png b/tribler-mod/Tribler/Main/Build/Mac/icon_sources/default_volumeicon.png
new file mode 100644
index 0000000000000000000000000000000000000000..5ca6d539e49a534ede45dce78852ed586682c5ed
GIT binary patch
literal 5744
[... base85-encoded binary payload omitted ...]

literal 0
HcmV?d00001

diff --git a/tribler-mod/Tribler/Main/Build/Mac/icon_sources/dmgicon.png b/tribler-mod/Tribler/Main/Build/Mac/icon_sources/dmgicon.png
new file mode 100644
index 0000000000000000000000000000000000000000..935b9f28d445abd80eee9825804241db207fa4b6
GIT binary patch
literal 9878
[... base85-encoded binary payload omitted ...]

literal 0
HcmV?d00001

diff --git a/tribler-mod/Tribler/Main/Build/Mac/icon_sources/dmgicon.psd b/tribler-mod/Tribler/Main/Build/Mac/icon_sources/dmgicon.psd
new file mode 100644
index 0000000000000000000000000000000000000000..b15f55b1a258681da6320b4b92bad65d82a71ed9
GIT binary patch
literal 79031
[... base85-encoded binary payload omitted; data continues below ...]
zCTkf4$PuV`ln|zKVSfOtrR`N3hH!sJd$bS+ZLAPH4OkPR@^B5H;Gr!DMeLH%6l_eBIt<18vO?9!6LX~bd_nQCUWG%w?zP__=* z)r6{qg5uV}F+mVcHnW?W1#7`3faoUWm_1|xqDxR@s)^OyvPBC==T)Fhg}9&u02mU8 zOf|6^n_Bku#nA^;B3Z)T4>1a7Nm^hxZ4=$dYHB_PC%ixgqF+(47Y^4lA(nAyf?Z<` z%;s~>2*4RzOMw=k-D?aX09a&m69C8tX4AEPm_ruq?SpVq7S4cu=Z#bYUC(SRgB@TX z?inH}icJa;e-_o!0KsHEv*~d(4qik-;wYHWCaI+fffNNcz8rxAe}SAq5ZOjZYvI_J z`UWvXte$|ZC63}BQn3i?4%I$ELR68cp54&&I!|UhY3`GXDTqn6GTNv%I2}nMf}?2~ z(9U{RQ|&CUbbv>%6B3aSbD+V_YXgOEYy}ty8DL0E^-EN+;bnSBB0{_<0U>FlS>lEk z&`V}3+R07CmR^ekR*#jkp)cAlTpapWHHw^A?{6#783CS7~M#SAZy|l z*hkG2y+}ha4rg}@R$f93D6*LZxibXt_OiqR#E#V}dYg~=X&e`{fPfmI2#{_YIyHdV z)GU!gETI(kPqRc#-^#4oK4%3;mmyaGlr{s9YEo!J?UT3_A=z!B)+I`4ki@dNENCJd zaoSXyY_~rf_8m#u5DpqJQQI&0^E}t%MoVL3LjyEpEOApK?7oLOfNj`NL!c3Cp-A{g zFT{`=bho*&vAzkVOtxScI@SR3LOY8YyG<}Ex&m~gz8UEf)z%2vgMx!lps|8I%`B1l zh&9gLsH&z0s-A#`kq}8cFrcqOwn!{F%R?Y$+UrKDo>?aV#bro9-c+Ye`xG?s(qdSA z0JyK~8&S23K(x{pq#e*A;oL(r3H<|;EDyxEd3EqIF`7saEM)v+9Q71$i{mJ)sBfr8 zl`nv%pGkroaTzGsb%r>SVkZ4!6dnkuZU7{zevXi$rE(w=90EdJOITF(L@>3b1V?LE z1BjVyq??#RF{qgobO0ftj`5ym7X7w>gOKc=$Dl&cQ6aKJDx_F;&@I58X10`Wn5~FV z48x0(_F9k?78;%Qt$Qr0c`+Vavu$rdjY>NNC(A$D8%;`~6L0+*7s9<1K=O)`Cx&*u zLzhM}DM1E8G0pcwJ1nnElxtaPQiP$OVR5h&W>-UVE<4ZweQ%UOpywo6wD`0Ysszq} z^GyQT0SPjlAn_h@)TGlivwA8Fo?uMK<=<>*uR|%`jZQ(1(Sm%IwB|QBdJEoYjr6}N zn^4ImSb}JWpIFLjxcQS6YKK|Oelr#~{y;CKeyCr{`f=WHIIrw%XX~sN9L*!Y9Iz#Y ztS)rG4!UgM*dBC$)#xVm|G zczOHy_V*tU5EL8|5;ib=a71Ku%#hf)_@Tpwj~F$2j5|Is&Q8hR-OU}a{rdX{1O|t| zaZ7`vqK6ELPZ*jAsIf`oQ~0TAlQJ^1vi&(I1v^zocMnhRe!hPGz&SKDI4CqMJZeZ> z!qDL(MvfjcE-5)>0z^nppOh8^$Iw9p4Iat~z=3^e;K0-wOXd}fj2t>Vapb5mYjv5BL`j2$;VnJoDN)#tRWW#!$;1V4fF*w4JgE;k2 z4Ihbg1J^Y_ZT$lg1ydN6$5(H9AfeUnsn@kt5PK+PU7n=liEmNgQlP2=Vj~h2;6g66R zFHymqhg|>A_j$rg@ptKyQ-on4!wttmnkdKz0?UQ$w}it%2>ko>Ntw&NPfFIO`JG^KMq8mI1FSK3qp&G7&LHT*dWMLNJvm%fWN<+CXQk~ZlX-7 zAVNy=w8e|QUoe0Eyzl1Dnf>jo!kIH>=0%5w;s}2I`@7@Og@)sVbWrfLiJ-B`W3unV zu?lQF*r7;ux@C4iP*A`Ce?MOzA9p>Jg1UQpMmsoZT;lZRdRldN$N+@s=i}w&4(BKW zre7vNkXDZyJ7(xNt@X6pvC#g0zP^C)aChyEjd%^#fOJSR8EfjOxEVq~oeb#*33+*V zxVgIY#g@E=LQpDSrp=>9Bn~N*0P0MLr?;0UFmmBJ8R9b*HI&0pVq?Z2tsEBhEd{8H z!R~-@bLBZZIT$IR231{sP;%1PF(C8AVZ#yz&8?%=ii2I+ArAH?Tny5Tf>#46|FDFF zxG)5&igZLtIXc+c+L$U~kZuf0DiKsTE^bKJG62=a+M$%}?QE>AO;s?cXB=c%28oUe z`=gfTj<%E`C@Y4UD#r9lK!|u$rBQ=JcLOHZ0+`s^SOdmV9b=5+hsDRo#^6d1i?~or zvsYQ!+S*91nAVnXk|Y$&A}%2|7O5O4X>dYiEzLM)FM?nz7_f*54TklQIEWqr8aXgD zARU=`(r?d$@muL_F14bpnN~X3d{<{W$WjRhwF(MY1;*ta$2lUJ-7?0^%1Ua@va#03 zQ=;l@XPj7g7%0<#kaJ)u9WqX87SXJg<`!0@HD$x-B?EHY;KjkPpn!fU)d2k7bU-B} zec8jDvXy@oY@jLWwroSWz~VEz`~jCRl-^s?R_ftbaex*+KyRY-Vlr6&UH2QKmLFBmhm| zWK1!ks@V_98sP8Ozn@3&6)@99V~wngA{&6^FXlEK^SK`7kd znOT|fP6Nl+ab~u*lpWLFOc^0n^!o+)_xJ7R?d1|&26pq(K2VoN3!&`C9j!3VLKWeZ z4f;W%KHi@0j>}qU#a9DOtgNj~hQ5T9g(J-z#d$1MYRAiPV~Dp4%#c_HSo@D?2;Np6Ott zg$#COh8be$#&cRE0qE}*pa!%J)7sZAg2G1Iy8&H#ViXD%Lz_ zN6WakkkOjaW|WP@mbPPBLFL#>;e266Uk+xbX5-{&Z$7HIjpj(^T3OowObR)ZI!GNE zMhY0BVGHlDmFopi?42%_6r?9dFr7@e2%@NI3ooCQS*{3_`EsBIKqT!xmGuVa_cs~^^tt~8UY%wR& zK?0lr@%v3P7mUKuLr4Z;K%Os}{B#s!PP#I$>k9J{LtE0FqhOXrYj3qqEh3Ra9>yN2qj722pEOD0d zNEfE9wrpg{=wWIDoRRLxIZItwJX-_YaUwC*?V~R19V8pK*$N$U|=8(#79yqGE_9Q z0Ntw(WyqNDMojCHKYDuh_xBG(c}7Uon}j$b-l|Lu9bJ8c-iF2|MkA}irk4B>8xlEk zbX4fzDCl*dHwhRJ=*Ym9FblCXG%__eb3BG@>PP2`pFF*~DmDbcQ0L_5$ihHQ&pv&P zP0TE<%p+fcO?@FD0QX(K{rWlLfryG?A0rd!3~lVJvISsM?+^AL0Ij{Zm$xGxcc`eM z$Ts$lPIgm85Z=W1!hL~9te8C1zn@Rk4<)Z&-TN)v0SqKNTR7;}%nU{_5QFW6 z#$7Nvk?BGx>WBOH_w)1hbn}af9^mMJZ7wSdGgA{2Sqw$DK+qkmXviW*mcW1kV4Zq< zxw|;p+dDei+uB-Ln46j!8yeym4k|uqkc4#ttmT0Ifb~Qjlru83ATJiyC_|>kMuvU* zXyU<)LyrOBI0CYFj(UccQiLQH8wWv)4NwMEgVHdCo~f2 
zKA1Wo!TxcjbQ2iQjg$HUs4u)Kdb%>s>b)Xipn~n8;6U%BMlhb&nfB@1+n{GpT^(%= z%u3xb8jm$Xf&&NmdyH=Z%r1+b2Ear|TdRkf3}b>`1wep8`nsmH0cM|>9wei!t*N1| zf=68HmLMKLfMMh7<1|4Gn8T(zkPIMHRh4BJ8)WBUgqNpX8XPRDdcvfKwq_56P~t*b zU|cfnASX~KAU6+p>kLr#^QQ2-mTCy0z>#5Cj)76SqfT zAMfVYqW}$O7qIIg2I!1oln@!l>Br!8sL(~hYMPFiofRmEao^csHIyi3LTGaU4%LQ; zHdaVGjr*7#1@mqj5z+uN&$Dr;F1#5y%-B#btO`Q&3BwAAe5ywSuvXQ_YHEV?L-#wd zPKE9aMiB(R4VML5BesU1V!BQjA^19cDqIY6!poU3O$)Itu)>1kXr?3Eb}qpnhCy(J zJxrIv;nO`KZr?rzz4SFrPJyY~76{=EwJ?wU9S&#p1a0r#OJ7e*=VOTyKRRHiILh3jnv9(~Rs^wmyohqoc{40p@L$C73dv&meM} z4-V!aP1Moqq1gWo#Qh13A?7v+-K&j4l{mV3I$D|<%6hvY?n@syrhd2pK1xYe>!FmD zIG~_CG*p!eV8)sF5x({GJR_x<6%&?oQLPlsxHtG7xH*dRB=nV_a-xx7sm@++br*io2zu{3?H@Mf{krTg z`YjbN`1O>Sr2KrjJ2Wyc9o+D{x9Im}@IM!*TLZ$k<1}x7VgL~cXLlZ#J$gdUI-e(Q2iB4?SXAh9FFL`y z4@^B;fNf+BH~K*A2$$?caT_QNnw&HTWXCU!VEgFf@(+ODW{-s{4z3s|<6y{_tX$|B z0l!)kM-0Q)a3TShDYU&zp}3r=lp#bkls^*Ck@z_Xo`b+AlSl-(Lje;2S0KK^z#j(X z_Jb1lgQq`S1K>hZ|98so^&creD%C*9yR1%SEPwg0>L9mlshn&f9eV1*r4R3h zDZCM%(EnN(X*a&8q^jyG>#E4ghAP2x(F;bM@GV_WbEOqKH*VRwWed4gyp7sU?U3#y zc8PaO@#*#Se)0f)PnbvPqP1QS5rOR zNOQ^4bN|aH*)Q88hHq0e_jO^HPqLp=j$$fa`{sj_?8R^{JzWovS0WoimLSI!Vgcfr{No?JeCxFZpI&oXgG0^hEF^4)9lv*YsmACrJYZ+CmT5p zjdgEos#|O5*W?@WEe-iVXSvf&?B*8uUQfcuU3^y4t~FvvgjOVf{oqi0PQ&XLl@ISo z%g8(QU1>RakA`0XkUXRwaVwryH#CdMj>GL&wjvH(@#6Onx92uBzIlG-(5`K5TSTA^ z!tLB0JNFzqQ`I7Y%7#5Pm-qbGh$1QpX1{;9J>A5qe^s#qEwbUzcdpfoa7yS<`$g3_ zul3X4JJg8HD%!KZ0hlnCe7FTaRlDBqm?!TI%^mb!{F zYueZPaOmpo6|kHFqQL3(tB&DB&P;v(^m@95)BN_qsf``WfjD&I_DA&~DSUqY>YcbK z2fyijemz=*^7h{G?eg_P9IAEu{W>gMbb$RvSOx;MvT^qN2iUWl-`qX==erd}98$RB z?pquSonilr7*-z8%H+=VUUE_4qUDY(uw1|6hoiR6#s37hpGnqKi z8^>)%lgSv9%wm>1$xUKR;)WZ7$PN-gC4#siA|l{!+|fu7T)+hcWfMUV5a@lW<$mXM zqt#7!b#d?f>66DN-|4P8Roz`x-PG@U-&g(Q&XJSct|KXB$SG}wGsfcim8e5&dYEEu zo$MK{MBQ@x;CW7ClGJR>FK#j7qX7KuW4q9bGHsZhpS?uge0yIZw{uBy>`F5p@g4*=h?E#*4&=e!E0=~nO51| z|3nz5f^&~_`0xOa`}hj9n!IMYoyR?)*QpwBY;Wjpks|ce)zc5P=mSSy_lZSlHILgX zZC$}wy;Rrr=z0pLi%LqDodAP!%dvGP`5bMN?-^LDv%X}TvoxZe*@=%Sttc*&q zGu4;spFTK|?WEs}gnq9BciK((vq$R9*e*1STpwX)w@(;#ZB<8(@b`of+ScsSR?F20 z$vb17pvgSD*4lgVOHKN=dxy?C>Kh}WZ!G7=6U$MIgrCv1c#m3E+d1^JCPMe5BD<(( zZyBLc=UoF|WqclhYkyohn!%%HrLAi}VOKUhMN{ZnPuvSS9dsRlas1$=GS!M8IW={ z*$Ch9aGWOE#Rzy%CRN?losL`Sq+R8Q@YUX;YmMzfb9fkI>;nc!+N`Q6+~cs{PO7Ro zc)#6pA&2aN=yo)Ryeq^mdq750=C10?`yDjp5nWB$_u8x{3&avc*W=x23$cwQ@PKp& zbJv5@xjkF=2vzfeJJvGChE4at!;vG91W|xjYvm|3|y}S1sPd_1{>NO~Ck}B7DG;0kFL77AZQO#4r47xucGU58B@(*x z=i#Rfe&bOlLy{5Fg;+0YvrQ(0i6JOmhL|oim8UVD8{J@J z2(zZ4B$cnZkuY96UfpT=5R?5i z^}7VpBx0}PS5_6mNt2*h*rCpwdQ=S`U4kc3m^ zcs^erCSh#MzSUyDXA8=s*m4-ZvJ2tQ@+gexG-rz{uWnaLwjQ;Vh-Pd?qKnbJ%=;a0tzS1LkcdQ!CBR3^HOUQs9 z1B|Y*H2oG&hbRdhq9@PSqaRon4R}-sn#MyN-W{W%VZ9hy`{;VwJ-!A~$=TDjU6!9% z5Vf(f0l%j9Fl*F?iuI&553i-x@N}1wM$@GeHP*8W<#9w-;z=6TS@SsHuLnw9SDfm! 
z_f!VGr&o_wcb``f*%4igr?|J?8rh*@2{@&S(s(;lNfT9e_} znx&(#zh{nrkS8S3$(tue1EaCj$4vZ=M^<- zEZ-0)_#_;l1?)#hxHb3$Zb&dNL&hDa{kSCb<7SjJsPTD(GEyR{@DvT9){GQ%Peb2Q zk#(D=TNi$ArIkIj%z^S$BCg{(s=};!Dsad!hStjCXPx%)5^VjxGI%AO=n?|SE3p@$ zyBl6f$p&lmiW)v;!t}JJAj?^=FUgv&ybuqTYE>(Slw`!DxwCzf^I$~#zcAL2&fRv<*^Ku%1e4o_g- z+MN>vQ&al(hj%aMWKkJfh`MCWH267Kt2N1KZVdssH7go5AiAMJ=c;c_a}Zub zLU_%ZX2>X`@j}|x-RZUl4J2gPggl1#4J{y}w5@qtd-^mG5M>h&3*jwM+9$^nZQUA3 z2(;PKWSxY71un!mF>N4=e~Q^_S(1V46Qyk_40mGQKo*^h$Z1kp_KbujZ-hAaaF9XF zP1s59(&b@1Ru1Hq+%+Z7mNsjjRPEUI+|oe?sW)NekJZrW4{r(y7wqaw;r8ARvddY) zM=D#aZ>W&5BD^D`)_k)r_3+Q-jhzM*^hmP` z#;k3tEh{*@cWZ3unpMFiD{EG;E4_j@#3b)JbnZrVi`obWB9dKsnf0AbRoBiR$xPpt z^aHagCxO}Iow#{xT1M`fqKZdPb(V9;8$$Oq?T;$2Upke4Aaif#ZtuPO4jejhq4;iH ztI9y(GeZIQ5Dco;hn1y81!qs4IG%M>aqRf1GZzYPRMfR}>CAA)2%r44N!RtH;X!%n zwX45mUh(?nYRSzzRrM_$TG)9rzMh$l+OD?dhMKC%ip+Aaipr|$`li+{tpVZz(E=T8iY@@uX`AK`HS`U#{aJ@Ccb)7bw3_1<`(&)_U@apV>yIWel5uSxx z6!1I)UG5;a0V!h^2%X?}H>?QvKn!ds{P%*VJwUnAcFE8$cv=shCPO^!O8AOp-C{E0~pRuyoD(w0eP9Ep0TYqDBZ&Ma{n~fV>3(d5a1Zs+)Qz*Cn1Chw$8T z!NRI}=>h=WSO9om3YK2YO>#~6#v#J@Pr>4>xlsZLU>`&P7rbVx-x}mtzqQUK8&o1T z_$OO|*Fc8?uNAJLp$~$FGi)Sa0~|@fz7;?cm6#-2ba>q|F(0l>>UVm5U3wQpxJYr%Kdz8<$jB<3qqp4fJ8H9+KU2} zTv6ZxAU^mOAo9v}ibi3gyF1>tR}Lz<%E9@CF8Qc0@X_R%_R2yfS6Mjgyh~2%hdF8V zEPL&tlB+$OeoO!??TcV($ZUIgqLM35d?Uv-G4(~nblDs`0i%*DV4RZSnws_jYWnq` z?G%nmuEKF#ngDXz7n9RZ?NyLUt_pHgk`R2_7vs|pc%+h&|BMsBQ2i_n^&K8PrF8fP z0VuVf6{Y^!nd*`lF+A86P#yRTs2-lnJHR^9fN3o)qV-2wq%)fa))gg5#1qLP{CT%%QAj8-Fm%P%P< zMom2>1g`c$aCNN<*(ot<%25G$wGYCp-@8zz5~C($xhAYiL|DJ%*RB$y$ECYQtbH(I zUF1sMN{kLna*bJi5N2J-FMuUR2gJArtv(pE&ga+2lA{AghPp(pJ_xlQo9RqN>)oxQ z^Wz@=4y4X;rO}m+`moRi zb@f8jb@N155?<-(_bv+Iu2JK86uv}Yz}!lNu`_6U8L{Pw1e$XmKpmn};RWcDYGR_rb zl>lTtGK)vFOoC`R%q3x!0AW2ejZf4}j2#*5lCDaDuI_(>NAygN9sNxSKUzgpw55Fa z6la=fa$LY?zX$=VJI3~uOv^wr{kKAXrV9S>&E;E0I}lV8urfbP1ys;dksQ!dZcU5} z_=f<7dhm6JVr=QCRf6#8#g80`wTYk!yIqk}0+G|rV;zXO$&sI46oyO-mVr#wUhOR# zJ972`fw=U{w_``x>A^jMMhDGLcS%cS7FxRP;~Dhy`f1E`k7@6&Kk|Z<-k@&kO54j` zkWyMt0bxgWUMzTm8JQ~p19jC!M_!Q9+mCOV%Kn|CH2(igN|*PRlCiEnwo0d8xrOQ`_M#r1t znD~}Bw|I4|D#j2^MY%;9BdCqu5s^Z9Y1j*1`hu6f&>uqg7}$CKzx9W5#7lo(y%_A_ zJ~w|s{SWn}@hkdkW-_K!@f% ziv{x1u{QQT1031={I_6U8en4qNMvx0ePbN(&y9;lyO9sa z-Dqf*FkbpE9s?vL*8pkAZb7{C@4awH8k8)6mrg*u^u1oVC=G}Z%1h_;!fDAbSTHZ0 z)(bl(->-%9(%<#Qv+19`;H8s#I6L{;Iy=4F3r-qj@BgIa`ajJO&Phkvn?xzOCQ)w) z}{l!TpOv0=LB-nh(LR*DJ9oxD&VM4PP!`4-j7Pj^`m+vTOcQ09AIZzrQ}*x z4cRS>lYTnJ&eTfDHMR0h5zI;7x3|7ha;&f1K9u6KwggD_9TXFPLdIFB!F@ z!_q7!qg#IC%pwftnd%CPLPiNp15h+>m{Tt?gPUZ+S$-mlqGnmP4^o?kzvR?)%-|*+ zandyoH8TKLfrqXf;MAZD;!9pW?2?6=U?NJ}Q1X>?D>Im=f2J!C3Jt-mlG4-{E^_X9 z1{3*icf~b$g}xG2PU|Ll z>w*xR8SdA^M^5b~nQ~GH#0>7kF`IkjCK;3Onp!UQm-pmgnvD)(wB922|9fZ6^2;z>D!3@x9 zrB~x7StW=v=8S=J-{QGf?I!ugB~p|CQp_A~u$c2baFc!_2p{fzS&v-&?z>4B2x5h) zemC8ub6tQzDFTC=J#Rum=(PXhYLi*sSw{ySaK zJSn1i34Ud6!~)ms4y+1Vs|vo7-*zJ=XSpPGGAwL7{jdJ+6>g;W@6&{$xuo|d(PQg_ zn6Vx~(}EAbKyxq9T>hofirNlvYs&udX9)OmK!P!BF1GL5o4m$WN9Q~ zT*^UnsSe_$LByE4iYsE=nbuRpxRxtoJe1@R%_YTgM2tt{d!V_5WE&A7tloKLibaN*Mj0|KTCqg~uX<*wJZ6F=M2D%4~9wKjZ>wu8liK%4`>c>p8`7zr} z!~YrhM?H3f|I_e~#s4UHO#4{USf2g1$5U`Cc9(rwOXol`UQ3K`2iO>k#S2@rAe3<} z(Fr5bkdjPrgr1D+w>zOWEy49-Qzm3BZU^^jSSXxAgF{3YI3kP1smwlBbjceHM8v@i z$ZhS5Yp*j<-ye!=zZ+0D1>$<>eW;(liR*wSsCDn+`pu_M8$ZYOUvyAYp}79sm;qZ$ z1}AXXKq!R<*@Ozx55(eRW-lu`>xBnIHX7XMiv}XEvwK)k)lgjf-GKUWAg*7ofV%y4 zT#ssmT00NdueU>O_!!sov`|y)aQ(3XYI+l{*HL?5(^(QV+Q24yH!C_NS3}ylShAmi z!8Knr5Xn#NVnr1%gu`e2kWJl6j(cOW#_AQ3G!5`Oy?m+!y z0*m8zMi#8BT??7ADLYM^FG7O4+ z(EzrbhPI4r-(sj6{Bb?#F4V0PaP8j!wPr4^$F)GMe;?N~RZyFk;(DPTYC00vOU-Gp 
zbQ?QH0uhv4XdK=f{no}TG^L%X}j-t1*qI^h{fPo~P*~W^0Mu`RRrek26e7JKuv6szksiiO zG+`Y}zcrz8s)uz<#?V_>@nMA+FD{m+GBCV60HVGiR!6MJPiDoBheD(mj3oUEl3Dl8 ziBgEBLdh`Fp^Y#$%pkhQK`IzipUEI-3O!HGGQgNw>tPv-WsA&7tTJp?)*f@DTNlU3Jf`>HoxDWZmn<}lD)cufDBJcO=D)UzG)Y;=d&`XMULkd~r!=@I( zgEj9A?{D2(LM-buH0Z;dDJ)~wprO7p{0?}D zrQQREPoDYr@Y6MI+Pyljr}c%2pZ)2#{)2r;G2V_sB$4<1Rp9Uc^wAef*GI+0*Cn*Y z*CZ(7<6_q@|MDO2yft=Me=h>wZN`lic}ZkGgI*o~hq((Eeg4&AW=YPs%o6W!zW(Bq z59d#xJYq;656kE3EfPyTeTNJm_r{Dr|8?O}tQpg%zBYEm(EbV!i5QlmAVWbc^-%PC=@tJm<0nnYeBEox zqzOTzUj2{$N;&L4ipBDTegV0n?|_$n{p#=$nf_i7BQoqi2Kg!Eq(p=lU`29wsmxQ+ z2hw#056&9G4werZ(9hS$O9q)3I0H`(yMm&6ycG(cETxx^Lg5YfhzNMr4L{F_Bw}}o Wlwim#8AHNt9avE;g3~?}{`+69+Rbal!OqXw^gD9 zK@h!r^_D2B`+R@gx#!HxGw05oGk4}ZPn5o{1{H!00RRA%mgYUfE06oX!C_Zzo3d5U zm80;`eBuQFNc#U92uRCdx#~pQXd3DOKmh;M;X7{(2 zKhyz$vry}vim~6s=F`9=J`?}TX)}}al+@t9?Q)tY4~_NdAL<)OfDvhs-7u^zvyHxl zv-w#2q9peXbsg|=+frd?7~~Zn0Yl&xQm5}`<0(wf$w3hr31#Kz&BXEK@yxZguXUv5 zxYI(jk!sHKv^9(B{^8X1_0Q{HPeMo}rEOBMS{Q0|Uk$F+V1hJ>b|~5C$DVV&daqfs z@v4BSQ8c)>^?aB5Kdk@=j;XBR9Y!>S8l|pw!~+HL-)V~Wn$}d#_EQ1hF-rWq2;BOw z%v&V>oqT}?(XX@UFin^0+4_ru>e{NwjSMe1H<5*Gg(F7to`G#ugq5Q*l7L1K=KMRx zNL8+rhog@ygAaTG(aL+&=(8yRV-&)Op&}k#t(eXCwH`f83kJARgisXEECOWZ!9&*x zZvlbfC=47f)*C|b!hB1Z;{gA=y@fXDjAVhUW}126Bh|1Ocq!@*9*K??1q}wZe^MPp z9#{wKdSzXqu@Gq{V55Yq?r#TsHbjCPZQMf$#WsO*oSLbz9K^Q;Spn-2nP-nx0eG%j z(yiBf>Vboi{br-uIjopcsg7PcHQHPORhICaBP2a7>qjBh%^wYN?_d?y->D`sS1ARn~4JV@6DYm3w+uQ=E_=0 zePx(-#HB`?<-eRd*G{i@HY_u0zUKzLNMobSvLjKRf3XAxwDpJ5DYyr2t8kDjyh2uc ztdK!~n0QyzSnFsstmWeT^qh)aHpr#tou~uA!mfrI;DqRLi0HeruRZsxrV#2Z>4ktk z4a2pOZ8H{3kgtnM{XI~8WAjYuxXZ%sd|Ps~Sl#NLS(S@|5k5!pJ~}5@KB5mClCTvI z4ZDf%vI*yTCt%G~0(0?j#MY!>HYP&JX$W2x<29a(eg8*28^#RKFyf5Sg{O)C!RsHP zmL#Jar%AzkcS=rOVZTcyC_Q)QREC{p+!s4y&@Dl z)76g22vJcKbxuS1A7J=9j`)L(4Btqj=F!Q`emlQZKJ|igWznU5h2OKu_E2`9>KX># z(%VbvTxmLqp1g{^4-jkIuG`ptIJsQh&+NTT?2K$MzY1-f2}=wAS5rbmDyy+j zvbIm)-yh7g5Nshv!Vp-6K>V}ojn&?zX-eTssJ#a?#fOH5-~{yOGMg+aCX8yJL}!jJ zfEMloo6ESdA7n~3b_qhi<^fm)Q8653-mMV3*_9%?bJkD80FNv4>Zd`+lvybuG|Lkp z38a1plLOs(PG4cqt3d&|NZ{ENdGw3_@%z*FvXpx%nA1zA>nWL;TL?&4{EMeArhR5j z;V8;pSeKs8+b4IYKjs~fLHxB7n56~?FLcCe7s%~Yauy&+Frfv&beZ>=!};i~ zJ$tgH&oD5G_6AK_IyEN#-}`AK+j>4-3Sq;D(djU+T+3XJAI+0b?ZXCzkXS}=~B-y3P0p)hzw|aGxFt; z&0wzD<2rA_>o;ylDk!AFT0{XC6jmS25`~GE5aGtrt?YDM??O;bq9y3=^CwPhH0|)h z!4e$bA@x??tcIfm36b9^=B!iq@}gy#VNnGiBn?Q7%eSe zmvd`~G7MO&9JRP5fualpE$Q&GB#~5IkiW;M98a4<@I6w8Vo?hLWcn z2ESOm*jm1Fkg3A2ieUs9IW>0Vv&qFPW(z-FT+i3W!wFY0>&=7fG+`j?IzC--ti&vZ zD<8`y1%nQIvUi+j$gOFLC@TK(j!w3bE2+A2jeqoiOX=lr&=Dz?A;)Y|w1Sxg0#h<=Nxe%8pe{`Bk7!*+bGSWIV#gW!`oh=q-@8B@#BHJfh-!zb1 z{FYNlZz6RfGEMrMUNNYZbz*A|)vMl*QtBgk`OnGme_Fv9CWp zc%N%gI_q_DykhB$$Gk6y9+Rd+mVy;W*Q%7?ABH|{E@c)C``Q>qiEon?p@K_?MnV7< zn@Eh6{|0WcxnCn~BPM?4WaYMLaA7JgRXGh9$KMUC>hUA=uV#)6& zARCdE+Y=AVsPr0VuJZog{CPLOS+(1FjUh-B33)mWCffyDb06}cJ5mNK0<-)mBYe*p zy!ucE42m`=Ln%v)W1@l1zH$1m*%r}x+&*vMr~Yv++XA$ z)5apF4=hVJf*tJb%K{G8rIIEpw{TxIrZFa%tfn5a$R>}>XP0NpcOoh-NYv6r@4ZPe z!kUcB#`!WMvky!b>WlSZAO3o_f9sox8|$R)RGg?ba<1|pzPo=0n)3fMY4GF}Y%IiR z{AJ{xP^~D|?8tO_4IhutC=C@+_#$w8kYu6koo+jvyGfLcfFejUOY)&-SxZcTA_#|9Q@ z^AuIfQV`xgxnC!|$zx4Om>5mFTn>m}@?%limA{UHsI*>=?y*l0zrX5JkW=Bi7G!Ag z>agj=5PMVl1GWvNADbf*8ogn!FD)Z<;8>+m3sVocgTiLJSz}exOeN_zh*>$>bvK3X{E&ebk=^932vxf5e@q2T|1+315wQ{yN3Jdt?Y4l|Gut>?` zT!o8jKx@qK-c@~xtBqnfiA&&pqnD3-7ZF)CI0Tj@3bDEKjv6*93(BQeSk8EG^GYGUdJ z2sfZG%Q3KSpp?hp^1nNCVTm}J3#iA24Do9=xQRw2E9tOxN@RD+0;hq|PiSQ__YpP8 zds=Eni8eI)x#qmERQO2h4GPe(7tY8SSljgj9YTG{AgAGcZiE#mMFM4smxzd2qv24*e~m`AeVbT!I&ZW1mTGgfoRL`0 zh%S|-tsy-{+56eFHNPD0_N7NA%?ZQr^w%efmEK>CXm*Xx= 
zY+%K!ZB4&^S+w%_Y)O%uxL}9TXYEz<DW$*~t)#f)`FekKkpvo5g?yDJ^4p@c_O!$jAh9-M&?7{e4xs8jAv zVVvXyyD7%)zSPz9&*UBTOo)Y{M~}iQKl}pW^Av}Q$4~X zPCR>f2g12!JaNUkc20TkpDjvSvHW~@lBRvq2V9^vd=!D<2fdiz!C0L|`x<=Tb&uJn z#5!i0<<`fL!Ib3I`FG6N`Y=@>jGdjx$n7(JmjUNdhV6bHX=MUxT)U9v9iWCCzi~|# z^61Yb&Nz@im$9A|i@X-sg^Oq0AEfw55KJcu!MgK~+fOsF2IQXjSLA?`S%r9{q8ma` z*=*uyDDke7O_?cq{vmL=>p4ffXtHF#>=(b=%ki2OZADfl%Z`5XZ+TrW!-jk;oSWZX zA~uXF9!d}qzn)2hD$o)pMFwbPy5~E%nqi7wO~~&iYT%pCjppP+*!{-4Z(d}F^l9JS z^keYbw2GWy=~`RC6`LpaUCf>djb&E%czH6B2M^oE&++(DnaHQo)q^8b*07^NIIUZu z04<-zXcc%%J@XAlphjL6^N~6J4cXYZ(wbHV7Ccr-zaaGz)^NmZ`_cUX35WyKT;Qw# z%7t$c+*tt}SX!dOQKltWiDW$>*8pX)WI#GOiyu952h}ekF$^mD{V^f*%B>I-I9jaqn#arcoZM^Dnof{-J7GW4_Mnz9^EeN^W-<>_w!EiTX%4~!L^yM zo+DE971ER}*`5c!zz1I#%~>{^5Uv-h_UhifeRvl;D~5)DOz7a1`RB9xs0bV=E0OoJ zMjC$8CX}}oh^jr?@7gX~w(J0LxaDM~J77Mjem8GtsdZHkSFpZ}6-%hzMjB&7he_(0xF zM0cy}gL$66j@rG$m3qJ1DL8&c$cbHxLz;!Ymg(;|63f;<{U=YI0{|@y0Gz0HN>*|I zb7+;;&33Kp8c9_80T#pu`B;AMmjUY3_s0w%($o(|dtt9WJbQrcz}ZM*zpIrNH5x|a z6R0C=qJ09G_vz{gY%C@D6ns*D|Yr$R=j_}cHC_FB z`Srl!{^Bwf&}h763%jMwH(+>;sjqXtw6IYU`F#CJ&dSq_wIE-F$<1#poA^smuD(G- zCWJ7?B0cBijxgZc=NXaM_X3GAt@5kzu~P7%VyaJP)OR{JunGI%lVj6brpM*lY*9_6 zO?rILQZF*}%|I&F3Rq@k&CY)oVue)t!&xQ9wh01tD*uCrq*-mOFP&q0 zffajrtM=ORlM7!eVo+$#4zRkc!SpGf7S8D4U|2Tru%0nWDo?lGneyJQvL(CrvN(%Y zKB1@%Xj9MEF6KDeGXKeg;z+Q-e4urVy-n}=pvH?wLA?Ko89JW^CBce8OS9gUc*X6h zgG1e(hcV%aBk2MgdkGTY(|M{@G(084c_iT$eb_fL`Ec4WL+0};P@!{QI~uMNlFcab zHc|dI?RQT*GG;u{PBShE(e)3e=*tL|gym?~hp6uq6v-3j3|{`%?5!Xn%WXrMbp12l zB~SVyxPhadyE(A7iWWBL!Q~w(PVl^9{MZ$%CU$a9*`YNPs zC6940(#h(jUc5@uXEkT@WzQMSUN>U`Vww^;gN~mLE zLnCM-`@J^6-`{H>!1n&24@-;L6(OQ*@C8enGm6u7?j{aMGg!Rn+LJIWl+uGccb zWlF(WD93pU7He<%;SmE+;Da$B2mu-=s63#rY zaX%7sp1l!%;np#5v>HfzPLe~sIIy_5Kf*4cu&zTU$;Td<9P4ANv_Fw`#u`;TryX`^ z%ywO)S>jK%F;w+lw~Kjx>yHxHYL6bs+2+5c%Pumwdo%4|Rml%Immfj{i*LwVeN}GE zBpN^3qr-H8)#^U`NrUunt|qR)I=%t#I1YMsd)}a1nHs1{1@*-AE?BI?L%00y$wYM> zmm~_$XFm^j85~nCP0V>|K)3qw-gggF-b+V7UyPxb3Yo&zAOH)sim*p|KWALk`VWLs z+E*fyAJNC*^teQ1GjxNm=_lxPO`fQ=QoYv2No1p0w5>F`LTneTE90ggCZvnBqcTFP z+0f#_&{c}&$J^0qRAqt96vl!_+Yac%+eVbQieC{gdL=y*|9mgg3K4vCBW4cVqA7t> z@p`%0B~Lk3#TTo>18I=6d>pdx-psYRmJOg$v?!&c>4o$vU!{i8W(uDBPfi7y&`OjH zqZM!sBk2AFGcg7Db|L329wrT|)R7nk{O6-~`)^n8`?u9M!8{8E+p1}a_peyp&+dWM z+J;OI8-*rfRen)3KY$g#1w62fcW%JO1-0ajvDi{JjN@L;{6O>HC0Z||a(8AJ z<@_PUEWdk9u?)4L1=(bmy%qRy-ZZT`MsH;!j&JH%Rx~_%Iaac3;kEgo3$AJR6I?9l z(b|q3%{20G?i_7$aO*R0`L``Hq^zVpb5Gy@QUk*jSYGky=b}`c#QLgfMdJrv0!Z;W zIX$zGHM6)Cr8*21^c6q%mIfzc_T=O=&GL-n9)pq0U%(R{+4SN?(PHD!Y#4UPKE8GV zj+Kj%fG-9Vg5qV*=;XBigAn#)ZS;FN!(UE}!)Km*9uHowhDJ68lRL_2O@5~idohLZ z^sD20ZEpiSfukY6f{40QExD2N`k!%l>Ap!yPdCM0;5R+olFm^vhc?9 zQ#GgFh0lD0UN||t=Q@v53j4$SmAt4{Lo5w*R4Ql3^>TJPrG8T+?9wlEb806N`rXoZ zYM*;0|37=m*kXjhYybB)IQdmxk@-M&i_rDJubZpJea*E9zosIyv+7uLMcD4n1@l*tcs5)> zND-@wr*zrsM4)H|yW;8eY;D2o>^LMN~5#?If z=4hruJMtfq#a?g6v&Jhf73xI<4{l#1Q{kCFR`K2FyJR@kNfmx9+{A^u^?KLOkwSA1 zEe-XhsT&ulg{T`Uoztxk$nbvPv6q;`j{?Ge%c5jqA6jzkpJGcY zc(v=@y}G!S`k={|xQ^uaASo}jiq^z+x9~1wMrtMjYVU)eFkT|yOYX8X)l0W{XFQ13 zbNH+Oxg}4-p0Aue=j@wWn#OBm&9m*NQX0XEg!l7*D!*xT6pBY*>OO59e1Py-Bf1gp zmCHQ32^u0FaQ^T&ffRbada=}X*-SoIvrsY@w-YaB3A3YTwyQ4>R73^QNi-gx^D@KN z^QCV;TgVf|E#kkw2+CkW>#eEmE@+WG+-aPmAp_;qpoBOxk{T1nVB5}Kx-LdlHtc79cd*2O2Yg5%a zMqOV1&Jlg~bmc595-?l!6=L|rFg)llFA6s#FI#zXPftnM=1j#O^o}k>s|wd9-~01e zoNn1s1N`)g5Q)3CH)T7WsAhO9XUc+ ztYIP_ThMZ_(%u~E?EA3DCUiwa<{ERb#L9X`U4*#3i7qPF!9WNozu{0W$dxF9?uk=* z0DugXQcpFcp@&Wx*D&TKf&&+Wni>Zdh?~z1R!W5?IE+2l6Pp zxGwFVzsj~JcT*(43@t;G7?hKavSft4$;P`K> zRQiPbHRKS6L&Nn;N58o|S8(1r1Gb;MK%@Xd>wiHfgZM0qLi4j>*=*CDgAnIwJ=co3 z1EZCHMxDQhbMd=FupiYS2f_05yIOuIp@Ef^41; 
zvlnNZJd|_;cHKBS?e#ycrQ3{WJ&bMAG^je~d;gJkeOvFXgYm?d)4PO-_r;xGEd$9d z4tv~N7JfuaN=o9LE7{^gesHK&^FH(Su$?CA-jI_>-Wa6Plq%Yi6wq+?eUzK>uE(#_Z8o3#jfXZfmFsL=TpE?8YA8r*|A}z= zrlcl{(gfW#LZjjSWyJaFM6aM$ECfkd$Ky$|lmJ@W0*q!sjnn)Uoj6N+aA&Q;C18w} z23N(N%TVP{muem6T?1s2fkXFJKqsgyPiq0Ed6LaXuD>7V4zDCq{gNh7+QUu+PeT&^8?z!m^I_ys=`|s?{$$vDy989t~Ec2u2 ztb)nOW5wvD-Js^x4?q$QzThmGWq-*MR`L~x;O(FymK`Ued@quy3q!_4gp)~cP?#? z8k5pVpQ$ygbut+$j^jtX7J+xbV;EKerv*fh_?kS8P=1fN_E%hV0FFTtDRE8!IiL-a zhtK&}Hhwy!zou7Df&(<=8tT*F7CQbj3P9$d_J#4$(@-1;4(bKT7h`rC_|L*mGDIOj zA)T8EMW8uFh<0P?=RrM#Cj9c?qpE;RGKnuZiM$y(*T5^85`Y3F2x-)Q7inZ=zfikA zWE@sy4;DRoXNR)B8hub2_)bw1nZ-GBsWb_i0zq_6du4$Yc0d+h_=(P(a0h{#F;-!N z`--67(24_q%lw3Dzi{cDlXVV=EJ~VXf`CCnaU$GYjAvL}d~J!5|HUN`z4LHj^*Z@IAXng8$JnO$~aLCDMd-uu2kt~>0Ud(OG{o_p@hoipd26Fq3m zct*kq|7`JR75zFeHUelN6Fn$tlxK%=Ydd?-aKzcG;pOi_)@F zyp}CmxHv0zSwHWT*%`CaV)2`RgS@>cO7?<&-UCU2*W^)Syas1vrg?=2hV)4d4vq2( z2@H)43lEJ7@Jh@~_ew}h^$HI03Jng9^a=@%4T->i?|3IKFR!@F*>hsYB_xVf;jEwc zyzJ~Hu|Yx4KKpFovtfZ5nRA0eVq#)~fPQC^iS+l$`VsWi?af0{{mAp76mO!SrQZy7#w8c48k}7 zV+UuZrDUfi;2V#AMD+=d=o1_@Av8EPB0M%C#6LJRHaIwrkeg%H)J%Dd%9x!#XL)0I zm|@oDff1OUlAY2hF6Owk_|X1jWKPJ)SQsyi;+T0E*%?{$GM0D^9v|lA8xkBCyo?N4 zNN8}h*Hq)Lz*%4#7!n#GjAu|tXi#u;EbTrd;FY>GJ7dnAzFv#GW@V-{QdRE?JtnkZRBD)S$G5X^YYpXJ=t( zLoA1Oc53XLjLbzT+3_h$mMlz9O(EJ7^vvSfqE0L^3MDS62KDc5wW$(Q<1KX3R9sgx zO+~STGZtoKLT%FG!{UOhfz8Ihrg&I-R(3|_@*tDE|E__+L>M*7dUuRdY|$dip`awO z2^pzN$yg;N#4lZ%K07veQ0U(H6? zR47YYoRtk_Of&Hu+&G?Qbai+pq-CZ*lQugsGh-1|$|Wh8S!olNFG=g?JuWRPV`*k; zn)lx?*6j2-HCIRrB_Sns^@BiTlQYvHP^#JSA#p)V z?K+pXEWI8*ASHG6fIwr%q%TWbI5{C5(<_UNZAfTzT#(fgGbz`UKUpuOSxIe5dRZEe zWk_0lALt(CW(KQ`ZXvX=$mq!Mfbig$$Y6TJKt+Xw3dc|i5sqPiM}!DC;Q@pyiXJhj z7%)I^1SC8 zaAIO8PD}{>O}NESZZR>W#u&;ohO{I!I3zLviW>lB4~Pti3WyGf34mxqLIOfU146`1q-Byl9R0kpS<%}oZE-5L7A~IK&wJv8#6Hn<8)vtK(3s4OC25)2%SVE19}oo2&Yq3kl5s*c7VQFbc6w@dypv~)V@3_j$V`7O1KX4qV#iNh5WxqTNN$}zJDqH94NN6FGcz)c zG>&mu*m-W+?2jH%zIJBY_g}+oH_QHTI_ZWiH;46j*SSNg3XYm z=&|j^w!7@K#Dyty&ADRe(=ZD0*iUBsNd+2YlxSR#QOu~PvHD}S$<0`qX^5<9-~S%% zvC=i7Cr066Tb1$WV+W*Jf$F^gjZaNkh-<|EHRY+*Ez?B@TGM>ZF_icj@gI zXIvB#!5No|4(TG7&b1OAsM?}O}fNO3rsMUI- zjVsdHi~J_awPEgcm-&!pW7hc%w%OW3zh;G6Yo%|d_hyQ325riaVix>=p}MvESa z2|-MUy4qL|I65@k8g@iflm)n1cj*JG@N{LmW;aQ9F?>n5R>ROShFrKur!1;@(yqCh zSyg=FWucK*+rTA@0>@-zWsj$^u*_{84bI?y{fgUA7m2otri%2ej6Nw)yBZtJ-*r7~ zpyz-6niz&KUwwi@CWJ)AhR4K)MOxTC8kA_m*Tgzc?ftgdg-J0$0U(nyb>7HJ5(J&uVK>v+5_5U&juHD&sG`OBycs=0eJ3HPCG<*gA zn>BdJlG*=1+}-Qzrr8~SVDsJmZ@IgR3%`M5l3-0CZ~Kgo_@=c~aByHK)VO9T)zA$X zFWsKOgWkxD)CDz+=A}yj_PhDLmAb@HX!b;-h?s6gKZ);l81xHtA0+Y$4FpHTiW(odg zGHLjo$}AUoR8UNC=Xv^zVP;DN9uf5114qOdu2b5XmXfH$0=BE^ybEo0d&_g1N_y@{ zk2r{RIMH+PCp11njAAEKOk*jPjhf;NV}j<)LCHcK?L{g9^3^AwnjoQ+^%NY9IHVU6 zrcVuo@~p-=19_ld*8bTPl45HmJpZqhQOOY248~YO&!qIMMvlQksYECx?IwL8QJ(E6 zz5|62NdYDuAbiC!Amhz2HM}~IQ=5r62+|;@#wgK5+h#)-=0GQwG7Fh(W+u)vpczY< zMN~(!flKPG{m&e>3Welr@y{H=SRgi-oGxCva3TJXj@u*d^5WT9$+PCC;!LC)mzo`d zGun*_h%vk{WAR*^(|;q==46|n56aG1B%ZG#2;=6?GnSKc`z!?NP0M;>;m9nUNi6EI zqh$cM#8*a7k`l~#%jRZgEL~y&vWMpZ!egf`Ub+Z0E?s(@47|vZ&5Yc#O+TyfK zTsvn2z>pxXBC?Z!OdzBrY0+FS{P}nH-v*J5Ck&Z90ab~@CBnCajLMpuW&yEJS(rT` zWv&IxEj0~vY0I*cvW86b^PH5Gt_8J)41$l5VGV@dMT?5s4S zVvsVYg;1nAP?uR5*-)rDaAyqhyxQn=Hu@4~j0?V!{&MHK?#c6%*%|6gM)*kgNvi1i-)t5vqx)p7Z>-|?L0kOdwRC>aH0RiKf+1vqr}zO z+11Upg_~Q8R&H)?t;oTxl|bUr7zX?y#=`+!T4f{gW~3ex8xIM87jxNxKPKsqR@zDE zKQx&9vq9%!+J(3e+87I4FcMpdjnr0ZC$o33vvCPSp@)rax0WGx1IMO# zdOy87)V|ewU;R9&dym%R4$le;&wTOXU|G+#jhOFj8ux&+R`@3?Cm0G6S{@8v_%V^_V%<3UHx7w(t^X|F3Q7t%zBXd;@ZLE zTc>7)D|*_K2FQ9v9Jz!>Y-&4u$i&F(v|LjY>TiKhO%wQAjEfDe(}U^Hs3)9wxoG|2 
zq*2b9ol2%1N}Zj2c5oKYEc+@-H=Sqfc_!!LxN*nNv$-2*&aG%2J+@oV;K-@^;|X`4 zt@tpsmGii99YaHJs}}ZuvGoh7yKWG26 zd2|1Xle=Vn|15KD==m!=bMpJD@r!#N_;K2btmh|%pWD9vQox=YKm3?qwr~G{hpJ~2 z@4mCW>N3xqU-50_?mNS$WhcMvBr93o)qZ8p)sK^g-Fs%& zzQb8DGp9`*J>-Mkul2qB^qis&?>@8bGp`k|6_ zPdyXq{KLxNi>J#vs*^WHZco1Wbco@>KYo3F`E8yVGUPR%h!5J^w1^v=@mU_%{?Czv zo<8O0vDD`I?z$ZR^E`8IUo!hm|AL4iJTrOwPeX6Mqdhz1wC=krekTL|$k?8A^__QJ zmc^%xId$~pk)4-M{cw#N5a(f7=e~LM`7J>6}@gPZ=h*LiqXFJous{zBD)xzr{$D>bwC9;kt$#}z7Ah;8F84oII{VSIl@Av3 z%+^fR^Q(1j4Nmu+W#2yy;qT;rJiR3@(1WWgUAn@#_u2RL8>+@;ulOPD>MK{~f6VER&<%vX5@RP;*Pp(_P?&1*6)k`AB89<-!BY)>fHAmbVX^dN5B4O_v?`(WH0>m zEYHj>{6QDL?S<4oo_;3ct*=k4{nKH?JHO0U{~q|=D_LEyY$$$mNzd(fW~%;?RJ~=+ z`LNetdh3rWpS;vZ+JNq#y!_RfqdsmwzqRu7kKXPWpxJkS($kkZa-)fS-#lNsbFke1 zVr(?e_>5%}xrBQIZf>aVUp0b@^?j;x1AAmrgf#zXQU5=?zIXb;{2$kL!@T*!{bb$? zN0*l9BuR+}zihR3Jb$@H#6R(jQLtB&-F0)44{xYaKi$*&B$viBb_KiR`~0@@$W5Mk zBCl|H*E0w2|9-uv;=N@i5x?NURemoYyL`vWEUwsZ{_?0RvYura5a-M5K2Z%HJ#*6W z&5rhm3RX_su=!bbxawF;*N^5NbN_ke35SQryKD?ySDmBtUe}Qe{(6($-FLq2U)*-! z<-Pl|_P#deDC7Rq{vXmiy*qU8%I{NWTz#Jl4w;@YYE#XVsCO>%%&7Za=e+XV%EYdZiu-??bMp5cZMUj2w6X6L zM=9-QJ~%OL!+oB)8{1X!@|ZlHDc(@!y0P`h!}c4h596YsUiQlP=%Jfm$9_dTD+v|p2sVpwK(KdgoDm~q6*}hl9^vMawSA_jMe?>^f%|9jI z{#sc2&cc;xOS(=w@YAo~&2|YGI=V~Wug`UQ@~O+us{IYeicZJvW+XdjY$zGVz27F_ z$@uQ)J{i{TP}gfKzdznpx$<7B+W)(~+YSsV=f3E4-IIHF!^8~*eQ)hM_ro0@Rp}d* z_iib^IC8toLyvwZln0+)-g@y18H?WPxjn^g!E0V`4}4E7Ps^v$wM?^I)$ z$Ely5P~2&(I8-y1Uoac0t2ILM^$R_{_v{?MNutDZ^QqKp)xiBzdB&w6 z_4J&u&qr?Zm3Qg-@rn-*{A1GO;-ydZ-7@JPpS@Ap>*oU1r=3-wo>-C9I=JmWcqaI# z{+FKQmf#llwfV``q3@oXpXWKnIh1GiKhs@tE-^CoogT~HX|=UQr*Ul2bN5<$uG+U< z``T)5?fJ^rxFJE>H7|)j-Br&lmg_M^pWDawKfR)SrXq!BzVFg8Jyz1AQ1;1;?=CG^ zx~$dDx7z&trQyZ1hjr^O{~WaFUP+g`Q)bL@e^>KMM#NV?x^}Y5dOz>$mq)z*V%pS2 zMVTx76%JXMe}o+SqupUGmp**O%)1kMPapb5|A#}_5m)m@e7Ew}%IX|_$%k7)4$i#b zG(3W3Hm}{lUlXLf_S1F)-nz3P`26X-7v>hIw(j8G{qWNjTOY0{OiE5zTd*>JTU7bW z2kG$xRZ`%y+gP_4!h2YU`>mZEAi4kNmTj*oggwjiE*+lKC|< zkz|_7L@F`zkUS@?;Vp0@PdZ?OFa1l#+5JmWByoXDetOcl#BrpUzy~f)AP@ET@vUoI zpc$|RCXP3M=3^4^V%@4Kz}zH3u&5be@;7on@W%O249C)01LDb<@DGfw8x@S4$g>`k zm}Ox-U(zHcrn33ACNRG6J@pwQ{#;l#E~ET`uS7B9#!KVO)&WRAso>O#WbLT+=GWiA1X z$g1`~F93SrOUh>m60&=U-*|c@5^`QmB;@EpTJkqYD1`R7J_*4DM$7A$5Va-zJ0)ba zXbkWoGQd9x(}nSdb)pT^`Y(Q3K^I!#LM(h*!uIyx{qdqb?u26}^xBDI9AN^mcZ7|^ z-Thh%&fU??)y2ud-quDU$zgcLNg}m#aBbP9eaFsSu6O0T@SUAHb!^w#!_`SlE+6orr1I!Wqz90=0E)**V|@FVijV z*pdJr5}=)Po1T$>@G|}LojnQg5dqq}w(lK(oR?{z6o7fSGE42H9)xuxTglFFadw70V++vUu%Gs zCgAQGpvnaN1wgS!Y7=l*4NzkO?yLb;nSeWLfYm18&j3Q6WZ<+W;7>I`oe8*|0EOYu zn}FLe?jq1&0{%$GonT5OtO@u7fI~ZaINC`ioC)|nZ?Eua@9to02HVOz9PiP_1;pfb zE|p=_M**fy=+e@WTzZYL?|9jlK5Z$U8R$-M=N1kyOqqeSQQccPfzk}b#rw8(wyObs z%i9kQ=-_H^0T~+9$<1DBA_GoRNN0Dn%g6|j;i0u5Bf`4W!Wn4|(n4mzXJlB{$3jNc z!D)t=k43XInx%nj1!iTlCj2KfVI(APc^#_-Avbm4kSnBc^|L4Uen4ujt8|F~$dJH} zxb{knD+xg2d$%XoXIF8-ku)_u`?(b>cdv3?x#GF(^r=aaJ=>5gx{J6<>6+x3wcHG?p?F;`T~PMBk5Tvr40uplnJ5Va3+Weq~8!=1j2^kkFOt|q))!z*%?vC2LJD{O!VY#%i-8zFn9OSZ$N_c3T`f;IHx1j;=@B)ALvy$*S5|CakfI zD{ZpA_C#r0+F#OV@$)J3DWhZ9`_(yyd(2(#j^piHw{G6NapU^6Yuwe=S1w<^bn)Va zb7xPVJbw7#fjzr-?K!Y-#Vt@s?my7px6QeakKrDFm%k$gA$PMAA-sC^3MIU7{@m#k z$B!J`zvq`7+kg0FU&{P++FFN+qcm?}3iZl|0Dh9sU9 z`O~NVIC5a$uRFGF-Td9b1vwzqqk~c;vvI z9Y21vas3Y`56#a7iToj(%RFH3Ge#ozR!7pb>qPjMFJC-=_SEqs`*v^t?yJu~IePkF z`a{rE)TZHX@i*~?`Fa~lbm`)`KTjMz_}fogH+}Zu*2^Gz1fp_E#NFp}csk4sgkVW^ z=JfGH`*v>K^!W$x-z8LepehroSW4xDL9S7%mQbm)r;h&q%MY7A`{3>CpgAxvA2g*V z8lo6pW;CQrm(KtB$H8B>ef`-7Z=TAdGzFk3dBi?sFupw6XH+NdRtHfMS1w;Td*aBR zpT1fD{#ysi1gb(%6*oWy9lUrRt=sYK7azQ_QzcN%Edo`MNF@l}i|7NU3YjTauUtBR z=8uEBw|@EI8{g`TG{vAPte57>CA97EuiL)*=*>@g6HN(d3i8-T%tH>t%he1o5qjI} 
zS4rEB?A^Y3-J2irHB_ab%C}DCdh^D$tC!E8KDzJcZ$5eZZH{*^)m0`^@mA`(;dJft zh0{lW-?`;cN_0pLW2Y%W5#e1*l&B){+;|g^7OhI`*o5w%m=r}W|umvH6&ojd5DhSx5iJ9X%{AHMuxjn<4z2{LT~ldmn{QKj09 zl&HSzJ=A#P>V-d#{Ht7*|urjo2#B#y!gdc zt5(0Uar?oG59uUPfvmKUFJKC*3fO!;kE280jE}l8I@8`{*{PX31z$IzU z?Ad8^(-$qtUi-r-OfWTQ3+vFKiMMZDI`;?0?DMs+uUwY3c;Wo{3l=VZI_sI|R=%|1 zz<~uC&_Rm}^=37;dT>7{=kA@GSI?h1wEKsz*8TIOsjtu%xa0Lq3;K-n(<_Iy$-cr!Aj<@b+sjtzPjw_X5AtY1NA_zqaPBci&sT zXI3>RwM9%JTT~2cHlL}XzIXTb^~-0EANb|_&7Xhx-rH}iS@XJKjpG|{{qx=TK3KPI z<3U2IDlRJG3t0>do5w&0+#lS(cjv~H3z!5uwtl_gvvur;+(+DpjvsxzZrx{}tzZA+ zZY`)Pic3MoVJNvg&qog*=03=|d+X}Ovws}iv-8LAaNT9U;x_VM@n1T8_2rjeeYJ7Z zp<%e(Gi4y-FeW_u$Uow9z3<<Px81+&-0{=)ZJ^%v zJE78-aK(H{WpPCjTgXtk@?!U{;zE4#k7GxV96iDwbv<(E(1HE?K)&PW?LY4$l-g2h zNolFFq@tKZH)(qxxjoFqXxzAVx=pe&gahjdGh#C+_LxX-c2YJs7EPMw4@Y7UvS|g z*FAcR;;%SeI)CoWsT0QzAK3dFq2bHqsHak-at76-T%s&cZ0f-PhGom@d9q2 zSRT+$vE}lz%2HJcTg*U|5J+C9N4dEVhv;Qr;s~iiG55ed>>+giKXlS*9-4)KryERI80x86lw;_G`GRpFMqo zP$(%yH7IzYaa506KSTpDVNs26FSve{P!Iw}uBfOiSC?r^*%BVZL;kRO!-p&$f$Iak5U)fH+G>1z?Sq)Ja@E;Nl)LugbYzM&Sr zhY_C+5+rv0DnVBOT?6*48G0)s;9BUnt`lrtsZeM@QB`gL1rPNmBWY8RpN~23f?hyW z^ul=i7C|>Cl?t_719Y}7y0cMCM6v{1r{op9QX^7ubt#-o6nF0s0*SIxqf{%Ta)m-C zH&ifkJ!<01B&B6sDP!3-#~M;XS*fZ5sYIcyLU;HI7E}U>dr@H_>5Z}X_XxH^r2)1^ zQDuSZW<M~3&(5Tf_l%-C= z$t4x#Tp2H7+7}j(>GhDHF;x{B4G=YGr5=cUg{-`+thBVG1ln#VAQC(Sgh=p+NDT^B z?od&KW=qismadA|@L;A$Q9}udSpw z8nsrXtz-!gIq3@JP^qS+YE3m-psnO+skEZJoR%t;S{3R?1(cSjrO+Q*sE~_{1yY6< zG8LpuqR^FtQfQf0rK=PQDVdzrRe(?frqz@ygI+UoT2`Ug@mgM2X;!YQA=P|7-KtrmiMx*p9%Dzkus0tMj3a#28LX~UN@-EdYz*hsHR-;ohB5pBoHI#3)#-J8(rDX;KfvZJqR)jNvb9%;54o;-3 z8ZG42q5#S?fXj7NXbpiI#i9aKqSGr;iKbeMs*Unh7|QgO6k1hf(1@c|UJi66m=NZy z392du56$Na$K}BJ@Y5^v#;J7?iPNCJ{%?LA~!T_iOP{I|p3bU?4!xd3z z6`-6EDzD^;YF!VbwZ`Rz%AIs1j%pr3kqThzgbJM2JMr-{vX+(&+Uj zB$mHJVO5a7C>ObomvDRzCkJYko~2-7HZuR13~8}2oi*N&)(<8zf*s5B!^hl^MvQL} z_*yrlMN=bM0L*EEAjSCK}T8_hrQZvW9_NrMrNJc2X#nqo-P zJ>YW<4O+v90j{8{Mo>p9_F|%wn-j-HSC@Cn^)gt}(o6~5u?7w7DlNJWXSudg_?%YFpJ*FiF8!qR`8=FsX4Bx^i1Q|Cf~+N^MJui%V+keRLE_)9&6S z%_256HMNrA;i9fVO|+6(7}PY>NM^LXSvROjRBEMYQ`5+;ThyQzqyc(7L&I2xX23dB zzf}!$gJ3ttszwowR<$aHwmz#GHJ3Hfs#aYI^JS9_YbJBqzhGD+_B^A>>@OPDU;v_K zyuZV+RtfWmU_NY`Va-mk9FnnQG)*zARafeXZK?@oHBMXJpjpiTqrtysRwE|4$C%Zs zMC%x_*EPbd#=+$F_nXy-0kE!F&1mx?CcH+P)rg5q1GAUT%&bPtZNwZVz>TJ_zh+i5 z8r#gxYHE|Q&B(h{G?ZzqnAJ!NwT&{X71wK0Q@}=6*Q6#G;2LL76Rd1C_B3g^fn^Q$ zu%yA1hA+eF4ZPmopx48&W?*69ET`k}-?rL2d;jY$}+SUXo zwl=V@wLx3{N8Qz`@7p}cxsQie_8x!7fv)+8iH*C)UuCXvms`TijXJ9xuI;Rb_#2OP zI;-uh#=u#Phq3H&PHuH=Z2D#cTeE?!*}(RnG_dLN_@;ZR@dbHiR<8d6FEyoEb&W8p z{Y4M8ziv^h{!8XGX;EQOUZKH^wkiH;1@LU+;AF#7U%ICHrTmI z@z~o`bDE-9QY3n*5sMoC$V2c_qZYLWz0{bJ^1tk-R$g3&w(`{G2tPGA)BTHnYK^d| zDd2Ht#ZL`3ore6>h)u1IpW5~R3O}_5ENX%+39hh~erkH+MAIlgwHxqnd%T}o6D?|w z^;6T7RhxNuwKI8m87*qmA*@k;YJb(DCYYqCI~;jtSIJe>@vPB)YE84K5yvy)*=EH= zM*P%jIzigP!mX_#KehTSYQ(dx5f(L0X;-PNR4U<+qgObhE~Q+qz<&cA))+Xh!TY8{ z4~v;#nrj8Ot|H>DMSSoCzd*S7k!BJLn~IvYY%1UuWW~rv{9w^F3nN=&yw*e`8}UYa ztdWhlt^HdJZ2#Nc){MSw)vzx8*SM`2UDB%HlJ=L~)|9aM5SK9GOK06}O(FWeHP&s7 zx}}->t=0IwS@T;nH?R@kxcdCojGk|G{nq|l4Qx7i!+7-z*JmkQ|(| z1h~-%_SXz-*8SFqhguap)Fdj=DUMj!R9g6yS+%fLp}^?424_k`Nj;8hO|`HQx40@a zMQ7%*RzXZ%)|}QVV7{yEw5Eq)O^2@*hBe}@CK%RWQKRl$b}*?`YqbU#eeK{chhDI? 
z>}pMOTLZ5;7B#ib(rqoDTHny4ztL@tSk(R%w>2fTlv%p1S+l4SS2hcanl-mIbBo&F z?6zi4_eFN3`zu=@9?XC4 z)t6>{p!4Tnoe!q@C(6^s%X~gEQWvjiGGA8<^Occ0cMd1#89gvx$$WQgANbuB-d;DQ zTMIJb8L54%UKs$Ki0|S80Et56)-`tjHAEqLrgu9>Tsm+Ck~+5WNj`srcVOQS>*fwd z0Lmn`t{wbF>`>j|ow$21MfPk(;=rhUU}!;_uk7}%fH9JD_foz;N2QQ zVmM(Nzn!yZCogXwzX$$~e!Y5lck)2w8I&Q!jnqcw+`_XB*EX*m*VevGD|Zr!$bbhQ zTd9q$y)4JUR)+XXQtXanSV@kQ#YI!iw!61r+z@BL8Amt732Z@wP8q#> z$@X}%LB18X+yxkd>u*qLvye}bJ z`@4%UU;HLs$^=Wm^8yEV;|6?2dV&!?2F504^pL?f2iYVH9q)r61&s9Fl1JAcO7nB) zku&C>S9pnHCjI#)epXDOKi{~{^PH5gm@J&#4;n-bhDy>2w^HI8t44O`QgoykM`Yft5fU5*9V! zbtjOyDyi0pUdiha6M{2H^eoHUa0V?c=QTALDhPzf+f!c8=&&P#R>xIwz$g-{)KGDd zj0S3+8CEUy;DMLNk+{+xzYORI{+@zK(9p3<|}I-mzVtF}>@5Dem&*ps2b zSBK^>T28~MS(Q{%C^mxy7R%zjYdJ4vHQ>OhY%0Z4=^H%dVK9{QHZ0sRSiDG=stYKo zEegpHb7%o}eDD%hLyCE&5ka~J)N~|iF$c~ZHv33b1+)jCC(XjD7du)gy_!WauP`Fm z5Cl@riRBGMfVcf$dv*D8o&zDl~iMkrwyl4QlAv5GEeBi z8XhZe7Sfee5qgxf*mPx6p%BWYYfw8KIYwW`OI30_adQZ$OY&)ZbW&wu*%gN!0KVu+>yll&h=JdZm)BG^s7YAqpx{YO8oV zPFY@2uB?I-l+Y}dQTc7wRBNaH(t%kQ( zl~mcFj3L`|ra(UI}sDrFjut=6b84>hJ0xC?Z1vkt!bdfN^bx^#fN?%tiIRs3Az_qkORzxA5M@A&s>ner3qP&uW1oWg; z1PS^9KAhHg#g@LVqJC^}dk_ZLhy=aX+UUgVy7cu*V8PQ!XCSke@**;T+5(N!8SfL4 z*DLjMi2_2hHK>$ikj=4b?kC`f`O7I}mALK_M4^j>9p)*n!%|MTVcC#Au$TvmN)4__ zCP9!&apN)F#w2T01_(k5%c@Znq?fp4P$c2TY4auoOXN=Y7zIIUV-2`t7R8VosPu&j zmAR}){{$2cchOsI365>-T{SLHrrwd7aM1=E+*x_adKp2ovs)|I7$rc)PT+$J6>H1! z{GEP!M$g!^|E3ZbdcKM`tA>SwOLCtiHeQ={t1rm01q-DHxbfi5%Hqynv}2?es~#Ik zz~CV{3DO~bTU3h+3bL#)TmH19TdK&Wst9~S;M~y%+o*cbs6%AQBk2p!FW&X4zJ0=Pb05FgSdG%PGUA~Gr}Iyxq{Z{N6n{o?!gA24v>puq`4 zh75~sL1=>7hJ_QHXcWf}7&LfD;?ShwBS$5V9Xo!)6O*5uI&J#Qr)H(5&6ziU{=&r> z*%RCZqEIl7iiznP7e8RYpahX<^cWCLoHTjL)M+zjrlb(6^aTqSW+b~3BKNjo5m9JJ zTs+!9>S2d<9GN_J{P>AaOrA0Y^?+jboVoKrv0(nf(Uc~rT_mN5Clo`54jVLVTw2Do z2_wf%n*1aoc}gHbi-3&7VIHzeWhHYZpsN(6C{{hb4Vu(DCgq&6|XN%tF>3+7F68J9W-*O5obAZ-2-u zA#v!i;lq=@tK!?Q)do+_1x6SPb~UH#qsW?>&GWk zlHtRL4ILIO%z=)Ri29`x5~$yTc_~lLPJL?1RE*6N6X(CZ_VttzW5y(p9ywz8uwjWq z5~D<-DYNKoN}mrcPoIb3#8^EwWBRlyQzlPH9yKyqAW0fFbV$PB!I2`-R1(q#ErXV) z&z+M-#&^byX;Y_6p7g|o38d$vMh*u_;*i0E1`Z4th&s){T%r>reeT>jvz{b!7BESB z1`izAe+W90h-wB7=-)q#5_xudDxJ(&Xe6}vsYSa_9zS-BJ1RYL_%L^fKiJ~H!9&js zBrO9;zqo!Ol*p~qEHY&Y33@Z>gL2;W4BCzg;`{ZDjSUu&W-lgFl%S+MHQ{3*Js96l zK#7Tt4ib^l7Gf^Up>tuzxOG6v9oH8%^^J*+ij3$ZBBeqGm|9e9$sZD=F)^qq1}NcS z0U}b$TqDYqCr6PsJs2Go8xtKF86FxM(pyBDnTA;>N^->e1ZiXhY6=fWi1#2r5osDG z<_tO)CQTgn9;t3bsDKjKr;o3I)Nu+KLpmKMj!%3CNXm(Us3|xwFu=dJkBIan88b|& ziQ~tQP5cB%Rg?TtQ(!ChVMiLE791^!0 ztME&HuWEUlvccZK;{Bvz|6MzbL~}8N5)%^!M{UDG{O2yI*bXkq>S(Y)8$L=z8Wfjw z3urHT_b&#&E!}*44JK|b?T3wkA}1sa8aNcjpaW3tV z4JEIX_94S$f{))OfGnh5%`xz|EmuWK=|WsP64E-KON7e>vLA*Ff=v*=FA^EG#6*_XD8i z5AtUH483iA`-Xb?u>poYHi1DRQrnQ=K)-&u7yr1&E#VUXYg9MRj1y-4mvLS4!G|b0|F2-#;`kACd2o0fZn8eqOF2rqH zd;957s_&zvgwoz!T(nFz@*CW7mN-i5S%6Ei{4jO91+ttLS~yNMsNWDdjv zz2-xf+vcC{ut0Xj61oEx(!z%Z^~dUaC>CmC#!d>?;~B*tQS)VvF0Sq^f#1GE z=dNDed-n3}9S|5C8XjpPNW$7Ld2BElggz1*JDH=Ct2-)a)2;&=(78+ZUVZ_6f)EnBr|+rCrhwq5)OjgAWJ5r8H|Mq{at#eL$?;iE<; zCyyF2d}IJ@U`(LINYSoC$1a`Qjo+59)u}GL8r}z02ozZ5qEVn2Hf*>*p>dZuxwY`{ zL>(PFckSY}7OU&FCx?c`^~1Uz4;&O*xPc7yBlwOIR}artZQ6I})P?h6KCI%~Tpk`C zk9Er6B-(hg4oe(7XoxRCc9pb1>(Sk=-MV*9!P>g@rP)JAOi!OVY}jbhhM~CM3>+}X zhv2$MJ=>uTUA(&Y=;ggn#kW#jzEZ3$`Qsl$lF01~c>Ux1_Y^SO5RTqGe0+T)3d(p_ zb{!r;1`irAAU>{dTz3JbeOE8<9zA_~`}@BIe+mTktS~HLLPLUsf;tfdS6koUkWf5&VQ~6Rd{oLi-;TyY zDKsRgPe4Eig5hTC9~O=Wt-g3>O6a$sxQusx-JftEi76$=I;@Yse<1D@0lj^FdiLnv&C9DLrLm78 z4_70I9#M)RxTp8;hr4hzSwEuF^rt^+orV(!Euu%V!cngN*W&aGH~vM zweQ@CV6h-?b2qeY#}4h<1>%YOpSfi}hbcaQd`FHNbFnaj_S0;<0ohE2p^F=qTS6z`5Pq9n}EC 
z*~!6Hz==!1yoisZi}YSEkoNR;@o=Y!KxB3{0!}QLK=-4fu%7T<4jg$Tf>R?^2oi_d zNCcc{viORQiip5Kb$_*-ch)zsS3R>zH z2&MKxS+}kp8?fHp%p_vr!b}%Ref)iWNtsvI_P$q8x{&FVkJ67wsf%q-Kg@=1P`l18 zp1^uY<0oZKgZ2G3q**RD-cV0c*r|j2Hz+AxDJ2o*ARCLd z#eR;L-h0Y{h)E`KgEGq8j-cqZ$#A)eVM!`-_ zj?(sb(K>cg4h1+k*h}qppbKx^qYR;uHcUSa#PN#*f!Ii(e@Gbh0vh{g>$|uaOGsJ~ ziT#fTUUIqn9Y%%K*BRKq&-b{^R5G?+r}^(ZZZS$m+Ht7;P1BOXxU{JGOHv34S&hb| zWM>sC3^jgd`B=&zO+Sa)K4*}i?+R7xfh_p}W|0re_a=JgoOlMN)X9_c! zc>?M2#?$_e2HrTFO=V^RD-C!l$b2;hy`PBV81!l!GlBjZ1)d}DdmQLSQ8|o7>2UO& zNQX#o6lx!ez9v%njlhw#X*88_Jo-KWy|>nX!TxN${|R)&37^M}KamsBl|))U87VRX zWb9ID|7SGN|0l)%r_|g3WbjKu|Hvqlzr*n%<4g3Aj4wG3LLW&sfJo>S$q^8VEC2z} zJ+jmf!16!5$>UEJ^FlDPw)oYwk1>du@R#K4vV{D1owcg1{|erbWA$2%3a`NA*YFb5 zQK76_{d<1QoEqZ7yKs+46#`G_!l{XUOfmrf5K6%>~%)zx~IyTVuEUAK+C3VTVc zCphR-H^IT)qv!ABmnt<{19uhlGI$9ni*u}JJMeBG+rjBO1r;hVxdtY7oL*Cwd)Zp* z18*UrfYsCo$1guBBP_1dcIZ?^w@&3+&4W;vNzK;MgQs$mSN9a?4n)x-5BMX=;! znH1shp8}tocui@~v0BBWi+dYRk{SOrIb!xFa&CxPNA+m1GLt>QAO{==7s9H3WLsj6=Pt6*?h=ACec; zO08gIaj%BOj`|ZV7DIjVyDK=|F;}b&E=2BwDrP1jjYBMP{W733V5hgacaF8o;nHihf z8*_cI6RyG35#-WAn3S6_shdcm_XF(RLnP6L5=k5quY?t5oGjBy5H9!bHk?-C;Ge<; zSq?5TajYAiW1@vA$HachhUFAyjcI(wi59Dj87KB#epIf}v2f0zcPXlRLjH-WLKXJC z3TY&Gr1ohA0YN^O&K%pn3mI=dFlL}|H)WtWdLv(61y>m}R;pq>0=-G|^YT?lg25t= zDh-GTMkL?E@dLZJZ`rv1lMlHMm=8%_3U^apiW9esDzz+eh9Wml+N_FV#7Tej;NBhO zEyGRz26w$Jy;odB@YQ375LEBmjT^rB{EH1pP$49%a21nP96tA;Orx*G%AxSlog0_V zo<4P|@+5bH{ewTwALEbs9y)kn@2@*acA^cRf3b1%x8MI*JA=g^HwsC^RAvq1kh{U5 zDDU2t-?wksv{CVu;Y-5?{tMsrpMUnrx)0xf_n&XQ`Q|@A`f}UO-}da?YmwLD`1#z5 zDuV?tA&Z)3uaI0;X8A3CyYNtEP*x>H(h(U>3+KZ%ng$2 zr=|H8;$P0zAr&%T7zjD3Sa9pL5Urn7tZZGbKgjAoHZHrWz+? zd$x4_kxTawaD~r7ULjMS3@h|P+6;rht?h@ zb;C|!g`K?o(S_eO-Gk+dIvID?muxtG^}(ZubW^anI~;>(Ms5_J0lRoXo_CRu3oh0& zxL9vEe(eFWGqp7CDJFwxg<8nMVGua%p`8~zvh8%m8;)Pk<&fzEfeeILjOc?I3sd{C zKbWgw02f*K%<@%^l)*z>Siz!24GS|4#x7K<_)0sayx{V_&2?P8?X|^UoxGWcAP1f_ zsfBURvlTW9mB^$%r*J#)I(0LT#4cz_nTb&f&3G9*rB+v}>?#$7Xi|L+?4_mLe&1}}+1$07@G2-SLg)#a5X2>q<|MJgi}-$vysSH>+gBBB z1TUnvX=$D^#0slQT)RY8)_vH!z=wU~Y49p8LBndep1sou+Wp zpLYt$)_0Pr#Fgd?$$MzV&UjJ5=5#xCCE;hqFWwD)@te-vEiNlH<`Z!dIGHgr%HDtg zb24zcx_`43A9{Or!KO3!N=P^dG3kgiY!cYZA?V6U=_0P9c6>L6xuY?0HW8)mb%8ZjySlFO%Y?XCSen-M8r=$Xb1h{fz zRuqRqb6!SyDXJVaDoE>V>puTZmHC^`<&=VxIV)MAS*u{+q01rUcTiP1YE;G74s5o* zLBIhU1U8?$UkX+<@e5i;GqKp4aWnN%PqIQ_bA`;v(b|pzdu1LNmSKN@G2){m6xSL% z4Se#AjkC7-z#f|q2rCkR(a6i8q}U28*{0y8$p4x&Nr6xh5E_a9fJ8E~ z5-XfE%4%7an#jw_js{0%{@3Rp5I(rv)+CCtCMz}NQ*!;lW^0=tWRy|4g7}`(z?hB@ zyOq`;zl^5Zu}WQX{orP6+azR_1!zz?b-7m1E&dK=R{02dXbg~&k(ZTC6^__cLD|8> zo3smAYGh?rT7{f7oUv66?7onx3I%Rtrwd`{h_JR+*dfV{*sS_@X*gh)#@82e%azzY zEo8w#(jzOp6iTgQwT6hRfz2E;bs>0FR4R>maby){tc>>`lK(|kttq{6aC7~8J{&Xx zt4dRj9c#*flsg)c*Vhf~4RO>I(RNj;5H)~g<&nwF+GU)DVvZhzkzu|2NE~qM_=fZo zUi3m1A8T!^P-EYY(9f?M*lr>tydKIcEA=$*k4#~uWyk{LsHGD7x`90@cGc+RLwOal z0r5&}Eh<-nO%>$+bwit090fKi?Cl^CP-RMM4N~eHv{hv{4-roFY;2Kfi%G|@JBB7A zGH;Mk6G-YEw21PlfAfn>Tk_4tM_{C~8ri@|W+khSLv=F28wbbG;jOWQNjjE|CW5!Nt0$lzJJM+&T;<^=|2*Z1R*EwGPoV zWGZVCSL$5^HVth0v17}=xdb+dvSHB{;~g1Ep;X4!G_Wnm3EP4MJ|qDZYst%`PC|ky zy^&wT8;E3_$dN+~5MFI{!XPabtG5?A+3+?aaNB~Xee@IDtnw;XVdD|xUUB=#=ErPB zl5yo*F6UQjkbg<5v3_Y$8XWZ16?cwoZg{hjjHjFsZw1RltC!Dmg~3iIzjN%H26i(! 
z@D<-)$w!=Qnpev@8)KF&N7kTM+&{IY-hEFp?0fq9YC$F0TSoJ3IU?Vd74@o6u=aY@ zqx0X^zdOo~zy00KBDH}A50=%##6e$Gbp6|gwolpqeDF@0S_dvxZ|`V`qd{A7@A$U* zc2_xIch%iZ7jqR=)moh;6QiCZ8<>)n=~V^ScJH>n|4Np-{^YF^H5ed}vCP`41-Zl2 ztb;)-e|Tl@H<#*c(sC-=_to)h`N}FpTPC^1tgh#fX-vmD>#O9CZl2%q`9~jq@ImSO zy0!fK{0Fix_nyC*Cr5Y}P;=I)u``i{F!a^R;sM6sxagM1B(Z1i(k2UYBcK@=84j*VeFLG zq)l_smY5lf#H-d$c})^ES?zN2=22dgQcYGu%xt2q3wL8Jd9`VtU`vYCbhwR`Mkm^G9#KvU=6-l`gARzw+is zn|GhORYVPtsB>Wy6Y3?PuFkUy8GDWTw*_l6@_;dUT$ALb$IXYT|0OF zwu9g4yleN~!>2Ci6f5zPf#C2QW~rh$7YUTl?KtCf?&8&3xy1_D#ZhYc0)E$hrw6%@ z@{7upRj|NUBFUxM%llo}FDg?YTZ0xa=9PFz!p=p59_zUs*e+?SBQNyvbb)R0+pup^ zZb|#Ey!fvtA=~3${M!k#d2D*N_I1;u}{5K)For#r|m;Y8$-TkHa|CV98|LVK{ zI%)9!*Z1Mp(MSK9Z5g&l{ z|4|=+pFVo$%{P>5*w+oOaj*Kl^72bBu3Gv0bI&f#%FKLb#ixJ431DjP0M4HeV6gp> z3+6++iNPA8vB9Ws9D*DmmcoF6gJ>9np-HjJn&v7nint1lYg+t=?8ZA4jHXTnqeGh% zDk77*A6&4^0%*s4v5)riH4= zZlcFSE9!VKs!nK&j`fAMSlR@ah?WBLT469k>j{Igw9)Poo`So?s9F&-I#`dG@r>X} z!O;ANu44W}>z))I)RSVgWpIsPtHCv%J$$Lr4i+BN!D6&!APzsvKpe}6!^QoYSm~sl z7tEfajc*8z4i}W?Z=)>(cX(I??pW3Y{|k|wWsnbd^B^C~8tsiC@JhA}2jXKM4utSB z^Ue^`GnyI3jOKUZo#8>fGm4q0B~tRcvcWN$R%W)vN9)&Ms+Yb0BS8EIi2 zW@Nd^ccUg0y%lCg-;I`p;hyCBqmtOhEpOPB!`M6oeo}Y}+!_dA;?VcGdr;{>Me$U4`orp|p$DZ-9h&@A?HrDY(WLP^+ zO$&=SH7gqJej>7~9nq$hMMRqwjdnrtq%J7qYRA55Z4vusWmEi6z!82G<7x-WX=xEC zXJu1;Q3S4IYe&}cu!yV!u8nm_5&6{)!qdVc2oK?B^h$9N9H7i*zVZ1QNm5%0UMXX1 zhwEu)9w*13@zl~=R?|u^}*xaRa#QVm$41UAd>a=Sw$H> z-eX1N*I=w7!me>XD=n$d%Gd@&8VTGQJeK5u}6#2*zk&nxWehwT9I9Y z5t1BD>>A~|BC>2KnvyKQS7_j4omW~2-F-~ZCCcyd4lEvm1Iw5J4Mt|N3-DX@I3E^w zxIo-EIHrNnO^yPW$GNZ&MmGPEP+xCy)(3IQeS?tQm_=E9QX(0c5o5_GCz)9|<*KQ~M`nd% zS;TfyIl)Oq=2(DO+A?UXWSEl`WAR`#qm3oDlIOt;YYa;p(wCU3ubk=-QpU`NSO^Q# z#`I&krsiZMSjJS|q>X9Ol54B*S%IE6Gq~a;S0|%u3x;j9%&oL#@NwZm*_agxGb?av z=Eg237`rwC_+kN9X=5tAoG^m{tE6BIU4?Zr;fN0(?Ol{E0F&ldKJ^_OO#--F(p=4A zOhg2#++nH-b5w50im1p4QF+?bBPQ0i?8ySBNC-~(L>QRtHtGjUsl24?b?F?AzcsEWk@+XOb zQ&e?3wg({N!;!;%2M_e_-TnIW&paMWI~OB=5*x_c|2}mbCfvz?BAxiJpcA?&o|iaD zt(7@y$v8fAfAFE_;>~H1ZGaGcqdZd z8bh`F|014v!A(n*FCs)&HXDolFQ3hbU(|7oYW)l#0>&;X4>=e zL{WCSTpkYyh<*OTz>FD1pzlq2jj`DHl8Q^KN;>$M6Rf9*CWR&r8S+x=%*mCQIaweG zJnrNrR+SewV&z2xpJw@9Ug+d1J}2`MZs??ePvE?4&zF`o8x!b1y$2H~33SqsW_&JX z8K0L^(8-}bSh{@KyMG_U!%={tlLmtxdOkxBy_^?1X)vgyFEm$4-)CLlL7%(>tm=rUjiW&`YfgbyDk&Dd+^W8wl*kApIbq$O63q z+eaI)J2;_}1$tR_1ue^7!3CY11SLpFZ`Q7$&Dxzt=mc3(s3(D`N9dEoqbhU~(rdW= zw1&HLI?zejpzZe2+U`zk&`EMw92p*g=V%G26vXt&yJ4g2yMlIo!`x#?E7FhUD9DQi ziW!sh*65QkTMS;P6N9g!>647A5kj9tGCRZr28Z~nY0xJTKIQl-%k;^ZN}fdc{N(Tp zILVVJpUfQn0xt3-$|pX*--J9d7n;n(fGyHoVpW2b~IjRkhK@;WbJ4uA9>PfG`stCX7?^m@+4}Mz|YW0;JY}=b zU!?QJujVIDqDH%XKxda6X?MMV=B3CJgmy4|C;)e0 z+9t}@+$V*6d~jC569n%>nO6KsL0)ZLjY7zisJTQxE{HHn#ZIu)B$FM5a3@hS=l(bu z#0#B7S(g4Yf`kr?gExt=l>TFaxQ2@~iLmtlXc7-(5@9t3kU`e@k_fX@U|f*kkT}?q z2&;f#T#(gC99T(&RagM;2QtkfQW9Zz8^q8kM3W@2TE*O1FcMd3k_f8=0qRH5n2atI z!jXi{n-b#a0wH@zW(x{INW!c}1^A2WwhWJi&6^kCigbU2j>Hl+5;m`BfLEy7qV|N~ zB4P77hcO`!&9Oj3!+XP2gG4{< zrvf1HQRit>=L^~I$VXwVXyO>!%^LF&rt2tak`eIYdlygyK_e7{3bQS_{|)><7A$P)u#k_yz6t2S`fdV{xWe zM8u3nXdHKJ9L0NN0WuuO9}+$&NNX9~Md}(3P9%iMctV;?#D|Xd!XDly5MMD4c)}QT zjRR((oM9mr4un`N{#I~^FJ$E@9EhiQ61Ke}8#^H)c4A+&54w{#WhW&7^BCycHt&!i z+wZaB>X5OkWDk5J7ur&|Rgz_4NN$e`RD2ezzPzHI5z6HDUvDod25@*_LXb0*^4 zxH*R5Z*n|7$*^}IH%E3RVmNUIW-ytEK^TEH;9??jZjQ5dbHHB=d>QTXKs}(Z&!6fpXzAsm1D?xWz>8zqcuUmH5ZQnv!kOE(oI}3`wP}~zI zdYVcu@VcZt+jsB-(UvE9i{|vgL$}YaoaOgG+=pX_D-f*yOp)WuN&_hU9;c$Gcy9fE zMV@#dcmYZe;Z*XLRE9oPDvix++faXZ=iC>4L8#lKMT z9xeWrirb@|DfVsT0B6-JGJ;vDt;Sc^(_CQ z;vOwNPQ`C&@!wRuLyJ#P@tY9C^+vDoNh;p1#e-D54aMX|`-h6R!rMfRPf_t3=zW5= zw%dlN_;rY{FD-P5cH1x&zotk-6(v5WLzlh^eYh7F=fj8*YFkKS*Uk>O9J=%sMS8ws 
zCffEiwJoGS&I|gTdB!w$#YHpR4r7`OT{_bv>e81L@tW!jy^=A!{_?T{3CiB4-n|Q7 zR;1?Ipbw5(ia$}Oi0O}EXZ<#p(F%Uy@ubx7TXa_@uM<`aBf=29Jnr!;KB#>4xFlHm4hyGXu078o ZCZ!#6QgWb*S6FsHai?voJz+Z~{0sYVV;}$k literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/Build/Mac/mkinstalldirs b/tribler-mod/Tribler/Main/Build/Mac/mkinstalldirs new file mode 100755 index 0000000..d2d5f21 --- /dev/null +++ b/tribler-mod/Tribler/Main/Build/Mac/mkinstalldirs @@ -0,0 +1,111 @@ +#! /bin/sh +# mkinstalldirs --- make directory hierarchy +# Author: Noah Friedman +# Created: 1993-05-16 +# Public domain + +errstatus=0 +dirmode="" + +usage="\ +Usage: mkinstalldirs [-h] [--help] [-m mode] dir ..." + +# process command line arguments +while test $# -gt 0 ; do + case $1 in + -h | --help | --h*) # -h for help + echo "$usage" 1>&2 + exit 0 + ;; + -m) # -m PERM arg + shift + test $# -eq 0 && { echo "$usage" 1>&2; exit 1; } + dirmode=$1 + shift + ;; + --) # stop option processing + shift + break + ;; + -*) # unknown option + echo "$usage" 1>&2 + exit 1 + ;; + *) # first non-opt arg + break + ;; + esac +done + +for file +do + if test -d "$file"; then + shift + else + break + fi +done + +case $# in + 0) exit 0 ;; +esac + +case $dirmode in + '') + if mkdir -p -- . 2>/dev/null; then + echo "mkdir -p -- $*" + exec mkdir -p -- "$@" + fi + ;; + *) + if mkdir -m "$dirmode" -p -- . 2>/dev/null; then + echo "mkdir -m $dirmode -p -- $*" + exec mkdir -m "$dirmode" -p -- "$@" + fi + ;; +esac + +for file +do + set fnord `echo ":$file" | sed -ne 's/^:\//#/;s/^://;s/\// /g;s/^#/\//;p'` + shift + + pathcomp= + for d + do + pathcomp="$pathcomp$d" + case $pathcomp in + -*) pathcomp=./$pathcomp ;; + esac + + if test ! -d "$pathcomp"; then + echo "mkdir $pathcomp" + + mkdir "$pathcomp" || lasterr=$? + + if test ! -d "$pathcomp"; then + errstatus=$lasterr + else + if test ! -z "$dirmode"; then + echo "chmod $dirmode $pathcomp" + lasterr="" + chmod "$dirmode" "$pathcomp" || lasterr=$? + + if test ! -z "$lasterr"; then + errstatus=$lasterr + fi + fi + fi + fi + + pathcomp="$pathcomp/" + done +done + +exit $errstatus + +# Local Variables: +# mode: shell-script +# sh-indentation: 2 +# End: +# mkinstalldirs ends here diff --git a/tribler-mod/Tribler/Main/Build/Mac/process_libs b/tribler-mod/Tribler/Main/Build/Mac/process_libs new file mode 100755 index 0000000..36b7be9 --- /dev/null +++ b/tribler-mod/Tribler/Main/Build/Mac/process_libs @@ -0,0 +1,32 @@ +#!/bin/bash + +TARGETDIR=$1 + +# process dependencies and their exact locations of all libs + +cd $TARGETDIR + +for i in `find . 
-name "*.dylib" -or -name "*.so"` +do + otool -L $i | perl -ne ' + if(m#/'`basename $i`' #) { + # skip references to self + + next; + } + + if(m#([@]executable_path/([^ ]+))#) { + # remove @executable_path from the references + + print "chmod a+w '$i'\n"; + print "install_name_tool -change $1 $2 '$i'\n"; + } + if(m#(/Users/.*/(lib/([^ /]+)))#) { + # add missing libs and make references relative + + print "cp $1 build/lib/$3\n"; + print "chmod a+w build/lib/$3 '$i'\n"; + print "install_name_tool -change $1 $2 '$i'\n"; + } + ' +done | bash - diff --git a/tribler-mod/Tribler/Main/Build/Mac/setuptriblermac.py b/tribler-mod/Tribler/Main/Build/Mac/setuptriblermac.py new file mode 100644 index 0000000..dfc9f29 --- /dev/null +++ b/tribler-mod/Tribler/Main/Build/Mac/setuptriblermac.py @@ -0,0 +1,148 @@ +from time import localtime, strftime +# --------------- +# This script builds build/Tribler.app +# +# Meant to be called from mac/Makefile +# --------------- + +import py2app +from distutils.util import get_platform +import sys,os,platform,shutil +from setuptools import setup +from Tribler.__init__ import LIBRARYNAME + +# modules to include into bundle +includeModules=["encodings.hex_codec","encodings.utf_8","encodings.latin_1","xml.sax", "email.iterators"] + +# gui panels to include +includePanels=[ + "standardOverview","standardDetails","standardGrid","standardPager", + "standardFilter","TextButton","btn_DetailsHeader","tribler_List", + "TopSearchPanel","settingsOverviewPanel"] + +includeModules += ["Tribler.Main.vwxGUI.%s" % x for x in includePanels] + +# ----- some basic checks + +if __debug__: + print "WARNING: Non optimised python bytecode (.pyc) will be produced. Run with -OO instead to produce and bundle .pyo files." + +if sys.platform != "darwin": + print "WARNING: You do not seem to be running Mac OS/X." + +# ----- import and verify wxPython + +import wxversion + +wxversion.select('2.8-unicode') + +import wx + +v = wx.__version__ + +if v < "2.6": + print "WARNING: You need wxPython 2.6 or higher but are using %s." % v + +if v < "2.8.4.2": + print "WARNING: wxPython before 2.8.4.2 could crash when loading non-present fonts. You are using %s." % v + +# ----- import and verify M2Crypto + +import M2Crypto +import M2Crypto.m2 +if "ec_init" not in M2Crypto.m2.__dict__: + print "WARNING: Could not import specialistic M2Crypto (imported %s)" % M2Crypto.__file__ + +# ----- import Growl +try: + import Growl + + includeModules += ["Growl"] +except: + print "WARNING: Not including Growl support." + +# ----- import VLC + +#import vlc + +#vlc = vlc.MediaControl(["--plugin-path","macbinaries/vlc_plugins"]) + +# ================= +# build Tribler.app +# ================= + +from plistlib import Plist + +def includedir( srcpath, dstpath = None ): + """ Recursive directory listing, filtering out svn files. """ + + total = [] + + cwd = os.getcwd() + os.chdir( srcpath ) + + if dstpath is None: + dstpath = srcpath + + for root,dirs,files in os.walk( "." ): + if '.svn' in dirs: + dirs.remove('.svn') + + for f in files: + total.append( (root,f) ) + + os.chdir( cwd ) + + # format: (targetdir,[file]) + # so for us, (dstpath/filedir,[srcpath/filedir/filename]) + return [("%s/%s" % (dstpath,root),["%s/%s/%s" % (srcpath,root,f)]) for root,f in total] + +def filterincludes( l, f ): + """ Return includes which pass filter f. 
""" + + return [(x,y) for (x,y) in l if f(y[0])] + +# ----- build the app bundle + +mainfile = os.path.join(LIBRARYNAME,'Main','tribler.py') +setup( + setup_requires=['py2app'], + name='Tribler', + app=[mainfile], + options={ 'py2app': { + 'argv_emulation': True, + 'includes': includeModules, + 'excludes': ["Tkinter","Tkconstants","tcl"], + 'iconfile': LIBRARYNAME+'/Main/Build/Mac/tribler.icns', + 'plist': Plist.fromFile(LIBRARYNAME+'/Main/Build/Mac/Info.plist'), + 'optimize': 2*int(not __debug__), + 'resources': + [(LIBRARYNAME+"/Lang", [LIBRARYNAME+"/Lang/english.lang"]), + (LIBRARYNAME+"/Core", [LIBRARYNAME+"/Core/superpeer.txt"]), + (LIBRARYNAME+"/Category", [LIBRARYNAME+"/Category/category.conf"]), + LIBRARYNAME+"/binary-LICENSE.txt", + LIBRARYNAME+"/readme.txt", + LIBRARYNAME+"/Main/Build/Mac/TriblerDoc.icns", + ] + # add images + + includedir( LIBRARYNAME+"/Images" ) + + includedir( LIBRARYNAME+"/Video/Images" ) + + includedir( LIBRARYNAME+"/Main/vwxGUI/images" ) + + # add GUI elements + + filterincludes( includedir( LIBRARYNAME+"/Main/vwxGUI" ), lambda x: x.endswith(".xrc") ) + + # add crawler info and SQL statements + + filterincludes( includedir( LIBRARYNAME+"/Core/Statistics" ), lambda x: x.endswith(".txt") ) + + filterincludes( includedir( LIBRARYNAME+"/Core/Statistics" ), lambda x: x.endswith(".sql") ) + + filterincludes( includedir( LIBRARYNAME+"/" ), lambda x: x.endswith(".sql") ) + + # add VLC plugins + + includedir( "macbinaries/vlc_plugins" ) + + # add ffmpeg binary + + [("macbinaries",["macbinaries/ffmpeg"])] + , + } } +) + diff --git a/tribler-mod/Tribler/Main/Build/Mac/setuptriblermac.py.bak b/tribler-mod/Tribler/Main/Build/Mac/setuptriblermac.py.bak new file mode 100644 index 0000000..564b9ca --- /dev/null +++ b/tribler-mod/Tribler/Main/Build/Mac/setuptriblermac.py.bak @@ -0,0 +1,147 @@ +# --------------- +# This script builds build/Tribler.app +# +# Meant to be called from mac/Makefile +# --------------- + +import py2app +from distutils.util import get_platform +import sys,os,platform,shutil +from setuptools import setup +from Tribler.__init__ import LIBRARYNAME + +# modules to include into bundle +includeModules=["encodings.hex_codec","encodings.utf_8","encodings.latin_1","xml.sax", "email.iterators"] + +# gui panels to include +includePanels=[ + "standardOverview","standardDetails","standardGrid","standardPager", + "standardFilter","TextButton","btn_DetailsHeader","tribler_List", + "TopSearchPanel","settingsOverviewPanel"] + +includeModules += ["Tribler.Main.vwxGUI.%s" % x for x in includePanels] + +# ----- some basic checks + +if __debug__: + print "WARNING: Non optimised python bytecode (.pyc) will be produced. Run with -OO instead to produce and bundle .pyo files." + +if sys.platform != "darwin": + print "WARNING: You do not seem to be running Mac OS/X." + +# ----- import and verify wxPython + +import wxversion + +wxversion.select('2.8-unicode') + +import wx + +v = wx.__version__ + +if v < "2.6": + print "WARNING: You need wxPython 2.6 or higher but are using %s." % v + +if v < "2.8.4.2": + print "WARNING: wxPython before 2.8.4.2 could crash when loading non-present fonts. You are using %s." % v + +# ----- import and verify M2Crypto + +import M2Crypto +import M2Crypto.m2 +if "ec_init" not in M2Crypto.m2.__dict__: + print "WARNING: Could not import specialistic M2Crypto (imported %s)" % M2Crypto.__file__ + +# ----- import Growl +try: + import Growl + + includeModules += ["Growl"] +except: + print "WARNING: Not including Growl support." 
+ +# ----- import VLC + +#import vlc + +#vlc = vlc.MediaControl(["--plugin-path","macbinaries/vlc_plugins"]) + +# ================= +# build Tribler.app +# ================= + +from plistlib import Plist + +def includedir( srcpath, dstpath = None ): + """ Recursive directory listing, filtering out svn files. """ + + total = [] + + cwd = os.getcwd() + os.chdir( srcpath ) + + if dstpath is None: + dstpath = srcpath + + for root,dirs,files in os.walk( "." ): + if '.svn' in dirs: + dirs.remove('.svn') + + for f in files: + total.append( (root,f) ) + + os.chdir( cwd ) + + # format: (targetdir,[file]) + # so for us, (dstpath/filedir,[srcpath/filedir/filename]) + return [("%s/%s" % (dstpath,root),["%s/%s/%s" % (srcpath,root,f)]) for root,f in total] + +def filterincludes( l, f ): + """ Return includes which pass filter f. """ + + return [(x,y) for (x,y) in l if f(y[0])] + +# ----- build the app bundle + +mainfile = os.path.join(LIBRARYNAME,'Main','tribler.py') +setup( + setup_requires=['py2app'], + name='Tribler', + app=[mainfile], + options={ 'py2app': { + 'argv_emulation': True, + 'includes': includeModules, + 'excludes': ["Tkinter","Tkconstants","tcl"], + 'iconfile': LIBRARYNAME+'/Main/Build/Mac/tribler.icns', + 'plist': Plist.fromFile(LIBRARYNAME+'/Main/Build/Mac/Info.plist'), + 'optimize': 2*int(not __debug__), + 'resources': + [(LIBRARYNAME+"/Lang", [LIBRARYNAME+"/Lang/english.lang"]), + (LIBRARYNAME+"/Core", [LIBRARYNAME+"/Core/superpeer.txt"]), + (LIBRARYNAME+"/Category", [LIBRARYNAME+"/Category/category.conf"]), + LIBRARYNAME+"/binary-LICENSE.txt", + LIBRARYNAME+"/readme.txt", + LIBRARYNAME+"/Main/Build/Mac/TriblerDoc.icns", + ] + # add images + + includedir( LIBRARYNAME+"/Images" ) + + includedir( LIBRARYNAME+"/Video/Images" ) + + includedir( LIBRARYNAME+"/Main/vwxGUI/images" ) + + # add GUI elements + + filterincludes( includedir( LIBRARYNAME+"/Main/vwxGUI" ), lambda x: x.endswith(".xrc") ) + + # add crawler info and SQL statements + + filterincludes( includedir( LIBRARYNAME+"/Core/Statistics" ), lambda x: x.endswith(".txt") ) + + filterincludes( includedir( LIBRARYNAME+"/Core/Statistics" ), lambda x: x.endswith(".sql") ) + + filterincludes( includedir( LIBRARYNAME+"/" ), lambda x: x.endswith(".sql") ) + + # add VLC plugins + + includedir( "macbinaries/vlc_plugins" ) + + # add ffmpeg binary + + [("macbinaries",["macbinaries/ffmpeg"])] + , + } } +) + diff --git a/tribler-mod/Tribler/Main/Build/Mac/smart_lipo_merge b/tribler-mod/Tribler/Main/Build/Mac/smart_lipo_merge new file mode 100755 index 0000000..3097e61 --- /dev/null +++ b/tribler-mod/Tribler/Main/Build/Mac/smart_lipo_merge @@ -0,0 +1,46 @@ +#!/bin/bash +# +# syntax: smart_lipo_merge filenative fileforeign fileout +# +# merges two binaries, taking the respective architecture part in case the input is fat +# + +NATIVE=$1 +FOREIGN=$2 +FILEOUT=$3 + +ARCH1=i386 +ARCH2=ppc +ARCH=`arch` +if [ $ARCH = $ARCH1 ] +then + FOREIGNARCH=$ARCH2 +else + FOREIGNARCH=$ARCH1 +fi + +if [ `lipo -info $NATIVE | cut -d\ -f1` != "Non-fat" ] +then + echo native file is fat -- extracting $ARCH + lipo -thin $ARCH $NATIVE -output $NATIVE.$ARCH +else + echo native file is thin -- using as is + cp $NATIVE $NATIVE.$ARCH +fi + +if [ `lipo -info $FOREIGN | cut -d\ -f1` != "Non-fat" ] +then + echo foreign file is fat -- extracting $FOREIGNARCH + lipo -thin $FOREIGNARCH $FOREIGN -output $FOREIGN.$FOREIGNARCH +else + echo foreign file is thin -- using as is + cp $FOREIGN $FOREIGN.$FOREIGNARCH +fi + +echo merging... 
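+# Example (hypothetical file names): merging a native i386 slice with a foreign
+# ppc slice of the bundled ffmpeg binary amounts to
+#   lipo -create ffmpeg.i386 ffmpeg.ppc -output ffmpeg
+# i.e. the output is a single universal (fat) Mach-O containing both architectures.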
+lipo -create $NATIVE.$ARCH $FOREIGN.$FOREIGNARCH -output $FILEOUT +echo cleanup.. +rm $NATIVE.$ARCH +rm $FOREIGN.$FOREIGNARCH + + diff --git a/tribler-mod/Tribler/Main/Build/Mac/smart_lipo_thin b/tribler-mod/Tribler/Main/Build/Mac/smart_lipo_thin new file mode 100755 index 0000000..b6fd13d --- /dev/null +++ b/tribler-mod/Tribler/Main/Build/Mac/smart_lipo_thin @@ -0,0 +1,19 @@ +#!/bin/bash +# +# syntax: smart_lipo_thin file +# +# extracts the native architecture part of the fat input file, or does nothing if input is thin +# + +INPUT=$1 +ARCH=`arch` + +REPORT=`lipo -info $INPUT 2>&1 | cut -d\ -f1-5` +if [ "$REPORT" == "Architectures in the fat file:" ] +then + echo thinning `basename $INPUT` + lipo -thin $ARCH $INPUT -output $INPUT.tmp + rm -f $INPUT + mv $INPUT.tmp $INPUT +fi + diff --git a/tribler-mod/Tribler/Main/Build/Mac/tribler.icns b/tribler-mod/Tribler/Main/Build/Mac/tribler.icns new file mode 100644 index 0000000000000000000000000000000000000000..7db3c60e17360a3c9ccc7ed240b251553b454be6 GIT binary patch literal 39546 zcmeHw2YejW)$iS+-nA;WEU&t>(yI5~B}cB#@W@3ZaAqf4mULqsADpjiueyu2#E}=Kar|Sv3n{6aT)v@BK9S#q8X3?rHaw zd(Z5wx;I~bC1dwr-@WIr5Dtj*X7E978#I95Y@VF^K*{ zgBj9o?1*zr5U;YE)1-UtuwzUd6YT@iePHyELmYL8POF3;IL9`Eo3T;5L@qF5NF1|S zhaHZ5K0i=nA9D=ah8)hqyhO(GS=6B2J~ZrfjaIN|#v<4w0lxnJfkB~>?=TO6sZ7Vc zekAtSG5ov2MxAP1~+xUbv60uiO3i z5AN@|Z0lt}WN3tB~*WzUMmKm43te%zO^-OuOuqkfu-5XBN@DcU5{uYgT$jW>$77 zDWqa`>1nvh$<6OHGFABT|FuiF-1F-1eD{t(#*7S$8=PCabkhM7^HX>QXB6x6s~g&x z!oYljBAVZPr)OtJW4(#_#3q@bwe7~DA_EXvcJap7-+t@ko%&RMThr9C>sz}o&x?)5 zt&%k_XnE6S{j4oCh~HL~*8E)*9c>;;P*t#;jOu@moElqaV!jHNtlj8(zgN6R(ZjqW zW1bNYXMgc#1U`Fhah)nwZLU=IGUd`ww(ny~Po3uv*_FkY?P5&f4ZZZ075u79QILNJ zJ^tlsetmfiP{u-E6y@t_-~S2s!M}IQ`gT=pGUdWy*M6tc zHN0;xQzVSsCMq0D@F{xaR#7=-Z{{EOiR!V@QJZwLSM)&68nG^LgGO9Vi*!REr*qg` z&p|z+@7Nd;oC^tPKpHZLDi9hGT%JycV`!)!qpIi;*l5fj!y_)2(=lYXovYo;JTVN^ zu^!Q5*k&92k`K8kS@NMltMzO(VL53M4LPh9%V`>l8g@B{hxw4%Jn#h{8gfDg46$Kh zK*6yh?Zb{C2ePY=3o^O}2Fz!G=@DrgvfGCZq7O2uIA%3l&VsQ=RyaFV$-pR$#$_J6T z2WdF`oY`U@8noH$Lqm>XjE>k^Fr3tH<@4o1*T|@CDTbE~U^sIf?aDG8)MNQqp^;=eSu3Woe%Z{D9Hg3dlQCU@8W7iX( zoq6$!H5;~c@9f^TW+lv9UX9s5hSA@7@P~I^wr$JC6`dHZYHV#^{kI;`)A7j0b*q;w z#!!9hlFs#S_lh3xZ0=g#){ez)S^}1jy0HF#UbD2LiPUaev2Np*%YX9j`@j3idxIa` z)Ul|gj*Qf?cFXpiJGbuGzGnHR@2qcYZEC2*8frQ>?}QjP-M)YCo*u&uD;nymODgv= zudeOg+cs~0Lo|+fI(sWC%ks;?Y{lj+8`rJ-E12yoE-uI_#?aDr>sNPmy@{c{d3o7s z`55Y0xpMi^?c+TRPJ@7tw6f=k_)oTdXSkIJgb;%+mB z?@PD_bN*V9nu=j{X=!mKkp0dqNvQis6yIKu#)pdva(kT^elSg!djFXpE!3x{6~YB` zGh07~3%*^KlA2o9mYJ53p~tW$Gqvh%Q9QjRB@Oj5Ej=S6Go6O@DM?$u{PL>QG`&8Z z1~aoVUpK!x{Ch5M%4LSAFMOySuj(CB)8;@W*g&b8Az5{finP*~tUY5S- z?H*BiNTs-y1;u1$q$V_c0f&41b5VTO`!*I7txHLWz6^=R-1Y}i{5bJ8WV-mc=$NMj z(euv{_rTG%VJIv*O4D{u5WV)WwZMHP;#Q^(iqOn^S`fWI&ttLgiQ?<=81oAbxB}VH z>vkn$dpFEf6uf&1LeJLZmpMU-J zHzuyvpIH$yIY3qZ^OKS%T}jb6>P)#5$|=|Q<>w=3n4Mm4ACq4y6kRj1WvyK7Ko zv$1^#n8IuD9#Jto7oW@)e6ox9)NeV2^Uj!PqnopqJ4H|Ikw?dDgZ%am(Ti?JMy=BQ zLD8FTM_eNoH-Zwkf=e;!-sKXU3bPyTHG)IZuv5M#0(9>f9*_`j!66AGaWomDVH{Ir z0QMl9^@zdcLkF56oFe+AIVPMpBr#k>OK>QLh6KAph8jp8s9+csAeahgEW2P+oWoR) zvlf)LF=3RAfvpS29AS_He9oFNPKp8G?ZOdk&^q`fW)MI+s}aD7VJnV904UE6j0qzu z9LGj)5tUA>)nHMaUH)sfd)ZTW$zyVOH3S8+TAdi#Xe@lq0llgU^9sxIGypOQl9<7CRmvZLk^N~)F66} z*l`}CCH6S|#h}#!cFIw!S+J;wu{2J>u96(U@NG@pkNDfQwNxiofJs6 z&jgFY3g$i&%mt?>;3H#p#{m}L0re)B3l4uyosOuZWA^9ZU(dmFC~%2+kjz{Hi+1%L z%p>sZIj**sq!~QRr9dE#D$S1f?_|m#A7*fxMoglCDV2dIMD~4limA(sGE!4b`ku_5 z9CdzvNptN1Hop+Zl`QlC3yVt1E2?U0>l+s|x3sn|Ub1X?S8FK{beZ{u#bp)M1XuB!|PXLx&+>~wH*jP$pxSewzpagVvzHRZ5y|4-?3{~_x7!uH?}6i z<8>wVjZH1B8$U3KfzH>Tdhz#nUq-O?YZr28dDB81(r01e{EXH$>o#oOy1iR?=IwW0 
z;P+4P_qMzO=jzJ8bIJH0tI3GInD=k+1=#TT{HDKMA^~ixN_BHD=KUju+9xDhU-9ir z#Q#`*PF$GZg|?sI4*UhI_}sd?FNpvzwiN54$ov=f`Va9}s(pf^QVJU{g#a(MRiwvg z{4VNmwc#(m`Dvncg>~OM{coDUcC4i$liohN2=!qAya5=XiB8F_Xu9)t+cy{Conx!& zi__yGgM2Px^(i0F8;}8-sCa#Td0lgBJAMp&$?nG_!z zJvS`a&+8koCm&F$y?p!wgYkQ77ylNl^y;f7nExs3i+T5zTVGlN`A6u!@ms+1tG`(K y3y%B=?)2&3eGB@Vy!w2e1Q6nD{y_EDy%SC|aZRQ8P3|N*7ys`!;q~7==l=pSNted} literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/Build/Mac/triblermac.command b/tribler-mod/Tribler/Main/Build/Mac/triblermac.command new file mode 100755 index 0000000..fd5dde6 --- /dev/null +++ b/tribler-mod/Tribler/Main/Build/Mac/triblermac.command @@ -0,0 +1,28 @@ +#!/bin/bash +# +# This script helps running Tribler from the source tree. It is not meant to be used by the masses. +# If you have not already done so, build the Mac-specific libraries by executing +# cd mac && make +# This will also create a 'lib' symlink to 'mac/build/lib' when finished. Alternatively, you can +# let it point to built libraries in a different source tree. +# +# Next to this, you need wxPython 2.8-unicode and Python 2.5 installed. + +PYTHONVER=2.5 +PYTHON=python$PYTHONVER + +DIRCHANGE=`dirname $0` + +if [ $DIRCHANGE != "" ] +then + cd $DIRCHANGE +fi + +if [ ! -e "lib" ] +then + echo Please let the 'lib' symlink point to your built libraries [typically mac/build/lib]. + exit -1 +fi + +export PYTHONPATH=lib/Library/Frameworks/Python.framework/Versions/$PYTHONVER/lib/python$PYTHONVER/site-packages +exec $PYTHON tribler.py $@ diff --git a/tribler-mod/Tribler/Main/Build/Mac/vlc-macosx-compile.patch b/tribler-mod/Tribler/Main/Build/Mac/vlc-macosx-compile.patch new file mode 100644 index 0000000..a09a3a6 --- /dev/null +++ b/tribler-mod/Tribler/Main/Build/Mac/vlc-macosx-compile.patch @@ -0,0 +1,509 @@ +Index: modules/gui/macosx/voutqt.m +=================================================================== +--- modules/gui/macosx/voutqt.m (revision 20403) ++++ modules/gui/macosx/voutqt.m (working copy) +@@ -39,6 +39,7 @@ + + #include "intf.h" + #include "vout.h" ++#include + + #define QT_MAX_DIRECTBUFFERS 10 + #define VL_MAX_DISPLAYS 16 +@@ -138,13 +139,22 @@ + p_vout->pf_display = DisplayVideo; + p_vout->pf_control = ControlVideo; + +- /* Are we embedded? If so, the drawable value will be a pointer to a ++ /* Are we embedded? 
If so, the drawable value should be a pointer to a + * CGrafPtr that we're expected to use */ + var_Get( p_vout->p_libvlc, "drawable", &value_drawable ); +- if( value_drawable.i_int != 0 ) ++ if( value_drawable.i_int != 0 ) { ++ vlc_value_t value_drawable_type; ++ ++ var_Get( p_vout->p_libvlc, "macosx-drawable-type", &value_drawable_type ); ++ if( value_drawable_type.i_int != VLCDrawableCGrafPtr ) { ++ msg_Err( p_vout, "QT interface requires a CGrafPtr when embedded" ); ++ return( 1 ); ++ } ++ + p_vout->p_sys->b_embedded = VLC_TRUE; +- else ++ } else { + p_vout->p_sys->b_embedded = VLC_FALSE; ++ } + + p_vout->p_sys->b_cpu_has_simd = + vlc_CPU() & (CPU_CAPABILITY_ALTIVEC|CPU_CAPABILITY_MMXEXT); +Index: modules/gui/macosx/voutgl.m +=================================================================== +--- modules/gui/macosx/voutgl.m (revision 20403) ++++ modules/gui/macosx/voutgl.m (working copy) +@@ -35,6 +35,7 @@ + #include /* strerror() */ + + #include ++#include + + #include "intf.h" + #include "vout.h" +@@ -43,6 +44,7 @@ + #include + + #include ++#include + + /***************************************************************************** + * VLCGLView interface +@@ -67,13 +69,18 @@ + /* Mozilla plugin-related variables */ + vlc_bool_t b_embedded; + AGLContext agl_ctx; +- AGLDrawable agl_drawable; + int i_offx, i_offy; + int i_width, i_height; + WindowRef theWindow; + WindowGroupRef winGroup; + vlc_bool_t b_clipped_out; +- Rect clipBounds, viewBounds; ++ Rect clipBounds, viewBounds; ++ ++ libvlc_macosx_drawable_type_t drawable_type; ++ union { ++ CGrafPtr CGrafPtr; ++ ControlRef ControlRef; ++ } drawable; + }; + + /***************************************************************************** +@@ -462,17 +469,90 @@ + static void aglReshape( vout_thread_t * p_vout ); + static OSStatus WindowEventHandler(EventHandlerCallRef nextHandler, EventRef event, void *userData); + +-static int aglInit( vout_thread_t * p_vout ) ++/* returns the bounds of the drawable control/window */ ++static Rect aglGetBounds( vout_thread_t * p_vout ) + { ++ WindowRef win; ++ Rect rect; ++ ++ switch( p_vout->p_sys->drawable_type ) { ++ case VLCDrawableCGrafPtr: ++ win = GetWindowFromPort( p_vout->p_sys->drawable.CGrafPtr ); ++ GetWindowPortBounds( win, &rect ); ++ break; ++ ++ case VLCDrawableControlRef: ++ win = GetControlOwner( p_vout->p_sys->drawable.ControlRef ); ++ GetControlBounds( p_vout->p_sys->drawable.ControlRef, &rect ); ++ break; ++ } ++ ++ return rect; ++} ++ ++/* returns the window containing the drawable area */ ++static WindowRef aglGetWindow( vout_thread_t * p_vout ) ++{ ++ WindowRef window; ++ ++ switch( p_vout->p_sys->drawable_type ) { ++ case VLCDrawableCGrafPtr: ++ window = GetWindowFromPort( p_vout->p_sys->drawable.CGrafPtr ); ++ break; ++ ++ case VLCDrawableControlRef: ++ window = GetControlOwner( p_vout->p_sys->drawable.ControlRef ); ++ break; ++ } ++ ++ return window; ++} ++ ++/* gets the graphics port associated with our drawing area */ ++static CGrafPtr aglGetPort( vout_thread_t * p_vout ) ++{ ++ CGrafPtr port; ++ ++ switch( p_vout->p_sys->drawable_type ) { ++ case VLCDrawableCGrafPtr: ++ port = p_vout->p_sys->drawable.CGrafPtr; ++ break; ++ ++ case VLCDrawableControlRef: ++ port = GetWindowPort( GetControlOwner( ++ p_vout->p_sys->drawable.ControlRef ++ ) ); ++ break; ++ } ++ ++ return port; ++} ++ ++/* (re)process "drawable-*" and "macosx-drawable-type" variables. 
`drawable' is a ++ parameter to allow it to be overridden (REPARENT) */ ++static int aglProcessDrawable( vout_thread_t * p_vout, libvlc_drawable_t drawable ) ++{ + vlc_value_t val; ++ vlc_value_t val_type; ++ AGLDrawable agl_drawable; ++ Rect clipBounds,viewBounds; + +- Rect viewBounds; +- Rect clipBounds; +- +- var_Get( p_vout->p_libvlc, "drawable", &val ); +- p_vout->p_sys->agl_drawable = (AGLDrawable)val.i_int; +- aglSetDrawable(p_vout->p_sys->agl_ctx, p_vout->p_sys->agl_drawable); ++ var_Get( p_vout->p_libvlc, "macosx-drawable-type", &val_type ); + ++ p_vout->p_sys->drawable_type = val_type.i_int; ++ switch( val_type.i_int ) { ++ case VLCDrawableCGrafPtr: ++ p_vout->p_sys->drawable.CGrafPtr = (CGrafPtr)drawable; ++ break; ++ ++ case VLCDrawableControlRef: ++ p_vout->p_sys->drawable.ControlRef = (ControlRef)drawable; ++ break; ++ } ++ ++ agl_drawable = (AGLDrawable)aglGetPort( p_vout ); ++ aglSetDrawable(p_vout->p_sys->agl_ctx, agl_drawable); ++ + var_Get( p_vout->p_libvlc, "drawable-view-top", &val ); + viewBounds.top = val.i_int; + var_Get( p_vout->p_libvlc, "drawable-view-left", &val ); +@@ -481,15 +561,21 @@ + viewBounds.bottom = val.i_int; + var_Get( p_vout->p_libvlc, "drawable-view-right", &val ); + viewBounds.right = val.i_int; +- var_Get( p_vout->p_libvlc, "drawable-clip-top", &val ); +- clipBounds.top = val.i_int; +- var_Get( p_vout->p_libvlc, "drawable-clip-left", &val ); +- clipBounds.left = val.i_int; +- var_Get( p_vout->p_libvlc, "drawable-clip-bottom", &val ); +- clipBounds.bottom = val.i_int; +- var_Get( p_vout->p_libvlc, "drawable-clip-right", &val ); +- clipBounds.right = val.i_int; + ++ if( !viewBounds.top && !viewBounds.left && !viewBounds.right && !viewBounds.bottom ) { ++ /* view bounds not set, use control/window bounds */ ++ clipBounds = viewBounds = aglGetBounds( p_vout ); ++ } else { ++ var_Get( p_vout->p_libvlc, "drawable-clip-top", &val ); ++ clipBounds.top = val.i_int; ++ var_Get( p_vout->p_libvlc, "drawable-clip-left", &val ); ++ clipBounds.left = val.i_int; ++ var_Get( p_vout->p_libvlc, "drawable-clip-bottom", &val ); ++ clipBounds.bottom = val.i_int; ++ var_Get( p_vout->p_libvlc, "drawable-clip-right", &val ); ++ clipBounds.right = val.i_int; ++ } ++ + p_vout->p_sys->b_clipped_out = (clipBounds.top == clipBounds.bottom) + || (clipBounds.left == clipBounds.right); + if( ! 
p_vout->p_sys->b_clipped_out ) +@@ -501,7 +587,15 @@ + } + p_vout->p_sys->clipBounds = clipBounds; + p_vout->p_sys->viewBounds = viewBounds; ++} + ++static int aglInit( vout_thread_t * p_vout ) ++{ ++ vlc_value_t val; ++ ++ var_Get( p_vout->p_libvlc, "drawable", &val ); ++ aglProcessDrawable( p_vout, val.i_int ); ++ + return VLC_SUCCESS; + } + +@@ -564,6 +658,26 @@ + + static int aglManage( vout_thread_t * p_vout ) + { ++ if( p_vout->p_sys->drawable_type == VLCDrawableControlRef ) { ++ /* auto-detect size changes in the control by polling */ ++ Rect clipBounds, viewBounds; ++ ++ clipBounds = viewBounds = aglGetBounds( p_vout ); ++ ++ if( memcmp(&clipBounds, &(p_vout->p_sys->clipBounds), sizeof(clipBounds) ) ++ && memcmp(&viewBounds, &(p_vout->p_sys->viewBounds), sizeof(viewBounds)) ) ++ { ++ /* size has changed since last poll */ ++ ++ p_vout->p_sys->clipBounds = clipBounds; ++ p_vout->p_sys->viewBounds = viewBounds; ++ aglLock( p_vout ); ++ aglSetViewport(p_vout, viewBounds, clipBounds); ++ aglReshape( p_vout ); ++ aglUnlock( p_vout ); ++ } ++ } ++ + if( p_vout->i_changes & VOUT_ASPECT_CHANGE ) + { + aglLock( p_vout ); +@@ -586,42 +700,28 @@ + { + /* Close the fullscreen window and resume normal drawing */ + vlc_value_t val; +- Rect viewBounds; +- Rect clipBounds; + + var_Get( p_vout->p_libvlc, "drawable", &val ); +- p_vout->p_sys->agl_drawable = (AGLDrawable)val.i_int; +- aglSetDrawable(p_vout->p_sys->agl_ctx, p_vout->p_sys->agl_drawable); ++ aglProcessDrawable( p_vout, val.i_int ); + +- var_Get( p_vout->p_libvlc, "drawable-view-top", &val ); +- viewBounds.top = val.i_int; +- var_Get( p_vout->p_libvlc, "drawable-view-left", &val ); +- viewBounds.left = val.i_int; +- var_Get( p_vout->p_libvlc, "drawable-view-bottom", &val ); +- viewBounds.bottom = val.i_int; +- var_Get( p_vout->p_libvlc, "drawable-view-right", &val ); +- viewBounds.right = val.i_int; +- var_Get( p_vout->p_libvlc, "drawable-clip-top", &val ); +- clipBounds.top = val.i_int; +- var_Get( p_vout->p_libvlc, "drawable-clip-left", &val ); +- clipBounds.left = val.i_int; +- var_Get( p_vout->p_libvlc, "drawable-clip-bottom", &val ); +- clipBounds.bottom = val.i_int; +- var_Get( p_vout->p_libvlc, "drawable-clip-right", &val ); +- clipBounds.right = val.i_int; ++ /*the following was here, superfluous due to the same in aglLock? 
++ aglSetCurrentContext(p_vout->p_sys->agl_ctx);*/ + +- aglSetCurrentContext(p_vout->p_sys->agl_ctx); +- aglSetViewport(p_vout, viewBounds, clipBounds); +- + /* Most Carbon APIs are not thread-safe, therefore delagate some GUI visibilty update to the main thread */ + sendEventToMainThread(GetWindowEventTarget(p_vout->p_sys->theWindow), kEventClassVLCPlugin, kEventVLCPluginHideFullscreen); + } + else + { ++ CGDirectDisplayID displayID; ++ CGRect displayBounds; + Rect deviceRect; + +- GDHandle deviceHdl = GetMainDevice(); +- deviceRect = (*deviceHdl)->gdRect; ++ /* the main display has its origin at (0,0) */ ++ displayBounds = CGDisplayBounds( CGMainDisplayID() ); ++ deviceRect.left = 0; ++ deviceRect.top = 0; ++ deviceRect.right = displayBounds.size.width; ++ deviceRect.bottom = displayBounds.size.height; + + if( !p_vout->p_sys->theWindow ) + { +@@ -669,8 +769,9 @@ + SetWindowBounds(p_vout->p_sys->theWindow, kWindowContentRgn, &deviceRect); + } + glClear( GL_COLOR_BUFFER_BIT ); +- p_vout->p_sys->agl_drawable = (AGLDrawable)GetWindowPort(p_vout->p_sys->theWindow); +- aglSetDrawable(p_vout->p_sys->agl_ctx, p_vout->p_sys->agl_drawable); ++ p_vout->p_sys->drawable_type = VLCDrawableCGrafPtr; ++ p_vout->p_sys->drawable.CGrafPtr = GetWindowPort(p_vout->p_sys->theWindow); ++ aglSetDrawable(p_vout->p_sys->agl_ctx, p_vout->p_sys->drawable.CGrafPtr); + aglSetCurrentContext(p_vout->p_sys->agl_ctx); + aglSetViewport(p_vout, deviceRect, deviceRect); + //aglSetFullScreen(p_vout->p_sys->agl_ctx, device_width, device_height, 0, 0); +@@ -753,11 +854,10 @@ + + case VOUT_REPARENT: + { +- AGLDrawable drawable = (AGLDrawable)va_arg( args, int); +- if( !p_vout->b_fullscreen && drawable != p_vout->p_sys->agl_drawable ) ++ libvlc_drawable_t drawable = (libvlc_drawable_t)va_arg( args, int); ++ if( !p_vout->b_fullscreen ) + { +- p_vout->p_sys->agl_drawable = drawable; +- aglSetDrawable(p_vout->p_sys->agl_ctx, drawable); ++ aglProcessDrawable( p_vout, drawable ); + } + return VLC_SUCCESS; + } +@@ -771,8 +871,16 @@ + { + if( ! p_vout->p_sys->b_clipped_out ) + { ++ WindowRef win; ++ Rect rect; ++ + p_vout->p_sys->b_got_frame = VLC_TRUE; + aglSwapBuffers(p_vout->p_sys->agl_ctx); ++ ++ win = aglGetWindow( p_vout ); ++ rect = aglGetBounds( p_vout ); ++ ++ InvalWindowRect( win, &rect ); + } + else + { +@@ -788,12 +896,14 @@ + // however AGL coordinates are based on window structure region + // and are vertically flipped + GLint rect[4]; +- CGrafPtr port = (CGrafPtr)p_vout->p_sys->agl_drawable; ++ WindowRef window; + Rect winBounds, clientBounds; + +- GetWindowBounds(GetWindowFromPort(port), ++ window = aglGetWindow( p_vout ); ++ ++ GetWindowBounds(window, + kWindowStructureRgn, &winBounds); +- GetWindowBounds(GetWindowFromPort(port), ++ GetWindowBounds(window, + kWindowContentRgn, &clientBounds); + + /* update video clipping bounds in drawable */ +Index: bindings/python/vlc_instance.c +=================================================================== +--- bindings/python/vlc_instance.c (revision 20403) ++++ bindings/python/vlc_instance.c (working copy) +@@ -349,6 +349,30 @@ + } + + static PyObject * ++vlcInstance_video_set_macosx_parent_type( PyObject *self, PyObject *args ) ++{ ++ libvlc_exception_t ex; ++ int i_drawable_type; ++ ++ if( !PyArg_ParseTuple( args, "i", &i_drawable_type ) ) ++ return NULL; ++ ++ if( i_drawable_type != VLCDrawableCGrafPtr ++ && i_drawable_type != VLCDrawableControlRef ) ++ { ++ PyErr_SetString( vlcInstance_Exception, "Invalid drawable type." 
); ++ return NULL; ++ } ++ ++ LIBVLC_TRY; ++ libvlc_video_set_macosx_parent_type( LIBVLC_INSTANCE->p_instance, (libvlc_macosx_drawable_type_t) i_drawable_type, &ex ); ++ LIBVLC_EXCEPT; ++ ++ Py_INCREF( Py_None ); ++ return Py_None; ++} ++ ++static PyObject * + vlcInstance_video_set_size( PyObject *self, PyObject *args ) + { + libvlc_exception_t ex; +@@ -733,6 +757,8 @@ + "playlist_get_input() -> object Return the current input"}, + { "video_set_parent", vlcInstance_video_set_parent, METH_VARARGS, + "video_set_parent(xid=int) Set the parent xid or HWND"}, ++ { "video_set_macosx_parent_type", vlcInstance_video_set_macosx_parent_type, METH_VARARGS, ++ "video_set_macosx_parent_type(drawabletype=int) Set the type of parent used on Mac OS/X (see the Drawable* constants)"}, + { "video_set_size", vlcInstance_video_set_size, METH_VARARGS, + "video_set_size(width=int, height=int) Set the video width and height"}, + { "audio_toggle_mute", vlcInstance_audio_toggle_mute, METH_VARARGS, +Index: bindings/python/vlc_module.c +=================================================================== +--- bindings/python/vlc_module.c (revision 20403) ++++ bindings/python/vlc_module.c (working copy) +@@ -147,6 +147,10 @@ + mediacontrol_EndStatus ); + PyModule_AddIntConstant( p_module, "UndefinedStatus", + mediacontrol_UndefinedStatus ); ++ PyModule_AddIntConstant( p_module, "DrawableCGrafPtr", ++ VLCDrawableCGrafPtr ); ++ PyModule_AddIntConstant( p_module, "DrawableControlRef", ++ VLCDrawableControlRef ); + } + + +Index: src/control/video.c +=================================================================== +--- src/control/video.c (revision 20403) ++++ src/control/video.c (working copy) +@@ -277,6 +277,21 @@ + + /* global video settings */ + ++void libvlc_video_set_macosx_parent_type( libvlc_instance_t *p_instance, libvlc_macosx_drawable_type_t t, ++ libvlc_exception_t *p_e ) ++{ ++ var_SetInteger(p_instance->p_libvlc_int, "macosx-drawable-type", (int)t); ++} ++ ++libvlc_macosx_drawable_type_t libvlc_video_get_macosx_parent_type( libvlc_instance_t *p_instance, libvlc_exception_t *p_e ) ++{ ++ libvlc_macosx_drawable_type_t result; ++ ++ result = var_GetInteger( p_instance->p_libvlc_int, "macosx-drawable-type" ); ++ ++ return result; ++} ++ + void libvlc_video_set_parent( libvlc_instance_t *p_instance, libvlc_drawable_t d, + libvlc_exception_t *p_e ) + { +Index: src/libvlc-common.c +=================================================================== +--- src/libvlc-common.c (revision 20403) ++++ src/libvlc-common.c (working copy) +@@ -941,6 +941,10 @@ + var_Create( p_libvlc, "drawable-clip-bottom", VLC_VAR_INTEGER ); + var_Create( p_libvlc, "drawable-clip-right", VLC_VAR_INTEGER ); + ++#ifdef __APPLE__ ++ var_Create( p_libvlc, "macosx-drawable-type", VLC_VAR_INTEGER ); ++#endif ++ + /* Create volume callback system. 
*/ + var_Create( p_libvlc, "volume-change", VLC_VAR_BOOL ); + +Index: include/vlc/libvlc.h +=================================================================== +--- include/vlc/libvlc.h (revision 20403) ++++ include/vlc/libvlc.h (working copy) +@@ -424,6 +424,10 @@ + */ + VLC_PUBLIC_API void libvlc_video_redraw_rectangle( libvlc_input_t *, const libvlc_rectangle_t *, libvlc_exception_t * ); + ++VLC_PUBLIC_API void libvlc_video_set_macosx_parent_type( libvlc_instance_t *, libvlc_macosx_drawable_type_t, libvlc_exception_t * ); ++ ++VLC_PUBLIC_API libvlc_macosx_drawable_type_t libvlc_video_get_macosx_parent_type( libvlc_instance_t *, libvlc_exception_t * ); ++ + /** + * Set the default video output parent + * this settings will be used as default for all video outputs +Index: include/vlc/libvlc_structures.h +=================================================================== +--- include/vlc/libvlc_structures.h (revision 20403) ++++ include/vlc/libvlc_structures.h (working copy) +@@ -83,12 +83,22 @@ + /** + * Downcast to this general type as placeholder for a platform specific one, such as: + * Drawable on X11, +-* CGrafPort on MacOSX, ++* (libvlc_macosx_drawable_type_t) on MacOSX, + * HWND on win32 + */ + typedef int libvlc_drawable_t; + + /** ++* Type of libvlc_drawable_t on MaxOSX. Available types: ++* - VLCDrawableCGrafPtr ++* - VLCDrawableControlRef ++*/ ++typedef enum { ++ VLCDrawableCGrafPtr = 0, ++ VLCDrawableControlRef, ++} libvlc_macosx_drawable_type_t; ++ ++/** + * Rectangle type for video geometry + */ + typedef struct diff --git a/tribler-mod/Tribler/Main/Build/Ubuntu/changelog b/tribler-mod/Tribler/Main/Build/Ubuntu/changelog new file mode 100644 index 0000000..dbc04d1 --- /dev/null +++ b/tribler-mod/Tribler/Main/Build/Ubuntu/changelog @@ -0,0 +1,128 @@ +tribler (4.1.9-1ubuntu1) gutsy; urgency=low + + * New release + + -- Tribler Wed, 27 Feb 2008 14:59:40 +0100 + +tribler (4.1.8-1ubuntu1) gutsy; urgency=low + + * New release + + -- Tribler Tue, 12 Feb 2008 09:12:17 +0100 + +tribler (4.1.7-1ubuntu1) feisty; urgency=low + + * New release + + -- Tribler Thu, 11 Oct 2007 14:56:08 +0200 + +tribler (4.1.6-1ubuntu1) feisty; urgency=low + + * New release + + -- Tribler Fri, 5 Oct 2007 11:00:26 +0200 + +tribler (4.1.5-1ubuntu3) feisty; urgency=low + + * changelog + + -- Tribler Fri, 5 Oct 2007 11:00:22 +0200 + +tribler (4.1.5-1ubuntu2) feisty; urgency=low + + * Removed -x from tribler startup script. + + -- Tribler Team Mon, 24 Sep 2007 16:08:16 +0200 + +tribler (4.1.5-1ubuntu1) feisty; urgency=low + + * New release + + -- Tribler Team Mon, 24 Sep 2007 15:58:13 +0200 + +tribler (4.1.4-1ubuntu4) feisty; urgency=low + + * New release + + -- Tribler Team Thu, 20 Sep 2007 15:00:21 +0200 + +tribler (4.1.4-1ubuntu2) feisty; urgency=low + + * Changed dependency to wx2.8-unicode, font enum problem + + -- Tribler Team Tue, 18 Sep 2007 14:31:06 +0200 + +tribler (4.1.4-1ubuntu1) feisty; urgency=low + + * New release + + -- Tribler Team Tue, 18 Sep 2007 14:21:21 +0200 + +tribler (4.0.2-1ubuntu1) feisty; urgency=low + + * New release + + -- Arno Bakker Wed, 11 Jun 2007 16:21:01 +0200 + +tribler (3.4.1-1) unstable; urgency=low + + * New release + + -- Arno Bakker Mon, 29 May 2006 11:53:04 +0200 + +tribler (3.4.0-1) unstable; urgency=low + + * New release + + -- Arno Bakker Wed, 24 May 2006 13:27:04 +0200 + +tribler (3.3.6-1) unstable; urgency=low + + * New release + + -- Arno Bakker Mon, 3 Apr 2006 10:44:32 +0200 + +tribler (3.3.4-1) unstable; urgency=low + + * First public release. 
+ + -- Arno Bakker Thu, 16 Mar 2006 08:56:42 +0100 + +tribler (3.3.3-2) unstable; urgency=low + + * Forgot to add joe24.bmp + * Adjusted tribler script to ignore unicode earlier. + + -- Arno Bakker Fri, 10 Mar 2006 13:34:12 +0100 + +tribler (3.3.3-1) unstable; urgency=low + + * Fixes to work in Linux environment. + + -- Arno Bakker Fri, 10 Mar 2006 12:49:51 +0100 + +tribler (3.3.2-1) unstable; urgency=low + + * Now depends on the wxpython2.6-gtk2-ansi that I built. + + -- Arno Bakker Fri, 10 Mar 2006 11:45:39 +0100 + +tribler (3.3.1-3) unstable; urgency=low + + * Attempt to build cleaner installer + + -- Arno Bakker Tue, 7 Mar 2006 13:10:29 +0100 + +tribler (3.3.1-2) unstable; urgency=low + + * Moved all python files to /usr/share/tribler/python + * /usr/bin/tribler is now a script that adapts PYTHONPATH and calls python + + -- Otto Visser Mon, 6 Mar 2006 16:10:42 +0100 + +tribler (3.3.1-1) unstable; urgency=low + + * Initial packaging + + -- Otto Visser Fri, 24 Feb 2006 11:10:47 -0100 + diff --git a/tribler-mod/Tribler/Main/Build/Ubuntu/compat b/tribler-mod/Tribler/Main/Build/Ubuntu/compat new file mode 100644 index 0000000..b8626c4 --- /dev/null +++ b/tribler-mod/Tribler/Main/Build/Ubuntu/compat @@ -0,0 +1 @@ +4 diff --git a/tribler-mod/Tribler/Main/Build/Ubuntu/control b/tribler-mod/Tribler/Main/Build/Ubuntu/control new file mode 100644 index 0000000..e7480d6 --- /dev/null +++ b/tribler-mod/Tribler/Main/Build/Ubuntu/control @@ -0,0 +1,30 @@ +Source: tribler +Section: net +Priority: optional +Maintainer: Arno Bakker +Standards-Version: 3.7.2 +Build-Depends: python, debhelper (>= 5.0.37.2), devscripts + +Package: tribler +Architecture: all +Depends: python, python-wxgtk2.8, python-m2crypto, python-apsw, python-vlc, vlc, ffmpeg +Description: Python based Bittorrent/Internet TV application + It allows you to watch videos and download + content. Tribler aims to combine the ease of + Youtube.com with the performance of peer-to-peer. + . + Tribler is backwards compatible with the Bittorrent + download protocol. To discover interesting content + standard RSS feeds with .torrent links are supported + plus the "Tribe protocol" which automatically scans + the network for available Bittorrent swarms upon + startup. Tribler uses an embedded web browser to + access video clips of Youtube.com and Liveleak.com + Other features are "People who like this also like" + for recommending related swarms, the ability to + make friends, and to donate idle upload capacity + to friends. The later can improve download speed + by a factor of two on asymmetric Internet links. + The fastest way of social file sharing + . + Homepage: http://www.tribler.org/ diff --git a/tribler-mod/Tribler/Main/Build/Ubuntu/copyright b/tribler-mod/Tribler/Main/Build/Ubuntu/copyright new file mode 100644 index 0000000..68f78f6 --- /dev/null +++ b/tribler-mod/Tribler/Main/Build/Ubuntu/copyright @@ -0,0 +1,630 @@ +Unless otherwise noted, all files are released under the MIT +license, exceptions contain licensing information in them. 
+ +Copyright (C) 2001-2002 Bram Cohen + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation files +(the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of the Software, +and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +The Software is provided "AS IS", without warranty of any kind, +express or implied, including but not limited to the warranties of +merchantability, fitness for a particular purpose and +noninfringement. In no event shall the authors or copyright holders +be liable for any claim, damages or other liability, whether in an +action of contract, tort or otherwise, arising from, out of or in +connection with the Software or the use or other dealings in the +Software. + +------------------------------------------------------------------------------ + +All code written by Jie Yang, Pawel Garbacki, Jun Wang, Arno Bakker, +Jan David Mol, Qin Chen, Yuan Yuan, Jelle Roozenburg, Freek Zindel, +Fabian van der Werf, Lucian Musat, Michel Meulpolder, Maarten ten Brinke, +Ali Abbas, Boxun Zhang, Lucia d' Acunto, Rameez Rahman, Boudewijn Schoon, +Richard Gwin, Diego Rabaioli, Riccardo Petrocco has the following license: + + TRIBLER file-sharing library. + + Copyright (c) 2005-2009, Delft University of Technology and Vrije + Universiteit Amsterdam; All rights reserved. + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + + Delft University of Technology + Postbus 5 + 2600 AA Delft + The Netherlands + + Vrije Universiteit + De Boelelaan 1105 + 1081 HV Amsterdam + The Netherlands + + +The research leading to this library has received funding from: + - BSIK Freeband Communication I-Share project (Dutch Ministry of Economic + Affairs) + - Netherlands Organisation for Scientific Research (NWO) grant 612.060.215. + - Dutch Technology Foundation STW: Veni project DTC.7299 + - European Community's Sixth Framework Programme in the P2P-FUSION project + under contract no 035249. + - The European Community's Seventh Framework Programme in the P2P-Next project + under grant agreement no 216217. + +------------------------------------------------------------------------------- + + BuddyCast4 content-recommendation library. + + The research leading to this library has received funding from the + European Community's Seventh Framework Programme [FP7/2007-2011] + in the Petamedia project under grant agreement no. 
216444 + + The following library modules are Copyright (c) 2008-2009, + Delft University of Technology and Technische Universität Berlin; + All rights reserved. + + BaseLib/Core/BuddyCast/buddycast.py + + The following library modules are Copyright (c) 2008-2009, + Technische Universität Berlin; + All rights reserved. + + BaseLib/Core/Search/Reranking.py + BaseLib/Test/test_buddycast4.py + BaseLib/Test/test_buddycast4_stresstest.py + + All library modules are free software, unless stated otherwise; you can + redistribute them and/or modify them under the terms of the GNU Lesser + General Public License as published by the Free Software Foundation; in + particular, version 2.1 of the License. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + + Delft University of Technology + Postbus 5 + 2600 AA Delft + The Netherlands + + Technische Universität Berlin + Strasse des 17. Juni 135 + 10623 Berlin + Germany + +------------------------------------------------------------------------------- + + GNU LESSER GENERAL PUBLIC LICENSE + Version 2.1, February 1999 + + Copyright (C) 1991, 1999 Free Software Foundation, Inc. + 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + +[This is the first released version of the Lesser GPL. It also counts + as the successor of the GNU Library Public License, version 2, hence + the version number 2.1.] + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +Licenses are intended to guarantee your freedom to share and change +free software--to make sure the software is free for all its users. + + This license, the Lesser General Public License, applies to some +specially designated software packages--typically libraries--of the +Free Software Foundation and other authors who decide to use it. You +can use it too, but we suggest you first think carefully about whether +this license or the ordinary General Public License is the better +strategy to use in any particular case, based on the explanations below. + + When we speak of free software, we are referring to freedom of use, +not price. Our General Public Licenses are designed to make sure that +you have the freedom to distribute copies of free software (and charge +for this service if you wish); that you receive source code or can get +it if you want it; that you can change the software and use pieces of +it in new free programs; and that you are informed that you can do +these things. + + To protect your rights, we need to make restrictions that forbid +distributors to deny you these rights or to ask you to surrender these +rights. These restrictions translate to certain responsibilities for +you if you distribute copies of the library or if you modify it. + + For example, if you distribute copies of the library, whether gratis +or for a fee, you must give the recipients all the rights that we gave +you. You must make sure that they, too, receive or can get the source +code. 
If you link other code with the library, you must provide +complete object files to the recipients, so that they can relink them +with the library after making changes to the library and recompiling +it. And you must show them these terms so they know their rights. + + We protect your rights with a two-step method: (1) we copyright the +library, and (2) we offer you this license, which gives you legal +permission to copy, distribute and/or modify the library. + + To protect each distributor, we want to make it very clear that +there is no warranty for the free library. Also, if the library is +modified by someone else and passed on, the recipients should know +that what they have is not the original version, so that the original +author's reputation will not be affected by problems that might be +introduced by others. + + Finally, software patents pose a constant threat to the existence of +any free program. We wish to make sure that a company cannot +effectively restrict the users of a free program by obtaining a +restrictive license from a patent holder. Therefore, we insist that +any patent license obtained for a version of the library must be +consistent with the full freedom of use specified in this license. + + Most GNU software, including some libraries, is covered by the +ordinary GNU General Public License. This license, the GNU Lesser +General Public License, applies to certain designated libraries, and +is quite different from the ordinary General Public License. We use +this license for certain libraries in order to permit linking those +libraries into non-free programs. + + When a program is linked with a library, whether statically or using +a shared library, the combination of the two is legally speaking a +combined work, a derivative of the original library. The ordinary +General Public License therefore permits such linking only if the +entire combination fits its criteria of freedom. The Lesser General +Public License permits more lax criteria for linking other code with +the library. + + We call this license the "Lesser" General Public License because it +does Less to protect the user's freedom than the ordinary General +Public License. It also provides other free software developers Less +of an advantage over competing non-free programs. These disadvantages +are the reason we use the ordinary General Public License for many +libraries. However, the Lesser license provides advantages in certain +special circumstances. + + For example, on rare occasions, there may be a special need to +encourage the widest possible use of a certain library, so that it becomes +a de-facto standard. To achieve this, non-free programs must be +allowed to use the library. A more frequent case is that a free +library does the same job as widely used non-free libraries. In this +case, there is little to gain by limiting the free library to free +software only, so we use the Lesser General Public License. + + In other cases, permission to use a particular library in non-free +programs enables a greater number of people to use a large body of +free software. For example, permission to use the GNU C Library in +non-free programs enables many more people to use the whole GNU +operating system, as well as its variant, the GNU/Linux operating +system. 
+ + Although the Lesser General Public License is Less protective of the +users' freedom, it does ensure that the user of a program that is +linked with the Library has the freedom and the wherewithal to run +that program using a modified version of the Library. + + The precise terms and conditions for copying, distribution and +modification follow. Pay close attention to the difference between a +"work based on the library" and a "work that uses the library". The +former contains code derived from the library, whereas the latter must +be combined with the library in order to run. + + GNU LESSER GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License Agreement applies to any software library or other +program which contains a notice placed by the copyright holder or +other authorized party saying it may be distributed under the terms of +this Lesser General Public License (also called "this License"). +Each licensee is addressed as "you". + + A "library" means a collection of software functions and/or data +prepared so as to be conveniently linked with application programs +(which use some of those functions and data) to form executables. + + The "Library", below, refers to any such software library or work +which has been distributed under these terms. A "work based on the +Library" means either the Library or any derivative work under +copyright law: that is to say, a work containing the Library or a +portion of it, either verbatim or with modifications and/or translated +straightforwardly into another language. (Hereinafter, translation is +included without limitation in the term "modification".) + + "Source code" for a work means the preferred form of the work for +making modifications to it. For a library, complete source code means +all the source code for all modules it contains, plus any associated +interface definition files, plus the scripts used to control compilation +and installation of the library. + + Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running a program using the Library is not restricted, and output from +such a program is covered only if its contents constitute a work based +on the Library (independent of the use of the Library in a tool for +writing it). Whether that is true depends on what the Library does +and what the program that uses the Library does. + + 1. You may copy and distribute verbatim copies of the Library's +complete source code as you receive it, in any medium, provided that +you conspicuously and appropriately publish on each copy an +appropriate copyright notice and disclaimer of warranty; keep intact +all the notices that refer to this License and to the absence of any +warranty; and distribute a copy of this License along with the +Library. + + You may charge a fee for the physical act of transferring a copy, +and you may at your option offer warranty protection in exchange for a +fee. + + 2. You may modify your copy or copies of the Library or any portion +of it, thus forming a work based on the Library, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. + + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. 
+ + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. + + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. + + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Library, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Library, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote +it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Library. + +In addition, mere aggregation of another work not based on the Library +with the Library (or with a work based on the Library) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may opt to apply the terms of the ordinary GNU General Public +License instead of this License to a given copy of the Library. To do +this, you must alter all the notices that refer to this License, so +that they refer to the ordinary GNU General Public License, version 2, +instead of to this License. (If a newer version than version 2 of the +ordinary GNU General Public License has appeared, then you can specify +that version instead if you wish.) Do not make any other change in +these notices. + + Once this change is made in a given copy, it is irreversible for +that copy, so the ordinary GNU General Public License applies to all +subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of +the Library into a program that is not a library. + + 4. You may copy and distribute the Library (or a portion or +derivative of it, under Section 2) in object code or executable form +under the terms of Sections 1 and 2 above provided that you accompany +it with the complete corresponding machine-readable source code, which +must be distributed under the terms of Sections 1 and 2 above on a +medium customarily used for software interchange. 
+ + If distribution of object code is made by offering access to copy +from a designated place, then offering equivalent access to copy the +source code from the same place satisfies the requirement to +distribute the source code, even though third parties are not +compelled to copy the source along with the object code. + + 5. A program that contains no derivative of any portion of the +Library, but is designed to work with the Library by being compiled or +linked with it, is called a "work that uses the Library". Such a +work, in isolation, is not a derivative work of the Library, and +therefore falls outside the scope of this License. + + However, linking a "work that uses the Library" with the Library +creates an executable that is a derivative of the Library (because it +contains portions of the Library), rather than a "work that uses the +library". The executable is therefore covered by this License. +Section 6 states terms for distribution of such executables. + + When a "work that uses the Library" uses material from a header file +that is part of the Library, the object code for the work may be a +derivative work of the Library even though the source code is not. +Whether this is true is especially significant if the work can be +linked without the Library, or if the work is itself a library. The +threshold for this to be true is not precisely defined by law. + + If such an object file uses only numerical parameters, data +structure layouts and accessors, and small macros and small inline +functions (ten lines or less in length), then the use of the object +file is unrestricted, regardless of whether it is legally a derivative +work. (Executables containing this object code plus portions of the +Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may +distribute the object code for the work under the terms of Section 6. +Any executables containing that work also fall under Section 6, +whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also combine or +link a "work that uses the Library" with the Library to produce a +work containing portions of the Library, and distribute that work +under terms of your choice, provided that the terms permit +modification of the work for the customer's own use and reverse +engineering for debugging such modifications. + + You must give prominent notice with each copy of the work that the +Library is used in it and that the Library and its use are covered by +this License. You must supply a copy of this License. If the work +during execution displays copyright notices, you must include the +copyright notice for the Library among them, as well as a reference +directing the user to the copy of this License. Also, you must do one +of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. (It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) 
+ + b) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (1) uses at run time a + copy of the library already present on the user's computer system, + rather than copying library functions into the executable, and (2) + will operate properly with a modified version of the library, if + the user installs one, as long as the modified version is + interface-compatible with the version that the work was made with. + + c) Accompany the work with a written offer, valid for at + least three years, to give the same user the materials + specified in Subsection 6a, above, for a charge no more + than the cost of performing this distribution. + + d) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. + + e) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. + + For an executable, the required form of the "work that uses the +Library" must include any data and utility programs needed for +reproducing the executable from it. However, as a special exception, +the materials to be distributed need not include anything that is +normally distributed (in either source or binary form) with the major +components (compiler, kernel, and so on) of the operating system on +which the executable runs, unless that component itself accompanies +the executable. + + It may happen that this requirement contradicts the license +restrictions of other proprietary libraries that do not normally +accompany the operating system. Such a contradiction means you cannot +use both them and the Library together in an executable that you +distribute. + + 7. You may place library facilities that are a work based on the +Library side-by-side in a single library together with other library +facilities not covered by this License, and distribute such a combined +library, provided that the separate distribution of the work based on +the Library and of the other library facilities is otherwise +permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. You may not copy, modify, sublicense, link with, or distribute +the Library except as expressly provided under this License. Any +attempt otherwise to copy, modify, sublicense, link with, or +distribute the Library is void, and will automatically terminate your +rights under this License. However, parties who have received copies, +or rights, from you under this License will not have their licenses +terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Library or its derivative works. These actions are +prohibited by law if you do not accept this License. 
Therefore, by +modifying or distributing the Library (or any work based on the +Library), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Library or works based on it. + + 10. Each time you redistribute the Library (or any work based on the +Library), the recipient automatically receives a license from the +original licensor to copy, distribute, link with or modify the Library +subject to these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties with +this License. + + 11. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Library at all. For example, if a patent +license would not permit royalty-free redistribution of the Library by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Library. + +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply, +and the section as a whole is intended to apply in other circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 12. If the distribution and/or use of the Library is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Library under this License may add +an explicit geographical distribution limitation excluding those countries, +so that distribution is permitted only in or among countries not thus +excluded. In such case, this License incorporates the limitation as if +written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new +versions of the Lesser General Public License from time to time. +Such new versions will be similar in spirit to the present version, +but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Library +specifies a version number of this License which applies to it and +"any later version", you have the option of following the terms and +conditions either of that version or of any later version published by +the Free Software Foundation. 
If the Library does not specify a +license version number, you may choose any version ever published by +the Free Software Foundation. + + 14. If you wish to incorporate parts of the Library into other free +programs whose distribution conditions are incompatible with these, +write to the author to ask for permission. For software which is +copyrighted by the Free Software Foundation, write to the Free +Software Foundation; we sometimes make exceptions for this. Our +decision will be guided by the two goals of preserving the free status +of all derivatives of our free software and of promoting the sharing +and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO +WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR +OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY +KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE +LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME +THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY +AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU +FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR +CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE +LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING +RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A +FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF +SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Libraries + + If you develop a new library, and you want it to be of the greatest +possible use to the public, we recommend making it free software that +everyone can redistribute and change. You can do so by permitting +redistribution under these terms (or, alternatively, under the terms of the +ordinary General Public License). + + To apply these terms, attach the following notices to the library. It is +safest to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least the +"copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +Also add information on how to contact you by electronic and paper mail. 
+ +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the library, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the + library `Frob' (a library for tweaking knobs) written by James Random Hacker. + + , 1 April 1990 + Ty Coon, President of Vice + +That's all there is to it! + +------------------------------------------------------------------------------- + +PRIVACY WARNING: This software will by default exchange your download +history with others. This feature can be disabled by disabling the +recommender in the Preference menu. See also the disclaimer on +http://www.tribler.org/ diff --git a/tribler-mod/Tribler/Main/Build/Ubuntu/files b/tribler-mod/Tribler/Main/Build/Ubuntu/files new file mode 100644 index 0000000..64093c1 --- /dev/null +++ b/tribler-mod/Tribler/Main/Build/Ubuntu/files @@ -0,0 +1 @@ +tribler_4.0.2-1ubuntu1_all.deb net optional diff --git a/tribler-mod/Tribler/Main/Build/Ubuntu/prerm b/tribler-mod/Tribler/Main/Build/Ubuntu/prerm new file mode 100644 index 0000000..e203aa7 --- /dev/null +++ b/tribler-mod/Tribler/Main/Build/Ubuntu/prerm @@ -0,0 +1,47 @@ +#! /bin/sh +# prerm script for #PACKAGE# +# +# see: dh_installdeb(1) + +set -e + +# summary of how this script can be called: +# * `remove' +# * `upgrade' +# * `failed-upgrade' +# * `remove' `in-favour' +# * `deconfigure' `in-favour' +# `removing' +# +# for details, see http://www.debian.org/doc/debian-policy/ or +# the debian-policy package + +PACKAGE="tribler" + +dpkg --listfiles $PACKAGE | + awk '$0~/\.py$/ {print $0"c\n" $0"o"}' | + xargs rm -f >&2 + +killall tribler || : + + +case "$1" in + remove|upgrade|deconfigure) +# install-info --quiet --remove /usr/info/#PACKAGE#.info.gz + ;; + failed-upgrade) + ;; + *) + echo "prerm called with unknown argument \`$1'" >&2 + exit 1 + ;; +esac + +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. + +#DEBHELPER# + +exit 0 + + diff --git a/tribler-mod/Tribler/Main/Build/Ubuntu/pycompat b/tribler-mod/Tribler/Main/Build/Ubuntu/pycompat new file mode 100644 index 0000000..0cfbf08 --- /dev/null +++ b/tribler-mod/Tribler/Main/Build/Ubuntu/pycompat @@ -0,0 +1 @@ +2 diff --git a/tribler-mod/Tribler/Main/Build/Ubuntu/rules b/tribler-mod/Tribler/Main/Build/Ubuntu/rules new file mode 100755 index 0000000..c023bf9 --- /dev/null +++ b/tribler-mod/Tribler/Main/Build/Ubuntu/rules @@ -0,0 +1,84 @@ +#!/usr/bin/make -f +# Sample debian/rules that uses debhelper. +# GNU copyright 1997 to 1999 by Joey Hess. + +# Uncomment this to turn on verbose mode. +#export DH_VERBOSE=1 + +configure: configure-stamp +configure-stamp: + dh_testdir + # Add here commands to configure the package. + + touch configure-stamp + + +build: build-stamp + +build-stamp: configure-stamp + dh_testdir + + # Add here commands to compile the package. + #$(MAKE) + #/usr/bin/docbook-to-man debian/bittorrent.sgml > bittorrent.1 + + touch build-stamp + +clean: + dh_testdir + dh_testroot + rm -f build-stamp configure-stamp + + # Add here commands to clean up after the build process. + #-$(MAKE) clean + find . -name '*.pyc' |xargs rm || : + + dh_clean + +install: build + dh_testdir + dh_testroot + dh_clean -k + dh_installdirs + +# Build architecture-independent files here. +binary-arch: build install +# We have nothing to do by default. + + +# Build architecture-dependent files here. 
+binary-indep: build install
+	dh_testdir
+	dh_testroot
+	dh_installdocs
+	dh_installexamples
+	dh_installmenu
+	dh_installmime
+	dh_installman
+
+	mkdir -p debian/tribler/usr/share/tribler/
+	cp -rf `ls -1d Tribler khashmir` debian/tribler/usr/share/tribler/
+	rm -rf debian/tribler/usr/share/tribler/Tribler/Test
+	# add other files
+	mkdir -p debian/tribler/usr/bin
+	cp -f debian/tribler.sh debian/tribler/usr/bin/tribler
+	cp -f Tribler/LICENSE.txt debian/copyright
+	# for the menu
+	mkdir -p debian/tribler/usr/share/pixmaps
+	cp -f debian/tribler.xpm debian/tribler/usr/share/pixmaps/
+
+	dh_installchangelogs
+	dh_installinit -r --no-start -- stop 20 0 6 .
+	dh_install --sourcedir=debian/tmp
+	dh_install debian/tribler.desktop usr/share/applications
+	dh_link
+	dh_compress
+	dh_fixperms
+	dh_installdeb
+	dh_python
+	dh_gencontrol
+	dh_md5sums
+	dh_builddeb
+
+binary: binary-indep binary-arch
+.PHONY: build clean binary-indep binary-arch binary install configure
diff --git a/tribler-mod/Tribler/Main/Build/Ubuntu/tribler.1 b/tribler-mod/Tribler/Main/Build/Ubuntu/tribler.1
new file mode 100644
index 0000000..73908e8
--- /dev/null
+++ b/tribler-mod/Tribler/Main/Build/Ubuntu/tribler.1
@@ -0,0 +1,36 @@
+.\" Tribler: Python based Bittorrent/Internet TV application
+.TH man 1 "12 June 2007" "1.0" "Tribler man page"
+.SH NAME
+tribler \- Python based Bittorrent/Internet TV application
+.SH SYNOPSIS
+.B tribler
+.SH DESCRIPTION
+.B Tribler
+is a Python-based Bittorrent/Internet TV application. It allows you to watch videos and download
+content. Tribler aims to combine the ease of
+Youtube.com with the performance of peer-to-peer.
+
+Tribler is backwards compatible with the Bittorrent
+download protocol. To discover interesting content,
+standard RSS feeds with .torrent links are supported,
+plus the "Tribe protocol", which automatically scans
+the network for available Bittorrent swarms upon
+startup. Tribler uses an embedded web browser to
+access video clips of Youtube.com and Liveleak.com.
+Other features are "People who like this also like"
+for recommending related swarms, the ability to
+make friends, and to donate idle upload capacity
+to friends. The latter can improve download speed
+by a factor of two on asymmetric Internet links.
+The fastest way of social file sharing + +Homepage: http://www.tribler.org +.SH FILES +.P +.I /usr/bin/tribler +.I /usr/share/tribler +.SH AUTHOR +.nf +Arno Bakker (arno@cs.vu.nl) +.fi diff --git a/tribler-mod/Tribler/Main/Build/Ubuntu/tribler.desktop b/tribler-mod/Tribler/Main/Build/Ubuntu/tribler.desktop new file mode 100644 index 0000000..36eb0e2 --- /dev/null +++ b/tribler-mod/Tribler/Main/Build/Ubuntu/tribler.desktop @@ -0,0 +1,8 @@ +[Desktop Entry] +Name=Tribler +GenericName=P2P /Bittorrent/Youtube client +Exec=tribler +Icon=tribler +Terminal=false +Type=Application +Categories=Application;Network;P2P diff --git a/tribler-mod/Tribler/Main/Build/Ubuntu/tribler.manpages b/tribler-mod/Tribler/Main/Build/Ubuntu/tribler.manpages new file mode 100644 index 0000000..3f7cc10 --- /dev/null +++ b/tribler-mod/Tribler/Main/Build/Ubuntu/tribler.manpages @@ -0,0 +1 @@ +debian/tribler.1 diff --git a/tribler-mod/Tribler/Main/Build/Ubuntu/tribler.menu b/tribler-mod/Tribler/Main/Build/Ubuntu/tribler.menu new file mode 100644 index 0000000..9e9c795 --- /dev/null +++ b/tribler-mod/Tribler/Main/Build/Ubuntu/tribler.menu @@ -0,0 +1,4 @@ +?package(tribler):needs="x11" section="Apps/Net" \ + title="Tribler" \ + icon="/usr/share/pixmaps/tribler.xpm" \ + command="tribler" diff --git a/tribler-mod/Tribler/Main/Build/Ubuntu/tribler.postinst.debhelper b/tribler-mod/Tribler/Main/Build/Ubuntu/tribler.postinst.debhelper new file mode 100644 index 0000000..0b3b7c0 --- /dev/null +++ b/tribler-mod/Tribler/Main/Build/Ubuntu/tribler.postinst.debhelper @@ -0,0 +1,14 @@ +# Automatically added by dh_installmenu +if [ "$1" = "configure" ] && [ -x "`which update-menus 2>/dev/null`" ]; then + update-menus +fi +# End automatically added section +# Automatically added by dh_python +PYTHON=python2.4 +if which $PYTHON >/dev/null 2>&1 && [ -e /usr/lib/$PYTHON/compileall.py ]; then + DIRLIST=" /usr/share/tribler" + for i in $DIRLIST ; do + $PYTHON /usr/lib/$PYTHON/compileall.py -q $i + done +fi +# End automatically added section diff --git a/tribler-mod/Tribler/Main/Build/Ubuntu/tribler.postrm.debhelper b/tribler-mod/Tribler/Main/Build/Ubuntu/tribler.postrm.debhelper new file mode 100644 index 0000000..2b4be4f --- /dev/null +++ b/tribler-mod/Tribler/Main/Build/Ubuntu/tribler.postrm.debhelper @@ -0,0 +1,3 @@ +# Automatically added by dh_installmenu +if [ -x "`which update-menus 2>/dev/null`" ]; then update-menus ; fi +# End automatically added section diff --git a/tribler-mod/Tribler/Main/Build/Ubuntu/tribler.prerm.debhelper b/tribler-mod/Tribler/Main/Build/Ubuntu/tribler.prerm.debhelper new file mode 100644 index 0000000..6a72f80 --- /dev/null +++ b/tribler-mod/Tribler/Main/Build/Ubuntu/tribler.prerm.debhelper @@ -0,0 +1,5 @@ +# Automatically added by dh_python +dpkg -L tribler | + awk '$0~/\.py$/ {print $0"c\n" $0"o"}' | + xargs rm -f >&2 +# End automatically added section diff --git a/tribler-mod/Tribler/Main/Build/Ubuntu/tribler.sh b/tribler-mod/Tribler/Main/Build/Ubuntu/tribler.sh new file mode 100755 index 0000000..c459cca --- /dev/null +++ b/tribler-mod/Tribler/Main/Build/Ubuntu/tribler.sh @@ -0,0 +1,63 @@ +#!/bin/sh +# Startup script for Ubuntu Linux + +check() +{ + # look for the python executable + PYTHONBIN=`which $1` + if [ "$PYTHONBIN" != "" ]; then + _PYTHONBIN=$PYTHONBIN + fi + + if [ -d "/usr/lib/$1" ]; then + + # look for the python-vlc library + if [ -f "/usr/lib/$1/site-packages/vlc.so" ]; then + _VLCPATH="/usr/lib/$1/site-packages" + fi + + # look for the python-wx library + WXPYTHONVER=`ls -1d 
/usr/lib/$1/site-packages/wx-2.8* 2>/dev/null | grep -v ansi | sed -e 's/.*wx-//g' -e 's/-.*//g' | sort -nr | head -1`
+		if [ "$WXPYTHONVER" != "" ]; then
+			_WXPYTHONPATH=`ls -1d /usr/lib/$1/site-packages/wx-$WXPYTHONVER* | grep -v ansi | head -1`
+		fi
+	fi
+}
+
+confirm()
+{
+	if [ "$1" = "" ]; then
+		echo $2
+		echo "Cannot run Tribler, sorry"
+		exit 1
+	fi
+}
+
+warn()
+{
+	if [ "$1" = "" ]; then
+		echo $2
+		echo "Some parts of Tribler may not function properly, sorry"
+	fi
+}
+
+check "python2.4"
+check "python2.5"
+check "python2.6"
+
+confirm "$_PYTHONBIN" "Unfortunately we were not able to find python (version 2.4, 2.5, or 2.6)."
+confirm "$_WXPYTHONPATH" "Unfortunately we were not able to find a unicode package for wxPython 2.8 (python version 2.4, 2.5, or 2.6)."
+warn "$_VLCPATH" "Unfortunately we were not able to find the python bindings for vlc."
+
+_TRIBLERPATH="/usr/share/tribler"
+
+echo "_PYTHONBIN: $_PYTHONBIN"
+echo "_TRIBLERPATH: $_TRIBLERPATH"
+echo "_WXPYTHONPATH: $_WXPYTHONPATH"
+echo "_VLCPATH: $_VLCPATH"
+
+export PYTHONPATH="$_TRIBLERPATH:$_PYTHONPATH:$_WXPYTHONPATH:$_VLCPATH"
+
+echo "Starting Tribler..."
+cd $_TRIBLERPATH
+exec $_PYTHONBIN -O Tribler/Main/tribler.py "$@" > /tmp/$USER-tribler.log 2>&1
diff --git a/tribler-mod/Tribler/Main/Build/Ubuntu/tribler.substvars b/tribler-mod/Tribler/Main/Build/Ubuntu/tribler.substvars
new file mode 100644
index 0000000..03204f7
--- /dev/null
+++ b/tribler-mod/Tribler/Main/Build/Ubuntu/tribler.substvars
@@ -0,0 +1 @@
+python:Depends=python (<< 2.5), python (>= 2.4)
diff --git a/tribler-mod/Tribler/Main/Build/Ubuntu/tribler.xpm b/tribler-mod/Tribler/Main/Build/Ubuntu/tribler.xpm
new file mode 100644
index 0000000..3dd0066
--- /dev/null
+++ b/tribler-mod/Tribler/Main/Build/Ubuntu/tribler.xpm
@@ -0,0 +1,173 @@
+/* XPM */
+static char *tribler_new[] = {
+/* columns rows colors chars-per-pixel */
+"32 32 135 2",
+" c black",
+". 
c #366F0A0A011E", +"X c #46461B3812BD", +"o c #51A70F0F01AC", +"O c #53531AC50EB9", +"+ c #53A91D3A11D9", +"@ c #460D2104196F", +"# c #545422221818", +"$ c #68DA255E1750", +"% c #755845623BCA", +"& c #997C20590796", +"* c #82493F953316", +"= c #A34D1E1E0359", +"- c #A46B25ED0C0C", +"; c #A6A6358B1D73", +": c #A7FD3F5C29F1", +"> c #A8A844443030", +", c #C61B3BAD2003", +"< c #CECE33DE1515", +"1 c #CFB23B021D8F", +"2 c #EB232F680942", +"3 c #F4F42D2D0505", +"4 c #F5833114095F", +"5 c #F5D833890C7E", +"6 c #F61234FC0DB8", +"7 c #F6A038E31212", +"8 c #F6BD39E413F7", +"9 c #F6F63B581532", +"0 c #F72F3CCB166C", +"q c #D1D14A4A2ED9", +"w c #D27C4F323518", +"e c #D2D251A73838", +"r c #F7A140401B71", +"t c #F84D449A1F1F", +"y c #F6F6457E215A", +"u c #F64B482B2525", +"i c #F72F479D245D", +"p c #F8DB48812379", +"a c #F6BD4E312C49", +"s c #F7134EA42CC9", +"d c #F9314AF52698", +"f c #F9F950502C2C", +"g c #FA4F52C42F4C", +"h c #F84D53C531C0", +"j c #F7685BB13C59", +"k c #FAA45538326B", +"l c #FAFA57AD358B", +"z c #FB505A2138AA", +"x c #FBA55C953BCA", +"c c #F8F85DCF3DAF", +"v c #FAC15ED03E21", +"b c #FBFB5F093EE9", +"n c #F886600A40EB", +"m c #F814617E4326", +"M c #F88666F5492C", +"N c #FA3263804399", +"B c #FC51617E4209", +"V c #FCA663F24528", +"C c #F9DC64F34629", +"Z c #FCFC66664848", +"A c #FA8869304AA0", +"S c #FAA469864ABC", +"D c #F8866F36538C", +"F c #F9156D6D50DF", +"G c #F96A6FE15437", +"H c #FB6C703753E2", +"J c #FAC1731D5673", +"K c #FAA4751F59AF", +"L c #FAC176935A93", +"P c #FAFA76205A04", +"I c #FC8A74AD5976", +"U c #FC8A77E95D5D", +"Y c #FB507A085FEE", +"T c #FAA47C0A61F0", +"R c #FA327F466683", +"E c #FC8A7ED4659E", +"W c #FB5082BB694C", +"Q c #FCC3854C6D50", +"! c #FC8A89DF7255", +"~ c #FC518E71775A", +"^ c #FCFC8F0078EA", +"/ c #FD6E91CA7C26", +"( c #FCFC935A7D9A", +") c #FAC1977A8366", +"_ c #FB8995EB80B9", +"` c #FDA7984283D9", +"' c #FDE0992784F6", +"] c #FAC19D2B8A34", +"[ c #FBFB9D2B8917", +"{ c #FD52A0118D54", +"} c #FAFAA28590AD", +"| c #FBDEABE49B29", +" . c #FCA6AFAFA04A", +".. c #FC51B0CDA112", +"X. c #FC6DB13FA1A1", +"o. c #FD6EB89BAAC7", +"O. c #FE6FBAD7AD3B", +"+. c #FD35C0A3B3EC", +"@. c #FDFDC233B627", +"#. c #FD52C2C2B60B", +"$. c #FCFCC56FBA0F", +"%. c #FDE0C61BBA48", +"&. c #FCFCC6E3BBF4", +"*. c #FE53C7E4BC83", +"=. c #FE53CA1FBF69", +"-. c #FD8BCACAC04E", +";. c #FEFECCCCC2C2", +":. c #FDE0D041C670", +">. c #FD8BD07AC755", +",. c #FE53D227C91E", +"<. c #FEA9D40CCB83", +"1. c #FE1AD6B9CE23", +"2. c #FD8BD765CF96", +"3. c #FE70DB2FD41B", +"4. c #FE8CDE4FD7D7", +"5. c #FE6FE371DDDD", +"6. c #FEC5E4C7DF89", +"7. c #FE8CE53AE018", +"8. c #FEC5E83DE3AA", +"9. c #FE53E9E9E58F", +"0. c #FE6FEA5BE61E", +"q. c #FEC5EBB2E7CA", +"w. c #FEE1EC24E859", +"e. c #FEFEEB40E73C", +"r. c #FF37ED25E95A", +"t. c #FF54F22AEF7D", +"y. c #FF37F52DF30F", +"u. c #FF70F830F6A0", +"i. c #FFA9F915F7BE", +"p. c #FF8DFE36FDC4", +"a. c #FFA9FEA8FE53", +"s. c #FFC6FF1BFEE1", +"d. c #FFE2FF8DFF70", +"f. c gray100", +"g. c None", +/* pixels */ +"g.g.g.g.g.. 
o o o o o o O O O O O O O O + # # # # X g.g.g.g.g.g.", +"g.g.g.g.g.= 3 3 3 3 6 t f f f f f f f z V Z V Z Z e g.g.g.g.g.g.", +"g.g.g.g.g.= 3 3 7 p f f A K J b f z V Z Z Z Z Z Z e g.g.g.g.g.g.", +"g.g.g.g.g.= 3 0 f f f W 4.q.8.,.J Z Z Z Z Z Z Z Z e g.g.g.g.g.g.", +"g.g.g.g.g.- f f f f K w.f.f.f.f.5.{ { { J V Z Z b w g.g.g.g.g.g.", +"g.g.g.g.g.; f f f f / f.f.f.f.f.f.f.f.f.%.I Z z f q g.g.g.g.g.g.", +"g.g.g.g.g.; f f f f ' f.f.f.f.f.f.f.f.f.f.` f f f q g.g.g.g.g.g.", +"g.g.g.g.g.; f f k b Q q.f.f.f.f.f.f.f.f.f.~ f f f q g.g.g.g.g.g.", +"g.g.g.g.g.; f V V Z Z ^ 7.u.u.4.,.y.f.f.;.N f f f q g.g.g.g.g.g.", +"g.g.g.g.g.: V Z Z Z Z Z E / / U b ! .{ A f f f f q g.g.g.g.g.g.", +"g.g.g.g.g.> Z Z Z Z Z Z Z Z z f f f f f f f f p 0 1 g.g.g.g.g.g.", +"g.g.g.g.g.> Z Z Z Z Z Z =.=.#.#.#.#.#.#.W f d 4 3 < g.g.g.g.g.g.", +"g.g.g.g.g.> Z Z Z Z V Y f.f.f.f.f.f.f.f.;.0 3 3 3 < g.g.g.g.g.g.", +"g.g.g.g.g.> Z Z Z x k K f.f.f.f.f.f.f.f.=.9 3 3 3 < g.g.g.g.g.g.", +"g.g.g.g.g.> V b f f f K f.f.f.f.f.f.f.f.;.r 3 3 3 < g.g.g.g.g.g.", +"g.g.g.g.g.> k f f f f K f.f.f.f.f.f.f.f.<.u 3 3 3 < g.g.g.g.g.g.", +"g.g.g.g.g.; f f f f f T f.f.f.f.f.f.f.f.3.s 3 3 3 < g.g.g.g.g.g.", +"g.g.g.g.g.; f f f f f _ f.f.f.f.:.q.f.f.7.j 3 3 3 < g.g.g.g.g.g.", +"g.g.g.g.g.; f f f p u %.f.f.f.f.C +.f.f.f.R 3 3 3 < g.g.g.g.g.g.", +"g.g.g.g.g.; f f p 4 a 3.f.f.f.f.6 ] f.f.f.#.3 3 3 < g.g.g.g.g.g.", +"g.g.g.g.g.; t 6 3 3 { f.f.f.f.X.3 D f.f.f.p.u 3 3 < g.g.g.g.g.g.", +"g.g.g.g.g.- 3 3 3 0 8.f.f.f.r.D 3 7 f.f.f.f.| 6 3 < g.g.g.g.g.g.", +"g.g.g.g.g.= 3 3 5 ) f.f.f.f.o.9 3 3 X.f.f.6.D 3 3 < g.g.g.g.g.g.", +"g.g.g.g.g.= 3 3 m w.f.f.f.f.c 3 3 3 M q. .c 3 3 3 < g.g.g.g.g.g.", +"g.g.g.g.g.= 3 3 2.f.f.f.u.} 3 3 3 3 3 3 3 3 3 3 3 < g.g.g.g.g.g.", +"g.g.g.g.g.= 3 3 O.f.f.f.>.y 3 3 3 3 3 3 3 3 3 3 3 < g.g.g.g.g.g.", +"g.g.g.g.g.= 3 3 u &.f.<.0 3 3 3 3 3 3 3 3 3 3 3 3 < g.g.g.g.g.g.", +"g.g.g.g.g.= 3 3 3 s X.k 3 3 3 3 3 3 3 3 3 3 3 3 3 < g.g.g.g.g.g.", +"g.g.g.g.g.= 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 , g.g.g.g.g.g.", +"g.g.g.g.g.* 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 2 $ g.g.g.g.g.g.", +"g.g.g.g.g.g.% & = = = = = = = = = = = = = = = = @ g.g.g.g.g.g.g.", +"g.g.g.g.g.g.g.g.g.g.g.g.g.g.g.g.g.g.g.g.g.g.g.g.g.g.g.g.g.g.g.g." +}; diff --git a/tribler-mod/Tribler/Main/Build/Ubuntu/tribler_big.xpm b/tribler-mod/Tribler/Main/Build/Ubuntu/tribler_big.xpm new file mode 100644 index 0000000..28b2b61 --- /dev/null +++ b/tribler-mod/Tribler/Main/Build/Ubuntu/tribler_big.xpm @@ -0,0 +1,310 @@ +/* XPM */ +static char *tribler_xpm[] = { +/* columns rows colors chars-per-pixel */ +"48 48 256 2", +" c None", +". 
c #BB371D", +"X c #9D4A39", +"o c #BD4730", +"O c #945A4E", +"+ c #AA7870", +"@ c #C83718", +"# c #F42D05", +"$ c #F9502C", +"% c #FC6648", +"& c #A3807A", +"* c #FA8770", +"= c #FAA695", +"- c #FCC1B5", +"; c #FEFBFA", +": c gray100", +"> c #101010", +", c #111111", +"< c gray7", +"1 c #131313", +"2 c gray8", +"3 c #151515", +"4 c #161616", +"5 c gray9", +"6 c #181818", +"7 c #191919", +"8 c gray10", +"9 c #1B1B1B", +"0 c gray11", +"q c #1D1D1D", +"w c #1E1E1E", +"e c gray12", +"r c #202020", +"t c gray13", +"y c #222222", +"u c #232323", +"i c gray14", +"p c #252525", +"a c gray15", +"s c #272727", +"d c #282828", +"f c gray16", +"g c #2A2A2A", +"h c gray17", +"j c #2C2C2C", +"k c #2D2D2D", +"l c gray18", +"z c #2F2F2F", +"x c gray19", +"c c #313131", +"v c #323232", +"b c gray20", +"n c #343434", +"m c #353535", +"M c gray21", +"N c #373737", +"B c gray22", +"V c #393939", +"C c #3A3A3A", +"Z c gray23", +"A c #3C3C3C", +"S c gray24", +"D c #3E3E3E", +"F c #3F3F3F", +"G c gray25", +"H c #414141", +"J c gray26", +"K c #434343", +"L c #444444", +"P c gray27", +"I c #464646", +"U c gray28", +"Y c #484848", +"T c #494949", +"R c gray29", +"E c #4B4B4B", +"W c #4C4C4C", +"Q c gray30", +"! c #4E4E4E", +"~ c gray31", +"^ c #505050", +"/ c #515151", +"( c gray32", +") c #535353", +"_ c gray33", +"` c #555555", +"' c #565656", +"] c gray34", +"[ c #585858", +"{ c gray35", +"} c #5A5A5A", +"| c #5B5B5B", +" . c gray36", +".. c #5D5D5D", +"X. c gray37", +"o. c #5F5F5F", +"O. c #606060", +"+. c gray38", +"@. c #626262", +"#. c gray39", +"$. c #646464", +"%. c #656565", +"&. c gray40", +"*. c #676767", +"=. c #686868", +"-. c DimGray", +";. c #6A6A6A", +":. c gray42", +">. c #6C6C6C", +",. c #6D6D6D", +"<. c gray43", +"1. c #6F6F6F", +"2. c gray44", +"3. c #717171", +"4. c #727272", +"5. c gray45", +"6. c #747474", +"7. c gray46", +"8. c #767676", +"9. c #777777", +"0. c gray47", +"q. c #797979", +"w. c gray48", +"e. c #7B7B7B", +"r. c #7C7C7C", +"t. c gray49", +"y. c #7E7E7E", +"u. c gray50", +"i. c #808080", +"p. c #818181", +"a. c gray51", +"s. c #838383", +"d. c #848484", +"f. c gray52", +"g. c #868686", +"h. c gray53", +"j. c #888888", +"k. c #898989", +"l. c gray54", +"z. c #8B8B8B", +"x. c gray55", +"c. c #8D8D8D", +"v. c #8E8E8E", +"b. c gray56", +"n. c #909090", +"m. c gray57", +"M. c #929292", +"N. c #939393", +"B. c gray58", +"V. c #959595", +"C. c gray59", +"Z. c #979797", +"A. c #989898", +"S. c gray60", +"D. c #9A9A9A", +"F. c #9B9B9B", +"G. c gray61", +"H. c #9D9D9D", +"J. c gray62", +"K. c #9F9F9F", +"L. c #A0A0A0", +"P. c gray63", +"I. c #A2A2A2", +"U. c gray64", +"Y. c #A4A4A4", +"T. c #A5A5A5", +"R. c gray65", +"E. c #A7A7A7", +"W. c gray66", +"Q. c #A9A9A9", +"!. c #AAAAAA", +"~. c gray67", +"^. c #ACACAC", +"/. c gray68", +"(. c #AEAEAE", +"). c #AFAFAF", +"_. c gray69", +"`. c #B1B1B1", +"'. c #B2B2B2", +"]. c gray70", +"[. c #B4B4B4", +"{. c gray71", +"}. c #B6B6B6", +"|. c #B7B7B7", +" X c gray72", +".X c #B9B9B9", +"XX c gray73", +"oX c #BBBBBB", +"OX c #BCBCBC", +"+X c gray74", +"@X c gray", +"#X c gray75", +"$X c #C0C0C0", +"%X c #C1C1C1", +"&X c gray76", +"*X c #C3C3C3", +"=X c gray77", +"-X c #C5C5C5", +";X c #C6C6C6", +":X c gray78", +">X c #C8C8C8", +",X c gray79", +"0pgo(+^3jAsZ`a(rIGKijb5-<(Ci$27Ei0eQ$SZ_x;$%F2jp+=f3w2=kDI! 
zJ)b>y?~k)X_+!7M*!ofseNUoK5q*N`<7W$?Z=0=*%Eyh8K79)M0w*8005kM)fOPuI zkQw^E1$_Ss*bkYZ?_0q4uYmoK8T!5jeE$mA51FCwTfq0Pfc^0Q%&@Zyb>~r2hu;U5 zsgg@wzHg`@d<9PYu=X$HzKM{l=}{9#=mI+Z1#1 zl|h)Z8dNPOD=eVLaMwYMm}(9^`7ZDe#?~_Q^jL8X!@W*DhKo0t3{WCT)wZ<{cY9lL z{dx@Jv+NA755-ft$l5L*jA^f7!W>Ym_~=R`trvOOCghbMZ3}LD1go}Sasuv8#Pd6m zzgz5ePaM(*^C3!e?8uK$zc>?bp5p2`9iCwDCV#-2hX!$MXl3~nBoNK~O4~yRE&qOH${j6|W+04epvP7%s@f-LasKOKLtQ$K&P*!FFDT&@)4e z=t_qU0dPJfvWx4k`^qZjW}= z%re7%7Bue<&lu7JaAPrgdX?i|Xa+H%HdgAy&@lhQN>qGq#`1j{1VzPi4vNd+jB?^7uED6nHkEaGgt7KA+?rq>1b$H z4o_|wyIQcKF$c|y%ISO)V$ztQY!Ax?j~Nmxnu+=7XumTmudq+jG_@*v+Ge!goDKV- zVk1)}BMb>+bXvc7it01CZ-JNvmlHW>^s_onNc%EF*-GXL1{vNGDOPsn)ZB{4*Wrbo zvI3|7M8GU&s2F4rB!dh`PDAMj%4w0;y|o`lPb!*sKXp3!&3>qu$yCW8!z-qW@EUcZ zh2HMI`5Q?|QK=MjLoKH^GefoAOq2~WBr>$hD^0p-&w^m4Y}BDA@nnWti?vKC5b*i;t86XMFP%)FKl0k;klNErv=Rt3P(@LWn zoh&m$U5gD|X^>Sk@WJr9-(;}9Mx483)5xLw`fV3k`@bJ6b4!fKAYn-7oH*>3}cM+MPnd}^O4I;y_ zp_iGV!|rDO+H8x~)u?mohT6aRUeCGQ!GIL96>Y5DZSmR)w5=Z TT0n>45Is}Py~gKhw}9 + + + My Manifest Testing application + + + + + + + diff --git a/tribler-mod/Tribler/Main/Build/Win32/tribler.nsi b/tribler-mod/Tribler/Main/Build/Win32/tribler.nsi new file mode 100644 index 0000000..031cc1d --- /dev/null +++ b/tribler-mod/Tribler/Main/Build/Win32/tribler.nsi @@ -0,0 +1,254 @@ +!define PRODUCT "Tribler" +!define VERSION "5.1.2" + +!include "MUI.nsh" + +;-------------------------------- +;Configuration + +;General + Name "${PRODUCT} ${VERSION}" +OutFile "${PRODUCT}_${VERSION}.exe" + +;Folder selection page +InstallDir "$PROGRAMFILES\${PRODUCT}" + +;Remember install folder +InstallDirRegKey HKCU "Software\${PRODUCT}" "" + +; +; Uncomment for smaller file size +; +SetCompressor "lzma" +; +; Uncomment for quick built time +; +;SetCompress "off" + +CompletedText "Installation completed. 
Thank you for choosing ${PRODUCT}" + +BrandingText "${PRODUCT}" + +;-------------------------------- +;Modern UI Configuration + +!define MUI_ABORTWARNING +!define MUI_HEADERIMAGE +!define MUI_HEADERIMAGE_BITMAP "heading.bmp" + +;-------------------------------- +;Pages + +!define MUI_LICENSEPAGE_RADIOBUTTONS +!define MUI_LICENSEPAGE_RADIOBUTTONS_TEXT_ACCEPT "I accept" +!define MUI_LICENSEPAGE_RADIOBUTTONS_TEXT_DECLINE "I decline" +!define MUI_FINISHPAGE_RUN "$INSTDIR\tribler.exe" + +!insertmacro MUI_PAGE_LICENSE "binary-LICENSE.txt" +!insertmacro MUI_PAGE_COMPONENTS +!insertmacro MUI_PAGE_DIRECTORY +!insertmacro MUI_PAGE_INSTFILES +!insertmacro MUI_PAGE_FINISH + +!insertmacro MUI_UNPAGE_CONFIRM +!insertmacro MUI_UNPAGE_INSTFILES + +;!insertmacro MUI_DEFAULT UMUI_HEADERIMAGE_BMP heading.bmp" + +;-------------------------------- +;Languages + +!insertmacro MUI_LANGUAGE "English" + +;-------------------------------- +;Language Strings + +;Description +LangString DESC_SecMain ${LANG_ENGLISH} "Install ${PRODUCT}" +LangString DESC_SecDesk ${LANG_ENGLISH} "Create Desktop Shortcuts" +LangString DESC_SecStart ${LANG_ENGLISH} "Create Start Menu Shortcuts" +LangString DESC_SecDefaultTorrent ${LANG_ENGLISH} "Associate .torrent files with ${PRODUCT}" +LangString DESC_SecDefaultTStream ${LANG_ENGLISH} "Associate .tstream files with ${PRODUCT}" + +;-------------------------------- +;Installer Sections + +Section "!Main EXE" SecMain + SectionIn RO + SetOutPath "$INSTDIR" + File *.txt + File tribler.exe.manifest + File tribler.exe + File ffmpeg.exe + File /r vlc + File *.bat + Delete "$INSTDIR\*.pyd" + File *.pyd + Delete "$INSTDIR\python*.dll" + Delete "$INSTDIR\wx*.dll" + File *.dll + Delete "$INSTDIR\*.zip" + File *.zip + CreateDirectory "$INSTDIR\Tribler" + SetOutPath "$INSTDIR\Tribler" + File Tribler\*.sql + CreateDirectory "$INSTDIR\Tribler\Core" + SetOutPath "$INSTDIR\Tribler\Core" + File Tribler\Core\*.txt + CreateDirectory "$INSTDIR\Tribler\Core\Statistics" + SetOutPath "$INSTDIR\Tribler\Core\Statistics" + File Tribler\Core\Statistics\*.txt + File Tribler\Core\Statistics\*.sql + CreateDirectory "$INSTDIR\Tribler\Images" + SetOutPath "$INSTDIR\Tribler\Images" + File Tribler\Images\*.* + CreateDirectory "$INSTDIR\Tribler\Video" + CreateDirectory "$INSTDIR\Tribler\Video\Images" + SetOutPath "$INSTDIR\Tribler\Video\Images" + File Tribler\Video\Images\*.* + CreateDirectory "$INSTDIR\Tribler\Lang" + SetOutPath "$INSTDIR\Tribler\Lang" + IfFileExists user.lang userlang + File Tribler\Lang\*.* + userlang: + File /x user.lang Tribler\Lang\*.* + ; Main client specific + CreateDirectory "$INSTDIR\Tribler" + CreateDirectory "$INSTDIR\Tribler\Main\vwxGUI" + CreateDirectory "$INSTDIR\Tribler\Main\vwxGUI\images" + CreateDirectory "$INSTDIR\Tribler\Main\vwxGUI\images\5.0" + SetOutPath "$INSTDIR\Tribler\Main\vwxGUI" + File Tribler\Main\vwxGUI\*.* + SetOutPath "$INSTDIR\Tribler\Main\vwxGUI\images" + File Tribler\Main\vwxGUI\images\*.* + SetOutPath "$INSTDIR\Tribler\Main\vwxGUI\images\5.0" + File Tribler\Main\vwxGUI\images\5.0\*.* + ; Categories + CreateDirectory "$INSTDIR\Tribler\Category" + SetOutPath "$INSTDIR\Tribler\Category" + File Tribler\Category\*.conf + File Tribler\Category\*.filter + ; End + SetOutPath "$INSTDIR" + WriteRegStr HKEY_LOCAL_MACHINE "Software\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT}" "DisplayName" "${PRODUCT} (remove only)" + WriteRegStr HKEY_LOCAL_MACHINE "Software\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT}" "UninstallString" "$INSTDIR\Uninstall.exe" + + ; Now 
writing to KHEY_LOCAL_MACHINE only -- remove references to uninstall from current user + DeleteRegKey HKEY_CURRENT_USER "SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT}" + ; Remove old error log if present + Delete "$INSTDIR\tribler.exe.log" + + WriteUninstaller "$INSTDIR\Uninstall.exe" + + ; Add an application to the firewall exception list - All Networks - All IP Version - Enabled + SimpleFC::AddApplication "Tribler" "$INSTDIR\${PRODUCT}.exe" 0 2 "" 1 + ; Pop $0 ; return error(1)/success(0) + +SectionEnd + + +Section "Desktop Icons" SecDesk + CreateShortCut "$DESKTOP\${PRODUCT}.lnk" "$INSTDIR\${PRODUCT}.exe" "" +SectionEnd + + +Section "Startmenu Icons" SecStart + CreateDirectory "$SMPROGRAMS\${PRODUCT}" + CreateShortCut "$SMPROGRAMS\${PRODUCT}\Uninstall.lnk" "$INSTDIR\Uninstall.exe" "" "$INSTDIR\Uninstall.exe" 0 + CreateShortCut "$SMPROGRAMS\${PRODUCT}\${PRODUCT}.lnk" "$INSTDIR\${PRODUCT}.exe" "" "$INSTDIR\${PRODUCT}.exe" 0 +SectionEnd + + +Section "Make Default For .torrent" SecDefaultTorrent + ; Delete ddeexec key if it exists + DeleteRegKey HKCR "bittorrent\shell\open\ddeexec" + WriteRegStr HKCR .torrent "" bittorrent + WriteRegStr HKCR .torrent "Content Type" application/x-bittorrent + WriteRegStr HKCR "MIME\Database\Content Type\application/x-bittorrent" Extension .torrent + WriteRegStr HKCR bittorrent "" "TORRENT File" + WriteRegBin HKCR bittorrent EditFlags 00000100 + WriteRegStr HKCR "bittorrent\shell" "" open + WriteRegStr HKCR "bittorrent\shell\open\command" "" '"$INSTDIR\${PRODUCT}.exe" "%1"' + WriteRegStr HKCR "bittorrent\DefaultIcon" "" "$INSTDIR\Tribler\Images\torrenticon.ico" +SectionEnd + + +Section "Make Default For .tstream" SecDefaultTStream + ; Arno: Poor man's attempt to check if already registered + ReadRegStr $0 HKCR .tstream "" + ReadRegStr $1 HKCR "tstream\shell\open\command" "" + StrCpy $2 $1 -4 + StrCmp $0 "" 0 +2 + return + MessageBox MB_YESNO ".tstream already registered to $2. Overwrite?" 
IDYES +2 IDNO 0 + Return + DetailPrint "Arno registering .tstream: $0 $1 $2" + + ; Register + WriteRegStr HKCR .tstream "" tstream + WriteRegStr HKCR .tstream "Content Type" application/x-tribler-stream + WriteRegStr HKCR "MIME\Database\Content Type\application/x-tribler-stream" Extension .tstream + WriteRegStr HKCR tstream "" "TSTREAM File" + WriteRegBin HKCR tstream EditFlags 00000100 + WriteRegStr HKCR "tstream\shell" "" open + WriteRegStr HKCR "tstream\shell\open\command" "" '"$INSTDIR\${PRODUCT}.exe" "%1"' + WriteRegStr HKCR "tstream\DefaultIcon" "" "$INSTDIR\Tribler\Images\SwarmPlayerIcon.ico" +SectionEnd + + +;-------------------------------- +;Descriptions + +!insertmacro MUI_FUNCTION_DESCRIPTION_BEGIN +!insertmacro MUI_DESCRIPTION_TEXT ${SecMain} $(DESC_SecMain) +!insertmacro MUI_DESCRIPTION_TEXT ${SecDesk} $(DESC_SecDesk) +!insertmacro MUI_DESCRIPTION_TEXT ${SecStart} $(DESC_SecStart) +;!insertmacro MUI_DESCRIPTION_TEXT ${SecLang} $(DESC_SecLang) +!insertmacro MUI_DESCRIPTION_TEXT ${SecDefaultTorrent} $(DESC_SecDefaultTorrent) +!insertmacro MUI_DESCRIPTION_TEXT ${SecDefaultTStream} $(DESC_SecDefaultTStream) +!insertmacro MUI_FUNCTION_DESCRIPTION_END + +;-------------------------------- +;Uninstaller Section + +Section "Uninstall" + + Delete "$INSTDIR\torrent\*.*" + RMDir "$INSTDIR\torrent" + + Delete "$INSTDIR\icons\*.*" + RMDir "$INSTDIR\icons" + + Delete "$INSTDIR\lang\*.*" + RMDir "$INSTDIR\lang" + + Delete "$INSTDIR\*.*" + RMDir "$INSTDIR" + + Delete "$DESKTOP\${PRODUCT}.lnk" + Delete "$SMPROGRAMS\${PRODUCT}\*.*" + RmDir "$SMPROGRAMS\${PRODUCT}" + + DeleteRegKey HKEY_LOCAL_MACHINE "SOFTWARE\${PRODUCT}" + DeleteRegKey HKEY_LOCAL_MACHINE "SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT}" + + ; Remove an application from the firewall exception list + SimpleFC::RemoveApplication "$INSTDIR\${PRODUCT}.exe" + ; Pop $0 ; return error(1)/success(0) + +SectionEnd + +;-------------------------------- +;Functions Section + +Function .onInit + System::Call 'kernel32::CreateMutexA(i 0, i 0, t "Tribler") i .r1 ?e' + + Pop $R0 + + StrCmp $R0 0 +3 + + MessageBox MB_OK "The installer is already running." 
+ + Abort +FunctionEnd diff --git a/tribler-mod/Tribler/Main/Dialogs/BandwidthSelector.py b/tribler-mod/Tribler/Main/Dialogs/BandwidthSelector.py new file mode 100644 index 0000000..4423131 --- /dev/null +++ b/tribler-mod/Tribler/Main/Dialogs/BandwidthSelector.py @@ -0,0 +1,66 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information + +import wx + + +class BandwidthSelector(wx.Dialog): + def __init__(self, parent, utility): + self.parent = parent + self.utility = utility + + style = wx.DEFAULT_DIALOG_STYLE + title = self.utility.lang.get('selectbandwidthtitle') + wx.Dialog.__init__(self,parent,-1,title,style=style,size=(470,180)) + + sizer = wx.GridBagSizer(5,20) + + self.bwctrl = BWControl(self) + + buttonbox = wx.BoxSizer(wx.HORIZONTAL) + okbtn = wx.Button(self, wx.ID_OK, label=self.utility.lang.get('ok'), style = wx.BU_EXACTFIT) + buttonbox.Add(okbtn, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + cancelbtn = wx.Button(self, wx.ID_CANCEL, label=self.utility.lang.get('cancel'), style = wx.BU_EXACTFIT) + buttonbox.Add(cancelbtn, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + + explain = wx.StaticText(self, -1, self.utility.lang.get('selectdlulbwexplan')) + explain.Wrap( 450 ) + + prompt = wx.StaticText(self, -1, self.utility.lang.get('selectdlulbwprompt')) + prompt.Wrap( 450 ) + + sizer.Add( explain, (1,1), span=(1,2) ) + sizer.Add( prompt, (2,1) ) + sizer.Add( self.bwctrl, (2,2) ) + sizer.Add( buttonbox, (3,1), span=(2,1) ) + + self.SetSizer(sizer) + + + def getUploadBandwidth(self): + return self.bwctrl.getUploadBandwidth() + + +class BWControl(wx.Panel): + + def __init__(self,parent,*args,**kwargs): + + self.utility = parent.utility + + wx.Panel.__init__(self, parent, -1, *args, **kwargs) + + self.uploadbwvals = [ 128/8, 256/8, 512/8, 1024/8, 2048/8, 0] + self.bwoptions = ['xxxx/128 kbps', 'xxxx/256 kbps', 'xxxx/512 kbps', 'xxxx/1024 kbps', 'xxxx/2048 kbps', 'more (LAN)'] + self.bwsel = wx.Choice(self, + -1, + #self.utility.lang.get('selectdlulbwprompt'), + wx.DefaultPosition, + wx.DefaultSize, + self.bwoptions, + 3) + + def getUploadBandwidth(self): + """ in Kbyte/s """ + index = self.bwsel.GetSelection() + return self.uploadbwvals[index] diff --git a/tribler-mod/Tribler/Main/Dialogs/BandwidthSelector.py.bak b/tribler-mod/Tribler/Main/Dialogs/BandwidthSelector.py.bak new file mode 100644 index 0000000..b1b1a74 --- /dev/null +++ b/tribler-mod/Tribler/Main/Dialogs/BandwidthSelector.py.bak @@ -0,0 +1,65 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information + +import wx + + +class BandwidthSelector(wx.Dialog): + def __init__(self, parent, utility): + self.parent = parent + self.utility = utility + + style = wx.DEFAULT_DIALOG_STYLE + title = self.utility.lang.get('selectbandwidthtitle') + wx.Dialog.__init__(self,parent,-1,title,style=style,size=(470,180)) + + sizer = wx.GridBagSizer(5,20) + + self.bwctrl = BWControl(self) + + buttonbox = wx.BoxSizer(wx.HORIZONTAL) + okbtn = wx.Button(self, wx.ID_OK, label=self.utility.lang.get('ok'), style = wx.BU_EXACTFIT) + buttonbox.Add(okbtn, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + cancelbtn = wx.Button(self, wx.ID_CANCEL, label=self.utility.lang.get('cancel'), style = wx.BU_EXACTFIT) + buttonbox.Add(cancelbtn, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + + explain = wx.StaticText(self, -1, self.utility.lang.get('selectdlulbwexplan')) + explain.Wrap( 450 ) + + prompt = wx.StaticText(self, -1, self.utility.lang.get('selectdlulbwprompt')) + prompt.Wrap( 450 ) 
+ + sizer.Add( explain, (1,1), span=(1,2) ) + sizer.Add( prompt, (2,1) ) + sizer.Add( self.bwctrl, (2,2) ) + sizer.Add( buttonbox, (3,1), span=(2,1) ) + + self.SetSizer(sizer) + + + def getUploadBandwidth(self): + return self.bwctrl.getUploadBandwidth() + + +class BWControl(wx.Panel): + + def __init__(self,parent,*args,**kwargs): + + self.utility = parent.utility + + wx.Panel.__init__(self, parent, -1, *args, **kwargs) + + self.uploadbwvals = [ 128/8, 256/8, 512/8, 1024/8, 2048/8, 0] + self.bwoptions = ['xxxx/128 kbps', 'xxxx/256 kbps', 'xxxx/512 kbps', 'xxxx/1024 kbps', 'xxxx/2048 kbps', 'more (LAN)'] + self.bwsel = wx.Choice(self, + -1, + #self.utility.lang.get('selectdlulbwprompt'), + wx.DefaultPosition, + wx.DefaultSize, + self.bwoptions, + 3) + + def getUploadBandwidth(self): + """ in Kbyte/s """ + index = self.bwsel.GetSelection() + return self.uploadbwvals[index] diff --git a/tribler-mod/Tribler/Main/Dialogs/GUITaskQueue.py b/tribler-mod/Tribler/Main/Dialogs/GUITaskQueue.py new file mode 100644 index 0000000..db77608 --- /dev/null +++ b/tribler-mod/Tribler/Main/Dialogs/GUITaskQueue.py @@ -0,0 +1,34 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information +# +# GUITaskQueue is a server that executes tasks on behalf of the GUI that are too +# time consuming to be run by the actual GUI Thread (MainThread). Note that +# you still need to delegate the actual updating of the GUI to the MainThread via +# wx.CallAfter +# + +from Tribler.Utilities.TimedTaskQueue import TimedTaskQueue + +DEBUG = False + +class GUITaskQueue(TimedTaskQueue): + + __single = None + + def __init__(self): + if GUITaskQueue.__single: + raise RuntimeError, "GUITaskQueue is singleton" + GUITaskQueue.__single = self + + TimedTaskQueue.__init__(self) + + def getInstance(*args, **kw): + if GUITaskQueue.__single is None: + GUITaskQueue(*args, **kw) + return GUITaskQueue.__single + getInstance = staticmethod(getInstance) + + def resetSingleton(self): + """ For testing purposes """ + GUITaskQueue.__single = None diff --git a/tribler-mod/Tribler/Main/Dialogs/GUITaskQueue.py.bak b/tribler-mod/Tribler/Main/Dialogs/GUITaskQueue.py.bak new file mode 100644 index 0000000..f78a062 --- /dev/null +++ b/tribler-mod/Tribler/Main/Dialogs/GUITaskQueue.py.bak @@ -0,0 +1,33 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information +# +# GUITaskQueue is a server that executes tasks on behalf of the GUI that are too +# time consuming to be run by the actual GUI Thread (MainThread). 
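# Illustrative usage sketch (not part of this patch): it assumes that
# TimedTaskQueue, which GUITaskQueue inherits from, provides
# add_task(callable, delay_in_seconds), and it shows the wx.CallAfter
# delegation described in this comment. The names fetch_stats and
# status_label are hypothetical placeholders for real application objects.
#
#   import wx
#   from Tribler.Main.Dialogs.GUITaskQueue import GUITaskQueue
#
#   def background_job():
#       stats = fetch_stats()                       # slow, non-GUI work off the MainThread
#       wx.CallAfter(status_label.SetLabel, stats)  # GUI update handed back to MainThread
#
#   GUITaskQueue.getInstance().add_task(background_job, 0)  # schedule immediately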
Note that +# you still need to delegate the actual updating of the GUI to the MainThread via +# wx.CallAfter +# + +from Tribler.Utilities.TimedTaskQueue import TimedTaskQueue + +DEBUG = False + +class GUITaskQueue(TimedTaskQueue): + + __single = None + + def __init__(self): + if GUITaskQueue.__single: + raise RuntimeError, "GUITaskQueue is singleton" + GUITaskQueue.__single = self + + TimedTaskQueue.__init__(self) + + def getInstance(*args, **kw): + if GUITaskQueue.__single is None: + GUITaskQueue(*args, **kw) + return GUITaskQueue.__single + getInstance = staticmethod(getInstance) + + def resetSingleton(self): + """ For testing purposes """ + GUITaskQueue.__single = None diff --git a/tribler-mod/Tribler/Main/Dialogs/TorrentMaker.py b/tribler-mod/Tribler/Main/Dialogs/TorrentMaker.py new file mode 100644 index 0000000..bff4727 --- /dev/null +++ b/tribler-mod/Tribler/Main/Dialogs/TorrentMaker.py @@ -0,0 +1,1029 @@ +from time import localtime, strftime +# Written by Bram Cohen +# modified for multitracker by John Hoffman +# modified for Merkle hashes and digital signatures by Arno Bakker +# see LICENSE.txt for license information + +import sys +import wx +import wx.lib.imagebrowser as ib +import os + +from threading import Event, Thread, currentThread +from tempfile import mkstemp +from traceback import print_exc + +from Tribler.Core.API import * +from Tribler.Main.globals import DefaultDownloadStartupConfig + +FILESTOIGNORE = ['core', 'CVS'] + +DEBUG = False + + +################################################################ +# +# Class: MiscInfoPanel +# +# Panel for defining miscellaneous settings for a torrent +# +################################################################ +class MiscInfoPanel(wx.Panel): + def __init__(self, parent, dialog): + wx.Panel.__init__(self, parent, -1) + + self.dialog = dialog + self.utility = dialog.utility + + outerbox = wx.BoxSizer(wx.VERTICAL) + + # Created by: + outerbox.Add(wx.StaticText(self, -1, self.utility.lang.get('createdby')), 0, wx.EXPAND|wx.ALL, 5) + self.createdBy = wx.TextCtrl(self, -1) + outerbox.Add(self.createdBy, 0, wx.EXPAND|wx.ALL, 5) + + # Comment: + outerbox.Add(wx.StaticText(self, -1, self.utility.lang.get('comment')), 0, wx.EXPAND|wx.ALL, 5) + self.commentCtl = wx.TextCtrl(self, -1, size = (-1, 75), style = wx.TE_MULTILINE|wx.HSCROLL|wx.TE_DONTWRAP) + outerbox.Add(self.commentCtl, 0, wx.EXPAND|wx.ALL, 5) + + # Playtime: + outerbox.Add(wx.StaticText(self, -1, self.utility.lang.get('playtime')), 0, wx.EXPAND|wx.ALL, 5) + self.playtCtl = wx.TextCtrl(self, -1) + outerbox.Add(self.playtCtl, 0, wx.EXPAND|wx.ALL, 5) + + # Thumbnail: + ybox = wx.BoxSizer(wx.VERTICAL) + ybox.Add(wx.StaticText(self, -1, self.utility.lang.get('addthumbnail')), 0, wx.EXPAND|wx.ALL, 5) + xbox = wx.BoxSizer(wx.HORIZONTAL) + self.thumbCtl = wx.TextCtrl(self, -1) + xbox.Add(self.thumbCtl, 1, wx.EXPAND|wx.ALL, 5) + browsebtn = wx.Button(self, -1, "...") + self.Bind(wx.EVT_BUTTON, self.onBrowseThumb, browsebtn) + xbox.Add(browsebtn, 0, wx.ALL, 5) + ybox.Add(xbox, 0, wx.EXPAND|wx.ALL, 5) + outerbox.Add(ybox, 0, wx.ALL|wx.EXPAND, 5) + + self.SetSizerAndFit(outerbox) + + self.loadValues() + + def loadValues(self, Read = None): + if Read is None: + Read = self.utility.makerconfig.Read + + self.createdBy.SetValue(Read('created_by')) + self.commentCtl.SetValue(Read('comment')) + + def saveConfig(self, event = None): + self.utility.makerconfig.Write('created_by', self.createdBy.GetValue()) + self.utility.makerconfig.Write('comment', self.commentCtl.GetValue()) + + def 
getParams(self): + params = {} + + thumbfn = self.thumbCtl.GetValue() + if len(thumbfn) > 0: + try: + im = wx.Image(thumbfn) + ims = im.Scale(171,96) + + [thumbhandle,thumbfilename] = mkstemp("torrent-thumb") + os.close(thumbhandle) + ims.SaveFile(thumbfilename,wx.BITMAP_TYPE_JPEG) + params['thumb'] = thumbfilename + except: + print_exc() + + playt = self.playtCtl.GetValue() + if playt != '': + params['playtime'] = playt + + comment = self.commentCtl.GetValue() + if comment != '': + params['comment'] = comment + + createdby = self.createdBy.GetValue() + if comment != '': + params['created by'] = createdby + + return params + + + def onBrowseThumb(self, evt): + path = '' + + # open the image browser dialog + dlg = ib.ImageDialog(self, path) + dlg.Centre() + if dlg.ShowModal() == wx.ID_OK: + iconpath = dlg.GetFile() + + try: + im = wx.Image(iconpath) + if im is None: + self.show_inputerror(self.utility.lang.get('cantopenfile')) + else: + self.thumbCtl.SetValue(iconpath) + except: + self.show_inputerror(self.utility.lang.get('iconbadformat')) + else: + pass + + dlg.Destroy() + + def show_inputerror(self,txt): + dlg = wx.MessageDialog(self, txt, self.utility.lang.get('invalidinput'), wx.OK | wx.ICON_INFORMATION) + dlg.ShowModal() + dlg.Destroy() + + +################################################################ +# +# Class: TrackerInfoPanel +# +# Panel for defining tracker settings for a torrent +# +################################################################ +class TrackerInfoPanel(wx.Panel): + def __init__(self, parent, dialog): + wx.Panel.__init__(self, parent, -1) + + self.dialog = dialog + self.utility = dialog.utility + + outerbox = wx.BoxSizer(wx.VERTICAL) + + announcesection_title = wx.StaticBox(self, -1, self.utility.lang.get('announce')) + announcesection = wx.StaticBoxSizer(announcesection_title, wx.VERTICAL) + + self.announcehistory = [] + + # Use internal tracker? 
+ itracker_box = wx.BoxSizer(wx.HORIZONTAL) + prompt = self.utility.lang.get('useinternaltracker')+' ('+self.utility.session.get_internal_tracker_url()+')' + self.itracker = wx.CheckBox(self, -1, prompt) + wx.EVT_CHECKBOX(self, self.itracker.GetId(), self.OnInternalTracker) + itracker_box.Add(self.itracker, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + announcesection.Add(itracker_box, 0, wx.EXPAND|wx.ALL, 3) + + # Manual override of tracker definition + manualover_box = wx.BoxSizer(wx.HORIZONTAL) + self.manualover = wx.CheckBox(self, -1, self.utility.lang.get('manualtrackerconfig')) + wx.EVT_CHECKBOX(self, self.manualover.GetId(), self.OnInternalTracker) # yes, OnInternalTracker + manualover_box.Add(self.manualover, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + announcesection.Add(manualover_box, 0, wx.EXPAND|wx.ALL, 3) + + # Copy announce from torrent + self.copybutton = wx.Button(self, -1, self.utility.lang.get('copyannouncefromtorrent')) + wx.EVT_BUTTON(self, self.copybutton.GetId(), self.announceCopy) + announcesection.Add(self.copybutton, 0, wx.ALL, 5) + + # Announce url: + self.annText = wx.StaticText(self, -1, self.utility.lang.get('announceurl')) + announcesection.Add(self.annText, 0, wx.ALL, 5) + + announceurl_box = wx.BoxSizer(wx.HORIZONTAL) + + self.annCtl = wx.ComboBox(self, -1, "", choices = self.announcehistory, style=wx.CB_DROPDOWN) + announceurl_box.Add(self.annCtl, 1, wx.ALIGN_CENTER_VERTICAL|wx.RIGHT, 5) + + self.addbutton = wx.Button(self, -1, "+", size = (30, -1)) + self.addbutton.SetToolTipString(self.utility.lang.get('add')) + wx.EVT_BUTTON(self, self.addbutton.GetId(), self.addAnnounce) + announceurl_box.Add(self.addbutton, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 3) + + self.delbutton = wx.Button(self, -1, "-", size = (30, -1)) + self.delbutton.SetToolTipString(self.utility.lang.get('remove')) + wx.EVT_BUTTON(self, self.delbutton.GetId(), self.removeAnnounce) + announceurl_box.Add(self.delbutton, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 3) + + announcesection.Add(announceurl_box, 0, wx.EXPAND) + + # Announce List: + self.annListText = wx.StaticText(self, -1, self.utility.lang.get('announcelist')) + announcesection.Add(self.annListText, 0, wx.EXPAND|wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + self.annListCtl = wx.TextCtrl(self, -1, size = (-1, 75), style = wx.TE_MULTILINE|wx.HSCROLL|wx.TE_DONTWRAP) + self.annListCtl.SetToolTipString(self.utility.lang.get('multiannouncehelp')) + + announcesection.Add(self.annListCtl, 1, wx.EXPAND|wx.TOP, 5) + + outerbox.Add(announcesection, 0, wx.EXPAND|wx.ALL, 3) + + # HTTP Seeds: + outerbox.Add(wx.StaticText(self, -1, self.utility.lang.get('httpseeds')), 0, wx.EXPAND|wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + self.httpSeeds = wx.TextCtrl(self, -1, size = (-1, 75), style = wx.TE_MULTILINE|wx.HSCROLL|wx.TE_DONTWRAP) + self.httpSeeds.SetToolTipString(self.utility.lang.get('httpseedshelp')) + outerbox.Add(self.httpSeeds, 1, wx.EXPAND|wx.ALL, 5) + + self.SetSizerAndFit(outerbox) + + self.loadValues() + + def loadValues(self, Read = None): + if Read is None: + Read = self.utility.makerconfig.Read + + useitracker = Read('useitracker','boolean') + self.itracker.SetValue(useitracker) + manualtrackerconfig = Read('manualtrackerconfig','boolean') + self.manualover.SetValue(manualtrackerconfig) + + self.annCtl.Clear() + self.announcehistory = Read('announcehistory', "bencode-list") + for announceurl in self.announcehistory: + self.annCtl.Append(announceurl) + self.annCtl.SetValue(Read('announcedefault')) + + self.annListCtl.SetValue(Read('announce-list')) + + 
self.toggle_itracker(useitracker,manualtrackerconfig) + + self.httpSeeds.SetValue(Read('httpseeds')) + + + def toggle_itracker(self,useitracker,manualtrackerconfig): + if useitracker: + self.manualover.Enable() + if manualtrackerconfig: + self.copybutton.Enable() + self.annText.Enable() + self.annCtl.Enable() + self.annListText.Enable() + self.annListCtl.Enable() + self.addbutton.Enable() + self.delbutton.Enable() + else: + self.copybutton.Disable() + self.annText.Disable() + self.annCtl.Disable() + self.annListText.Disable() + self.annListCtl.Disable() + self.addbutton.Disable() + self.delbutton.Disable() + + self.dialog.fileInfoPanel.startnow.SetValue(True) + self.dialog.fileInfoPanel.startnow.Disable() + else: + self.manualover.Disable() + self.copybutton.Enable() + self.annText.Enable() + self.annCtl.Enable() + self.annListText.Enable() + self.annListCtl.Enable() + self.addbutton.Enable() + self.delbutton.Enable() + self.dialog.fileInfoPanel.startnow.Enable() + + def saveConfig(self, event = None): + index = self.annCtl.GetSelection() + if index != -1: + self.utility.makerconfig.Write('announcedefault', self.annCtl.GetValue()) + self.utility.makerconfig.Write('announcehistory', self.announcehistory, "bencode-list") + self.utility.makerconfig.Write('announce-list', self.annListCtl.GetValue()) + self.utility.makerconfig.Write('httpseeds', self.httpSeeds.GetValue()) + + def addAnnounce(self, event = None): + announceurl = self.annCtl.GetValue() + + # Don't add to the list if it's already present or the string is empty + announceurl = announceurl.strip() + if not announceurl or announceurl in self.announcehistory: + return + self.announcehistory.append(announceurl) + self.annCtl.Append(announceurl) + + def removeAnnounce(self, event = None): + index = self.annCtl.GetSelection() + if index != -1: + announceurl = self.annCtl.GetValue() + self.annCtl.Delete(index) + try: + self.announcehistory.remove(announceurl) + except: + pass + + def announceCopy(self, event = None): + dl = wx.FileDialog(self.dialog, + self.utility.lang.get('choosedottorrentfiletouse'), + '', + '', + self.utility.lang.get('torrentfileswildcard') + ' (*.torrent)|*.torrent', + wx.OPEN) + if dl.ShowModal() == wx.ID_OK: + try: + metainfo = self.utility.getMetainfo(dl.GetPath()) + if (metainfo is None): + return + self.annCtl.SetValue(metainfo['announce']) + if 'announce-list' in metainfo: + list = [] + for tier in metainfo['announce-list']: + for tracker in tier: + list += [tracker, ', '] + del list[-1] + list += ['\n'] + liststring = '' + for i in list: + liststring += i + self.annListCtl.SetValue(liststring+'\n\n') + else: + self.annListCtl.SetValue('') + except: + return + + def getAnnounceList(self): + text = self.annListCtl.GetValue() + list = [] + for tier in text.split('\n'): + sublist = [] + tier.replace(',', ' ') + for tracker in tier.split(' '): + if tracker != '': + sublist += [tracker] + if sublist: + list.append(sublist) + return list + + def getHTTPSeedList(self): + text = self.httpSeeds.GetValue() + list = [] + for tier in text.split('\n'): + tier.replace(',', ' ') + for tracker in tier.split(' '): + if tracker != '': + list.append(tracker) + return list + + def getParams(self): + params = {} + + if self.itracker.GetValue(): + params['usinginternaltracker'] = True + else: + params['usinginternaltracker'] = False + + if self.manualover.GetValue(): # Use manual specification of trackers + # Announce list + annlist = self.getAnnounceList() + if annlist: + params['announce-list'] = annlist + + # Announce URL + 
announceurl = None + index = self.annCtl.GetSelection() + if annlist and index == -1: + # If we don't have an announce url specified, + # try using the first value in announce-list + tier1 = annlist[0] + if tier1: + announceurl = tier1[0] + else: + announceurl = self.annCtl.GetValue() + + if announceurl is None: + # What should we do here? + announceurl = "" + + params['announce'] = announceurl + else: + # Use just internal tracker + params['announce'] = self.utility.session.get_internal_tracker_url() + + # HTTP Seeds + httpseedlist = self.getHTTPSeedList() + if httpseedlist: + params['httpseeds'] = httpseedlist + + return params + + def OnInternalTracker(self,event=None): + self.toggle_itracker(self.itracker.GetValue(),self.manualover.GetValue()) + + + +################################################################ +# +# Class: FileInfoPanel +# +# Class for choosing a file when creating a torrent +# +################################################################ +class FileInfoPanel(wx.Panel): + def __init__(self, parent, dialog): + wx.Panel.__init__(self, parent, -1) + + self.dialog = dialog + self.utility = dialog.utility + + outerbox = wx.BoxSizer(wx.VERTICAL) + + # Make torrent of: + maketorrent_box = wx.BoxSizer(wx.HORIZONTAL) + maketorrent_box.Add(wx.StaticText(self, -1, self.utility.lang.get('maketorrentof')), 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + self.dirCtl = wx.TextCtrl(self, -1, '') + maketorrent_box.Add(self.dirCtl, 1, wx.ALIGN_CENTER_VERTICAL|wx.EXPAND|wx.ALL, 5) + + button = wx.Button(self, -1, self.utility.lang.get('dir'), style = wx.BU_EXACTFIT) + wx.EVT_BUTTON(self, button.GetId(), self.selectDir) + maketorrent_box.Add(button, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + button2 = wx.Button(self, -1, self.utility.lang.get('file'), style = wx.BU_EXACTFIT) + wx.EVT_BUTTON(self, button2.GetId(), self.selectFile) + maketorrent_box.Add(button2, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + outerbox.Add(maketorrent_box, 0, wx.EXPAND) + + # Merkle: + merkletorrent_box = wx.BoxSizer(wx.HORIZONTAL) + self.createmerkletorrent = wx.CheckBox(self, -1, self.utility.lang.get('createmerkletorrent')) + merkletorrent_box.Add(self.createmerkletorrent, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + outerbox.Add(merkletorrent_box, 0, wx.EXPAND) + + # Piece size: + piecesize_box = wx.BoxSizer(wx.HORIZONTAL) + + piecesize_box.Add(wx.StaticText(self, -1, self.utility.lang.get('piecesize')), 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + abbrev_mb = " " + self.utility.lang.get('MB') + abbrev_kb = " " + self.utility.lang.get('KB') + + piece_choices = [self.utility.lang.get('automatic'), + '2' + abbrev_mb, + '1' + abbrev_mb, + '512' + abbrev_kb, + '256' + abbrev_kb, + '128' + abbrev_kb, + '64' + abbrev_kb, + '32' + abbrev_kb] + self.piece_length = wx.Choice(self, -1, choices = piece_choices) + self.piece_length_list = [0, 2**21, 2**20, 2**19, 2**18, 2**17, 2**16, 2**15] + piecesize_box.Add(self.piece_length, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + outerbox.Add(piecesize_box, 0, wx.EXPAND) + + +# panel.DragAcceptFiles(True) +# wx.EVT_DROP_FILES(panel, self.selectdrop) + + # Save torrent : + savetorrentbox = wx.StaticBoxSizer(wx.StaticBox(self, -1, self.utility.lang.get('savetor')), wx.VERTICAL) + + self.savetorrb1 = wx.RadioButton(self, -1, self.utility.lang.get('savetordefault'), (-1, -1), (-1, -1), wx.RB_GROUP) + savetorrb2 = wx.RadioButton(self, -1, self.utility.lang.get('savetorsource'), (-1, -1), (-1, -1)) + savetorrb3 = wx.RadioButton(self, -1, self.utility.lang.get('savetorask'), (-1, -1), (-1, 
-1)) + self.savetor = [self.savetorrb1, savetorrb2, savetorrb3] + + savetordefbox = wx.BoxSizer(wx.HORIZONTAL) + savetordefbox.Add(self.savetorrb1, 0, wx.ALIGN_CENTER_VERTICAL) + self.savetordeftext = wx.TextCtrl(self, -1, "") + browsebtn = wx.Button(self, -1, "...", style = wx.BU_EXACTFIT) + browsebtn.Bind(wx.EVT_BUTTON, self.onBrowseDir) + savetordefbox.Add(self.savetordeftext, 1, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5) + savetordefbox.Add(browsebtn, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 3) + savetorrentbox.Add(savetordefbox, 0, wx.EXPAND) + + savetorrentbox.Add(savetorrb2, 0) + + savetorrentbox.Add(savetorrb3, 0, wx.TOP, 4) + + outerbox.Add(savetorrentbox, 0, wx.EXPAND|wx.TOP|wx.BOTTOM, 5) + + optionalhash_title = wx.StaticBox(self, -1, self.utility.lang.get('makehash_optional')) + optionalhash = wx.StaticBoxSizer(optionalhash_title, wx.VERTICAL) + + self.makehash_md5 = wx.CheckBox(self, -1, self.utility.lang.get('makehash_md5')) + optionalhash.Add(self.makehash_md5, 0) + + self.makehash_crc32 = wx.CheckBox(self, -1, self.utility.lang.get('makehash_crc32')) + optionalhash.Add(self.makehash_crc32, 0, wx.TOP, 4) + + self.makehash_sha1 = wx.CheckBox(self, -1, self.utility.lang.get('makehash_sha1')) + optionalhash.Add(self.makehash_sha1, 0, wx.TOP, 4) + + self.createtorrentsig = wx.CheckBox(self, -1, self.utility.lang.get('createtorrentsig')) + optionalhash.Add(self.createtorrentsig, 0, wx.TOP, 4) + + outerbox.Add(optionalhash, 0, wx.EXPAND|wx.TOP|wx.BOTTOM, 5) + + self.startnow = wx.CheckBox(self, -1, self.utility.lang.get('startnow')) + outerbox.Add(self.startnow, 0, wx.ALIGN_LEFT|wx.ALL, 5) + + self.SetSizerAndFit(outerbox) + + self.loadValues() + +# panel.DragAcceptFiles(True) +# wx.EVT_DROP_FILES(panel, self.selectdrop) + + def loadValues(self, Read = None): + if Read is None: + Read = self.utility.makerconfig.Read + self.startnow.SetValue(Read('startnow', "boolean")) + self.makehash_md5.SetValue(Read('makehash_md5', "boolean")) + self.makehash_crc32.SetValue(Read('makehash_crc32', "boolean")) + self.makehash_sha1.SetValue(Read('makehash_sha1', "boolean")) + self.createmerkletorrent.SetValue(Read('createmerkletorrent', "boolean")) + self.createtorrentsig.SetValue(Read('createtorrentsig', "boolean")) + + self.savetor[Read('savetorrent', "int")].SetValue(True) + self.piece_length.SetSelection(Read('piece_size', "int")) + self.savetordeftext.SetValue(Read('savetordeffolder')) + + def saveConfig(self, event = None): + self.utility.makerconfig.Write('startnow', self.startnow.GetValue(), "boolean") + + self.utility.makerconfig.Write('makehash_md5', self.makehash_md5.GetValue(), "boolean") + self.utility.makerconfig.Write('makehash_crc32', self.makehash_crc32.GetValue(), "boolean") + self.utility.makerconfig.Write('makehash_sha1', self.makehash_sha1.GetValue(), "boolean") + self.utility.makerconfig.Write('createmerkletorrent', self.createmerkletorrent.GetValue(), "boolean") + self.utility.makerconfig.Write('createtorrentsig', self.createtorrentsig.GetValue(), "boolean") + + self.utility.makerconfig.Write('savetordeffolder', self.savetordeftext.GetValue()) + + for i in range(3): + if self.savetor[i].GetValue(): + self.utility.makerconfig.Write('savetorrent', i) + break + self.utility.makerconfig.Write('piece_size', self.piece_length.GetSelection()) + + def selectDir(self, event = None): + dlg = wx.DirDialog(self.dialog, + self.utility.lang.get('selectdir'), + style = wx.DD_DEFAULT_STYLE | wx.DD_NEW_DIR_BUTTON) + if dlg.ShowModal() == wx.ID_OK: + self.dirCtl.SetValue(dlg.GetPath()) + 
dlg.Destroy() + + def onBrowseDir(self, event = None): + dlg = wx.DirDialog(self.dialog, + self.utility.lang.get('choosetordeffolder'), + style = wx.DD_DEFAULT_STYLE | wx.DD_NEW_DIR_BUTTON) + if dlg.ShowModal() == wx.ID_OK: + self.savetordeftext.SetValue(dlg.GetPath()) + dlg.Destroy() + + def selectFile(self, event = None): + dlg = wx.FileDialog(self.dialog, + self.utility.lang.get('choosefiletouse'), + '', + '', + self.utility.lang.get('allfileswildcard') + ' (*.*)|*.*', + wx.OPEN) + if dlg.ShowModal() == wx.ID_OK: + self.dirCtl.SetValue(dlg.GetPath()) + dlg.Destroy() + + def selectdrop(self, x): + list = x.m_files + self.dirCtl.SetValue(x[0]) + + def getParams(self): + params = {} + self.targeted = [] + + params['piece length'] = self.piece_length_list[self.piece_length.GetSelection()] + + if self.makehash_md5.GetValue(): + params['makehash_md5'] = True + if self.makehash_crc32.GetValue(): + params['makehash_crc32'] = True + if self.makehash_sha1.GetValue(): + params['makehash_sha1'] = True + if self.createmerkletorrent.GetValue(): + params['createmerkletorrent'] = 1 + if self.createtorrentsig.GetValue(): + params['torrentsigkeypairfilename'] = self.utility.session.get_permid_keypair_filename() +## + for i in range(3): + if self.savetor[i].GetValue(): + break + + if i == 0: + defdestfolder = self.savetordeftext.GetValue() +# + + # Check if default download folder is not a file and create it if necessary + if os.path.exists(defdestfolder): + if not os.path.isdir(defdestfolder): + dlg = wx.MessageDialog(self, + message = self.utility.lang.get('notadir') + '\n' + \ + self.utility.lang.get('savedtofolderwithsource'), + caption = self.utility.lang.get('error'), + style = wx.OK | wx.ICON_ERROR) + dlg.ShowModal() + dlg.Destroy() + defdestfolder = "" + else: + try: + os.makedirs(defdestfolder) + except: + dlg = wx.MessageDialog(self, + message = self.utility.lang.get('invalidwinname') + '\n'+ \ + self.utility.lang.get('savedtofolderwithsource'), + caption = self.utility.lang.get('error'), + style = wx.OK | wx.ICON_ERROR) + dlg.ShowModal() + dlg.Destroy() + defdestfolder = "" + + +# + params['target'] = defdestfolder + + self.targeted = defdestfolder + + elif i == 2: + dl = wx.DirDialog(self, style = wx.DD_DEFAULT_STYLE | wx.DD_NEW_DIR_BUTTON) + result = dl.ShowModal() + dl.Destroy() + if result != wx.ID_OK: + return + params['target'] = dl.GetPath() + self.targeted = dl.GetPath() + else: + self.targeted = "" + + return params + + def getTargeted(self): + targeted = self.targeted + return targeted + + +################################################################ +# +# Class: TorrentMaker +# +# Creates the dialog for making a torrent +# +################################################################ +class TorrentMaker(wx.Frame): + def __init__(self, parent): + self.parent = parent + self.utility = self.parent.utility + + title = self.utility.lang.get('btfilemakertitle') + wx.Frame.__init__(self, None, -1, title) + + if sys.platform == 'win32': + self.SetIcon(self.utility.icon) + + panel = wx.Panel(self, -1) + + sizer = wx.BoxSizer(wx.VERTICAL) + + self.notebook = wx.Notebook(panel, -1) + + self.fileInfoPanel = FileInfoPanel(self.notebook, self) + self.notebook.AddPage(self.fileInfoPanel, self.utility.lang.get('fileinfo')) + + self.trackerInfoPanel = TrackerInfoPanel(self.notebook, self) + self.notebook.AddPage(self.trackerInfoPanel, self.utility.lang.get('trackerinfo')) + + self.miscInfoPanel = MiscInfoPanel(self.notebook, self) + self.notebook.AddPage(self.miscInfoPanel, 
self.utility.lang.get('miscinfo')) + + sizer.Add(self.notebook, 1, wx.EXPAND|wx.ALL, 5) + + btnbox = wx.BoxSizer(wx.HORIZONTAL) + b3 = wx.Button(panel, -1, self.utility.lang.get('saveasdefaultconfig')) + btnbox.Add(b3, 0, wx.EXPAND) + + b2 = wx.Button(panel, -1, self.utility.lang.get('maketorrent')) + btnbox.Add(b2, 0, wx.EXPAND|wx.LEFT|wx.RIGHT, 10) + + b4 = wx.Button(panel, -1, self.utility.lang.get('close')) + btnbox.Add(b4, 0, wx.EXPAND) + + sizer.Add(btnbox, 0, wx.ALIGN_CENTER|wx.ALL, 10) + + wx.EVT_BUTTON(panel, b2.GetId(), self.complete) + wx.EVT_BUTTON(panel, b3.GetId(), self.saveConfig) + wx.EVT_BUTTON(panel, b4.GetId(), self.closeWin) + + panel.SetSizerAndFit(sizer) + + self.Fit() + + self.Show() + + def closeWin(self, event = None): + savetordeffolder = self.fileInfoPanel.savetordeftext.GetValue() + self.utility.makerconfig.Write('savetordeffolder', savetordeffolder) + self.utility.makerconfig.Write('announcehistory', self.trackerInfoPanel.announcehistory, "bencode-list") + + self.Destroy() + + def saveConfig(self, event = None): + self.fileInfoPanel.saveConfig() + self.trackerInfoPanel.saveConfig() + self.miscInfoPanel.saveConfig() + + self.utility.makerconfig.Flush() + + def complete(self, event = None): + if DEBUG: + print "complete thread",currentThread() + + filename = self.fileInfoPanel.dirCtl.GetValue() + if filename == '': + dlg = wx.MessageDialog(self, message = self.utility.lang.get('youmustselectfileordir'), + caption = self.utility.lang.get('error'), style = wx.OK | wx.ICON_ERROR) + dlg.ShowModal() + dlg.Destroy() + return + + params = {} + params.update(tdefdefaults) + params.update(self.fileInfoPanel.getParams()) + params.update(self.trackerInfoPanel.getParams()) + params.update(self.miscInfoPanel.getParams()) + + try: + CompleteDir(self, filename, params) + except: + oldstdout = sys.stdout + sys.stdout = sys.stderr + print_exc() + sys.stdout = oldstdout + + +################################################################ +# +# Class: CompleteDir +# +# Creating torrents for one or more files +# +################################################################ +class CompleteDir: + def __init__(self, parent, srcpath, params): + self.srcpath = srcpath + self.params = params + self.startnow = parent.fileInfoPanel.startnow.GetValue() + + self.usinginternaltracker = False + if 'usinginternaltracker' in params: + self.usinginternaltracker = params['usinginternaltracker'] + del params['usinginternaltracker'] + self.startnow = True # Always start seeding immediately + + self.params = params + self.parent = parent + self.utility = self.parent.utility + self.flag = Event() + self.separatetorrents = False + self.files = [] + + if os.path.isdir(srcpath): + self.choicemade = Event() + frame = wx.Frame(None, -1, self.utility.lang.get('btmaketorrenttitle'), size = (1, 1)) + self.frame = frame + panel = wx.Panel(frame, -1) + gridSizer = wx.FlexGridSizer(cols = 1, vgap = 8, hgap = 8) + gridSizer.AddGrowableRow(1) + gridSizer.Add(wx.StaticText(panel, -1, + self.utility.lang.get('dirnotice')), 0, wx.ALIGN_CENTER) + gridSizer.Add(wx.StaticText(panel, -1, '')) + + b = wx.FlexGridSizer(cols = 3, hgap = 10) + yesbut = wx.Button(panel, -1, self.utility.lang.get('yes')) + def saidyes(e, self = self): + self.frame.Destroy() + self.separatetorrents = True + self.begin() + wx.EVT_BUTTON(frame, yesbut.GetId(), saidyes) + b.Add(yesbut, 0) + + nobut = wx.Button(panel, -1, self.utility.lang.get('no')) + def saidno(e, self = self): + self.frame.Destroy() + self.begin() + wx.EVT_BUTTON(frame, 
nobut.GetId(), saidno) + b.Add(nobut, 0) + + cancelbut = wx.Button(panel, -1, self.utility.lang.get('cancel')) + def canceled(e, self = self): + self.frame.Destroy() + wx.EVT_BUTTON(frame, cancelbut.GetId(), canceled) + b.Add(cancelbut, 0) + gridSizer.Add(b, 0, wx.ALIGN_CENTER) + border = wx.BoxSizer(wx.HORIZONTAL) + border.Add(gridSizer, 1, wx.EXPAND | wx.ALL, 4) + + panel.SetSizer(border) + panel.SetAutoLayout(True) + frame.Show() + border.Fit(panel) + frame.Fit() + else: + self.begin() + + def begin(self): + if self.separatetorrents: + frame = wx.Frame(None, -1, self.utility.lang.get('btmakedirtitle'), size = wx.Size(550, 250)) + else: + frame = wx.Frame(None, -1, self.utility.lang.get('btmaketorrenttitle'), size = wx.Size(550, 250)) + self.frame = frame + + panel = wx.Panel(frame, -1) + gridSizer = wx.FlexGridSizer(cols = 1, vgap = 15, hgap = 8) + + if self.separatetorrents: + self.currentLabel = wx.StaticText(panel, -1, self.utility.lang.get('checkfilesize')) + else: + self.currentLabel = wx.StaticText(panel, -1, self.utility.lang.get('building')) + gridSizer.Add(self.currentLabel, 0, wx.EXPAND) + self.gauge = wx.Gauge(panel, -1, range = 1000, style = wx.GA_SMOOTH) + gridSizer.Add(self.gauge, 0, wx.EXPAND) + gridSizer.Add((10, 10), 1, wx.EXPAND) + self.button = wx.Button(panel, -1, self.utility.lang.get('cancel')) + gridSizer.Add(self.button, 0, wx.ALIGN_CENTER) + gridSizer.AddGrowableRow(2) + gridSizer.AddGrowableCol(0) + + g2 = wx.FlexGridSizer(cols = 1, vgap = 15, hgap = 8) + g2.Add(gridSizer, 1, wx.EXPAND | wx.ALL, 25) + g2.AddGrowableRow(0) + g2.AddGrowableCol(0) + panel.SetSizer(g2) + panel.SetAutoLayout(True) + wx.EVT_BUTTON(frame, self.button.GetId(), self.onDone) + wx.EVT_CLOSE(frame, self.onDone) + frame.Show(True) + Thread(target = self.complete).start() + + def complete(self): + try: + if self.separatetorrents: + completedir(self.srcpath, self.params, self.flag, self.progressCallback, self.fileCallback) + else: + make_meta_file(self.srcpath, self.params, self.flag, self.progressCallback, self.fileCallback) + if not self.flag.isSet(): + self.completeCallback() + except (OSError, IOError), e: + self.errorCallback(e) + + def errorCallback(self,e): + wx.CallAfter(self.onError,e) + + def onError(self,e): + self.currentLabel.SetLabel(self.utility.lang.get('error')) + self.button.SetLabel(self.utility.lang.get('close')) + dlg = wx.MessageDialog(None, + message = self.utility.lang.get('error') + ' - ' + str(e), + caption = self.utility.lang.get('error'), + style = wx.OK | wx.ICON_ERROR) + dlg.ShowModal() + dlg.Destroy() + + def completeCallback(self): + wx.CallAfter(self.onComplete) + + def onComplete(self): + self.currentLabel.SetLabel(self.utility.lang.get('Done')) + self.gauge.SetValue(1000) + self.button.SetLabel(self.utility.lang.get('close')) + + def progressCallback(self, amount): + wx.CallAfter(self.OnProgressUpdate,amount) + + def OnProgressUpdate(self, amount): + target = int(amount * 1000) + old = self.gauge.GetValue() + perc10 = self.gauge.GetRange()/10 + if target > old+perc10: # 10% increments + self.gauge.SetValue(target) + + def fileCallback(self, orig, torrent): + self.files.append([orig,torrent]) + wx.CallAfter(self.onFile,torrent) + + def onFile(self, torrent): + if DEBUG: + print "onFile thread",currentThread() + self.currentLabel.SetLabel(self.utility.lang.get('building') + torrent) + + def onDone(self, event): + self.flag.set() + self.frame.Destroy() + if self.startnow: + # When seeding immediately, add torrents to queue + for orig,torrentfilename in 
self.files: + try: + absorig = os.path.abspath(orig) + if os.path.isfile(absorig): + # To seed a file, destdir must be one up. + destdir = os.path.dirname(absorig) + else: + destdir = absorig + + tdef = TorrentDef.load(torrentfilename) + defaultDLConfig = DefaultDownloadStartupConfig.getInstance() + dscfg = defaultDLConfig.copy() + dscfg.set_dest_dir(destdir) + self.utility.session.start_download(tdef,dscfg) + + except Exception,e: + print_exc() + self.onError(e) + + +def make_meta_file(srcpath,params,userabortflag,progressCallback,torrentfilenameCallback): + + tdef = TorrentDef() + + if not os.path.isdir(srcpath): + if 'playtime' in params: + tdef.add_content(srcpath,playtime=params['playtime']) + else: + tdef.add_content(srcpath) + else: + srcbasename = os.path.basename(os.path.normpath(srcpath)) + for filename in os.listdir(srcpath): + inpath = os.path.join(srcpath,filename) + outpath = os.path.join(srcbasename,filename) + # h4x0r playtime + if 'playtime' in params: + tdef.add_content(inpath,outpath,playtime=params['playtime']) + else: + tdef.add_content(inpath,outpath) + + if params['comment']: + tdef.set_comment(params['comment']) + if params['created by']: + tdef.set_created_by(params['created by']) + if params['announce']: + tdef.set_tracker(params['announce']) + if params['announce-list']: + tdef.set_tracker_hierarchy(params['announce-list']) + if params['nodes']: # mainline DHT + tdef.set_dht_nodes(params['nodes']) + if params['httpseeds']: + tdef.set_httpseeds(params['httpseeds']) + if params['encoding']: + tdef.set_encoding(params['encoding']) + if params['piece length']: + tdef.set_piece_length(params['piece length']) + if params['makehash_md5']: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentMaker: make MD5" + tdef.set_add_md5hash(params['makehash_md5']) + if params['makehash_crc32']: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentMaker: make CRC32" + tdef.set_add_crc32(params['makehash_crc32']) + if params['makehash_sha1']: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentMaker: make SHA1" + tdef.set_add_sha1hash(params['makehash_sha1']) + if params['createmerkletorrent']: + tdef.set_create_merkle_torrent(params['createmerkletorrent']) + if params['torrentsigkeypairfilename']: + tdef.set_signature_keypair_filename(params['torrentsigkeypairfilename']) + if params['thumb']: + tdef.set_thumbnail(params['thumb']) + + tdef.finalize(userabortflag=userabortflag,userprogresscallback=progressCallback) + + if params['createmerkletorrent']: + postfix = TRIBLER_TORRENT_EXT + else: + postfix = '.torrent' + + if 'target' in params and params['target']: + torrentfilename = os.path.join(params['target'], os.path.split(os.path.normpath(srcpath))[1] + postfix) + else: + a, b = os.path.split(srcpath) + if b == '': + torrentfilename = a + postfix + else: + torrentfilename = os.path.join(a, b + postfix) + + tdef.save(torrentfilename) + + # Inform higher layer we created torrent + torrentfilenameCallback(srcpath,torrentfilename) + +def completedir(srcpath, params, userabortflag, progressCallback, torrentfilenameCallback): + merkle_torrent = params['createmerkletorrent'] == 1 + if merkle_torrent: + ext = TRIBLER_TORRENT_EXT + else: + ext = '.torrent' + srcfiles = os.listdir(srcpath) + srcfiles.sort() + + # Filter out any .torrent files + goodfiles = [] + for srcfile in srcfiles: + if srcfile[-len(ext):] != ext and (srcfile + ext) not in srcfiles: + goodfile = os.path.join(srcpath, srcfile) + 
goodfiles.append(goodfile) + + for goodfile in goodfiles: + basename = os.path.split(goodfile)[-1] + # Ignore cores, CVS and dotfiles + if basename not in FILESTOIGNORE and basename[0] != '.': + make_meta_file(goodfile, params,userabortflag,progressCallback,torrentfilenameCallback) + diff --git a/tribler-mod/Tribler/Main/Dialogs/TorrentMaker.py.bak b/tribler-mod/Tribler/Main/Dialogs/TorrentMaker.py.bak new file mode 100644 index 0000000..262e67b --- /dev/null +++ b/tribler-mod/Tribler/Main/Dialogs/TorrentMaker.py.bak @@ -0,0 +1,1028 @@ +# Written by Bram Cohen +# modified for multitracker by John Hoffman +# modified for Merkle hashes and digital signatures by Arno Bakker +# see LICENSE.txt for license information + +import sys +import wx +import wx.lib.imagebrowser as ib +import os + +from threading import Event, Thread, currentThread +from tempfile import mkstemp +from traceback import print_exc + +from Tribler.Core.API import * +from Tribler.Main.globals import DefaultDownloadStartupConfig + +FILESTOIGNORE = ['core', 'CVS'] + +DEBUG = False + + +################################################################ +# +# Class: MiscInfoPanel +# +# Panel for defining miscellaneous settings for a torrent +# +################################################################ +class MiscInfoPanel(wx.Panel): + def __init__(self, parent, dialog): + wx.Panel.__init__(self, parent, -1) + + self.dialog = dialog + self.utility = dialog.utility + + outerbox = wx.BoxSizer(wx.VERTICAL) + + # Created by: + outerbox.Add(wx.StaticText(self, -1, self.utility.lang.get('createdby')), 0, wx.EXPAND|wx.ALL, 5) + self.createdBy = wx.TextCtrl(self, -1) + outerbox.Add(self.createdBy, 0, wx.EXPAND|wx.ALL, 5) + + # Comment: + outerbox.Add(wx.StaticText(self, -1, self.utility.lang.get('comment')), 0, wx.EXPAND|wx.ALL, 5) + self.commentCtl = wx.TextCtrl(self, -1, size = (-1, 75), style = wx.TE_MULTILINE|wx.HSCROLL|wx.TE_DONTWRAP) + outerbox.Add(self.commentCtl, 0, wx.EXPAND|wx.ALL, 5) + + # Playtime: + outerbox.Add(wx.StaticText(self, -1, self.utility.lang.get('playtime')), 0, wx.EXPAND|wx.ALL, 5) + self.playtCtl = wx.TextCtrl(self, -1) + outerbox.Add(self.playtCtl, 0, wx.EXPAND|wx.ALL, 5) + + # Thumbnail: + ybox = wx.BoxSizer(wx.VERTICAL) + ybox.Add(wx.StaticText(self, -1, self.utility.lang.get('addthumbnail')), 0, wx.EXPAND|wx.ALL, 5) + xbox = wx.BoxSizer(wx.HORIZONTAL) + self.thumbCtl = wx.TextCtrl(self, -1) + xbox.Add(self.thumbCtl, 1, wx.EXPAND|wx.ALL, 5) + browsebtn = wx.Button(self, -1, "...") + self.Bind(wx.EVT_BUTTON, self.onBrowseThumb, browsebtn) + xbox.Add(browsebtn, 0, wx.ALL, 5) + ybox.Add(xbox, 0, wx.EXPAND|wx.ALL, 5) + outerbox.Add(ybox, 0, wx.ALL|wx.EXPAND, 5) + + self.SetSizerAndFit(outerbox) + + self.loadValues() + + def loadValues(self, Read = None): + if Read is None: + Read = self.utility.makerconfig.Read + + self.createdBy.SetValue(Read('created_by')) + self.commentCtl.SetValue(Read('comment')) + + def saveConfig(self, event = None): + self.utility.makerconfig.Write('created_by', self.createdBy.GetValue()) + self.utility.makerconfig.Write('comment', self.commentCtl.GetValue()) + + def getParams(self): + params = {} + + thumbfn = self.thumbCtl.GetValue() + if len(thumbfn) > 0: + try: + im = wx.Image(thumbfn) + ims = im.Scale(171,96) + + [thumbhandle,thumbfilename] = mkstemp("torrent-thumb") + os.close(thumbhandle) + ims.SaveFile(thumbfilename,wx.BITMAP_TYPE_JPEG) + params['thumb'] = thumbfilename + except: + print_exc() + + playt = self.playtCtl.GetValue() + if playt != '': + params['playtime'] = playt 
+ + comment = self.commentCtl.GetValue() + if comment != '': + params['comment'] = comment + + createdby = self.createdBy.GetValue() + if comment != '': + params['created by'] = createdby + + return params + + + def onBrowseThumb(self, evt): + path = '' + + # open the image browser dialog + dlg = ib.ImageDialog(self, path) + dlg.Centre() + if dlg.ShowModal() == wx.ID_OK: + iconpath = dlg.GetFile() + + try: + im = wx.Image(iconpath) + if im is None: + self.show_inputerror(self.utility.lang.get('cantopenfile')) + else: + self.thumbCtl.SetValue(iconpath) + except: + self.show_inputerror(self.utility.lang.get('iconbadformat')) + else: + pass + + dlg.Destroy() + + def show_inputerror(self,txt): + dlg = wx.MessageDialog(self, txt, self.utility.lang.get('invalidinput'), wx.OK | wx.ICON_INFORMATION) + dlg.ShowModal() + dlg.Destroy() + + +################################################################ +# +# Class: TrackerInfoPanel +# +# Panel for defining tracker settings for a torrent +# +################################################################ +class TrackerInfoPanel(wx.Panel): + def __init__(self, parent, dialog): + wx.Panel.__init__(self, parent, -1) + + self.dialog = dialog + self.utility = dialog.utility + + outerbox = wx.BoxSizer(wx.VERTICAL) + + announcesection_title = wx.StaticBox(self, -1, self.utility.lang.get('announce')) + announcesection = wx.StaticBoxSizer(announcesection_title, wx.VERTICAL) + + self.announcehistory = [] + + # Use internal tracker? + itracker_box = wx.BoxSizer(wx.HORIZONTAL) + prompt = self.utility.lang.get('useinternaltracker')+' ('+self.utility.session.get_internal_tracker_url()+')' + self.itracker = wx.CheckBox(self, -1, prompt) + wx.EVT_CHECKBOX(self, self.itracker.GetId(), self.OnInternalTracker) + itracker_box.Add(self.itracker, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + announcesection.Add(itracker_box, 0, wx.EXPAND|wx.ALL, 3) + + # Manual override of tracker definition + manualover_box = wx.BoxSizer(wx.HORIZONTAL) + self.manualover = wx.CheckBox(self, -1, self.utility.lang.get('manualtrackerconfig')) + wx.EVT_CHECKBOX(self, self.manualover.GetId(), self.OnInternalTracker) # yes, OnInternalTracker + manualover_box.Add(self.manualover, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + announcesection.Add(manualover_box, 0, wx.EXPAND|wx.ALL, 3) + + # Copy announce from torrent + self.copybutton = wx.Button(self, -1, self.utility.lang.get('copyannouncefromtorrent')) + wx.EVT_BUTTON(self, self.copybutton.GetId(), self.announceCopy) + announcesection.Add(self.copybutton, 0, wx.ALL, 5) + + # Announce url: + self.annText = wx.StaticText(self, -1, self.utility.lang.get('announceurl')) + announcesection.Add(self.annText, 0, wx.ALL, 5) + + announceurl_box = wx.BoxSizer(wx.HORIZONTAL) + + self.annCtl = wx.ComboBox(self, -1, "", choices = self.announcehistory, style=wx.CB_DROPDOWN) + announceurl_box.Add(self.annCtl, 1, wx.ALIGN_CENTER_VERTICAL|wx.RIGHT, 5) + + self.addbutton = wx.Button(self, -1, "+", size = (30, -1)) + self.addbutton.SetToolTipString(self.utility.lang.get('add')) + wx.EVT_BUTTON(self, self.addbutton.GetId(), self.addAnnounce) + announceurl_box.Add(self.addbutton, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 3) + + self.delbutton = wx.Button(self, -1, "-", size = (30, -1)) + self.delbutton.SetToolTipString(self.utility.lang.get('remove')) + wx.EVT_BUTTON(self, self.delbutton.GetId(), self.removeAnnounce) + announceurl_box.Add(self.delbutton, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 3) + + announcesection.Add(announceurl_box, 0, wx.EXPAND) + + # Announce List: + 
self.annListText = wx.StaticText(self, -1, self.utility.lang.get('announcelist')) + announcesection.Add(self.annListText, 0, wx.EXPAND|wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + self.annListCtl = wx.TextCtrl(self, -1, size = (-1, 75), style = wx.TE_MULTILINE|wx.HSCROLL|wx.TE_DONTWRAP) + self.annListCtl.SetToolTipString(self.utility.lang.get('multiannouncehelp')) + + announcesection.Add(self.annListCtl, 1, wx.EXPAND|wx.TOP, 5) + + outerbox.Add(announcesection, 0, wx.EXPAND|wx.ALL, 3) + + # HTTP Seeds: + outerbox.Add(wx.StaticText(self, -1, self.utility.lang.get('httpseeds')), 0, wx.EXPAND|wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + self.httpSeeds = wx.TextCtrl(self, -1, size = (-1, 75), style = wx.TE_MULTILINE|wx.HSCROLL|wx.TE_DONTWRAP) + self.httpSeeds.SetToolTipString(self.utility.lang.get('httpseedshelp')) + outerbox.Add(self.httpSeeds, 1, wx.EXPAND|wx.ALL, 5) + + self.SetSizerAndFit(outerbox) + + self.loadValues() + + def loadValues(self, Read = None): + if Read is None: + Read = self.utility.makerconfig.Read + + useitracker = Read('useitracker','boolean') + self.itracker.SetValue(useitracker) + manualtrackerconfig = Read('manualtrackerconfig','boolean') + self.manualover.SetValue(manualtrackerconfig) + + self.annCtl.Clear() + self.announcehistory = Read('announcehistory', "bencode-list") + for announceurl in self.announcehistory: + self.annCtl.Append(announceurl) + self.annCtl.SetValue(Read('announcedefault')) + + self.annListCtl.SetValue(Read('announce-list')) + + self.toggle_itracker(useitracker,manualtrackerconfig) + + self.httpSeeds.SetValue(Read('httpseeds')) + + + def toggle_itracker(self,useitracker,manualtrackerconfig): + if useitracker: + self.manualover.Enable() + if manualtrackerconfig: + self.copybutton.Enable() + self.annText.Enable() + self.annCtl.Enable() + self.annListText.Enable() + self.annListCtl.Enable() + self.addbutton.Enable() + self.delbutton.Enable() + else: + self.copybutton.Disable() + self.annText.Disable() + self.annCtl.Disable() + self.annListText.Disable() + self.annListCtl.Disable() + self.addbutton.Disable() + self.delbutton.Disable() + + self.dialog.fileInfoPanel.startnow.SetValue(True) + self.dialog.fileInfoPanel.startnow.Disable() + else: + self.manualover.Disable() + self.copybutton.Enable() + self.annText.Enable() + self.annCtl.Enable() + self.annListText.Enable() + self.annListCtl.Enable() + self.addbutton.Enable() + self.delbutton.Enable() + self.dialog.fileInfoPanel.startnow.Enable() + + def saveConfig(self, event = None): + index = self.annCtl.GetSelection() + if index != -1: + self.utility.makerconfig.Write('announcedefault', self.annCtl.GetValue()) + self.utility.makerconfig.Write('announcehistory', self.announcehistory, "bencode-list") + self.utility.makerconfig.Write('announce-list', self.annListCtl.GetValue()) + self.utility.makerconfig.Write('httpseeds', self.httpSeeds.GetValue()) + + def addAnnounce(self, event = None): + announceurl = self.annCtl.GetValue() + + # Don't add to the list if it's already present or the string is empty + announceurl = announceurl.strip() + if not announceurl or announceurl in self.announcehistory: + return + self.announcehistory.append(announceurl) + self.annCtl.Append(announceurl) + + def removeAnnounce(self, event = None): + index = self.annCtl.GetSelection() + if index != -1: + announceurl = self.annCtl.GetValue() + self.annCtl.Delete(index) + try: + self.announcehistory.remove(announceurl) + except: + pass + + def announceCopy(self, event = None): + dl = wx.FileDialog(self.dialog, + 
self.utility.lang.get('choosedottorrentfiletouse'), + '', + '', + self.utility.lang.get('torrentfileswildcard') + ' (*.torrent)|*.torrent', + wx.OPEN) + if dl.ShowModal() == wx.ID_OK: + try: + metainfo = self.utility.getMetainfo(dl.GetPath()) + if (metainfo is None): + return + self.annCtl.SetValue(metainfo['announce']) + if 'announce-list' in metainfo: + list = [] + for tier in metainfo['announce-list']: + for tracker in tier: + list += [tracker, ', '] + del list[-1] + list += ['\n'] + liststring = '' + for i in list: + liststring += i + self.annListCtl.SetValue(liststring+'\n\n') + else: + self.annListCtl.SetValue('') + except: + return + + def getAnnounceList(self): + text = self.annListCtl.GetValue() + list = [] + for tier in text.split('\n'): + sublist = [] + tier.replace(',', ' ') + for tracker in tier.split(' '): + if tracker != '': + sublist += [tracker] + if sublist: + list.append(sublist) + return list + + def getHTTPSeedList(self): + text = self.httpSeeds.GetValue() + list = [] + for tier in text.split('\n'): + tier.replace(',', ' ') + for tracker in tier.split(' '): + if tracker != '': + list.append(tracker) + return list + + def getParams(self): + params = {} + + if self.itracker.GetValue(): + params['usinginternaltracker'] = True + else: + params['usinginternaltracker'] = False + + if self.manualover.GetValue(): # Use manual specification of trackers + # Announce list + annlist = self.getAnnounceList() + if annlist: + params['announce-list'] = annlist + + # Announce URL + announceurl = None + index = self.annCtl.GetSelection() + if annlist and index == -1: + # If we don't have an announce url specified, + # try using the first value in announce-list + tier1 = annlist[0] + if tier1: + announceurl = tier1[0] + else: + announceurl = self.annCtl.GetValue() + + if announceurl is None: + # What should we do here? 
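+ # Falling back to an empty string appears harmless here: make_meta_file() below only calls set_tracker() when params['announce'] is non-empty, so the torrent would then rely solely on the announce-list, if one was entered.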
+ announceurl = "" + + params['announce'] = announceurl + else: + # Use just internal tracker + params['announce'] = self.utility.session.get_internal_tracker_url() + + # HTTP Seeds + httpseedlist = self.getHTTPSeedList() + if httpseedlist: + params['httpseeds'] = httpseedlist + + return params + + def OnInternalTracker(self,event=None): + self.toggle_itracker(self.itracker.GetValue(),self.manualover.GetValue()) + + + +################################################################ +# +# Class: FileInfoPanel +# +# Class for choosing a file when creating a torrent +# +################################################################ +class FileInfoPanel(wx.Panel): + def __init__(self, parent, dialog): + wx.Panel.__init__(self, parent, -1) + + self.dialog = dialog + self.utility = dialog.utility + + outerbox = wx.BoxSizer(wx.VERTICAL) + + # Make torrent of: + maketorrent_box = wx.BoxSizer(wx.HORIZONTAL) + maketorrent_box.Add(wx.StaticText(self, -1, self.utility.lang.get('maketorrentof')), 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + self.dirCtl = wx.TextCtrl(self, -1, '') + maketorrent_box.Add(self.dirCtl, 1, wx.ALIGN_CENTER_VERTICAL|wx.EXPAND|wx.ALL, 5) + + button = wx.Button(self, -1, self.utility.lang.get('dir'), style = wx.BU_EXACTFIT) + wx.EVT_BUTTON(self, button.GetId(), self.selectDir) + maketorrent_box.Add(button, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + button2 = wx.Button(self, -1, self.utility.lang.get('file'), style = wx.BU_EXACTFIT) + wx.EVT_BUTTON(self, button2.GetId(), self.selectFile) + maketorrent_box.Add(button2, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + outerbox.Add(maketorrent_box, 0, wx.EXPAND) + + # Merkle: + merkletorrent_box = wx.BoxSizer(wx.HORIZONTAL) + self.createmerkletorrent = wx.CheckBox(self, -1, self.utility.lang.get('createmerkletorrent')) + merkletorrent_box.Add(self.createmerkletorrent, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + outerbox.Add(merkletorrent_box, 0, wx.EXPAND) + + # Piece size: + piecesize_box = wx.BoxSizer(wx.HORIZONTAL) + + piecesize_box.Add(wx.StaticText(self, -1, self.utility.lang.get('piecesize')), 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + abbrev_mb = " " + self.utility.lang.get('MB') + abbrev_kb = " " + self.utility.lang.get('KB') + + piece_choices = [self.utility.lang.get('automatic'), + '2' + abbrev_mb, + '1' + abbrev_mb, + '512' + abbrev_kb, + '256' + abbrev_kb, + '128' + abbrev_kb, + '64' + abbrev_kb, + '32' + abbrev_kb] + self.piece_length = wx.Choice(self, -1, choices = piece_choices) + self.piece_length_list = [0, 2**21, 2**20, 2**19, 2**18, 2**17, 2**16, 2**15] + piecesize_box.Add(self.piece_length, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + outerbox.Add(piecesize_box, 0, wx.EXPAND) + + +# panel.DragAcceptFiles(True) +# wx.EVT_DROP_FILES(panel, self.selectdrop) + + # Save torrent : + savetorrentbox = wx.StaticBoxSizer(wx.StaticBox(self, -1, self.utility.lang.get('savetor')), wx.VERTICAL) + + self.savetorrb1 = wx.RadioButton(self, -1, self.utility.lang.get('savetordefault'), (-1, -1), (-1, -1), wx.RB_GROUP) + savetorrb2 = wx.RadioButton(self, -1, self.utility.lang.get('savetorsource'), (-1, -1), (-1, -1)) + savetorrb3 = wx.RadioButton(self, -1, self.utility.lang.get('savetorask'), (-1, -1), (-1, -1)) + self.savetor = [self.savetorrb1, savetorrb2, savetorrb3] + + savetordefbox = wx.BoxSizer(wx.HORIZONTAL) + savetordefbox.Add(self.savetorrb1, 0, wx.ALIGN_CENTER_VERTICAL) + self.savetordeftext = wx.TextCtrl(self, -1, "") + browsebtn = wx.Button(self, -1, "...", style = wx.BU_EXACTFIT) + browsebtn.Bind(wx.EVT_BUTTON, self.onBrowseDir) + 
savetordefbox.Add(self.savetordeftext, 1, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5) + savetordefbox.Add(browsebtn, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 3) + savetorrentbox.Add(savetordefbox, 0, wx.EXPAND) + + savetorrentbox.Add(savetorrb2, 0) + + savetorrentbox.Add(savetorrb3, 0, wx.TOP, 4) + + outerbox.Add(savetorrentbox, 0, wx.EXPAND|wx.TOP|wx.BOTTOM, 5) + + optionalhash_title = wx.StaticBox(self, -1, self.utility.lang.get('makehash_optional')) + optionalhash = wx.StaticBoxSizer(optionalhash_title, wx.VERTICAL) + + self.makehash_md5 = wx.CheckBox(self, -1, self.utility.lang.get('makehash_md5')) + optionalhash.Add(self.makehash_md5, 0) + + self.makehash_crc32 = wx.CheckBox(self, -1, self.utility.lang.get('makehash_crc32')) + optionalhash.Add(self.makehash_crc32, 0, wx.TOP, 4) + + self.makehash_sha1 = wx.CheckBox(self, -1, self.utility.lang.get('makehash_sha1')) + optionalhash.Add(self.makehash_sha1, 0, wx.TOP, 4) + + self.createtorrentsig = wx.CheckBox(self, -1, self.utility.lang.get('createtorrentsig')) + optionalhash.Add(self.createtorrentsig, 0, wx.TOP, 4) + + outerbox.Add(optionalhash, 0, wx.EXPAND|wx.TOP|wx.BOTTOM, 5) + + self.startnow = wx.CheckBox(self, -1, self.utility.lang.get('startnow')) + outerbox.Add(self.startnow, 0, wx.ALIGN_LEFT|wx.ALL, 5) + + self.SetSizerAndFit(outerbox) + + self.loadValues() + +# panel.DragAcceptFiles(True) +# wx.EVT_DROP_FILES(panel, self.selectdrop) + + def loadValues(self, Read = None): + if Read is None: + Read = self.utility.makerconfig.Read + self.startnow.SetValue(Read('startnow', "boolean")) + self.makehash_md5.SetValue(Read('makehash_md5', "boolean")) + self.makehash_crc32.SetValue(Read('makehash_crc32', "boolean")) + self.makehash_sha1.SetValue(Read('makehash_sha1', "boolean")) + self.createmerkletorrent.SetValue(Read('createmerkletorrent', "boolean")) + self.createtorrentsig.SetValue(Read('createtorrentsig', "boolean")) + + self.savetor[Read('savetorrent', "int")].SetValue(True) + self.piece_length.SetSelection(Read('piece_size', "int")) + self.savetordeftext.SetValue(Read('savetordeffolder')) + + def saveConfig(self, event = None): + self.utility.makerconfig.Write('startnow', self.startnow.GetValue(), "boolean") + + self.utility.makerconfig.Write('makehash_md5', self.makehash_md5.GetValue(), "boolean") + self.utility.makerconfig.Write('makehash_crc32', self.makehash_crc32.GetValue(), "boolean") + self.utility.makerconfig.Write('makehash_sha1', self.makehash_sha1.GetValue(), "boolean") + self.utility.makerconfig.Write('createmerkletorrent', self.createmerkletorrent.GetValue(), "boolean") + self.utility.makerconfig.Write('createtorrentsig', self.createtorrentsig.GetValue(), "boolean") + + self.utility.makerconfig.Write('savetordeffolder', self.savetordeftext.GetValue()) + + for i in range(3): + if self.savetor[i].GetValue(): + self.utility.makerconfig.Write('savetorrent', i) + break + self.utility.makerconfig.Write('piece_size', self.piece_length.GetSelection()) + + def selectDir(self, event = None): + dlg = wx.DirDialog(self.dialog, + self.utility.lang.get('selectdir'), + style = wx.DD_DEFAULT_STYLE | wx.DD_NEW_DIR_BUTTON) + if dlg.ShowModal() == wx.ID_OK: + self.dirCtl.SetValue(dlg.GetPath()) + dlg.Destroy() + + def onBrowseDir(self, event = None): + dlg = wx.DirDialog(self.dialog, + self.utility.lang.get('choosetordeffolder'), + style = wx.DD_DEFAULT_STYLE | wx.DD_NEW_DIR_BUTTON) + if dlg.ShowModal() == wx.ID_OK: + self.savetordeftext.SetValue(dlg.GetPath()) + dlg.Destroy() + + def selectFile(self, event = None): + dlg = 
wx.FileDialog(self.dialog, + self.utility.lang.get('choosefiletouse'), + '', + '', + self.utility.lang.get('allfileswildcard') + ' (*.*)|*.*', + wx.OPEN) + if dlg.ShowModal() == wx.ID_OK: + self.dirCtl.SetValue(dlg.GetPath()) + dlg.Destroy() + + def selectdrop(self, x): + list = x.m_files + self.dirCtl.SetValue(x[0]) + + def getParams(self): + params = {} + self.targeted = [] + + params['piece length'] = self.piece_length_list[self.piece_length.GetSelection()] + + if self.makehash_md5.GetValue(): + params['makehash_md5'] = True + if self.makehash_crc32.GetValue(): + params['makehash_crc32'] = True + if self.makehash_sha1.GetValue(): + params['makehash_sha1'] = True + if self.createmerkletorrent.GetValue(): + params['createmerkletorrent'] = 1 + if self.createtorrentsig.GetValue(): + params['torrentsigkeypairfilename'] = self.utility.session.get_permid_keypair_filename() +## + for i in range(3): + if self.savetor[i].GetValue(): + break + + if i == 0: + defdestfolder = self.savetordeftext.GetValue() +# + + # Check if default download folder is not a file and create it if necessary + if os.path.exists(defdestfolder): + if not os.path.isdir(defdestfolder): + dlg = wx.MessageDialog(self, + message = self.utility.lang.get('notadir') + '\n' + \ + self.utility.lang.get('savedtofolderwithsource'), + caption = self.utility.lang.get('error'), + style = wx.OK | wx.ICON_ERROR) + dlg.ShowModal() + dlg.Destroy() + defdestfolder = "" + else: + try: + os.makedirs(defdestfolder) + except: + dlg = wx.MessageDialog(self, + message = self.utility.lang.get('invalidwinname') + '\n'+ \ + self.utility.lang.get('savedtofolderwithsource'), + caption = self.utility.lang.get('error'), + style = wx.OK | wx.ICON_ERROR) + dlg.ShowModal() + dlg.Destroy() + defdestfolder = "" + + +# + params['target'] = defdestfolder + + self.targeted = defdestfolder + + elif i == 2: + dl = wx.DirDialog(self, style = wx.DD_DEFAULT_STYLE | wx.DD_NEW_DIR_BUTTON) + result = dl.ShowModal() + dl.Destroy() + if result != wx.ID_OK: + return + params['target'] = dl.GetPath() + self.targeted = dl.GetPath() + else: + self.targeted = "" + + return params + + def getTargeted(self): + targeted = self.targeted + return targeted + + +################################################################ +# +# Class: TorrentMaker +# +# Creates the dialog for making a torrent +# +################################################################ +class TorrentMaker(wx.Frame): + def __init__(self, parent): + self.parent = parent + self.utility = self.parent.utility + + title = self.utility.lang.get('btfilemakertitle') + wx.Frame.__init__(self, None, -1, title) + + if sys.platform == 'win32': + self.SetIcon(self.utility.icon) + + panel = wx.Panel(self, -1) + + sizer = wx.BoxSizer(wx.VERTICAL) + + self.notebook = wx.Notebook(panel, -1) + + self.fileInfoPanel = FileInfoPanel(self.notebook, self) + self.notebook.AddPage(self.fileInfoPanel, self.utility.lang.get('fileinfo')) + + self.trackerInfoPanel = TrackerInfoPanel(self.notebook, self) + self.notebook.AddPage(self.trackerInfoPanel, self.utility.lang.get('trackerinfo')) + + self.miscInfoPanel = MiscInfoPanel(self.notebook, self) + self.notebook.AddPage(self.miscInfoPanel, self.utility.lang.get('miscinfo')) + + sizer.Add(self.notebook, 1, wx.EXPAND|wx.ALL, 5) + + btnbox = wx.BoxSizer(wx.HORIZONTAL) + b3 = wx.Button(panel, -1, self.utility.lang.get('saveasdefaultconfig')) + btnbox.Add(b3, 0, wx.EXPAND) + + b2 = wx.Button(panel, -1, self.utility.lang.get('maketorrent')) + btnbox.Add(b2, 0, wx.EXPAND|wx.LEFT|wx.RIGHT, 10) 
+ + b4 = wx.Button(panel, -1, self.utility.lang.get('close')) + btnbox.Add(b4, 0, wx.EXPAND) + + sizer.Add(btnbox, 0, wx.ALIGN_CENTER|wx.ALL, 10) + + wx.EVT_BUTTON(panel, b2.GetId(), self.complete) + wx.EVT_BUTTON(panel, b3.GetId(), self.saveConfig) + wx.EVT_BUTTON(panel, b4.GetId(), self.closeWin) + + panel.SetSizerAndFit(sizer) + + self.Fit() + + self.Show() + + def closeWin(self, event = None): + savetordeffolder = self.fileInfoPanel.savetordeftext.GetValue() + self.utility.makerconfig.Write('savetordeffolder', savetordeffolder) + self.utility.makerconfig.Write('announcehistory', self.trackerInfoPanel.announcehistory, "bencode-list") + + self.Destroy() + + def saveConfig(self, event = None): + self.fileInfoPanel.saveConfig() + self.trackerInfoPanel.saveConfig() + self.miscInfoPanel.saveConfig() + + self.utility.makerconfig.Flush() + + def complete(self, event = None): + if DEBUG: + print "complete thread",currentThread() + + filename = self.fileInfoPanel.dirCtl.GetValue() + if filename == '': + dlg = wx.MessageDialog(self, message = self.utility.lang.get('youmustselectfileordir'), + caption = self.utility.lang.get('error'), style = wx.OK | wx.ICON_ERROR) + dlg.ShowModal() + dlg.Destroy() + return + + params = {} + params.update(tdefdefaults) + params.update(self.fileInfoPanel.getParams()) + params.update(self.trackerInfoPanel.getParams()) + params.update(self.miscInfoPanel.getParams()) + + try: + CompleteDir(self, filename, params) + except: + oldstdout = sys.stdout + sys.stdout = sys.stderr + print_exc() + sys.stdout = oldstdout + + +################################################################ +# +# Class: CompleteDir +# +# Creating torrents for one or more files +# +################################################################ +class CompleteDir: + def __init__(self, parent, srcpath, params): + self.srcpath = srcpath + self.params = params + self.startnow = parent.fileInfoPanel.startnow.GetValue() + + self.usinginternaltracker = False + if 'usinginternaltracker' in params: + self.usinginternaltracker = params['usinginternaltracker'] + del params['usinginternaltracker'] + self.startnow = True # Always start seeding immediately + + self.params = params + self.parent = parent + self.utility = self.parent.utility + self.flag = Event() + self.separatetorrents = False + self.files = [] + + if os.path.isdir(srcpath): + self.choicemade = Event() + frame = wx.Frame(None, -1, self.utility.lang.get('btmaketorrenttitle'), size = (1, 1)) + self.frame = frame + panel = wx.Panel(frame, -1) + gridSizer = wx.FlexGridSizer(cols = 1, vgap = 8, hgap = 8) + gridSizer.AddGrowableRow(1) + gridSizer.Add(wx.StaticText(panel, -1, + self.utility.lang.get('dirnotice')), 0, wx.ALIGN_CENTER) + gridSizer.Add(wx.StaticText(panel, -1, '')) + + b = wx.FlexGridSizer(cols = 3, hgap = 10) + yesbut = wx.Button(panel, -1, self.utility.lang.get('yes')) + def saidyes(e, self = self): + self.frame.Destroy() + self.separatetorrents = True + self.begin() + wx.EVT_BUTTON(frame, yesbut.GetId(), saidyes) + b.Add(yesbut, 0) + + nobut = wx.Button(panel, -1, self.utility.lang.get('no')) + def saidno(e, self = self): + self.frame.Destroy() + self.begin() + wx.EVT_BUTTON(frame, nobut.GetId(), saidno) + b.Add(nobut, 0) + + cancelbut = wx.Button(panel, -1, self.utility.lang.get('cancel')) + def canceled(e, self = self): + self.frame.Destroy() + wx.EVT_BUTTON(frame, cancelbut.GetId(), canceled) + b.Add(cancelbut, 0) + gridSizer.Add(b, 0, wx.ALIGN_CENTER) + border = wx.BoxSizer(wx.HORIZONTAL) + border.Add(gridSizer, 1, wx.EXPAND | 
wx.ALL, 4) + + panel.SetSizer(border) + panel.SetAutoLayout(True) + frame.Show() + border.Fit(panel) + frame.Fit() + else: + self.begin() + + def begin(self): + if self.separatetorrents: + frame = wx.Frame(None, -1, self.utility.lang.get('btmakedirtitle'), size = wx.Size(550, 250)) + else: + frame = wx.Frame(None, -1, self.utility.lang.get('btmaketorrenttitle'), size = wx.Size(550, 250)) + self.frame = frame + + panel = wx.Panel(frame, -1) + gridSizer = wx.FlexGridSizer(cols = 1, vgap = 15, hgap = 8) + + if self.separatetorrents: + self.currentLabel = wx.StaticText(panel, -1, self.utility.lang.get('checkfilesize')) + else: + self.currentLabel = wx.StaticText(panel, -1, self.utility.lang.get('building')) + gridSizer.Add(self.currentLabel, 0, wx.EXPAND) + self.gauge = wx.Gauge(panel, -1, range = 1000, style = wx.GA_SMOOTH) + gridSizer.Add(self.gauge, 0, wx.EXPAND) + gridSizer.Add((10, 10), 1, wx.EXPAND) + self.button = wx.Button(panel, -1, self.utility.lang.get('cancel')) + gridSizer.Add(self.button, 0, wx.ALIGN_CENTER) + gridSizer.AddGrowableRow(2) + gridSizer.AddGrowableCol(0) + + g2 = wx.FlexGridSizer(cols = 1, vgap = 15, hgap = 8) + g2.Add(gridSizer, 1, wx.EXPAND | wx.ALL, 25) + g2.AddGrowableRow(0) + g2.AddGrowableCol(0) + panel.SetSizer(g2) + panel.SetAutoLayout(True) + wx.EVT_BUTTON(frame, self.button.GetId(), self.onDone) + wx.EVT_CLOSE(frame, self.onDone) + frame.Show(True) + Thread(target = self.complete).start() + + def complete(self): + try: + if self.separatetorrents: + completedir(self.srcpath, self.params, self.flag, self.progressCallback, self.fileCallback) + else: + make_meta_file(self.srcpath, self.params, self.flag, self.progressCallback, self.fileCallback) + if not self.flag.isSet(): + self.completeCallback() + except (OSError, IOError), e: + self.errorCallback(e) + + def errorCallback(self,e): + wx.CallAfter(self.onError,e) + + def onError(self,e): + self.currentLabel.SetLabel(self.utility.lang.get('error')) + self.button.SetLabel(self.utility.lang.get('close')) + dlg = wx.MessageDialog(None, + message = self.utility.lang.get('error') + ' - ' + str(e), + caption = self.utility.lang.get('error'), + style = wx.OK | wx.ICON_ERROR) + dlg.ShowModal() + dlg.Destroy() + + def completeCallback(self): + wx.CallAfter(self.onComplete) + + def onComplete(self): + self.currentLabel.SetLabel(self.utility.lang.get('Done')) + self.gauge.SetValue(1000) + self.button.SetLabel(self.utility.lang.get('close')) + + def progressCallback(self, amount): + wx.CallAfter(self.OnProgressUpdate,amount) + + def OnProgressUpdate(self, amount): + target = int(amount * 1000) + old = self.gauge.GetValue() + perc10 = self.gauge.GetRange()/10 + if target > old+perc10: # 10% increments + self.gauge.SetValue(target) + + def fileCallback(self, orig, torrent): + self.files.append([orig,torrent]) + wx.CallAfter(self.onFile,torrent) + + def onFile(self, torrent): + if DEBUG: + print "onFile thread",currentThread() + self.currentLabel.SetLabel(self.utility.lang.get('building') + torrent) + + def onDone(self, event): + self.flag.set() + self.frame.Destroy() + if self.startnow: + # When seeding immediately, add torrents to queue + for orig,torrentfilename in self.files: + try: + absorig = os.path.abspath(orig) + if os.path.isfile(absorig): + # To seed a file, destdir must be one up. 
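+ # e.g. a source file at /tmp/example/clip.avi (illustrative path) gets destdir /tmp/example, so the existing file is found and seeded in place.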
+ destdir = os.path.dirname(absorig) + else: + destdir = absorig + + tdef = TorrentDef.load(torrentfilename) + defaultDLConfig = DefaultDownloadStartupConfig.getInstance() + dscfg = defaultDLConfig.copy() + dscfg.set_dest_dir(destdir) + self.utility.session.start_download(tdef,dscfg) + + except Exception,e: + print_exc() + self.onError(e) + + +def make_meta_file(srcpath,params,userabortflag,progressCallback,torrentfilenameCallback): + + tdef = TorrentDef() + + if not os.path.isdir(srcpath): + if 'playtime' in params: + tdef.add_content(srcpath,playtime=params['playtime']) + else: + tdef.add_content(srcpath) + else: + srcbasename = os.path.basename(os.path.normpath(srcpath)) + for filename in os.listdir(srcpath): + inpath = os.path.join(srcpath,filename) + outpath = os.path.join(srcbasename,filename) + # h4x0r playtime + if 'playtime' in params: + tdef.add_content(inpath,outpath,playtime=params['playtime']) + else: + tdef.add_content(inpath,outpath) + + if params['comment']: + tdef.set_comment(params['comment']) + if params['created by']: + tdef.set_created_by(params['created by']) + if params['announce']: + tdef.set_tracker(params['announce']) + if params['announce-list']: + tdef.set_tracker_hierarchy(params['announce-list']) + if params['nodes']: # mainline DHT + tdef.set_dht_nodes(params['nodes']) + if params['httpseeds']: + tdef.set_httpseeds(params['httpseeds']) + if params['encoding']: + tdef.set_encoding(params['encoding']) + if params['piece length']: + tdef.set_piece_length(params['piece length']) + if params['makehash_md5']: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentMaker: make MD5" + tdef.set_add_md5hash(params['makehash_md5']) + if params['makehash_crc32']: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentMaker: make CRC32" + tdef.set_add_crc32(params['makehash_crc32']) + if params['makehash_sha1']: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentMaker: make SHA1" + tdef.set_add_sha1hash(params['makehash_sha1']) + if params['createmerkletorrent']: + tdef.set_create_merkle_torrent(params['createmerkletorrent']) + if params['torrentsigkeypairfilename']: + tdef.set_signature_keypair_filename(params['torrentsigkeypairfilename']) + if params['thumb']: + tdef.set_thumbnail(params['thumb']) + + tdef.finalize(userabortflag=userabortflag,userprogresscallback=progressCallback) + + if params['createmerkletorrent']: + postfix = TRIBLER_TORRENT_EXT + else: + postfix = '.torrent' + + if 'target' in params and params['target']: + torrentfilename = os.path.join(params['target'], os.path.split(os.path.normpath(srcpath))[1] + postfix) + else: + a, b = os.path.split(srcpath) + if b == '': + torrentfilename = a + postfix + else: + torrentfilename = os.path.join(a, b + postfix) + + tdef.save(torrentfilename) + + # Inform higher layer we created torrent + torrentfilenameCallback(srcpath,torrentfilename) + +def completedir(srcpath, params, userabortflag, progressCallback, torrentfilenameCallback): + merkle_torrent = params['createmerkletorrent'] == 1 + if merkle_torrent: + ext = TRIBLER_TORRENT_EXT + else: + ext = '.torrent' + srcfiles = os.listdir(srcpath) + srcfiles.sort() + + # Filter out any .torrent files + goodfiles = [] + for srcfile in srcfiles: + if srcfile[-len(ext):] != ext and (srcfile + ext) not in srcfiles: + goodfile = os.path.join(srcpath, srcfile) + goodfiles.append(goodfile) + + for goodfile in goodfiles: + basename = os.path.split(goodfile)[-1] + # Ignore cores, CVS and dotfiles + if basename 
not in FILESTOIGNORE and basename[0] != '.': + make_meta_file(goodfile, params,userabortflag,progressCallback,torrentfilenameCallback) + diff --git a/tribler-mod/Tribler/Main/Dialogs/__init__.py b/tribler-mod/Tribler/Main/Dialogs/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tribler-mod/Tribler/Main/Dialogs/__init__.py.bak b/tribler-mod/Tribler/Main/Dialogs/__init__.py.bak new file mode 100644 index 0000000..e69de29 diff --git a/tribler-mod/Tribler/Main/Dialogs/abcoption.py b/tribler-mod/Tribler/Main/Dialogs/abcoption.py new file mode 100644 index 0000000..05dbde8 --- /dev/null +++ b/tribler-mod/Tribler/Main/Dialogs/abcoption.py @@ -0,0 +1,1657 @@ +from time import localtime, strftime +# Written by ABC authors and Arno Bakker +# see LICENSE.txt for license information + +# TODO: +# - Adhere to SeedingOptions. Wait on Jelle checkin +# - Make Core adhere to diskfullthreshold + +import sys +import wx +import os + +from traceback import print_exc + +from Tribler.Main.Utility.constants import * #IGNORE:W0611 +from Tribler.Main.globals import DefaultDownloadStartupConfig,get_default_dscfg_filename + +from Tribler.Main.Dialogs.socnetmyinfo import MyInfoWizard +from Tribler.Video.VideoPlayer import * + +from Tribler.Core.API import * +from Tribler.Core.Utilities.utilities import show_permid +from Tribler.Core.osutils import getfreespace + +from Tribler.Core.defaults import * + +DEBUG = False + + +################################################################ +# +# Class: ABCOptionPanel +# +# Basic structure for options window panels +# +# Adds a button for "Restore Defaults" +# at the bottom of each panel +# +################################################################ +class ABCOptionPanel(wx.Panel): + def __init__(self, parent, dialog): + wx.Panel.__init__(self, parent, -1) + + self.dialog = dialog + self.utility = dialog.utility + + self.changed = False + + self.outersizer = wx.BoxSizer(wx.VERTICAL) + + self.sizer = wx.BoxSizer(wx.VERTICAL) + + self.defaultDLConfig = DefaultDownloadStartupConfig.getInstance() + + # Things to do after the subclass has finished its init stage + def initTasks(self): + self.loadValues() + + self.outersizer.Add(self.sizer, 1, wx.EXPAND) + + defaultsButton = wx.Button(self, -1, self.utility.lang.get('reverttodefault')) + wx.EVT_BUTTON(self, defaultsButton.GetId(), self.setDefaults) + self.outersizer.Add(defaultsButton, 0, wx.ALIGN_RIGHT|wx.TOP|wx.BOTTOM, 10) + + self.SetSizerAndFit(self.outersizer) + + def loadValues(self, Read = None): + # Dummy function that class members should override + pass + + def setDefaults(self, event = None): + self.loadValues(self.utility.config.ReadDefault) + + def apply(self): + # Dummy function that class members should override + pass + + +################################################################ +# +# Class: NetworkPanel +# +# Contains network settings +# +################################################################ +class NetworkPanel(ABCOptionPanel): + def __init__(self, parent, dialog): + ABCOptionPanel.__init__(self, parent, dialog) + sizer = self.sizer + + ip = self.utility.session.get_external_ip() + ip_txt = self.utility.lang.get('currentdiscoveredipaddress')+": "+ip + label = wx.StaticText(self, -1, ip_txt ) + sizer.Add( label, 0, wx.EXPAND|wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + + self.minport = self.utility.makeNumCtrl(self, 1, min = 1, max = 65536) + port_box = wx.BoxSizer(wx.HORIZONTAL) + port_box.Add(wx.StaticText(self, -1, self.utility.lang.get('portnumber')), 0, 
wx.ALIGN_CENTER_VERTICAL) + port_box.Add(self.minport, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + port_box.Add(wx.StaticText(self, -1, self.utility.lang.get('restartabc')), 0, wx.ALIGN_CENTER_VERTICAL) + + sizer.Add(port_box, 0, wx.EXPAND|wx.ALL, 5) + + self.kickban = wx.CheckBox(self, -1, self.utility.lang.get('kickban')) + sizer.Add(self.kickban, 0, wx.ALIGN_LEFT|wx.ALL, 5) + + # Do or don't get scrape data + ################################################################### + self.scrape = wx.CheckBox(self, -1, self.utility.lang.get('scrape')) + sizer.Add(self.scrape, 0, wx.ALIGN_LEFT|wx.ALL, 5) + self.scrape.SetToolTipString(self.utility.lang.get('scrape_hint')) + + ################################################################### + #self.ipv6 = wx.CheckBox(self, -1, "Initiate and receive connections via IPv6") + #if self.utility.config.Read('ipv6') == "1": + # self.ipv6.SetValue(True) + #else: + # self.ipv6.SetValue(False) + #################################################################### + + # URL of internal tracker, user should use it in annouce box / announce-list + itrack_box = wx.BoxSizer(wx.HORIZONTAL) + self.itrack = wx.TextCtrl(self, -1, "") + itrack_box.Add(wx.StaticText(self, -1, self.utility.lang.get('internaltrackerurl')), 0, wx.ALIGN_CENTER_VERTICAL) + itrack_box.Add(self.itrack, 1, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT|wx.EXPAND, 5) + sizer.Add(itrack_box, 0, wx.ALIGN_LEFT|wx.ALL|wx.EXPAND, 5) + + self.initTasks() + + def loadValues(self, Read = None): + + self.minport.SetValue(self.utility.session.get_listen_port()) + itrackerurl = self.utility.session.get_internal_tracker_url() + self.itrack.SetValue(itrackerurl) + + #self.scrape.SetValue(Read('scrape', "boolean")) # TODO: cannot find it being used + + self.kickban.SetValue(self.defaultDLConfig.get_auto_kick()) + + def apply(self): + minport = int(self.minport.GetValue()) + if minport > 65535: + minport = 65535 + + itrackerurl = self.itrack.GetValue() + + # Save SessionStartupConfig + state_dir = self.utility.session.get_state_dir() + cfgfilename = Session.get_default_config_filename(state_dir) + scfg = SessionStartupConfig.load(cfgfilename) + + for target in [scfg,self.utility.session]: + try: + target.set_listen_port(minport) + except: + print_exc() + try: + target.set_internal_tracker_url(itrackerurl) + except: + print_exc() + + + scfg.save(cfgfilename) + + #self.utility.config.Write('scrape', self.scrape.GetValue(), "boolean") + + kickban = self.kickban.GetValue() + + # Save DownloadStartupConfig + self.defaultDLConfig.set_auto_kick(kickban) + + dlcfgfilename = get_default_dscfg_filename(self.utility.session) + self.defaultDLConfig.save(dlcfgfilename) + + +################################################################ +# +# Class: AdvancedNetworkPanel +# +# Contains advanced network settings +# (defaults should be fine for most users) +# +################################################################ +class AdvancedNetworkPanel(ABCOptionPanel): + def __init__(self, parent, dialog): + ABCOptionPanel.__init__(self, parent, dialog) + sizer = self.sizer + + warningtext = wx.StaticText(self, -1, self.utility.lang.get('changeownrisk')) + sizer.Add(warningtext, 0, wx.ALIGN_CENTER|wx.ALL, 5) + + #self.ipv6bindsv4_data=wx.Choice(self, -1, + # choices = ['separate sockets', 'single socket']) + #self.ipv6bindsv4_data.SetSelection(int(self.advancedConfig['ipv6_binds_v4'])) + + datasizer = wx.FlexGridSizer(cols = 2, vgap = 5, hgap = 10) + + # Local IP + self.ip_data = wx.TextCtrl(self, -1) + 
datasizer.Add(wx.StaticText(self, -1, self.utility.lang.get('localip')), 1, wx.ALIGN_CENTER_VERTICAL) + datasizer.Add(self.ip_data) + + # IP to Bind to + self.bind_data = wx.TextCtrl(self, -1) + datasizer.Add(wx.StaticText(self, -1, self.utility.lang.get('iptobindto')), 1, wx.ALIGN_CENTER_VERTICAL) + datasizer.Add(self.bind_data) + + # Minimum Peers + self.minpeers_data = wx.SpinCtrl(self, size = wx.Size(60, -1)) + self.minpeers_data.SetRange(10, 100) + datasizer.Add(wx.StaticText(self, -1, self.utility.lang.get('minnumberofpeer')), 1, wx.ALIGN_CENTER_VERTICAL) + datasizer.Add(self.minpeers_data) + + # Maximum Connections + self.maxconnections_data=wx.SpinCtrl(self, size = wx.Size(60, -1)) + self.maxconnections_data.SetRange(0, 1000) + datasizer.Add(wx.StaticText(self, -1, self.utility.lang.get('maxpeerconnection')), 1, wx.ALIGN_CENTER_VERTICAL) + datasizer.Add(self.maxconnections_data) + + # UPnP Settings + if (sys.platform == 'win32'): + self.upnp_choices = [ self.utility.lang.get('upnp_0'), + self.utility.lang.get('upnp_1'), + self.utility.lang.get('upnp_2'), + self.utility.lang.get('upnp_3')] + else: + self.upnp_choices = [ self.utility.lang.get('upnp_0'), + self.utility.lang.get('upnp_3')] + self.upnp_data = wx.ComboBox(self, -1, "", wx.Point(-1, -1), wx.Size(-1, -1), self.upnp_choices, wx.CB_DROPDOWN|wx.CB_READONLY) + datasizer.Add(wx.StaticText(self, -1, self.utility.lang.get('upnp')), 1, wx.ALIGN_CENTER_VERTICAL) + datasizer.Add(self.upnp_data) + + + # ut_pex maximum Peers + self.ut_pex_maxaddrs_data = wx.SpinCtrl(self, size = wx.Size(60, -1)) + self.ut_pex_maxaddrs_data.SetRange(0, 1024) + t1 = wx.StaticText(self, -1, self.utility.lang.get('ut_pex_maxaddrs1')) + t2 = wx.StaticText(self, -1, self.utility.lang.get('ut_pex_maxaddrs2')) + tsizer = wx.BoxSizer(wx.VERTICAL) + tsizer.Add(t1, 1, wx.ALIGN_LEFT) + tsizer.Add(t2, 1, wx.ALIGN_LEFT) + datasizer.Add(tsizer, 1, wx.ALIGN_CENTER_VERTICAL) + datasizer.Add(self.ut_pex_maxaddrs_data) + sizer.Add(datasizer, 0, wx.ALL, 5) + + # Set tooltips + self.ip_data.SetToolTipString(self.utility.lang.get('iphint')) + self.bind_data.SetToolTipString(self.utility.lang.get('bindhint')) + self.minpeers_data.SetToolTipString(self.utility.lang.get('minpeershint')) + self.ut_pex_maxaddrs_data.SetToolTipString(self.utility.lang.get('ut_pex_maxaddrs_hint')) + self.maxconnections_data.SetToolTipString(self.utility.lang.get('maxconnectionhint')) + + self.initTasks() + + def loadValues(self, Read = None): + if Read is None: + Read = self.utility.config.Read + session = self.utility.session + + addrlist = session.get_bind_to_addresses() + addrstr = ','.join(addrlist) + + self.ip_data.SetValue(session.get_ip_for_tracker()) + self.bind_data.SetValue(addrstr) + + self.minpeers_data.SetValue(self.defaultDLConfig.get_min_peers()) + self.maxconnections_data.SetValue(self.defaultDLConfig.get_max_conns()) + + upnp_val = session.get_upnp_mode() + selected = self.upnp_val2selected(upnp_val) + self.upnp_data.SetStringSelection(self.upnp_choices[selected]) + + self.ut_pex_maxaddrs_data.SetValue(self.defaultDLConfig.get_ut_pex_max_addrs_from_peer()) + + + def upnp_val2selected(self,upnp_val): + if (sys.platform == 'win32'): + selected = upnp_val + else: + if upnp_val <= 2: + selected = 0 + else: + selected = 1 + return selected + + def selected2upnp_val(self,selected): + if (sys.platform == 'win32'): + upnp_val = selected + else: + if selected == 1: + upnp_val = UPNPMODE_UNIVERSAL_DIRECT + else: + upnp_val = UPNPMODE_DISABLED + return upnp_val + + + def apply(self): + + 
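+ # Apply the new values both to the running Session and to the saved SessionStartupConfig / default DownloadStartupConfig below, so they survive a restart.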
ip4track = self.ip_data.GetValue() + ip2bind2 = self.bind_data.GetValue() + if ip2bind2.strip(): + ip2bind2list = ip2bind2.split(",") + else: + ip2bind2list = [] + + selected = self.upnp_choices.index(self.upnp_data.GetValue()) + upnp_val = self.selected2upnp_val(selected) + + minpeers = int(self.minpeers_data.GetValue()) + maxconnections = int(self.maxconnections_data.GetValue()) + if maxconnections == 0: + maxinitiate = 2 * minpeers + else: + maxinitiate = min(2 * minpeers, maxconnections) + utmaxaddrs = int(self.ut_pex_maxaddrs_data.GetValue()) + + + # Save SessConfig + state_dir = self.utility.session.get_state_dir() + cfgfilename = Session.get_default_config_filename(state_dir) + scfg = SessionStartupConfig.load(cfgfilename) + + for target in [scfg,self.utility.session]: + try: + target.set_ip_for_tracker(ip4track) + except: + print_exc() + try: + target.set_bind_to_addresses(ip2bind2list) + except: + print_exc() + try: + target.set_upnp_mode(upnp_val) + except: + print_exc() + + scfg.save(cfgfilename) + + # Save DownloadStartupConfig + self.defaultDLConfig.set_min_peers(minpeers) + self.defaultDLConfig.set_max_conns(maxconnections) + self.defaultDLConfig.set_max_conns_to_initiate(maxinitiate) + self.defaultDLConfig.set_ut_pex_max_addrs_from_peer(utmaxaddrs) + + dlcfgfilename = get_default_dscfg_filename(self.utility.session) + self.defaultDLConfig.save(dlcfgfilename) + + +################################################################ +# +# Class: QueuePanel +# +# Contains settings that control how many torrents to start +# at once and when to start them +# +################################################################ + +# Arno, 2008-03-27: Currently disabled. Need to write queueing support on top +# of core + + +################################################################ +# +# Class: MiscPanel +# +# Contains settings that don't seem to fit well anywhere else +# +################################################################ +class MiscPanel(ABCOptionPanel): + def __init__(self, parent, dialog): + ABCOptionPanel.__init__(self, parent, dialog) + sizer = self.sizer + + self.trayoptions = [self.utility.lang.get('showtray_never'), + self.utility.lang.get('showtray_min'), + self.utility.lang.get('showtray_always')] + self.mintray = wx.RadioBox(self, + -1, + self.utility.lang.get('showtray'), + wx.DefaultPosition, + wx.DefaultSize, + self.trayoptions, + 3, + wx.RA_SPECIFY_COLS) + + # On the Mac, the option exists but is not shown, to support + # the widget being read & written. 
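+ # Whether shown or hidden, the selected index is what gets stored
+ # as the 'mintray' value (0/1/2, in the order of the showtray_*
+ # labels above).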
+ if sys.platform != "darwin": + sizer.Add(self.mintray, 0, wx.ALIGN_LEFT|wx.ALL, 5) + else: + self.mintray.Hide() + + self.confirmonclose = wx.CheckBox(self, -1, self.utility.lang.get('confirmonexit')) + sizer.Add(self.confirmonclose, 0, wx.ALIGN_LEFT|wx.ALL, 5) + + # Registry association (only makes sense under windows) + if (sys.platform == 'win32'): + self.associate = wx.CheckBox(self, -1, self.utility.lang.get('associate')) + sizer.Add(self.associate, 0, wx.ALIGN_LEFT|wx.ALL, 5) + + # Languages option + if self.utility.languages == {}: + self.getLanguages() + self.language_names = [] + self.language_filenames = [] + for item in self.utility.languages: + self.language_names.append(item) + self.language_filenames.append(self.utility.languages[item]) + + self.language_choice = wx.ComboBox(self, -1, "", wx.Point(-1, -1), wx.Size(-1, -1), self.language_names, wx.CB_DROPDOWN|wx.CB_READONLY) + + lang_box = wx.BoxSizer(wx.HORIZONTAL) + lang_box.Add(wx.StaticText(self, -1, self.utility.lang.get('choose_language')), 0, wx.ALIGN_CENTER_VERTICAL) + lang_box.Add(self.language_choice, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + lang_box.Add(wx.StaticText(self, -1, self.utility.lang.get('restartabc')), 0, wx.ALIGN_CENTER_VERTICAL) + sizer.Add(lang_box, 0, wx.ALL, 5) + + self.recategorize_button = wx.Button(self, -1, self.utility.lang.get('recategorize_button')) + self.recategorize_button.Bind(wx.EVT_BUTTON, self.onRecategorize) + + recategorize_box = wx.BoxSizer(wx.HORIZONTAL) + recategorize_box.Add(wx.StaticText(self, -1, self.utility.lang.get('recategorize')), 0, wx.ALIGN_CENTER_VERTICAL) + recategorize_box.Add(self.recategorize_button, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + sizer.Add(recategorize_box, 0, wx.ALL, 5) + + self.initTasks() + + def loadValues(self, Read = None): + if Read is None: + Read = self.utility.config.Read + + mintray = Read('mintray', "int") + if mintray >= len(self.trayoptions): + mintray = len(self.trayoptions) - 1 + self.mintray.SetSelection(mintray) + + self.confirmonclose.SetValue(Read('confirmonclose', "boolean")) + + if (sys.platform == 'win32'): + self.associate.SetValue(Read('associate', "boolean")) + + index = self.language_filenames.index(Read('language_file')) + if not self.language_names: + # Should never get here -- this means there are no valid language files found! + sys.stderr.write("\nNO LANGUAGE FILES FOUND! Please add a valid language file\n") + defaultlang = "" + elif (index > -1): + defaultlang = self.language_names[index] + self.language_choice.SetStringSelection(defaultlang) + + def apply(self): + self.utility.config.Write('mintray', self.mintray.GetSelection()) + if self.utility.frame.tbicon is not None: + self.utility.frame.tbicon.updateIcon(False) + + # FIXME: quick hack to prevent Unicode problem, will still give problems + # when French, i.e. "fran\,cais" is selected. 
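+ # (str() raises UnicodeEncodeError as soon as the selected language
+ # name contains a non-ASCII character)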
+ # + val = str(self.language_choice.GetValue()) + langname_index = self.language_names.index(val) + self.utility.config.Write('language_file', self.language_filenames[langname_index]) + + self.utility.config.Write('confirmonclose', self.confirmonclose.GetValue(), "boolean") + + if (sys.platform == 'win32'): + self.utility.config.Write('associate', self.associate.GetValue(), "boolean") + + def getLanguages(self): + langpath = os.path.join(self.utility.getPath(),"Tribler","Lang") + + dirlist = os.listdir(langpath) + dirlist2 = [] + for filename in dirlist: + if (filename[-5:] == '.lang'): + dirlist2.append(filename) + dirlist2.sort() + + # Remove user.lang from the list + try: + dirlist2.remove("user.lang") + except: + pass + + self.utility.languages = {} + + for filename in dirlist2: + filepath = os.path.join(langpath, filename) + + config = wx.FileConfig(localFilename = filepath) + config.SetPath("ABC/language") + if config.Exists('languagename'): + self.utility.languages[config.Read('languagename')] = filename + + def onRecategorize(self, event=None): + #catobj = Category.getInstance() + #torrentdata = self.utility.guiUtility.data_manager.data + #catobj.reSortAll(torrentdata) + # Arno: need to fix Category to make this work again. + pass + +################################################################ +# +# Class: DiskPanel +# +# Contains settings related to saving files +# +################################################################ +class DiskPanel(ABCOptionPanel): + def __init__(self, parent, dialog): + ABCOptionPanel.__init__(self, parent, dialog) + sizer = self.sizer + + self.torrentbackup = wx.CheckBox(self, -1, self.utility.lang.get('removebackuptorrent')) + sizer.Add(self.torrentbackup, 0, wx.ALIGN_LEFT|wx.ALL, 5) + + self.defaultdir = wx.StaticText(self, -1, self.utility.lang.get('setdefaultfolder')) + self.dir = wx.TextCtrl(self, -1, "") + browsebtn = wx.Button(self, -1, "...", style = wx.BU_EXACTFIT) + self.Bind(wx.EVT_BUTTON, self.onBrowseDir, browsebtn) + + dirbox = wx.BoxSizer(wx.HORIZONTAL) + dirbox.Add(self.defaultdir, 0, wx.ALIGN_CENTER_VERTICAL) + dirbox.Add(self.dir, 1, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT|wx.EXPAND, 5) + dirbox.Add(browsebtn, 0, wx.ALIGN_CENTER_VERTICAL) + + sizer.Add(dirbox, 0, wx.ALIGN_LEFT|wx.ALL|wx.EXPAND, 5) + + diskfullbox = wx.BoxSizer(wx.HORIZONTAL) + self.diskfullcheckbox = wx.CheckBox(self, -1, self.utility.lang.get('diskfullthreshold')) + self.diskfullthreshold = self.utility.makeNumCtrl(self, 1, integerWidth = 4) + diskfullbox.Add(self.diskfullcheckbox, 0, wx.ALIGN_CENTER_VERTICAL|wx.RIGHT, 5) + diskfullbox.Add(self.diskfullthreshold, 0, wx.ALIGN_CENTER_VERTICAL|wx.RIGHT, 5) + diskfullbox.Add(wx.StaticText(self, -1, self.utility.lang.get('MB')), 0, wx.ALIGN_CENTER_VERTICAL) + + sizer.Add(diskfullbox, 0, wx.ALIGN_LEFT|wx.ALL, 5) + + self.initTasks() + + def loadValues(self, Read = None): + if Read is None: + Read = self.utility.config.Read + + self.dir.SetValue(self.defaultDLConfig.get_dest_dir()) + self.torrentbackup.SetValue(Read('removetorrent', "boolean")) + + diskfullthreshold = Read('diskfullthreshold', "int") # TODO: make sure Core uses this + if diskfullthreshold > 0: + self.diskfullcheckbox.SetValue(True) + self.diskfullthreshold.SetValue(diskfullthreshold) + + def apply(self): + self.utility.config.Write('removetorrent', self.torrentbackup.GetValue(), "boolean") + + if self.diskfullcheckbox.GetValue(): + diskfullthreshold = self.diskfullthreshold.GetValue() + else: + diskfullthreshold = 0 + 
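+ # An unchecked box is stored as 0, which loadValues above treats as
+ # "disabled"; otherwise the value entered (in MB) is written out.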
self.utility.config.Write('diskfullthreshold', diskfullthreshold) + + + # Save DownloadStartupConfig + defaultdestdir = self.dir.GetValue() + self.defaultDLConfig.set_dest_dir(defaultdestdir) + + dlcfgfilename = get_default_dscfg_filename(self.utility.session) + self.defaultDLConfig.save(dlcfgfilename) + + # Save SessionStartupConfig + # Also change torrent collecting dir, which is by default in the default destdir + state_dir = self.utility.session.get_state_dir() + cfgfilename = Session.get_default_config_filename(state_dir) + scfg = SessionStartupConfig.load(cfgfilename) + + dirname = os.path.join(defaultdestdir,STATEDIR_TORRENTCOLL_DIR) + for target in [scfg,self.utility.session]: + try: + target.set_torrent_collecting_dir(dirname) + except: + print_exc() + + scfg.save(cfgfilename) + + + + def onBrowseDir(self, event = None): + dlg = wx.DirDialog(self.utility.frame, + self.utility.lang.get('choosedefaultdownloadfolder'), + style = wx.DD_DEFAULT_STYLE | wx.DD_NEW_DIR_BUTTON) + if dlg.ShowModal() == wx.ID_OK: + self.dir.SetValue(dlg.GetPath()) + dlg.Destroy() + + +################################################################ +# +# Class: AdvancedDiskPanel +# +# Contains advanced settings controlling how data is written to +# and read from disk. +# (defaults should be fine for most users) +# +################################################################ +class AdvancedDiskPanel(ABCOptionPanel): + def __init__(self, parent, dialog): + ABCOptionPanel.__init__(self, parent, dialog) + sizer = self.sizer + + warningtext = wx.StaticText(self, -1, self.utility.lang.get('changeownrisk')) + sizer.Add(warningtext, 0, wx.ALIGN_CENTER|wx.ALL, 5) + + datasizer = wx.FlexGridSizer(cols = 2, vgap = 5, hgap = 10) + + # Allocation Type + + alloc_choices = [self.utility.lang.get('alloc_normal'), + self.utility.lang.get('alloc_background'), + self.utility.lang.get('alloc_prealloc'), + self.utility.lang.get('alloc_sparse')] + self.alloc_types = [DISKALLOC_NORMAL, DISKALLOC_BACKGROUND, DISKALLOC_PREALLOCATE, DISKALLOC_SPARSE] + self.alloc_type2int = {} + for i in range(len(self.alloc_types)): + t = self.alloc_types[i] + self.alloc_type2int[t]=i + self.alloctype_data=wx.Choice(self, -1, wx.Point(-1, -1), wx.Size(-1, -1), alloc_choices) + + datasizer.Add(wx.StaticText(self, -1, self.utility.lang.get('diskalloctype')), 1, wx.ALIGN_CENTER_VERTICAL) + datasizer.Add(self.alloctype_data) + + # Allocation Rate + self.allocrate_data = wx.SpinCtrl(self, size = wx.Size(60, -1)) + self.allocrate_data.SetRange(1, 100) + datasizer.Add(wx.StaticText(self, -1, self.utility.lang.get('allocrate')), 1, wx.ALIGN_CENTER_VERTICAL) + + allocrate_box = wx.BoxSizer(wx.HORIZONTAL) + allocrate_box.Add(self.allocrate_data) + allocrate_box.Add(wx.StaticText(self, -1, " " + self.utility.lang.get('mb') + "/" + self.utility.lang.get("l_second")), 1, wx.ALIGN_CENTER_VERTICAL) + + datasizer.Add(allocrate_box) + + # Locking Method + locking_choices = [self.utility.lang.get('lock_never'), + self.utility.lang.get('lock_writing'), + self.utility.lang.get('lock_always')] + self.locking_data=wx.Choice(self, -1, wx.Point(-1, -1), wx.Size(-1, -1), locking_choices) + datasizer.Add(wx.StaticText(self, -1, self.utility.lang.get('filelocking')), 1, wx.ALIGN_CENTER_VERTICAL) + datasizer.Add(self.locking_data) + + # Doublecheck Method + doublecheck_choices = [self.utility.lang.get('check_none'), + self.utility.lang.get('check_double'), + self.utility.lang.get('check_triple')] + self.doublecheck_data=wx.Choice(self, -1, wx.Point(-1, -1), wx.Size(-1, 
-1), doublecheck_choices) + datasizer.Add(wx.StaticText(self, -1, self.utility.lang.get('extradatachecking')), 1, wx.ALIGN_CENTER_VERTICAL) + datasizer.Add(self.doublecheck_data) + + # Maximum Files Open + self.maxfilesopen_data=wx.SpinCtrl(self, size = wx.Size(60, -1)) + self.maxfilesopen_data.SetRange(0,200) + + datasizer.Add(wx.StaticText(self, -1, self.utility.lang.get('maxfileopen')), 1, wx.ALIGN_CENTER_VERTICAL) + datasizer.Add(self.maxfilesopen_data) + + # Flush data + self.flush_data_enable = wx.CheckBox(self, -1, self.utility.lang.get('flush_data')) + + self.flush_data = wx.SpinCtrl(self, size = wx.Size(60, -1)) + self.flush_data.SetRange(0, 999) + + datasizer.Add(self.flush_data_enable, 0, wx.ALIGN_CENTER_VERTICAL) + + flush_box = wx.BoxSizer(wx.HORIZONTAL) + flush_box.Add(self.flush_data, 0, wx.ALIGN_CENTER_VERTICAL) + flush_box.Add(wx.StaticText(self, -1, self.utility.lang.get('minute_long')), 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT, 5) + + datasizer.Add(flush_box) + + sizer.Add(datasizer, 0, wx.ALL, 5) + + # Disk buffering + buffer_title = wx.StaticBox(self, -1, self.utility.lang.get('bufferdisk')) + buffer = wx.StaticBoxSizer(buffer_title, wx.VERTICAL) + + self.buffer_read_enable = wx.CheckBox(self, -1, self.utility.lang.get('buffer_read')) + + buffer.Add(self.buffer_read_enable, 0, wx.ALL, 5) + sizer.Add(buffer, 0, wx.EXPAND|wx.ALL, 5) + + self.alloctype_data.SetToolTipString(self.utility.lang.get('alloctypehint')) + self.allocrate_data.SetToolTipString(self.utility.lang.get('allocratehint')) + self.locking_data.SetToolTipString(self.utility.lang.get('lockinghint')) + self.doublecheck_data.SetToolTipString(self.utility.lang.get('doublecheckhint')) + self.maxfilesopen_data.SetToolTipString(self.utility.lang.get('maxfileopenhint')) + + self.initTasks() + + def loadValues(self, Read = None): + if Read is None: + Read = self.utility.config.Read + + alloctype = self.defaultDLConfig.get_alloc_type() + alloc_selection = self.alloc_type2int[alloctype] + self.alloctype_data.SetSelection(alloc_selection) + + self.allocrate_data.SetValue(self.defaultDLConfig.get_alloc_rate()) + + lockfiles = self.defaultDLConfig.get_lock_files() + lockread = self.defaultDLConfig.get_lock_while_reading() + if lockfiles: + if lockread: + self.locking_data.SetSelection(2) + else: + self.locking_data.SetSelection(1) + else: + self.locking_data.SetSelection(0) + + doublecheck = self.defaultDLConfig.get_double_check_writes() + triplecheck = self.defaultDLConfig.get_triple_check_writes() + if doublecheck: + if triplecheck: + self.doublecheck_data.SetSelection(2) + else: + self.doublecheck_data.SetSelection(1) + else: + self.doublecheck_data.SetSelection(0) + + self.maxfilesopen_data.SetValue(self.defaultDLConfig.get_max_files_open()) + self.buffer_read_enable.SetValue(self.defaultDLConfig.get_buffer_reads()) + + + flushval = self.defaultDLConfig.get_auto_flush() + self.flush_data.SetValue(flushval) + self.flush_data_enable.SetValue(flushval > 0) + + def apply(self): + alloctype = self.alloc_types[self.alloctype_data.GetSelection()] + allocrate = int(self.allocrate_data.GetValue()) + maxopen = int(self.maxfilesopen_data.GetValue()) + lockfiles = self.locking_data.GetSelection() >= 1 + lockread = self.locking_data.GetSelection() > 1 + doublecheck = self.doublecheck_data.GetSelection() >= 1 + triplecheck = self.doublecheck_data.GetSelection() > 1 + bufferread = self.buffer_read_enable.GetValue() + + if not self.flush_data_enable.GetValue(): + flushval = 0 + else: + flushval = self.flush_data.GetValue() + + # Save 
DownloadStartupConfig + self.defaultDLConfig.set_alloc_type(alloctype) + self.defaultDLConfig.set_alloc_rate(allocrate) + self.defaultDLConfig.set_lock_files(lockfiles) + self.defaultDLConfig.set_lock_while_reading(lockread) + self.defaultDLConfig.set_double_check_writes(doublecheck) + self.defaultDLConfig.set_triple_check_writes(triplecheck) + self.defaultDLConfig.set_max_files_open(maxopen) + self.defaultDLConfig.set_buffer_reads(bufferread) + self.defaultDLConfig.set_auto_flush(flushval) + + dlcfgfilename = get_default_dscfg_filename(self.utility.session) + self.defaultDLConfig.save(dlcfgfilename) + + + + +################################################################ +# +# Class: SchedulerRulePanel +# +# Contains settings related to timeouts +# +################################################################ + +# Arno, 2008-02-27: Currently disabled, as there is no queuing + +################################################################ +# +# Class: RateLimitPanel +# +# Contains settings related to setting limits on upload and +# download rates +# +################################################################ +class RateLimitPanel(ABCOptionPanel): + def __init__(self, parent, dialog): + ABCOptionPanel.__init__(self, parent, dialog) + sizer = self.sizer + + # GUI dialog for Global upload setting + ######################################## + + # Upload settings + ######################################## + + uploadsection_title = wx.StaticBox(self, -1, self.utility.lang.get('uploadsetting')) + uploadsection = wx.StaticBoxSizer(uploadsection_title, wx.VERTICAL) + + """ + # Arno, 2008-03-27: Currently disabled, no queuing + self.maxupload = wx.SpinCtrl(self, size = wx.Size(60, -1)) + self.maxupload.SetRange(2, 100) + + maxuploadsbox = wx.BoxSizer(wx.HORIZONTAL) + maxuploadsbox.Add(wx.StaticText(self, -1, self.utility.lang.get('maxuploads')), 0, wx.ALIGN_CENTER_VERTICAL) + maxuploadsbox.Add(self.maxupload, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT, 5) + + uploadsection.Add(maxuploadsbox, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + """ + + maxoverall_down_label = wx.BoxSizer(wx.VERTICAL) + maxoverall_down_label.Add(wx.StaticText(self, -1, self.utility.lang.get('maxoveralluploadrate')), 0, wx.ALIGN_CENTER_VERTICAL) + maxoverall_down_label.Add(wx.StaticText(self, -1, self.utility.lang.get('whendownload')), 0, wx.ALIGN_CENTER_VERTICAL) + + self.uploadrate = self.utility.makeNumCtrl(self, 0, integerWidth = 4) + self.uploadrate.SetToolTipString(self.utility.lang.get('global_uprate_hint')) + + maxoverall_down = wx.BoxSizer(wx.HORIZONTAL) + maxoverall_down.Add(maxoverall_down_label, 0, wx.ALIGN_CENTER_VERTICAL) + maxoverall_down.Add(self.uploadrate, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT, 5) + maxoverall_down.Add(wx.StaticText(self, -1, self.utility.lang.get('KB') + "/" + self.utility.lang.get('l_second')), 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT, 3) + + uploadsection.Add(maxoverall_down, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + maxoverall_nodown_label = wx.BoxSizer(wx.VERTICAL) + maxoverall_nodown_label.Add(wx.StaticText(self, -1, self.utility.lang.get('maxoveralluploadrate')), 0, wx.ALIGN_CENTER_VERTICAL) + maxoverall_nodown_label.Add(wx.StaticText(self, -1, self.utility.lang.get('whennodownload')), 0, wx.ALIGN_CENTER_VERTICAL) + + self.seeduploadrate = self.utility.makeNumCtrl(self, 0, integerWidth = 4) + self.seeduploadrate.SetToolTipString(self.utility.lang.get('global_uprate_hint')) + + maxoverall_nodown = wx.BoxSizer(wx.HORIZONTAL) + maxoverall_nodown.Add(maxoverall_nodown_label, 0, 
wx.ALIGN_CENTER_VERTICAL) + maxoverall_nodown.Add(self.seeduploadrate, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT, 5) + maxoverall_nodown.Add(wx.StaticText(self, -1, self.utility.lang.get('KB') + "/" + self.utility.lang.get('l_second')), 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT, 3) + + uploadsection.Add(maxoverall_nodown, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + uploadsection.Add(wx.StaticText(self, -1, self.utility.lang.get('zeroisunlimited')), 0, wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT|wx.ALL, 5) + + sizer.Add(uploadsection, 0, wx.EXPAND|wx.ALL, 5) + + # Download Section + downloadsection_title = wx.StaticBox(self, -1, self.utility.lang.get('downloadsetting')) + downloadsection = wx.StaticBoxSizer(downloadsection_title, wx.VERTICAL) + + self.downloadrate = self.utility.makeNumCtrl(self, 0, integerWidth = 4) + + maxdownoverall_down = wx.BoxSizer(wx.HORIZONTAL) + maxdownoverall_down.Add(wx.StaticText(self, -1, self.utility.lang.get('maxoveralldownloadrate')), 0, wx.ALIGN_CENTER_VERTICAL) + maxdownoverall_down.Add(self.downloadrate, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT, 5) + maxdownoverall_down.Add(wx.StaticText(self, -1, self.utility.lang.get('KB') + "/" + self.utility.lang.get('l_second')), 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT, 3) + + downloadsection.Add(maxdownoverall_down, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + downloadsection.Add(wx.StaticText(self, -1, self.utility.lang.get('zeroisunlimited')), 0, wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT|wx.ALL, 5) + + sizer.Add(downloadsection, 0, wx.EXPAND|wx.ALL, 5) + + self.initTasks() + + def loadValues(self, Read = None): + if Read is None: + Read = self.utility.config.Read + + #self.maxupload.SetValue(Read('maxupload', "int")) + self.uploadrate.SetValue(Read('maxuploadrate', "int")) + self.downloadrate.SetValue(Read('maxdownloadrate', "int")) + self.seeduploadrate.SetValue(Read('maxseeduploadrate', "int")) + + def apply(self): + # Check max upload rate input must be integer + ############################################## + upload_rate = int(self.uploadrate.GetValue()) + seedupload_rate = int(self.seeduploadrate.GetValue()) + + download_rate = int(self.downloadrate.GetValue()) + + # Check max upload rate must not be less than 3 kB/s + ###################################################### + if (upload_rate < 3 and upload_rate != 0) or (seedupload_rate < 3 and seedupload_rate != 0): + #display warning + dlg = wx.MessageDialog(self, self.utility.lang.get('uploadrateminwarning'), self.utility.lang.get('error'), wx.ICON_ERROR) + dlg.ShowModal() + dlg.Destroy() + return False + + # Set new value to parameters + ############################## + ##self.utility.config.Write('maxupload', self.maxupload.GetValue()) + self.utility.config.Write('maxuploadrate', upload_rate) + self.utility.config.Write('maxseeduploadrate', seedupload_rate) + + self.utility.config.Write('maxdownloadrate', download_rate) + + # Change at Runtime + self.utility.ratelimiter.set_global_max_speed(UPLOAD,upload_rate) + self.utility.ratelimiter.set_global_max_speed(DOWNLOAD,download_rate) + self.utility.ratelimiter.set_global_max_seedupload_speed(seedupload_rate) + + +################################################################ +# +# Class: SeedingOptionsPanel +# +# Contains options controlling how long torrents should remain +# seeding. 
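+# Two groups of policies are offered, stored under the 't4t_*'
+# (tit-for-tat) and 'g2g_*' (Give-to-Get) configuration keys; each
+# group includes a seed-for-some-time option and a no-seeding option.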
+# +################################################################ +class SeedingOptionsPanel(ABCOptionPanel): + def __init__(self, parent, dialog): + ABCOptionPanel.__init__(self, parent, dialog) + sizer = self.sizer + + # Added by Boxun + #t4t options + t4t_title = wx.StaticBox(self, -1, self.utility.lang.get('tit-4-tat')) + t4t_section = wx.StaticBoxSizer(t4t_title, wx.VERTICAL) + + # Ratio buttons + self.rb_t4t_no_leeching = wx.RadioButton(self, -1, self.utility.lang.get('no_leeching'), wx.Point(-1, -1), wx.Size(-1, -1), wx.RB_GROUP) + self.rb_t4t_unlimited = wx.RadioButton(self, -1, self.utility.lang.get('unlimited_seeding'), wx.Point(-1, -1), wx.Size(-1, -1)) + self.rb_t4t_until_time = wx.RadioButton(self, -1, self.utility.lang.get('seed_sometime'), wx.Point(-1, -1), wx.Size(-1, -1)) + self.rb_t4t_no_seeding = wx.RadioButton(self, -1, self.utility.lang.get('no_seeding'), wx.Point(-1, -1), wx.Size(-1, -1)) + + self.t4t_rbs = [self.rb_t4t_no_leeching, self.rb_t4t_unlimited, self.rb_t4t_until_time, self.rb_t4t_no_seeding] + + # Seeding ratio option + t4t_section.Add(self.rb_t4t_no_leeching, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + # Unlimited seeding + t4t_section.Add(self.rb_t4t_unlimited, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + # Seeding for sometime + hours_interval = ['0', '1', '2', '3', '5', '10'] + mins_interval = ['15', '30', '45'] + + self.cb_t4t_hours = wx.ComboBox(self, -1, "", wx.Point(-1, -1), + wx.Size(55, -1), hours_interval, wx.CB_DROPDOWN|wx.CB_READONLY) + self.cb_t4t_mins = wx.ComboBox(self, -1, "", wx.Point(-1, -1), + wx.Size(55, -1), mins_interval, wx.CB_DROPDOWN|wx.CB_READONLY) + + t4t_timing_sizer = wx.BoxSizer(wx.HORIZONTAL) + t4t_timing_sizer.Add(self.rb_t4t_until_time, 0, wx.ALIGN_CENTER_VERTICAL) + t4t_timing_sizer.Add(self.cb_t4t_hours, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + t4t_timing_sizer.Add(wx.StaticText(self, -1, self.utility.lang.get('seed_hours')), 0, wx.ALIGN_CENTER_VERTICAL) + t4t_timing_sizer.Add(self.cb_t4t_mins, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + t4t_timing_sizer.Add(wx.StaticText(self, -1, self.utility.lang.get('seed_mins')), 0, wx.ALIGN_CENTER_VERTICAL) + + t4t_section.Add(t4t_timing_sizer, -1, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + # No seeding (That's evil, don't touch it!) 
+ t4t_section.Add(self.rb_t4t_no_seeding, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + sizer.Add(t4t_section, 0, wx.EXPAND|wx.ALL, 5) + + #g2g options + g2g_title = wx.StaticBox(self, -1, self.utility.lang.get('give-2-get')) + g2g_section = wx.StaticBoxSizer(g2g_title, wx.VERTICAL) + + # Ratio buttons + self.rb_g2g_large_ratio = wx.RadioButton(self, -1, self.utility.lang.get('seed_for_large_ratio'), wx.Point(-1, -1), wx.Size(-1, -1), wx.RB_GROUP) + self.rb_g2g_boost_rep = wx.RadioButton(self, -1, self.utility.lang.get('boost__reputation'), wx.Point(-1, -1), wx.Size(-1, -1)) + self.rb_g2g_until_time = wx.RadioButton(self, -1, self.utility.lang.get('seed_sometime'), wx.Point(-1, -1), wx.Size(-1, -1)) + self.rb_g2g_no_seeding = wx.RadioButton(self, -1, self.utility.lang.get('no_seeding'), wx.Point(-1, -1), wx.Size(-1, -1)) + + self.g2g_rbs = [self.rb_g2g_large_ratio, self.rb_g2g_boost_rep, self.rb_g2g_until_time, self.rb_g2g_no_seeding] + + # Seeding ratio option + g2g_ratio = ['50', '75', '100', '150', '200', '300', '500'] + self.cb_g2g_ratio = wx.ComboBox(self, -1, "", + wx.Point(-1, -1), wx.Size(65, -1), g2g_ratio, wx.CB_DROPDOWN|wx.CB_READONLY) + + g2g_ratio_sizer = wx.BoxSizer(wx.HORIZONTAL) + g2g_ratio_sizer.Add(self.rb_g2g_large_ratio, 0, wx.ALIGN_CENTER_VERTICAL) + g2g_ratio_sizer.Add(self.cb_g2g_ratio, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT, 5) + g2g_ratio_sizer.Add(wx.StaticText(self, -1, "%"), 0, wx.ALIGN_CENTER_VERTICAL) + + g2g_section.Add(g2g_ratio_sizer, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + # boost your reputation + g2g_section.Add(self.rb_g2g_boost_rep, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + # Seeding for sometime + self.cb_g2g_hours = wx.ComboBox(self, -1, "", wx.Point(-1, -1), + wx.Size(55, -1), hours_interval, wx.CB_DROPDOWN|wx.CB_READONLY) + self.cb_g2g_mins = wx.ComboBox(self, -1, "", wx.Point(-1, -1), + wx.Size(55, -1), mins_interval, wx.CB_DROPDOWN|wx.CB_READONLY) + + g2g_timing_sizer = wx.BoxSizer(wx.HORIZONTAL) + g2g_timing_sizer.Add(self.rb_g2g_until_time, 0, wx.ALIGN_CENTER_VERTICAL) + g2g_timing_sizer.Add(self.cb_g2g_hours, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + g2g_timing_sizer.Add(wx.StaticText(self, -1, self.utility.lang.get('seed_hours')), 0, wx.ALIGN_CENTER_VERTICAL) + g2g_timing_sizer.Add(self.cb_g2g_mins, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + g2g_timing_sizer.Add(wx.StaticText(self, -1, self.utility.lang.get('seed_mins')), 0, wx.ALIGN_CENTER_VERTICAL) + + g2g_section.Add(g2g_timing_sizer, -1, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + # No seeding (That's evil, don't touch it!) 
+ g2g_section.Add(self.rb_g2g_no_seeding, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + sizer.Add(g2g_section, 0, wx.EXPAND|wx.ALL, 5) + # ~ Boxun + + self.initTasks() + + def loadValues(self, Read = None): + # Load settings from dlConfig + try: + if Read is None: + Read = self.utility.config.Read + + self.t4t_rbs[Read('t4t_option', "int")].SetValue(True) + self.cb_t4t_hours.SetValue(str(Read('t4t_hours', "int"))) + self.cb_t4t_mins.SetValue(str(Read('t4t_hours', "int"))) + self.g2g_rbs[Read('g2g_option', "int")].SetValue(True) + self.cb_g2g_ratio.SetValue(str(Read('g2g_ratio', "int"))) + self.cb_g2g_hours.SetValue(str(Read('g2g_hours', "int"))) + self.cb_g2g_mins.SetValue(str(Read('g2g_mins', "int"))) + + except: + print_exc() + + def apply(self): + try: + # tit-4-tat + for i in range (4): + if self.t4t_rbs[i].GetValue(): + self.utility.config.Write('t4t_option', i) + break + + self.utility.config.Write("t4t_hours", self.cb_t4t_hours.GetValue()) + self.utility.config.Write("t4t_mins", self.cb_t4t_mins.GetValue()) + + # give-2-get + for i in range (4): + if self.g2g_rbs[i].GetValue(): + self.utility.config.Write("g2g_option", i) + break + + self.utility.config.Write("g2g_hours", self.cb_g2g_hours.GetValue()) + self.utility.config.Write("g2g_mins", self.cb_g2g_mins.GetValue()) + self.utility.config.Write("g2g_ratio", self.cb_g2g_ratio.GetValue()) + + except: + print_exc() + + + + + +################################################################ +# +# Class: TriblerPanel +# +# Contains settings for Tribler's features +# +################################################################ +class TriblerPanel(ABCOptionPanel): + def __init__(self, parent, dialog): + ABCOptionPanel.__init__(self, parent, dialog) + sizer = self.sizer + + funcsection_title = wx.StaticBox(self, -1, self.utility.lang.get('corefuncsetting')) + funcsection = wx.StaticBoxSizer(funcsection_title, wx.VERTICAL) + + self.rec_enable = wx.CheckBox(self, -1, self.utility.lang.get('enablerecommender')+" "+self.utility.lang.get('restartabc')) + funcsection.Add(self.rec_enable, 0, wx.ALIGN_LEFT|wx.ALL, 5) + + self.dlhelp_enable = wx.CheckBox(self, -1, self.utility.lang.get('enabledlhelp')+" "+self.utility.lang.get('restartabc')) + funcsection.Add(self.dlhelp_enable, 0, wx.ALIGN_LEFT|wx.ALL, 5) + + self.collect_enable = wx.CheckBox(self, -1, self.utility.lang.get('enabledlcollecting')+" "+self.utility.lang.get('restartabc')) + funcsection.Add(self.collect_enable, 0, wx.ALIGN_LEFT|wx.ALL, 5) + + sizer.Add(funcsection, 0, wx.EXPAND|wx.ALL, 5) + + tcsection_title = wx.StaticBox(self, -1, self.utility.lang.get('torrentcollectsetting')) + tcsection = wx.StaticBoxSizer(tcsection_title, wx.VERTICAL) + + self.timectrl = self.utility.makeNumCtrl(self, 1, min = 1, max = 3600) + time_box = wx.BoxSizer(wx.HORIZONTAL) + time_box.Add(wx.StaticText(self, -1, self.utility.lang.get('torrentcollectsleep')), 0, wx.ALIGN_CENTER_VERTICAL) + time_box.Add(self.timectrl, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + time_box.Add(wx.StaticText(self, -1, self.utility.lang.get('restartabc')), 0, wx.ALIGN_CENTER_VERTICAL) + tcsection.Add(time_box, 0, wx.EXPAND|wx.ALL, 5) + + ntorrents_box = wx.BoxSizer(wx.HORIZONTAL) # set the max num of torrents to collect + self.ntorrents = self.utility.makeNumCtrl(self, 5000, min = 0, max = 999999) + ntorrents_box.Add(wx.StaticText(self, -1, self.utility.lang.get('maxntorrents')), 0, wx.ALIGN_CENTER_VERTICAL) + ntorrents_box.Add(self.ntorrents, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + 
tcsection.Add(ntorrents_box, 0, wx.EXPAND|wx.ALL, 5) + + npeers_box = wx.BoxSizer(wx.HORIZONTAL) # set the max num of peers to be used by buddycast + self.npeers = self.utility.makeNumCtrl(self, 2000, min = 0, max = 999999) + npeers_box.Add(wx.StaticText(self, -1, self.utility.lang.get('maxnpeers')), 0, wx.ALIGN_CENTER_VERTICAL) + npeers_box.Add(self.npeers, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + tcsection.Add(npeers_box, 0, wx.EXPAND|wx.ALL, 5) + + tc_threshold_box = wx.BoxSizer(wx.HORIZONTAL) # set the min space to stop torrent collecting + self.tc_threshold = self.utility.makeNumCtrl(self, 200, min = 0, max = 999999) + tc_threshold_box.Add(wx.StaticText(self, -1, self.utility.lang.get('tc_threshold')), 0, wx.ALIGN_CENTER_VERTICAL) + tc_threshold_box.Add(self.tc_threshold, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + tc_threshold_box.Add(wx.StaticText(self, -1, self.utility.lang.get('MB')), 0, wx.ALIGN_CENTER_VERTICAL) + tc_threshold_box.Add(wx.StaticText(self, -1, ' ('+self.utility.lang.get('current_free_space')+' '), 0, wx.ALIGN_CENTER_VERTICAL) + + current_free_space = getfreespace(self.utility.session.get_download_help_dir())/(2**20) + tc_threshold_box.Add(wx.StaticText(self, -1, str(current_free_space)), 0, wx.ALIGN_CENTER_VERTICAL) + tc_threshold_box.Add(wx.StaticText(self, -1, self.utility.lang.get('MB')+')'), 0, wx.ALIGN_CENTER_VERTICAL) + tcsection.Add(tc_threshold_box, 0, wx.EXPAND|wx.ALL, 5) + + tc_rate_box = wx.BoxSizer(wx.HORIZONTAL) # set the rate of torrent collecting + self.tc_rate = self.utility.makeNumCtrl(self, 5, min = 0, max = 999999) + tc_rate_box.Add(wx.StaticText(self, -1, self.utility.lang.get('torrentcollectingrate')), 0, wx.ALIGN_CENTER_VERTICAL) + tc_rate_box.Add(self.tc_rate, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + tcsection.Add(tc_rate_box, 0, wx.EXPAND|wx.ALL, 5) + + sizer.Add(tcsection, 0, wx.EXPAND|wx.ALL, 5) + + myinfosection_title = wx.StaticBox(self, -1, self.utility.lang.get('myinfosetting')) + myinfosection = wx.StaticBoxSizer(myinfosection_title, wx.VERTICAL) + + # Show PermID + mypermid = self.utility.session.get_permid() + pb64 = show_permid(mypermid) + if True: + # Make it copy-and-paste able + permid_box = wx.BoxSizer(wx.HORIZONTAL) + self.permidctrl = wx.TextCtrl(self, -1, pb64, size = (400, 30), style = wx.TE_READONLY) + permid_box.Add(wx.StaticText(self, -1, self.utility.lang.get('mypermid')), 0, wx.ALIGN_CENTER_VERTICAL) + permid_box.Add(self.permidctrl, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + myinfosection.Add(permid_box, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + else: + permid_txt = self.utility.lang.get('mypermid')+": "+pb64 + label = wx.StaticText(self, -1, permid_txt ) + myinfosection.Add( label, 1, wx.EXPAND|wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + self.myinfo = wx.Button(self, -1, self.utility.lang.get('myinfo') + "...") + self.Bind(wx.EVT_BUTTON, self.OnMyInfoWizard, self.myinfo) + myinfosection.Add(self.myinfo, 0, wx.ALL, 5) + + sizer.Add(myinfosection, 0, wx.EXPAND|wx.ALL, 5) + + if self.utility.frame.oldframe is not None: + self.debug = wx.Button(self, -1, 'Open debug window') + sizer.Add(self.debug, 0, wx.ALL, 5) + self.Bind(wx.EVT_BUTTON, self.OnDebug, self.debug) + + self.initTasks() + + + def loadValues(self, Read = None): + """ Loading values from configure file """ + + buddycast = self.utility.session.get_buddycast() + coopdl = self.utility.session.get_download_help() + torrcoll = self.utility.session.get_torrent_collecting() + maxcolltorrents = 
self.utility.session.get_torrent_collecting_max_torrents() + maxbcpeers = self.utility.session.get_buddycast_max_peers() + stopcollthres = self.utility.session.get_stop_collecting_threshold() + collrate = self.utility.session.get_torrent_collecting_rate() + + self.rec_enable.SetValue(buddycast) + self.dlhelp_enable.SetValue(coopdl) + self.collect_enable.SetValue(torrcoll) + self.ntorrents.SetValue(maxcolltorrents) + self.npeers.SetValue(maxbcpeers) + self.tc_threshold.SetValue(stopcollthres) + self.tc_rate.SetValue(collrate) + + # For subscriptions + self.timectrl.SetValue(self.utility.config.Read('torrentcollectsleep', 'int')) + + + + def apply(self): + """ do sth. when user click apply of OK button """ + + buddycast = self.rec_enable.GetValue() + coopdl = self.dlhelp_enable.GetValue() + torrcoll = self.collect_enable.GetValue() + maxcolltorrents = int(self.ntorrents.GetValue()) + maxbcpeers = int(self.npeers.GetValue()) + stopcollthres = int(self.tc_threshold.GetValue()) + collrate = int(self.tc_rate.GetValue()) + + + # Save SessConfig + state_dir = self.utility.session.get_state_dir() + cfgfilename = Session.get_default_config_filename(state_dir) + scfg = SessionStartupConfig.load(cfgfilename) + + for target in [scfg,self.utility.session]: + try: + target.set_buddycast(buddycast) + except: + print_exc() + try: + target.set_download_help(coopdl) + except: + print_exc() + try: + target.set_torrent_collecting(torrcoll) + except: + print_exc() + try: + target.set_torrent_collecting_max_torrents(maxcolltorrents) + except: + print_exc() + try: + target.set_buddycast_max_peers(maxbcpeers) + except: + print_exc() + try: + target.set_stop_collecting_threshold(stopcollthres) + except: + print_exc() + try: + target.set_torrent_collecting_rate(collrate) + except: + print_exc() + + scfg.save(cfgfilename) + + # For subscriptions + t = int(self.timectrl.GetValue()) + self.utility.config.Write('torrentcollectsleep', t) + + + def OnMyInfoWizard(self, event = None): + wizard = MyInfoWizard(self) + wizard.RunWizard(wizard.getFirstPage()) + + def WizardFinished(self,wizard): + wizard.Destroy() + + def OnDebug(self,event): + self.utility.frame.oldframe.Show() + +# HERE + +################################################################ +# +# Class: VideoPanel +# +# Contains settings for video features +# +################################################################ +class VideoPanel(ABCOptionPanel): + def __init__(self, parent, dialog): + ABCOptionPanel.__init__(self, parent, dialog) + sizer = self.sizer + + playbacksection_title = wx.StaticBox(self, -1, self.utility.lang.get('playback_section')) + playbacksection = wx.StaticBoxSizer(playbacksection_title, wx.VERTICAL) + + playbackbox = wx.BoxSizer(wx.HORIZONTAL) + feasible = return_feasible_playback_modes(self.utility.getPath()) + playback_choices = [] + self.playback_indices = [] + if PLAYBACKMODE_INTERNAL in feasible: + playback_choices.append(self.utility.lang.get('playback_internal')) + self.playback_indices.append(PLAYBACKMODE_INTERNAL) + if PLAYBACKMODE_EXTERNAL_DEFAULT in feasible: + playback_choices.append(self.utility.lang.get('playback_external_default')) + self.playback_indices.append(PLAYBACKMODE_EXTERNAL_DEFAULT) + if PLAYBACKMODE_EXTERNAL_MIME in feasible: + playback_choices.append(self.utility.lang.get('playback_external_mime')) + self.playback_indices.append(PLAYBACKMODE_EXTERNAL_MIME) + self.playback_chooser=wx.Choice(self, -1, wx.Point(-1, -1), wx.Size(-1, -1), playback_choices) + + playbackbox.Add(wx.StaticText(self, -1, 
self.utility.lang.get('playback_mode')), 1, wx.ALIGN_CENTER_VERTICAL) + playbackbox.Add(self.playback_chooser) + playbacksection.Add(playbackbox, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + + player_box = wx.BoxSizer(wx.HORIZONTAL) + self.player = wx.TextCtrl(self, -1, "") + player_box.Add(wx.StaticText(self, -1, self.utility.lang.get('videoplayer_default_path')), 0, wx.ALIGN_CENTER_VERTICAL) + player_box.Add(self.player, 1, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + #browsebtn = wx.Button(self, -1, "...", style = wx.BU_EXACTFIT) + browsebtn = wx.Button(self, -1, "...") + self.Bind(wx.EVT_BUTTON, self.onBrowsePlayer, browsebtn) + player_box.Add(browsebtn, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + playbacksection.Add(player_box, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT|wx.EXPAND, 5) + + sizer.Add(playbacksection, 0, wx.EXPAND|wx.ALL, 5) + + analysissection_title = wx.StaticBox(self, -1, self.utility.lang.get('analysis_section')) + analysissection = wx.StaticBoxSizer(analysissection_title, wx.VERTICAL) + + analyser_box = wx.BoxSizer(wx.HORIZONTAL) + self.analyser = wx.TextCtrl(self, -1, "") + analyser_box.Add(wx.StaticText(self, -1, self.utility.lang.get('videoanalyserpath')), 0, wx.ALIGN_CENTER_VERTICAL) + analyser_box.Add(self.analyser, 1, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + #browsebtn = wx.Button(self, -1, "...", style = wx.BU_EXACTFIT) + browsebtn = wx.Button(self, -1, "...") + self.Bind(wx.EVT_BUTTON, self.onBrowseAnalyser, browsebtn) + analyser_box.Add(browsebtn, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + analysissection.Add(analyser_box, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT|wx.EXPAND, 5) + + sizer.Add(analysissection, 0, wx.EXPAND|wx.ALL, 5) + + restarttxt = wx.StaticText(self, -1, self.utility.lang.get('restartabc')) + sizer.Add(restarttxt, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT,5) + + if sys.platform == 'win32': + self.quote = '"' + else: + self.quote = "'" + + self.initTasks() + + def loadValues(self, Read = None): + if Read is None: + Read = self.utility.config.Read + + mode = Read('videoplaybackmode', "int") + for index in self.playback_indices: + if index == mode: + self.playback_chooser.SetSelection(index) + + value = Read('videoplayerpath') + qvalue = self.quote_path(value) + self.player.SetValue(qvalue) + + value = self.utility.session.get_video_analyser_path() + qvalue = self.quote_path(value) + self.analyser.SetValue(qvalue) + + + def apply(self): + + value = self.playback_chooser.GetSelection() + mode = self.playback_indices[value] + self.utility.config.Write('videoplaybackmode',mode) + + for key,widget,mainmsg in [('videoplayerpath',self.player,self.utility.lang.get('videoplayernotfound'))]: + qvalue = widget.GetValue() + value = self.unquote_path(qvalue) + if not os.access(value,os.F_OK): + self.onError(mainmsg,value) + return + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","abcoptions: VideoPanel: Writing",key,value + self.utility.config.Write(key,value) + + # videoanalyserpath is a config parameter of the Session + vapath = None + for key,widget,mainmsg in[('videoanalyserpath',self.analyser,self.utility.lang.get('videoanalysernotfound'))]: + qvalue = widget.GetValue() + value = self.unquote_path(qvalue) + vapath = value + if not os.access(value,os.F_OK): + self.onError(mainmsg,value) + return + + # Save SessConfig + state_dir = self.utility.session.get_state_dir() + cfgfilename = Session.get_default_config_filename(state_dir) + scfg = SessionStartupConfig.load(cfgfilename) + + 
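+ # As elsewhere in this dialog: apply the new value to the running
+ # Session and also persist it in the SessionStartupConfig on disk,
+ # logging (rather than propagating) any setter failure.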
for target in [scfg,self.utility.session]: + try: + target.set_video_analyser_path(vapath) + except: + print_exc() + + scfg.save(cfgfilename) + + + + def unquote_path(self,value): + value.strip() + if value[0] == self.quote: + idx = value.find(self.quote,1) + return value[1:idx] + else: + return value + + def quote_path(self,value): + value.strip() + if value.find(' ') != -1: + return self.quote+value+self.quote + else: + return value + + + def onError(self,mainmsg,path): + msg = mainmsg + msg += '\n' + msg += path + msg += '\n' + dlg = wx.MessageDialog(None, msg, self.utility.lang.get('videoplayererrortitle'), wx.OK|wx.ICON_ERROR) + result = dlg.ShowModal() + dlg.Destroy() + + def onBrowsePlayer(self, event = None): + self.onBrowse(self.player,self.utility.lang.get('choosevideoplayer')) + + def onBrowseAnalyser(self, event = None): + self.onBrowse(self.analyser,self.utility.lang.get('choosevideoanalyser')) + + def onBrowse(self,widget,title): + dlg = wx.FileDialog(self.utility.frame, + title, + style = wx.OPEN | wx.FILE_MUST_EXIST) + if dlg.ShowModal() == wx.ID_OK: + value = dlg.GetPath() + qvalue = self.quote_path(value) + widget.SetValue(qvalue) + dlg.Destroy() + + +################################################################ +# +# Class: ABCTree +# +# A collapsable listing of all the options panels +# +################################################################ +class ABCTree(wx.TreeCtrl): + def __init__(self, parent, dialog): + style = wx.TR_DEFAULT_STYLE | wx.TR_HIDE_ROOT + wx.TreeCtrl.__init__(self, parent, -1, style = style) + + self.dialog = dialog + self.utility = dialog.utility + + self.root = self.AddRoot("Preferences") + + self.tribler = self.AppendItem(self.root, self.utility.lang.get('triblersetting')) + self.video = self.AppendItem(self.root, self.utility.lang.get('videosetting')) + self.ratelimits = self.AppendItem(self.root, self.utility.lang.get('ratelimits')) + self.seedingoptions = self.AppendItem(self.root, self.utility.lang.get('seedoptions')) + #self.queuesetting = self.AppendItem(self.root, self.utility.lang.get('queuesetting')) + #self.timeout = self.AppendItem(self.root, self.utility.lang.get('timeout')) + self.disk = self.AppendItem(self.root, self.utility.lang.get('disksettings')) + self.advanceddisk = self.AppendItem(self.disk, self.utility.lang.get('advanced')) + self.network = self.AppendItem(self.root, self.utility.lang.get('networksetting')) + self.advancednetwork = self.AppendItem(self.network, self.utility.lang.get('advanced')) + + #self.display = self.AppendItem(self.root, self.utility.lang.get('displaysetting')) + + #self.colors = self.AppendItem(self.display, self.utility.lang.get('torrentcolors')) + + self.misc = self.AppendItem(self.root, self.utility.lang.get('miscsetting')) + + self.treeMap = {self.ratelimits : self.dialog.rateLimitPanel, + self.seedingoptions : self.dialog.seedingOptionsPanel, + #self.queuesetting : self.dialog.queuePanel, + #self.timeout : self.dialog.schedulerRulePanel, + self.network : self.dialog.networkPanel, + self.misc : self.dialog.miscPanel, + self.tribler : self.dialog.triblerPanel, + self.video : self.dialog.videoPanel, + #self.display : self.dialog.displayPanel, + #self.colors : self.dialog.colorPanel, + self.disk : self.dialog.diskPanel } + + self.treeMap[self.advancednetwork] = self.dialog.advancedNetworkPanel + self.treeMap[self.advanceddisk] = self.dialog.advancedDiskPanel + + self.Bind(wx.EVT_TREE_SEL_CHANGED, self.onSwitchPage) + + self.SetAutoLayout(True) + self.Fit() + + def onSwitchPage(self, event = 
None): + if self.dialog.closing or event is None: + return + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","abcoption: event type:", event.GetEventType() + newitem = event.GetItem() + newpanel = None + foundnew = False + for key in self.treeMap: + if key == newitem: + newpanel = self.treeMap[key] + foundnew = True + if foundnew: + break + + if newpanel is not None: + # Trying to switch to the current window + try: + oldpanel = self.dialog.splitter.GetWindow2() + if oldpanel != newpanel: + oldpanel.Show(False) + self.dialog.splitter.ReplaceWindow(oldpanel, newpanel) + newpanel.Show(True) + newpanel.changed = True + except: + pass + # TODO: for some reason this is sometimes failing + # (splitter.GetWindow2() sometimes appears to + # return an Object rather than wx.Window) + + def open(self,name): + rootid = self.GetRootItem() + if rootid.IsOk(): + #print "root is",self.GetItemText(rootid) + [firstid,cookie] = self.GetFirstChild(rootid) + if firstid.IsOk(): + print "first is",self.GetItemText(firstid) + if not self.doopen(name,firstid): + while True: + [childid,cookie] = self.GetNextChild(firstid,cookie) + if childid.IsOk(): + if self.doopen(name,childid): + break + else: + break + + def doopen(self,wantname,childid): + gotname = self.GetItemText(childid) + print "gotname is",gotname + if gotname == wantname: + self.SelectItem(childid) + return True + else: + return False + + +################################################################ +# +# Class: ABCOptionDialog +# +# Creates a dialog that allows users to set various preferences +# +################################################################ +class ABCOptionDialog(wx.Dialog): + def __init__(self, parent,openname=None): + self.utility = parent.utility + + style = wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER +# size = wx.Size(530, 420) + + size, split = self.getWindowSettings() + + wx.Dialog.__init__(self, parent, -1, self.utility.lang.get('abcpreference'), size = size, style = style) + + self.splitter = wx.SplitterWindow(self, -1, style = wx.SP_NOBORDER | wx.SP_LIVE_UPDATE) + + self.rateLimitPanel = RateLimitPanel(self.splitter, self) + self.seedingOptionsPanel = SeedingOptionsPanel(self.splitter, self) + #self.queuePanel = QueuePanel(self.splitter, self) + + #self.schedulerRulePanel = SchedulerRulePanel(self.splitter, self) + self.networkPanel = NetworkPanel(self.splitter, self) + self.miscPanel = MiscPanel(self.splitter, self) + self.triblerPanel = TriblerPanel(self.splitter, self) + self.videoPanel = VideoPanel(self.splitter, self) + self.diskPanel = DiskPanel(self.splitter, self) + + self.advancedNetworkPanel = AdvancedNetworkPanel(self.splitter, self) + self.advancedDiskPanel = AdvancedDiskPanel(self.splitter, self) + + self.tree = ABCTree(self.splitter, self) + + # TODO: Try wx.Listbook instead of splitterwindow + + self.splitter.SetAutoLayout(True) + self.splitter.Fit() + + applybtn = wx.Button(self, -1, " "+self.utility.lang.get('apply')+" ", size = (60, -1)) + okbtn = wx.Button(self, -1, " "+self.utility.lang.get('ok')+" ", size = (60, -1)) + cancelbtn = wx.Button(self, -1, " "+self.utility.lang.get('cancel')+" ", size = (60, -1)) + + buttonbox = wx.BoxSizer(wx.HORIZONTAL) + buttonbox.Add(applybtn, 0, wx.ALL, 5) + buttonbox.Add(okbtn, 0, wx.ALL, 5) + buttonbox.Add(cancelbtn, 0, wx.ALL, 5) + + outerbox = wx.BoxSizer(wx.VERTICAL) + outerbox.Add(self.splitter , 1, wx.EXPAND|wx.LEFT|wx.RIGHT|wx.TOP, 5) + + outerbox.Add(wx.StaticLine(self, -1), 0, wx.EXPAND|wx.LEFT|wx.RIGHT|wx.BOTTOM, 5) + + 
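+ # The Apply/OK/Cancel row sits right-aligned under the separator:
+ # OK applies the viewed panels and closes, Apply only applies,
+ # Cancel closes without applying anything.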
outerbox.Add(buttonbox, 0, wx.ALIGN_RIGHT) + + # Add events + ########################### + self.Bind(wx.EVT_BUTTON, self.onOK, okbtn) + self.Bind(wx.EVT_BUTTON, self.onApply, applybtn) + self.Bind(wx.EVT_BUTTON, self.onCloseGlobalPref, cancelbtn) + self.Bind(wx.EVT_CLOSE, self.onCloseGlobalPref) + + defaultPanel = self.triblerPanel + + self.splitter.SplitVertically(self.tree, defaultPanel, split) + defaultPanel.changed = True + self.splitter.SetMinimumPaneSize(50) + + for key in self.tree.treeMap: + panel = self.tree.treeMap[key] + panel.Show(False) + + defaultPanel.Show(True) + defaultPanel.Fit() + + self.SetSizer(outerbox) +# self.Fit() + + self.closing = False + if openname is not None: + self.tree.open(openname) + + treeitem = [k for (k,v) in self.tree.treeMap.iteritems() if v == defaultPanel][0] + self.tree.SelectItem( treeitem, True ) + + def getWindowSettings(self): + width = self.utility.config.Read("prefwindow_width", "int") + height = self.utility.config.Read("prefwindow_height", "int") + split = self.utility.config.Read("prefwindow_split", "int") + + return wx.Size(width, height), split + + def saveWindowSettings(self): + width, height = self.GetSizeTuple() + self.utility.config.Write("prefwindow_width", width) + self.utility.config.Write("prefwindow_height", height) + self.utility.config.Write("prefwindow_split", self.splitter.GetSashPosition()) + self.utility.config.Flush() + + def onCloseGlobalPref(self, event = None): + self.closing = True + + self.saveWindowSettings() + + self.EndModal(wx.ID_CANCEL) + + def onApply(self, event = None): + # Set new value to parameters + ############################## + + # Only apply changes for panels that the user has viewed + for key in self.tree.treeMap: + panel = self.tree.treeMap[key] + if panel.changed: + panel.apply() + + # write current changes to disk + self.utility.config.Flush() + + return True + + def onOK(self, event = None): + if self.onApply(): + self.closing = True + self.saveWindowSettings() + + self.EndModal(wx.ID_OK) + + + + diff --git a/tribler-mod/Tribler/Main/Dialogs/abcoption.py.bak b/tribler-mod/Tribler/Main/Dialogs/abcoption.py.bak new file mode 100644 index 0000000..3c2cd67 --- /dev/null +++ b/tribler-mod/Tribler/Main/Dialogs/abcoption.py.bak @@ -0,0 +1,1656 @@ +# Written by ABC authors and Arno Bakker +# see LICENSE.txt for license information + +# TODO: +# - Adhere to SeedingOptions. 
Wait on Jelle checkin +# - Make Core adhere to diskfullthreshold + +import sys +import wx +import os + +from traceback import print_exc + +from Tribler.Main.Utility.constants import * #IGNORE:W0611 +from Tribler.Main.globals import DefaultDownloadStartupConfig,get_default_dscfg_filename + +from Tribler.Main.Dialogs.socnetmyinfo import MyInfoWizard +from Tribler.Video.VideoPlayer import * + +from Tribler.Core.API import * +from Tribler.Core.Utilities.utilities import show_permid +from Tribler.Core.osutils import getfreespace + +from Tribler.Core.defaults import * + +DEBUG = False + + +################################################################ +# +# Class: ABCOptionPanel +# +# Basic structure for options window panels +# +# Adds a button for "Restore Defaults" +# at the bottom of each panel +# +################################################################ +class ABCOptionPanel(wx.Panel): + def __init__(self, parent, dialog): + wx.Panel.__init__(self, parent, -1) + + self.dialog = dialog + self.utility = dialog.utility + + self.changed = False + + self.outersizer = wx.BoxSizer(wx.VERTICAL) + + self.sizer = wx.BoxSizer(wx.VERTICAL) + + self.defaultDLConfig = DefaultDownloadStartupConfig.getInstance() + + # Things to do after the subclass has finished its init stage + def initTasks(self): + self.loadValues() + + self.outersizer.Add(self.sizer, 1, wx.EXPAND) + + defaultsButton = wx.Button(self, -1, self.utility.lang.get('reverttodefault')) + wx.EVT_BUTTON(self, defaultsButton.GetId(), self.setDefaults) + self.outersizer.Add(defaultsButton, 0, wx.ALIGN_RIGHT|wx.TOP|wx.BOTTOM, 10) + + self.SetSizerAndFit(self.outersizer) + + def loadValues(self, Read = None): + # Dummy function that class members should override + pass + + def setDefaults(self, event = None): + self.loadValues(self.utility.config.ReadDefault) + + def apply(self): + # Dummy function that class members should override + pass + + +################################################################ +# +# Class: NetworkPanel +# +# Contains network settings +# +################################################################ +class NetworkPanel(ABCOptionPanel): + def __init__(self, parent, dialog): + ABCOptionPanel.__init__(self, parent, dialog) + sizer = self.sizer + + ip = self.utility.session.get_external_ip() + ip_txt = self.utility.lang.get('currentdiscoveredipaddress')+": "+ip + label = wx.StaticText(self, -1, ip_txt ) + sizer.Add( label, 0, wx.EXPAND|wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + + self.minport = self.utility.makeNumCtrl(self, 1, min = 1, max = 65536) + port_box = wx.BoxSizer(wx.HORIZONTAL) + port_box.Add(wx.StaticText(self, -1, self.utility.lang.get('portnumber')), 0, wx.ALIGN_CENTER_VERTICAL) + port_box.Add(self.minport, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + port_box.Add(wx.StaticText(self, -1, self.utility.lang.get('restartabc')), 0, wx.ALIGN_CENTER_VERTICAL) + + sizer.Add(port_box, 0, wx.EXPAND|wx.ALL, 5) + + self.kickban = wx.CheckBox(self, -1, self.utility.lang.get('kickban')) + sizer.Add(self.kickban, 0, wx.ALIGN_LEFT|wx.ALL, 5) + + # Do or don't get scrape data + ################################################################### + self.scrape = wx.CheckBox(self, -1, self.utility.lang.get('scrape')) + sizer.Add(self.scrape, 0, wx.ALIGN_LEFT|wx.ALL, 5) + self.scrape.SetToolTipString(self.utility.lang.get('scrape_hint')) + + ################################################################### + #self.ipv6 = wx.CheckBox(self, -1, "Initiate and receive connections via IPv6") + #if 
self.utility.config.Read('ipv6') == "1": + # self.ipv6.SetValue(True) + #else: + # self.ipv6.SetValue(False) + #################################################################### + + # URL of internal tracker, user should use it in annouce box / announce-list + itrack_box = wx.BoxSizer(wx.HORIZONTAL) + self.itrack = wx.TextCtrl(self, -1, "") + itrack_box.Add(wx.StaticText(self, -1, self.utility.lang.get('internaltrackerurl')), 0, wx.ALIGN_CENTER_VERTICAL) + itrack_box.Add(self.itrack, 1, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT|wx.EXPAND, 5) + sizer.Add(itrack_box, 0, wx.ALIGN_LEFT|wx.ALL|wx.EXPAND, 5) + + self.initTasks() + + def loadValues(self, Read = None): + + self.minport.SetValue(self.utility.session.get_listen_port()) + itrackerurl = self.utility.session.get_internal_tracker_url() + self.itrack.SetValue(itrackerurl) + + #self.scrape.SetValue(Read('scrape', "boolean")) # TODO: cannot find it being used + + self.kickban.SetValue(self.defaultDLConfig.get_auto_kick()) + + def apply(self): + minport = int(self.minport.GetValue()) + if minport > 65535: + minport = 65535 + + itrackerurl = self.itrack.GetValue() + + # Save SessionStartupConfig + state_dir = self.utility.session.get_state_dir() + cfgfilename = Session.get_default_config_filename(state_dir) + scfg = SessionStartupConfig.load(cfgfilename) + + for target in [scfg,self.utility.session]: + try: + target.set_listen_port(minport) + except: + print_exc() + try: + target.set_internal_tracker_url(itrackerurl) + except: + print_exc() + + + scfg.save(cfgfilename) + + #self.utility.config.Write('scrape', self.scrape.GetValue(), "boolean") + + kickban = self.kickban.GetValue() + + # Save DownloadStartupConfig + self.defaultDLConfig.set_auto_kick(kickban) + + dlcfgfilename = get_default_dscfg_filename(self.utility.session) + self.defaultDLConfig.save(dlcfgfilename) + + +################################################################ +# +# Class: AdvancedNetworkPanel +# +# Contains advanced network settings +# (defaults should be fine for most users) +# +################################################################ +class AdvancedNetworkPanel(ABCOptionPanel): + def __init__(self, parent, dialog): + ABCOptionPanel.__init__(self, parent, dialog) + sizer = self.sizer + + warningtext = wx.StaticText(self, -1, self.utility.lang.get('changeownrisk')) + sizer.Add(warningtext, 0, wx.ALIGN_CENTER|wx.ALL, 5) + + #self.ipv6bindsv4_data=wx.Choice(self, -1, + # choices = ['separate sockets', 'single socket']) + #self.ipv6bindsv4_data.SetSelection(int(self.advancedConfig['ipv6_binds_v4'])) + + datasizer = wx.FlexGridSizer(cols = 2, vgap = 5, hgap = 10) + + # Local IP + self.ip_data = wx.TextCtrl(self, -1) + datasizer.Add(wx.StaticText(self, -1, self.utility.lang.get('localip')), 1, wx.ALIGN_CENTER_VERTICAL) + datasizer.Add(self.ip_data) + + # IP to Bind to + self.bind_data = wx.TextCtrl(self, -1) + datasizer.Add(wx.StaticText(self, -1, self.utility.lang.get('iptobindto')), 1, wx.ALIGN_CENTER_VERTICAL) + datasizer.Add(self.bind_data) + + # Minimum Peers + self.minpeers_data = wx.SpinCtrl(self, size = wx.Size(60, -1)) + self.minpeers_data.SetRange(10, 100) + datasizer.Add(wx.StaticText(self, -1, self.utility.lang.get('minnumberofpeer')), 1, wx.ALIGN_CENTER_VERTICAL) + datasizer.Add(self.minpeers_data) + + # Maximum Connections + self.maxconnections_data=wx.SpinCtrl(self, size = wx.Size(60, -1)) + self.maxconnections_data.SetRange(0, 1000) + datasizer.Add(wx.StaticText(self, -1, self.utility.lang.get('maxpeerconnection')), 1, 
wx.ALIGN_CENTER_VERTICAL) + datasizer.Add(self.maxconnections_data) + + # UPnP Settings + if (sys.platform == 'win32'): + self.upnp_choices = [ self.utility.lang.get('upnp_0'), + self.utility.lang.get('upnp_1'), + self.utility.lang.get('upnp_2'), + self.utility.lang.get('upnp_3')] + else: + self.upnp_choices = [ self.utility.lang.get('upnp_0'), + self.utility.lang.get('upnp_3')] + self.upnp_data = wx.ComboBox(self, -1, "", wx.Point(-1, -1), wx.Size(-1, -1), self.upnp_choices, wx.CB_DROPDOWN|wx.CB_READONLY) + datasizer.Add(wx.StaticText(self, -1, self.utility.lang.get('upnp')), 1, wx.ALIGN_CENTER_VERTICAL) + datasizer.Add(self.upnp_data) + + + # ut_pex maximum Peers + self.ut_pex_maxaddrs_data = wx.SpinCtrl(self, size = wx.Size(60, -1)) + self.ut_pex_maxaddrs_data.SetRange(0, 1024) + t1 = wx.StaticText(self, -1, self.utility.lang.get('ut_pex_maxaddrs1')) + t2 = wx.StaticText(self, -1, self.utility.lang.get('ut_pex_maxaddrs2')) + tsizer = wx.BoxSizer(wx.VERTICAL) + tsizer.Add(t1, 1, wx.ALIGN_LEFT) + tsizer.Add(t2, 1, wx.ALIGN_LEFT) + datasizer.Add(tsizer, 1, wx.ALIGN_CENTER_VERTICAL) + datasizer.Add(self.ut_pex_maxaddrs_data) + sizer.Add(datasizer, 0, wx.ALL, 5) + + # Set tooltips + self.ip_data.SetToolTipString(self.utility.lang.get('iphint')) + self.bind_data.SetToolTipString(self.utility.lang.get('bindhint')) + self.minpeers_data.SetToolTipString(self.utility.lang.get('minpeershint')) + self.ut_pex_maxaddrs_data.SetToolTipString(self.utility.lang.get('ut_pex_maxaddrs_hint')) + self.maxconnections_data.SetToolTipString(self.utility.lang.get('maxconnectionhint')) + + self.initTasks() + + def loadValues(self, Read = None): + if Read is None: + Read = self.utility.config.Read + session = self.utility.session + + addrlist = session.get_bind_to_addresses() + addrstr = ','.join(addrlist) + + self.ip_data.SetValue(session.get_ip_for_tracker()) + self.bind_data.SetValue(addrstr) + + self.minpeers_data.SetValue(self.defaultDLConfig.get_min_peers()) + self.maxconnections_data.SetValue(self.defaultDLConfig.get_max_conns()) + + upnp_val = session.get_upnp_mode() + selected = self.upnp_val2selected(upnp_val) + self.upnp_data.SetStringSelection(self.upnp_choices[selected]) + + self.ut_pex_maxaddrs_data.SetValue(self.defaultDLConfig.get_ut_pex_max_addrs_from_peer()) + + + def upnp_val2selected(self,upnp_val): + if (sys.platform == 'win32'): + selected = upnp_val + else: + if upnp_val <= 2: + selected = 0 + else: + selected = 1 + return selected + + def selected2upnp_val(self,selected): + if (sys.platform == 'win32'): + upnp_val = selected + else: + if selected == 1: + upnp_val = UPNPMODE_UNIVERSAL_DIRECT + else: + upnp_val = UPNPMODE_DISABLED + return upnp_val + + + def apply(self): + + ip4track = self.ip_data.GetValue() + ip2bind2 = self.bind_data.GetValue() + if ip2bind2.strip(): + ip2bind2list = ip2bind2.split(",") + else: + ip2bind2list = [] + + selected = self.upnp_choices.index(self.upnp_data.GetValue()) + upnp_val = self.selected2upnp_val(selected) + + minpeers = int(self.minpeers_data.GetValue()) + maxconnections = int(self.maxconnections_data.GetValue()) + if maxconnections == 0: + maxinitiate = 2 * minpeers + else: + maxinitiate = min(2 * minpeers, maxconnections) + utmaxaddrs = int(self.ut_pex_maxaddrs_data.GetValue()) + + + # Save SessConfig + state_dir = self.utility.session.get_state_dir() + cfgfilename = Session.get_default_config_filename(state_dir) + scfg = SessionStartupConfig.load(cfgfilename) + + for target in [scfg,self.utility.session]: + try: + target.set_ip_for_tracker(ip4track) + 
except: + print_exc() + try: + target.set_bind_to_addresses(ip2bind2list) + except: + print_exc() + try: + target.set_upnp_mode(upnp_val) + except: + print_exc() + + scfg.save(cfgfilename) + + # Save DownloadStartupConfig + self.defaultDLConfig.set_min_peers(minpeers) + self.defaultDLConfig.set_max_conns(maxconnections) + self.defaultDLConfig.set_max_conns_to_initiate(maxinitiate) + self.defaultDLConfig.set_ut_pex_max_addrs_from_peer(utmaxaddrs) + + dlcfgfilename = get_default_dscfg_filename(self.utility.session) + self.defaultDLConfig.save(dlcfgfilename) + + +################################################################ +# +# Class: QueuePanel +# +# Contains settings that control how many torrents to start +# at once and when to start them +# +################################################################ + +# Arno, 2008-03-27: Currently disabled. Need to write queueing support on top +# of core + + +################################################################ +# +# Class: MiscPanel +# +# Contains settings that don't seem to fit well anywhere else +# +################################################################ +class MiscPanel(ABCOptionPanel): + def __init__(self, parent, dialog): + ABCOptionPanel.__init__(self, parent, dialog) + sizer = self.sizer + + self.trayoptions = [self.utility.lang.get('showtray_never'), + self.utility.lang.get('showtray_min'), + self.utility.lang.get('showtray_always')] + self.mintray = wx.RadioBox(self, + -1, + self.utility.lang.get('showtray'), + wx.DefaultPosition, + wx.DefaultSize, + self.trayoptions, + 3, + wx.RA_SPECIFY_COLS) + + # On the Mac, the option exists but is not shown, to support + # the widget being read & written. + if sys.platform != "darwin": + sizer.Add(self.mintray, 0, wx.ALIGN_LEFT|wx.ALL, 5) + else: + self.mintray.Hide() + + self.confirmonclose = wx.CheckBox(self, -1, self.utility.lang.get('confirmonexit')) + sizer.Add(self.confirmonclose, 0, wx.ALIGN_LEFT|wx.ALL, 5) + + # Registry association (only makes sense under windows) + if (sys.platform == 'win32'): + self.associate = wx.CheckBox(self, -1, self.utility.lang.get('associate')) + sizer.Add(self.associate, 0, wx.ALIGN_LEFT|wx.ALL, 5) + + # Languages option + if self.utility.languages == {}: + self.getLanguages() + self.language_names = [] + self.language_filenames = [] + for item in self.utility.languages: + self.language_names.append(item) + self.language_filenames.append(self.utility.languages[item]) + + self.language_choice = wx.ComboBox(self, -1, "", wx.Point(-1, -1), wx.Size(-1, -1), self.language_names, wx.CB_DROPDOWN|wx.CB_READONLY) + + lang_box = wx.BoxSizer(wx.HORIZONTAL) + lang_box.Add(wx.StaticText(self, -1, self.utility.lang.get('choose_language')), 0, wx.ALIGN_CENTER_VERTICAL) + lang_box.Add(self.language_choice, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + lang_box.Add(wx.StaticText(self, -1, self.utility.lang.get('restartabc')), 0, wx.ALIGN_CENTER_VERTICAL) + sizer.Add(lang_box, 0, wx.ALL, 5) + + self.recategorize_button = wx.Button(self, -1, self.utility.lang.get('recategorize_button')) + self.recategorize_button.Bind(wx.EVT_BUTTON, self.onRecategorize) + + recategorize_box = wx.BoxSizer(wx.HORIZONTAL) + recategorize_box.Add(wx.StaticText(self, -1, self.utility.lang.get('recategorize')), 0, wx.ALIGN_CENTER_VERTICAL) + recategorize_box.Add(self.recategorize_button, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + sizer.Add(recategorize_box, 0, wx.ALL, 5) + + self.initTasks() + + def loadValues(self, Read = None): + if Read is None: + Read = 
self.utility.config.Read + + mintray = Read('mintray', "int") + if mintray >= len(self.trayoptions): + mintray = len(self.trayoptions) - 1 + self.mintray.SetSelection(mintray) + + self.confirmonclose.SetValue(Read('confirmonclose', "boolean")) + + if (sys.platform == 'win32'): + self.associate.SetValue(Read('associate', "boolean")) + + index = self.language_filenames.index(Read('language_file')) + if not self.language_names: + # Should never get here -- this means there are no valid language files found! + sys.stderr.write("\nNO LANGUAGE FILES FOUND! Please add a valid language file\n") + defaultlang = "" + elif (index > -1): + defaultlang = self.language_names[index] + self.language_choice.SetStringSelection(defaultlang) + + def apply(self): + self.utility.config.Write('mintray', self.mintray.GetSelection()) + if self.utility.frame.tbicon is not None: + self.utility.frame.tbicon.updateIcon(False) + + # FIXME: quick hack to prevent Unicode problem, will still give problems + # when French, i.e. "fran\,cais" is selected. + # + val = str(self.language_choice.GetValue()) + langname_index = self.language_names.index(val) + self.utility.config.Write('language_file', self.language_filenames[langname_index]) + + self.utility.config.Write('confirmonclose', self.confirmonclose.GetValue(), "boolean") + + if (sys.platform == 'win32'): + self.utility.config.Write('associate', self.associate.GetValue(), "boolean") + + def getLanguages(self): + langpath = os.path.join(self.utility.getPath(),"Tribler","Lang") + + dirlist = os.listdir(langpath) + dirlist2 = [] + for filename in dirlist: + if (filename[-5:] == '.lang'): + dirlist2.append(filename) + dirlist2.sort() + + # Remove user.lang from the list + try: + dirlist2.remove("user.lang") + except: + pass + + self.utility.languages = {} + + for filename in dirlist2: + filepath = os.path.join(langpath, filename) + + config = wx.FileConfig(localFilename = filepath) + config.SetPath("ABC/language") + if config.Exists('languagename'): + self.utility.languages[config.Read('languagename')] = filename + + def onRecategorize(self, event=None): + #catobj = Category.getInstance() + #torrentdata = self.utility.guiUtility.data_manager.data + #catobj.reSortAll(torrentdata) + # Arno: need to fix Category to make this work again. 
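The getLanguages method above builds the language menu by listing Tribler/Lang, keeping only *.lang files, dropping the per-user user.lang overrides and sorting the rest. A minimal standalone sketch of just that file-discovery step follows; the path and function name are illustrative, and the wx.FileConfig lookup of 'languagename' is left out.

import os

def find_language_files(langpath):
    # Keep only *.lang files, skip the per-user overrides file, sort by name
    # (mirrors the directory scan in getLanguages above).
    names = [f for f in os.listdir(langpath) if f.endswith('.lang')]
    return sorted(f for f in names if f != 'user.lang')

# Illustrative usage; the real code derives langpath from utility.getPath():
# find_language_files(os.path.join('Tribler', 'Lang'))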
+ pass + +################################################################ +# +# Class: DiskPanel +# +# Contains settings related to saving files +# +################################################################ +class DiskPanel(ABCOptionPanel): + def __init__(self, parent, dialog): + ABCOptionPanel.__init__(self, parent, dialog) + sizer = self.sizer + + self.torrentbackup = wx.CheckBox(self, -1, self.utility.lang.get('removebackuptorrent')) + sizer.Add(self.torrentbackup, 0, wx.ALIGN_LEFT|wx.ALL, 5) + + self.defaultdir = wx.StaticText(self, -1, self.utility.lang.get('setdefaultfolder')) + self.dir = wx.TextCtrl(self, -1, "") + browsebtn = wx.Button(self, -1, "...", style = wx.BU_EXACTFIT) + self.Bind(wx.EVT_BUTTON, self.onBrowseDir, browsebtn) + + dirbox = wx.BoxSizer(wx.HORIZONTAL) + dirbox.Add(self.defaultdir, 0, wx.ALIGN_CENTER_VERTICAL) + dirbox.Add(self.dir, 1, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT|wx.EXPAND, 5) + dirbox.Add(browsebtn, 0, wx.ALIGN_CENTER_VERTICAL) + + sizer.Add(dirbox, 0, wx.ALIGN_LEFT|wx.ALL|wx.EXPAND, 5) + + diskfullbox = wx.BoxSizer(wx.HORIZONTAL) + self.diskfullcheckbox = wx.CheckBox(self, -1, self.utility.lang.get('diskfullthreshold')) + self.diskfullthreshold = self.utility.makeNumCtrl(self, 1, integerWidth = 4) + diskfullbox.Add(self.diskfullcheckbox, 0, wx.ALIGN_CENTER_VERTICAL|wx.RIGHT, 5) + diskfullbox.Add(self.diskfullthreshold, 0, wx.ALIGN_CENTER_VERTICAL|wx.RIGHT, 5) + diskfullbox.Add(wx.StaticText(self, -1, self.utility.lang.get('MB')), 0, wx.ALIGN_CENTER_VERTICAL) + + sizer.Add(diskfullbox, 0, wx.ALIGN_LEFT|wx.ALL, 5) + + self.initTasks() + + def loadValues(self, Read = None): + if Read is None: + Read = self.utility.config.Read + + self.dir.SetValue(self.defaultDLConfig.get_dest_dir()) + self.torrentbackup.SetValue(Read('removetorrent', "boolean")) + + diskfullthreshold = Read('diskfullthreshold', "int") # TODO: make sure Core uses this + if diskfullthreshold > 0: + self.diskfullcheckbox.SetValue(True) + self.diskfullthreshold.SetValue(diskfullthreshold) + + def apply(self): + self.utility.config.Write('removetorrent', self.torrentbackup.GetValue(), "boolean") + + if self.diskfullcheckbox.GetValue(): + diskfullthreshold = self.diskfullthreshold.GetValue() + else: + diskfullthreshold = 0 + self.utility.config.Write('diskfullthreshold', diskfullthreshold) + + + # Save DownloadStartupConfig + defaultdestdir = self.dir.GetValue() + self.defaultDLConfig.set_dest_dir(defaultdestdir) + + dlcfgfilename = get_default_dscfg_filename(self.utility.session) + self.defaultDLConfig.save(dlcfgfilename) + + # Save SessionStartupConfig + # Also change torrent collecting dir, which is by default in the default destdir + state_dir = self.utility.session.get_state_dir() + cfgfilename = Session.get_default_config_filename(state_dir) + scfg = SessionStartupConfig.load(cfgfilename) + + dirname = os.path.join(defaultdestdir,STATEDIR_TORRENTCOLL_DIR) + for target in [scfg,self.utility.session]: + try: + target.set_torrent_collecting_dir(dirname) + except: + print_exc() + + scfg.save(cfgfilename) + + + + def onBrowseDir(self, event = None): + dlg = wx.DirDialog(self.utility.frame, + self.utility.lang.get('choosedefaultdownloadfolder'), + style = wx.DD_DEFAULT_STYLE | wx.DD_NEW_DIR_BUTTON) + if dlg.ShowModal() == wx.ID_OK: + self.dir.SetValue(dlg.GetPath()) + dlg.Destroy() + + +################################################################ +# +# Class: AdvancedDiskPanel +# +# Contains advanced settings controlling how data is written to +# and read from disk. 
+# (defaults should be fine for most users) +# +################################################################ +class AdvancedDiskPanel(ABCOptionPanel): + def __init__(self, parent, dialog): + ABCOptionPanel.__init__(self, parent, dialog) + sizer = self.sizer + + warningtext = wx.StaticText(self, -1, self.utility.lang.get('changeownrisk')) + sizer.Add(warningtext, 0, wx.ALIGN_CENTER|wx.ALL, 5) + + datasizer = wx.FlexGridSizer(cols = 2, vgap = 5, hgap = 10) + + # Allocation Type + + alloc_choices = [self.utility.lang.get('alloc_normal'), + self.utility.lang.get('alloc_background'), + self.utility.lang.get('alloc_prealloc'), + self.utility.lang.get('alloc_sparse')] + self.alloc_types = [DISKALLOC_NORMAL, DISKALLOC_BACKGROUND, DISKALLOC_PREALLOCATE, DISKALLOC_SPARSE] + self.alloc_type2int = {} + for i in range(len(self.alloc_types)): + t = self.alloc_types[i] + self.alloc_type2int[t]=i + self.alloctype_data=wx.Choice(self, -1, wx.Point(-1, -1), wx.Size(-1, -1), alloc_choices) + + datasizer.Add(wx.StaticText(self, -1, self.utility.lang.get('diskalloctype')), 1, wx.ALIGN_CENTER_VERTICAL) + datasizer.Add(self.alloctype_data) + + # Allocation Rate + self.allocrate_data = wx.SpinCtrl(self, size = wx.Size(60, -1)) + self.allocrate_data.SetRange(1, 100) + datasizer.Add(wx.StaticText(self, -1, self.utility.lang.get('allocrate')), 1, wx.ALIGN_CENTER_VERTICAL) + + allocrate_box = wx.BoxSizer(wx.HORIZONTAL) + allocrate_box.Add(self.allocrate_data) + allocrate_box.Add(wx.StaticText(self, -1, " " + self.utility.lang.get('mb') + "/" + self.utility.lang.get("l_second")), 1, wx.ALIGN_CENTER_VERTICAL) + + datasizer.Add(allocrate_box) + + # Locking Method + locking_choices = [self.utility.lang.get('lock_never'), + self.utility.lang.get('lock_writing'), + self.utility.lang.get('lock_always')] + self.locking_data=wx.Choice(self, -1, wx.Point(-1, -1), wx.Size(-1, -1), locking_choices) + datasizer.Add(wx.StaticText(self, -1, self.utility.lang.get('filelocking')), 1, wx.ALIGN_CENTER_VERTICAL) + datasizer.Add(self.locking_data) + + # Doublecheck Method + doublecheck_choices = [self.utility.lang.get('check_none'), + self.utility.lang.get('check_double'), + self.utility.lang.get('check_triple')] + self.doublecheck_data=wx.Choice(self, -1, wx.Point(-1, -1), wx.Size(-1, -1), doublecheck_choices) + datasizer.Add(wx.StaticText(self, -1, self.utility.lang.get('extradatachecking')), 1, wx.ALIGN_CENTER_VERTICAL) + datasizer.Add(self.doublecheck_data) + + # Maximum Files Open + self.maxfilesopen_data=wx.SpinCtrl(self, size = wx.Size(60, -1)) + self.maxfilesopen_data.SetRange(0,200) + + datasizer.Add(wx.StaticText(self, -1, self.utility.lang.get('maxfileopen')), 1, wx.ALIGN_CENTER_VERTICAL) + datasizer.Add(self.maxfilesopen_data) + + # Flush data + self.flush_data_enable = wx.CheckBox(self, -1, self.utility.lang.get('flush_data')) + + self.flush_data = wx.SpinCtrl(self, size = wx.Size(60, -1)) + self.flush_data.SetRange(0, 999) + + datasizer.Add(self.flush_data_enable, 0, wx.ALIGN_CENTER_VERTICAL) + + flush_box = wx.BoxSizer(wx.HORIZONTAL) + flush_box.Add(self.flush_data, 0, wx.ALIGN_CENTER_VERTICAL) + flush_box.Add(wx.StaticText(self, -1, self.utility.lang.get('minute_long')), 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT, 5) + + datasizer.Add(flush_box) + + sizer.Add(datasizer, 0, wx.ALL, 5) + + # Disk buffering + buffer_title = wx.StaticBox(self, -1, self.utility.lang.get('bufferdisk')) + buffer = wx.StaticBoxSizer(buffer_title, wx.VERTICAL) + + self.buffer_read_enable = wx.CheckBox(self, -1, self.utility.lang.get('buffer_read')) + 
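The file-locking and extra-data-checking choices above each collapse a pair of booleans in the DownloadStartupConfig (lock_files/lock_while_reading, double/triple check writes) into a three-level selection, and the loadValues/apply methods below convert between the two representations. A minimal sketch of that mapping, with hypothetical function names:

def bools_to_level(enabled, strict):
    # (False, *) -> 0 (never/none), (True, False) -> 1, (True, True) -> 2
    if not enabled:
        return 0
    return 2 if strict else 1

def level_to_bools(level):
    # Inverse mapping used when the user picks an entry in the wx.Choice
    return level >= 1, level > 1

# Round-trip check over the three selections
assert all(bools_to_level(*level_to_bools(i)) == i for i in range(3))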
+ buffer.Add(self.buffer_read_enable, 0, wx.ALL, 5) + sizer.Add(buffer, 0, wx.EXPAND|wx.ALL, 5) + + self.alloctype_data.SetToolTipString(self.utility.lang.get('alloctypehint')) + self.allocrate_data.SetToolTipString(self.utility.lang.get('allocratehint')) + self.locking_data.SetToolTipString(self.utility.lang.get('lockinghint')) + self.doublecheck_data.SetToolTipString(self.utility.lang.get('doublecheckhint')) + self.maxfilesopen_data.SetToolTipString(self.utility.lang.get('maxfileopenhint')) + + self.initTasks() + + def loadValues(self, Read = None): + if Read is None: + Read = self.utility.config.Read + + alloctype = self.defaultDLConfig.get_alloc_type() + alloc_selection = self.alloc_type2int[alloctype] + self.alloctype_data.SetSelection(alloc_selection) + + self.allocrate_data.SetValue(self.defaultDLConfig.get_alloc_rate()) + + lockfiles = self.defaultDLConfig.get_lock_files() + lockread = self.defaultDLConfig.get_lock_while_reading() + if lockfiles: + if lockread: + self.locking_data.SetSelection(2) + else: + self.locking_data.SetSelection(1) + else: + self.locking_data.SetSelection(0) + + doublecheck = self.defaultDLConfig.get_double_check_writes() + triplecheck = self.defaultDLConfig.get_triple_check_writes() + if doublecheck: + if triplecheck: + self.doublecheck_data.SetSelection(2) + else: + self.doublecheck_data.SetSelection(1) + else: + self.doublecheck_data.SetSelection(0) + + self.maxfilesopen_data.SetValue(self.defaultDLConfig.get_max_files_open()) + self.buffer_read_enable.SetValue(self.defaultDLConfig.get_buffer_reads()) + + + flushval = self.defaultDLConfig.get_auto_flush() + self.flush_data.SetValue(flushval) + self.flush_data_enable.SetValue(flushval > 0) + + def apply(self): + alloctype = self.alloc_types[self.alloctype_data.GetSelection()] + allocrate = int(self.allocrate_data.GetValue()) + maxopen = int(self.maxfilesopen_data.GetValue()) + lockfiles = self.locking_data.GetSelection() >= 1 + lockread = self.locking_data.GetSelection() > 1 + doublecheck = self.doublecheck_data.GetSelection() >= 1 + triplecheck = self.doublecheck_data.GetSelection() > 1 + bufferread = self.buffer_read_enable.GetValue() + + if not self.flush_data_enable.GetValue(): + flushval = 0 + else: + flushval = self.flush_data.GetValue() + + # Save DownloadStartupConfig + self.defaultDLConfig.set_alloc_type(alloctype) + self.defaultDLConfig.set_alloc_rate(allocrate) + self.defaultDLConfig.set_lock_files(lockfiles) + self.defaultDLConfig.set_lock_while_reading(lockread) + self.defaultDLConfig.set_double_check_writes(doublecheck) + self.defaultDLConfig.set_triple_check_writes(triplecheck) + self.defaultDLConfig.set_max_files_open(maxopen) + self.defaultDLConfig.set_buffer_reads(bufferread) + self.defaultDLConfig.set_auto_flush(flushval) + + dlcfgfilename = get_default_dscfg_filename(self.utility.session) + self.defaultDLConfig.save(dlcfgfilename) + + + + +################################################################ +# +# Class: SchedulerRulePanel +# +# Contains settings related to timeouts +# +################################################################ + +# Arno, 2008-02-27: Currently disabled, as there is no queuing + +################################################################ +# +# Class: RateLimitPanel +# +# Contains settings related to setting limits on upload and +# download rates +# +################################################################ +class RateLimitPanel(ABCOptionPanel): + def __init__(self, parent, dialog): + ABCOptionPanel.__init__(self, parent, dialog) + sizer = 
self.sizer + + # GUI dialog for Global upload setting + ######################################## + + # Upload settings + ######################################## + + uploadsection_title = wx.StaticBox(self, -1, self.utility.lang.get('uploadsetting')) + uploadsection = wx.StaticBoxSizer(uploadsection_title, wx.VERTICAL) + + """ + # Arno, 2008-03-27: Currently disabled, no queuing + self.maxupload = wx.SpinCtrl(self, size = wx.Size(60, -1)) + self.maxupload.SetRange(2, 100) + + maxuploadsbox = wx.BoxSizer(wx.HORIZONTAL) + maxuploadsbox.Add(wx.StaticText(self, -1, self.utility.lang.get('maxuploads')), 0, wx.ALIGN_CENTER_VERTICAL) + maxuploadsbox.Add(self.maxupload, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT, 5) + + uploadsection.Add(maxuploadsbox, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + """ + + maxoverall_down_label = wx.BoxSizer(wx.VERTICAL) + maxoverall_down_label.Add(wx.StaticText(self, -1, self.utility.lang.get('maxoveralluploadrate')), 0, wx.ALIGN_CENTER_VERTICAL) + maxoverall_down_label.Add(wx.StaticText(self, -1, self.utility.lang.get('whendownload')), 0, wx.ALIGN_CENTER_VERTICAL) + + self.uploadrate = self.utility.makeNumCtrl(self, 0, integerWidth = 4) + self.uploadrate.SetToolTipString(self.utility.lang.get('global_uprate_hint')) + + maxoverall_down = wx.BoxSizer(wx.HORIZONTAL) + maxoverall_down.Add(maxoverall_down_label, 0, wx.ALIGN_CENTER_VERTICAL) + maxoverall_down.Add(self.uploadrate, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT, 5) + maxoverall_down.Add(wx.StaticText(self, -1, self.utility.lang.get('KB') + "/" + self.utility.lang.get('l_second')), 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT, 3) + + uploadsection.Add(maxoverall_down, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + maxoverall_nodown_label = wx.BoxSizer(wx.VERTICAL) + maxoverall_nodown_label.Add(wx.StaticText(self, -1, self.utility.lang.get('maxoveralluploadrate')), 0, wx.ALIGN_CENTER_VERTICAL) + maxoverall_nodown_label.Add(wx.StaticText(self, -1, self.utility.lang.get('whennodownload')), 0, wx.ALIGN_CENTER_VERTICAL) + + self.seeduploadrate = self.utility.makeNumCtrl(self, 0, integerWidth = 4) + self.seeduploadrate.SetToolTipString(self.utility.lang.get('global_uprate_hint')) + + maxoverall_nodown = wx.BoxSizer(wx.HORIZONTAL) + maxoverall_nodown.Add(maxoverall_nodown_label, 0, wx.ALIGN_CENTER_VERTICAL) + maxoverall_nodown.Add(self.seeduploadrate, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT, 5) + maxoverall_nodown.Add(wx.StaticText(self, -1, self.utility.lang.get('KB') + "/" + self.utility.lang.get('l_second')), 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT, 3) + + uploadsection.Add(maxoverall_nodown, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + uploadsection.Add(wx.StaticText(self, -1, self.utility.lang.get('zeroisunlimited')), 0, wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT|wx.ALL, 5) + + sizer.Add(uploadsection, 0, wx.EXPAND|wx.ALL, 5) + + # Download Section + downloadsection_title = wx.StaticBox(self, -1, self.utility.lang.get('downloadsetting')) + downloadsection = wx.StaticBoxSizer(downloadsection_title, wx.VERTICAL) + + self.downloadrate = self.utility.makeNumCtrl(self, 0, integerWidth = 4) + + maxdownoverall_down = wx.BoxSizer(wx.HORIZONTAL) + maxdownoverall_down.Add(wx.StaticText(self, -1, self.utility.lang.get('maxoveralldownloadrate')), 0, wx.ALIGN_CENTER_VERTICAL) + maxdownoverall_down.Add(self.downloadrate, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT, 5) + maxdownoverall_down.Add(wx.StaticText(self, -1, self.utility.lang.get('KB') + "/" + self.utility.lang.get('l_second')), 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT, 3) + + downloadsection.Add(maxdownoverall_down, 0, 
wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + downloadsection.Add(wx.StaticText(self, -1, self.utility.lang.get('zeroisunlimited')), 0, wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT|wx.ALL, 5) + + sizer.Add(downloadsection, 0, wx.EXPAND|wx.ALL, 5) + + self.initTasks() + + def loadValues(self, Read = None): + if Read is None: + Read = self.utility.config.Read + + #self.maxupload.SetValue(Read('maxupload', "int")) + self.uploadrate.SetValue(Read('maxuploadrate', "int")) + self.downloadrate.SetValue(Read('maxdownloadrate', "int")) + self.seeduploadrate.SetValue(Read('maxseeduploadrate', "int")) + + def apply(self): + # Check max upload rate input must be integer + ############################################## + upload_rate = int(self.uploadrate.GetValue()) + seedupload_rate = int(self.seeduploadrate.GetValue()) + + download_rate = int(self.downloadrate.GetValue()) + + # Check max upload rate must not be less than 3 kB/s + ###################################################### + if (upload_rate < 3 and upload_rate != 0) or (seedupload_rate < 3 and seedupload_rate != 0): + #display warning + dlg = wx.MessageDialog(self, self.utility.lang.get('uploadrateminwarning'), self.utility.lang.get('error'), wx.ICON_ERROR) + dlg.ShowModal() + dlg.Destroy() + return False + + # Set new value to parameters + ############################## + ##self.utility.config.Write('maxupload', self.maxupload.GetValue()) + self.utility.config.Write('maxuploadrate', upload_rate) + self.utility.config.Write('maxseeduploadrate', seedupload_rate) + + self.utility.config.Write('maxdownloadrate', download_rate) + + # Change at Runtime + self.utility.ratelimiter.set_global_max_speed(UPLOAD,upload_rate) + self.utility.ratelimiter.set_global_max_speed(DOWNLOAD,download_rate) + self.utility.ratelimiter.set_global_max_seedupload_speed(seedupload_rate) + + +################################################################ +# +# Class: SeedingOptionsPanel +# +# Contains options controlling how long torrents should remain +# seeding. 
+# +################################################################ +class SeedingOptionsPanel(ABCOptionPanel): + def __init__(self, parent, dialog): + ABCOptionPanel.__init__(self, parent, dialog) + sizer = self.sizer + + # Added by Boxun + #t4t options + t4t_title = wx.StaticBox(self, -1, self.utility.lang.get('tit-4-tat')) + t4t_section = wx.StaticBoxSizer(t4t_title, wx.VERTICAL) + + # Ratio buttons + self.rb_t4t_no_leeching = wx.RadioButton(self, -1, self.utility.lang.get('no_leeching'), wx.Point(-1, -1), wx.Size(-1, -1), wx.RB_GROUP) + self.rb_t4t_unlimited = wx.RadioButton(self, -1, self.utility.lang.get('unlimited_seeding'), wx.Point(-1, -1), wx.Size(-1, -1)) + self.rb_t4t_until_time = wx.RadioButton(self, -1, self.utility.lang.get('seed_sometime'), wx.Point(-1, -1), wx.Size(-1, -1)) + self.rb_t4t_no_seeding = wx.RadioButton(self, -1, self.utility.lang.get('no_seeding'), wx.Point(-1, -1), wx.Size(-1, -1)) + + self.t4t_rbs = [self.rb_t4t_no_leeching, self.rb_t4t_unlimited, self.rb_t4t_until_time, self.rb_t4t_no_seeding] + + # Seeding ratio option + t4t_section.Add(self.rb_t4t_no_leeching, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + # Unlimited seeding + t4t_section.Add(self.rb_t4t_unlimited, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + # Seeding for sometime + hours_interval = ['0', '1', '2', '3', '5', '10'] + mins_interval = ['15', '30', '45'] + + self.cb_t4t_hours = wx.ComboBox(self, -1, "", wx.Point(-1, -1), + wx.Size(55, -1), hours_interval, wx.CB_DROPDOWN|wx.CB_READONLY) + self.cb_t4t_mins = wx.ComboBox(self, -1, "", wx.Point(-1, -1), + wx.Size(55, -1), mins_interval, wx.CB_DROPDOWN|wx.CB_READONLY) + + t4t_timing_sizer = wx.BoxSizer(wx.HORIZONTAL) + t4t_timing_sizer.Add(self.rb_t4t_until_time, 0, wx.ALIGN_CENTER_VERTICAL) + t4t_timing_sizer.Add(self.cb_t4t_hours, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + t4t_timing_sizer.Add(wx.StaticText(self, -1, self.utility.lang.get('seed_hours')), 0, wx.ALIGN_CENTER_VERTICAL) + t4t_timing_sizer.Add(self.cb_t4t_mins, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + t4t_timing_sizer.Add(wx.StaticText(self, -1, self.utility.lang.get('seed_mins')), 0, wx.ALIGN_CENTER_VERTICAL) + + t4t_section.Add(t4t_timing_sizer, -1, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + # No seeding (That's evil, don't touch it!) 
+ t4t_section.Add(self.rb_t4t_no_seeding, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + sizer.Add(t4t_section, 0, wx.EXPAND|wx.ALL, 5) + + #g2g options + g2g_title = wx.StaticBox(self, -1, self.utility.lang.get('give-2-get')) + g2g_section = wx.StaticBoxSizer(g2g_title, wx.VERTICAL) + + # Ratio buttons + self.rb_g2g_large_ratio = wx.RadioButton(self, -1, self.utility.lang.get('seed_for_large_ratio'), wx.Point(-1, -1), wx.Size(-1, -1), wx.RB_GROUP) + self.rb_g2g_boost_rep = wx.RadioButton(self, -1, self.utility.lang.get('boost__reputation'), wx.Point(-1, -1), wx.Size(-1, -1)) + self.rb_g2g_until_time = wx.RadioButton(self, -1, self.utility.lang.get('seed_sometime'), wx.Point(-1, -1), wx.Size(-1, -1)) + self.rb_g2g_no_seeding = wx.RadioButton(self, -1, self.utility.lang.get('no_seeding'), wx.Point(-1, -1), wx.Size(-1, -1)) + + self.g2g_rbs = [self.rb_g2g_large_ratio, self.rb_g2g_boost_rep, self.rb_g2g_until_time, self.rb_g2g_no_seeding] + + # Seeding ratio option + g2g_ratio = ['50', '75', '100', '150', '200', '300', '500'] + self.cb_g2g_ratio = wx.ComboBox(self, -1, "", + wx.Point(-1, -1), wx.Size(65, -1), g2g_ratio, wx.CB_DROPDOWN|wx.CB_READONLY) + + g2g_ratio_sizer = wx.BoxSizer(wx.HORIZONTAL) + g2g_ratio_sizer.Add(self.rb_g2g_large_ratio, 0, wx.ALIGN_CENTER_VERTICAL) + g2g_ratio_sizer.Add(self.cb_g2g_ratio, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT, 5) + g2g_ratio_sizer.Add(wx.StaticText(self, -1, "%"), 0, wx.ALIGN_CENTER_VERTICAL) + + g2g_section.Add(g2g_ratio_sizer, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + # boost your reputation + g2g_section.Add(self.rb_g2g_boost_rep, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + # Seeding for sometime + self.cb_g2g_hours = wx.ComboBox(self, -1, "", wx.Point(-1, -1), + wx.Size(55, -1), hours_interval, wx.CB_DROPDOWN|wx.CB_READONLY) + self.cb_g2g_mins = wx.ComboBox(self, -1, "", wx.Point(-1, -1), + wx.Size(55, -1), mins_interval, wx.CB_DROPDOWN|wx.CB_READONLY) + + g2g_timing_sizer = wx.BoxSizer(wx.HORIZONTAL) + g2g_timing_sizer.Add(self.rb_g2g_until_time, 0, wx.ALIGN_CENTER_VERTICAL) + g2g_timing_sizer.Add(self.cb_g2g_hours, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + g2g_timing_sizer.Add(wx.StaticText(self, -1, self.utility.lang.get('seed_hours')), 0, wx.ALIGN_CENTER_VERTICAL) + g2g_timing_sizer.Add(self.cb_g2g_mins, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + g2g_timing_sizer.Add(wx.StaticText(self, -1, self.utility.lang.get('seed_mins')), 0, wx.ALIGN_CENTER_VERTICAL) + + g2g_section.Add(g2g_timing_sizer, -1, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + # No seeding (That's evil, don't touch it!) 
+ g2g_section.Add(self.rb_g2g_no_seeding, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + sizer.Add(g2g_section, 0, wx.EXPAND|wx.ALL, 5) + # ~ Boxun + + self.initTasks() + + def loadValues(self, Read = None): + # Load settings from dlConfig + try: + if Read is None: + Read = self.utility.config.Read + + self.t4t_rbs[Read('t4t_option', "int")].SetValue(True) + self.cb_t4t_hours.SetValue(str(Read('t4t_hours', "int"))) + self.cb_t4t_mins.SetValue(str(Read('t4t_hours', "int"))) + self.g2g_rbs[Read('g2g_option', "int")].SetValue(True) + self.cb_g2g_ratio.SetValue(str(Read('g2g_ratio', "int"))) + self.cb_g2g_hours.SetValue(str(Read('g2g_hours', "int"))) + self.cb_g2g_mins.SetValue(str(Read('g2g_mins', "int"))) + + except: + print_exc() + + def apply(self): + try: + # tit-4-tat + for i in range (4): + if self.t4t_rbs[i].GetValue(): + self.utility.config.Write('t4t_option', i) + break + + self.utility.config.Write("t4t_hours", self.cb_t4t_hours.GetValue()) + self.utility.config.Write("t4t_mins", self.cb_t4t_mins.GetValue()) + + # give-2-get + for i in range (4): + if self.g2g_rbs[i].GetValue(): + self.utility.config.Write("g2g_option", i) + break + + self.utility.config.Write("g2g_hours", self.cb_g2g_hours.GetValue()) + self.utility.config.Write("g2g_mins", self.cb_g2g_mins.GetValue()) + self.utility.config.Write("g2g_ratio", self.cb_g2g_ratio.GetValue()) + + except: + print_exc() + + + + + +################################################################ +# +# Class: TriblerPanel +# +# Contains settings for Tribler's features +# +################################################################ +class TriblerPanel(ABCOptionPanel): + def __init__(self, parent, dialog): + ABCOptionPanel.__init__(self, parent, dialog) + sizer = self.sizer + + funcsection_title = wx.StaticBox(self, -1, self.utility.lang.get('corefuncsetting')) + funcsection = wx.StaticBoxSizer(funcsection_title, wx.VERTICAL) + + self.rec_enable = wx.CheckBox(self, -1, self.utility.lang.get('enablerecommender')+" "+self.utility.lang.get('restartabc')) + funcsection.Add(self.rec_enable, 0, wx.ALIGN_LEFT|wx.ALL, 5) + + self.dlhelp_enable = wx.CheckBox(self, -1, self.utility.lang.get('enabledlhelp')+" "+self.utility.lang.get('restartabc')) + funcsection.Add(self.dlhelp_enable, 0, wx.ALIGN_LEFT|wx.ALL, 5) + + self.collect_enable = wx.CheckBox(self, -1, self.utility.lang.get('enabledlcollecting')+" "+self.utility.lang.get('restartabc')) + funcsection.Add(self.collect_enable, 0, wx.ALIGN_LEFT|wx.ALL, 5) + + sizer.Add(funcsection, 0, wx.EXPAND|wx.ALL, 5) + + tcsection_title = wx.StaticBox(self, -1, self.utility.lang.get('torrentcollectsetting')) + tcsection = wx.StaticBoxSizer(tcsection_title, wx.VERTICAL) + + self.timectrl = self.utility.makeNumCtrl(self, 1, min = 1, max = 3600) + time_box = wx.BoxSizer(wx.HORIZONTAL) + time_box.Add(wx.StaticText(self, -1, self.utility.lang.get('torrentcollectsleep')), 0, wx.ALIGN_CENTER_VERTICAL) + time_box.Add(self.timectrl, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + time_box.Add(wx.StaticText(self, -1, self.utility.lang.get('restartabc')), 0, wx.ALIGN_CENTER_VERTICAL) + tcsection.Add(time_box, 0, wx.EXPAND|wx.ALL, 5) + + ntorrents_box = wx.BoxSizer(wx.HORIZONTAL) # set the max num of torrents to collect + self.ntorrents = self.utility.makeNumCtrl(self, 5000, min = 0, max = 999999) + ntorrents_box.Add(wx.StaticText(self, -1, self.utility.lang.get('maxntorrents')), 0, wx.ALIGN_CENTER_VERTICAL) + ntorrents_box.Add(self.ntorrents, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + 
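Right below, the dialog shows the stop-collecting threshold in MB next to the currently free disk space, which getfreespace reports in bytes and the code divides by 2**20. A small sketch of that conversion and the resulting stop condition; the function names are illustrative and getfreespace itself is Tribler's own helper, not reproduced here.

def bytes_to_mb(nbytes):
    # Same conversion as the dialog: bytes -> whole mebibytes
    return nbytes // (2 ** 20)

def should_stop_collecting(free_bytes, threshold_mb):
    # Torrent collecting is meant to stop once free space falls below the threshold
    return bytes_to_mb(free_bytes) < threshold_mb

print(should_stop_collecting(150 * 2 ** 20, 200))  # True: 150 MB free < 200 MB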
tcsection.Add(ntorrents_box, 0, wx.EXPAND|wx.ALL, 5) + + npeers_box = wx.BoxSizer(wx.HORIZONTAL) # set the max num of peers to be used by buddycast + self.npeers = self.utility.makeNumCtrl(self, 2000, min = 0, max = 999999) + npeers_box.Add(wx.StaticText(self, -1, self.utility.lang.get('maxnpeers')), 0, wx.ALIGN_CENTER_VERTICAL) + npeers_box.Add(self.npeers, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + tcsection.Add(npeers_box, 0, wx.EXPAND|wx.ALL, 5) + + tc_threshold_box = wx.BoxSizer(wx.HORIZONTAL) # set the min space to stop torrent collecting + self.tc_threshold = self.utility.makeNumCtrl(self, 200, min = 0, max = 999999) + tc_threshold_box.Add(wx.StaticText(self, -1, self.utility.lang.get('tc_threshold')), 0, wx.ALIGN_CENTER_VERTICAL) + tc_threshold_box.Add(self.tc_threshold, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + tc_threshold_box.Add(wx.StaticText(self, -1, self.utility.lang.get('MB')), 0, wx.ALIGN_CENTER_VERTICAL) + tc_threshold_box.Add(wx.StaticText(self, -1, ' ('+self.utility.lang.get('current_free_space')+' '), 0, wx.ALIGN_CENTER_VERTICAL) + + current_free_space = getfreespace(self.utility.session.get_download_help_dir())/(2**20) + tc_threshold_box.Add(wx.StaticText(self, -1, str(current_free_space)), 0, wx.ALIGN_CENTER_VERTICAL) + tc_threshold_box.Add(wx.StaticText(self, -1, self.utility.lang.get('MB')+')'), 0, wx.ALIGN_CENTER_VERTICAL) + tcsection.Add(tc_threshold_box, 0, wx.EXPAND|wx.ALL, 5) + + tc_rate_box = wx.BoxSizer(wx.HORIZONTAL) # set the rate of torrent collecting + self.tc_rate = self.utility.makeNumCtrl(self, 5, min = 0, max = 999999) + tc_rate_box.Add(wx.StaticText(self, -1, self.utility.lang.get('torrentcollectingrate')), 0, wx.ALIGN_CENTER_VERTICAL) + tc_rate_box.Add(self.tc_rate, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + tcsection.Add(tc_rate_box, 0, wx.EXPAND|wx.ALL, 5) + + sizer.Add(tcsection, 0, wx.EXPAND|wx.ALL, 5) + + myinfosection_title = wx.StaticBox(self, -1, self.utility.lang.get('myinfosetting')) + myinfosection = wx.StaticBoxSizer(myinfosection_title, wx.VERTICAL) + + # Show PermID + mypermid = self.utility.session.get_permid() + pb64 = show_permid(mypermid) + if True: + # Make it copy-and-paste able + permid_box = wx.BoxSizer(wx.HORIZONTAL) + self.permidctrl = wx.TextCtrl(self, -1, pb64, size = (400, 30), style = wx.TE_READONLY) + permid_box.Add(wx.StaticText(self, -1, self.utility.lang.get('mypermid')), 0, wx.ALIGN_CENTER_VERTICAL) + permid_box.Add(self.permidctrl, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + myinfosection.Add(permid_box, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + else: + permid_txt = self.utility.lang.get('mypermid')+": "+pb64 + label = wx.StaticText(self, -1, permid_txt ) + myinfosection.Add( label, 1, wx.EXPAND|wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + self.myinfo = wx.Button(self, -1, self.utility.lang.get('myinfo') + "...") + self.Bind(wx.EVT_BUTTON, self.OnMyInfoWizard, self.myinfo) + myinfosection.Add(self.myinfo, 0, wx.ALL, 5) + + sizer.Add(myinfosection, 0, wx.EXPAND|wx.ALL, 5) + + if self.utility.frame.oldframe is not None: + self.debug = wx.Button(self, -1, 'Open debug window') + sizer.Add(self.debug, 0, wx.ALL, 5) + self.Bind(wx.EVT_BUTTON, self.OnDebug, self.debug) + + self.initTasks() + + + def loadValues(self, Read = None): + """ Loading values from configure file """ + + buddycast = self.utility.session.get_buddycast() + coopdl = self.utility.session.get_download_help() + torrcoll = self.utility.session.get_torrent_collecting() + maxcolltorrents = 
self.utility.session.get_torrent_collecting_max_torrents() + maxbcpeers = self.utility.session.get_buddycast_max_peers() + stopcollthres = self.utility.session.get_stop_collecting_threshold() + collrate = self.utility.session.get_torrent_collecting_rate() + + self.rec_enable.SetValue(buddycast) + self.dlhelp_enable.SetValue(coopdl) + self.collect_enable.SetValue(torrcoll) + self.ntorrents.SetValue(maxcolltorrents) + self.npeers.SetValue(maxbcpeers) + self.tc_threshold.SetValue(stopcollthres) + self.tc_rate.SetValue(collrate) + + # For subscriptions + self.timectrl.SetValue(self.utility.config.Read('torrentcollectsleep', 'int')) + + + + def apply(self): + """ do sth. when user click apply of OK button """ + + buddycast = self.rec_enable.GetValue() + coopdl = self.dlhelp_enable.GetValue() + torrcoll = self.collect_enable.GetValue() + maxcolltorrents = int(self.ntorrents.GetValue()) + maxbcpeers = int(self.npeers.GetValue()) + stopcollthres = int(self.tc_threshold.GetValue()) + collrate = int(self.tc_rate.GetValue()) + + + # Save SessConfig + state_dir = self.utility.session.get_state_dir() + cfgfilename = Session.get_default_config_filename(state_dir) + scfg = SessionStartupConfig.load(cfgfilename) + + for target in [scfg,self.utility.session]: + try: + target.set_buddycast(buddycast) + except: + print_exc() + try: + target.set_download_help(coopdl) + except: + print_exc() + try: + target.set_torrent_collecting(torrcoll) + except: + print_exc() + try: + target.set_torrent_collecting_max_torrents(maxcolltorrents) + except: + print_exc() + try: + target.set_buddycast_max_peers(maxbcpeers) + except: + print_exc() + try: + target.set_stop_collecting_threshold(stopcollthres) + except: + print_exc() + try: + target.set_torrent_collecting_rate(collrate) + except: + print_exc() + + scfg.save(cfgfilename) + + # For subscriptions + t = int(self.timectrl.GetValue()) + self.utility.config.Write('torrentcollectsleep', t) + + + def OnMyInfoWizard(self, event = None): + wizard = MyInfoWizard(self) + wizard.RunWizard(wizard.getFirstPage()) + + def WizardFinished(self,wizard): + wizard.Destroy() + + def OnDebug(self,event): + self.utility.frame.oldframe.Show() + +# HERE + +################################################################ +# +# Class: VideoPanel +# +# Contains settings for video features +# +################################################################ +class VideoPanel(ABCOptionPanel): + def __init__(self, parent, dialog): + ABCOptionPanel.__init__(self, parent, dialog) + sizer = self.sizer + + playbacksection_title = wx.StaticBox(self, -1, self.utility.lang.get('playback_section')) + playbacksection = wx.StaticBoxSizer(playbacksection_title, wx.VERTICAL) + + playbackbox = wx.BoxSizer(wx.HORIZONTAL) + feasible = return_feasible_playback_modes(self.utility.getPath()) + playback_choices = [] + self.playback_indices = [] + if PLAYBACKMODE_INTERNAL in feasible: + playback_choices.append(self.utility.lang.get('playback_internal')) + self.playback_indices.append(PLAYBACKMODE_INTERNAL) + if PLAYBACKMODE_EXTERNAL_DEFAULT in feasible: + playback_choices.append(self.utility.lang.get('playback_external_default')) + self.playback_indices.append(PLAYBACKMODE_EXTERNAL_DEFAULT) + if PLAYBACKMODE_EXTERNAL_MIME in feasible: + playback_choices.append(self.utility.lang.get('playback_external_mime')) + self.playback_indices.append(PLAYBACKMODE_EXTERNAL_MIME) + self.playback_chooser=wx.Choice(self, -1, wx.Point(-1, -1), wx.Size(-1, -1), playback_choices) + + playbackbox.Add(wx.StaticText(self, -1, 
self.utility.lang.get('playback_mode')), 1, wx.ALIGN_CENTER_VERTICAL) + playbackbox.Add(self.playback_chooser) + playbacksection.Add(playbackbox, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + + player_box = wx.BoxSizer(wx.HORIZONTAL) + self.player = wx.TextCtrl(self, -1, "") + player_box.Add(wx.StaticText(self, -1, self.utility.lang.get('videoplayer_default_path')), 0, wx.ALIGN_CENTER_VERTICAL) + player_box.Add(self.player, 1, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + #browsebtn = wx.Button(self, -1, "...", style = wx.BU_EXACTFIT) + browsebtn = wx.Button(self, -1, "...") + self.Bind(wx.EVT_BUTTON, self.onBrowsePlayer, browsebtn) + player_box.Add(browsebtn, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + playbacksection.Add(player_box, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT|wx.EXPAND, 5) + + sizer.Add(playbacksection, 0, wx.EXPAND|wx.ALL, 5) + + analysissection_title = wx.StaticBox(self, -1, self.utility.lang.get('analysis_section')) + analysissection = wx.StaticBoxSizer(analysissection_title, wx.VERTICAL) + + analyser_box = wx.BoxSizer(wx.HORIZONTAL) + self.analyser = wx.TextCtrl(self, -1, "") + analyser_box.Add(wx.StaticText(self, -1, self.utility.lang.get('videoanalyserpath')), 0, wx.ALIGN_CENTER_VERTICAL) + analyser_box.Add(self.analyser, 1, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + #browsebtn = wx.Button(self, -1, "...", style = wx.BU_EXACTFIT) + browsebtn = wx.Button(self, -1, "...") + self.Bind(wx.EVT_BUTTON, self.onBrowseAnalyser, browsebtn) + analyser_box.Add(browsebtn, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + analysissection.Add(analyser_box, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT|wx.EXPAND, 5) + + sizer.Add(analysissection, 0, wx.EXPAND|wx.ALL, 5) + + restarttxt = wx.StaticText(self, -1, self.utility.lang.get('restartabc')) + sizer.Add(restarttxt, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT,5) + + if sys.platform == 'win32': + self.quote = '"' + else: + self.quote = "'" + + self.initTasks() + + def loadValues(self, Read = None): + if Read is None: + Read = self.utility.config.Read + + mode = Read('videoplaybackmode', "int") + for index in self.playback_indices: + if index == mode: + self.playback_chooser.SetSelection(index) + + value = Read('videoplayerpath') + qvalue = self.quote_path(value) + self.player.SetValue(qvalue) + + value = self.utility.session.get_video_analyser_path() + qvalue = self.quote_path(value) + self.analyser.SetValue(qvalue) + + + def apply(self): + + value = self.playback_chooser.GetSelection() + mode = self.playback_indices[value] + self.utility.config.Write('videoplaybackmode',mode) + + for key,widget,mainmsg in [('videoplayerpath',self.player,self.utility.lang.get('videoplayernotfound'))]: + qvalue = widget.GetValue() + value = self.unquote_path(qvalue) + if not os.access(value,os.F_OK): + self.onError(mainmsg,value) + return + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","abcoptions: VideoPanel: Writing",key,value + self.utility.config.Write(key,value) + + # videoanalyserpath is a config parameter of the Session + vapath = None + for key,widget,mainmsg in[('videoanalyserpath',self.analyser,self.utility.lang.get('videoanalysernotfound'))]: + qvalue = widget.GetValue() + value = self.unquote_path(qvalue) + vapath = value + if not os.access(value,os.F_OK): + self.onError(mainmsg,value) + return + + # Save SessConfig + state_dir = self.utility.session.get_state_dir() + cfgfilename = Session.get_default_config_filename(state_dir) + scfg = SessionStartupConfig.load(cfgfilename) + + 
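The block below repeats a pattern used by every panel's apply(): the new value is pushed both into the SessionStartupConfig just loaded from disk and into the running Session, and each setter is wrapped in try/except so one failing target does not stop the other from being updated. A standalone sketch of that pattern; DummySession and the ffmpeg path are illustrative stand-ins, not part of the Tribler API.

from traceback import print_exc

def set_on_all(targets, setter_name, value):
    # Call the same setter on every target, tolerating individual failures
    for target in targets:
        try:
            getattr(target, setter_name)(value)
        except Exception:
            print_exc()

class DummySession(object):
    # Stand-in for Session / SessionStartupConfig in this sketch
    def set_video_analyser_path(self, path):
        self.video_analyser_path = path

startup, live = DummySession(), DummySession()
set_on_all([startup, live], 'set_video_analyser_path', '/usr/bin/ffmpeg')
print(live.video_analyser_path)  # /usr/bin/ffmpeg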
for target in [scfg,self.utility.session]: + try: + target.set_video_analyser_path(vapath) + except: + print_exc() + + scfg.save(cfgfilename) + + + + def unquote_path(self,value): + value.strip() + if value[0] == self.quote: + idx = value.find(self.quote,1) + return value[1:idx] + else: + return value + + def quote_path(self,value): + value.strip() + if value.find(' ') != -1: + return self.quote+value+self.quote + else: + return value + + + def onError(self,mainmsg,path): + msg = mainmsg + msg += '\n' + msg += path + msg += '\n' + dlg = wx.MessageDialog(None, msg, self.utility.lang.get('videoplayererrortitle'), wx.OK|wx.ICON_ERROR) + result = dlg.ShowModal() + dlg.Destroy() + + def onBrowsePlayer(self, event = None): + self.onBrowse(self.player,self.utility.lang.get('choosevideoplayer')) + + def onBrowseAnalyser(self, event = None): + self.onBrowse(self.analyser,self.utility.lang.get('choosevideoanalyser')) + + def onBrowse(self,widget,title): + dlg = wx.FileDialog(self.utility.frame, + title, + style = wx.OPEN | wx.FILE_MUST_EXIST) + if dlg.ShowModal() == wx.ID_OK: + value = dlg.GetPath() + qvalue = self.quote_path(value) + widget.SetValue(qvalue) + dlg.Destroy() + + +################################################################ +# +# Class: ABCTree +# +# A collapsable listing of all the options panels +# +################################################################ +class ABCTree(wx.TreeCtrl): + def __init__(self, parent, dialog): + style = wx.TR_DEFAULT_STYLE | wx.TR_HIDE_ROOT + wx.TreeCtrl.__init__(self, parent, -1, style = style) + + self.dialog = dialog + self.utility = dialog.utility + + self.root = self.AddRoot("Preferences") + + self.tribler = self.AppendItem(self.root, self.utility.lang.get('triblersetting')) + self.video = self.AppendItem(self.root, self.utility.lang.get('videosetting')) + self.ratelimits = self.AppendItem(self.root, self.utility.lang.get('ratelimits')) + self.seedingoptions = self.AppendItem(self.root, self.utility.lang.get('seedoptions')) + #self.queuesetting = self.AppendItem(self.root, self.utility.lang.get('queuesetting')) + #self.timeout = self.AppendItem(self.root, self.utility.lang.get('timeout')) + self.disk = self.AppendItem(self.root, self.utility.lang.get('disksettings')) + self.advanceddisk = self.AppendItem(self.disk, self.utility.lang.get('advanced')) + self.network = self.AppendItem(self.root, self.utility.lang.get('networksetting')) + self.advancednetwork = self.AppendItem(self.network, self.utility.lang.get('advanced')) + + #self.display = self.AppendItem(self.root, self.utility.lang.get('displaysetting')) + + #self.colors = self.AppendItem(self.display, self.utility.lang.get('torrentcolors')) + + self.misc = self.AppendItem(self.root, self.utility.lang.get('miscsetting')) + + self.treeMap = {self.ratelimits : self.dialog.rateLimitPanel, + self.seedingoptions : self.dialog.seedingOptionsPanel, + #self.queuesetting : self.dialog.queuePanel, + #self.timeout : self.dialog.schedulerRulePanel, + self.network : self.dialog.networkPanel, + self.misc : self.dialog.miscPanel, + self.tribler : self.dialog.triblerPanel, + self.video : self.dialog.videoPanel, + #self.display : self.dialog.displayPanel, + #self.colors : self.dialog.colorPanel, + self.disk : self.dialog.diskPanel } + + self.treeMap[self.advancednetwork] = self.dialog.advancedNetworkPanel + self.treeMap[self.advanceddisk] = self.dialog.advancedDiskPanel + + self.Bind(wx.EVT_TREE_SEL_CHANGED, self.onSwitchPage) + + self.SetAutoLayout(True) + self.Fit() + + def onSwitchPage(self, event = 
None): + if self.dialog.closing or event is None: + return + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","abcoption: event type:", event.GetEventType() + newitem = event.GetItem() + newpanel = None + foundnew = False + for key in self.treeMap: + if key == newitem: + newpanel = self.treeMap[key] + foundnew = True + if foundnew: + break + + if newpanel is not None: + # Trying to switch to the current window + try: + oldpanel = self.dialog.splitter.GetWindow2() + if oldpanel != newpanel: + oldpanel.Show(False) + self.dialog.splitter.ReplaceWindow(oldpanel, newpanel) + newpanel.Show(True) + newpanel.changed = True + except: + pass + # TODO: for some reason this is sometimes failing + # (splitter.GetWindow2() sometimes appears to + # return an Object rather than wx.Window) + + def open(self,name): + rootid = self.GetRootItem() + if rootid.IsOk(): + #print "root is",self.GetItemText(rootid) + [firstid,cookie] = self.GetFirstChild(rootid) + if firstid.IsOk(): + print "first is",self.GetItemText(firstid) + if not self.doopen(name,firstid): + while True: + [childid,cookie] = self.GetNextChild(firstid,cookie) + if childid.IsOk(): + if self.doopen(name,childid): + break + else: + break + + def doopen(self,wantname,childid): + gotname = self.GetItemText(childid) + print "gotname is",gotname + if gotname == wantname: + self.SelectItem(childid) + return True + else: + return False + + +################################################################ +# +# Class: ABCOptionDialog +# +# Creates a dialog that allows users to set various preferences +# +################################################################ +class ABCOptionDialog(wx.Dialog): + def __init__(self, parent,openname=None): + self.utility = parent.utility + + style = wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER +# size = wx.Size(530, 420) + + size, split = self.getWindowSettings() + + wx.Dialog.__init__(self, parent, -1, self.utility.lang.get('abcpreference'), size = size, style = style) + + self.splitter = wx.SplitterWindow(self, -1, style = wx.SP_NOBORDER | wx.SP_LIVE_UPDATE) + + self.rateLimitPanel = RateLimitPanel(self.splitter, self) + self.seedingOptionsPanel = SeedingOptionsPanel(self.splitter, self) + #self.queuePanel = QueuePanel(self.splitter, self) + + #self.schedulerRulePanel = SchedulerRulePanel(self.splitter, self) + self.networkPanel = NetworkPanel(self.splitter, self) + self.miscPanel = MiscPanel(self.splitter, self) + self.triblerPanel = TriblerPanel(self.splitter, self) + self.videoPanel = VideoPanel(self.splitter, self) + self.diskPanel = DiskPanel(self.splitter, self) + + self.advancedNetworkPanel = AdvancedNetworkPanel(self.splitter, self) + self.advancedDiskPanel = AdvancedDiskPanel(self.splitter, self) + + self.tree = ABCTree(self.splitter, self) + + # TODO: Try wx.Listbook instead of splitterwindow + + self.splitter.SetAutoLayout(True) + self.splitter.Fit() + + applybtn = wx.Button(self, -1, " "+self.utility.lang.get('apply')+" ", size = (60, -1)) + okbtn = wx.Button(self, -1, " "+self.utility.lang.get('ok')+" ", size = (60, -1)) + cancelbtn = wx.Button(self, -1, " "+self.utility.lang.get('cancel')+" ", size = (60, -1)) + + buttonbox = wx.BoxSizer(wx.HORIZONTAL) + buttonbox.Add(applybtn, 0, wx.ALL, 5) + buttonbox.Add(okbtn, 0, wx.ALL, 5) + buttonbox.Add(cancelbtn, 0, wx.ALL, 5) + + outerbox = wx.BoxSizer(wx.VERTICAL) + outerbox.Add(self.splitter , 1, wx.EXPAND|wx.LEFT|wx.RIGHT|wx.TOP, 5) + + outerbox.Add(wx.StaticLine(self, -1), 0, wx.EXPAND|wx.LEFT|wx.RIGHT|wx.BOTTOM, 5) + + 
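Further below, onApply walks the tree-to-panel map and calls apply() only on panels whose changed flag was set when they were shown, so untouched panels never write their settings back. A minimal sketch of that bookkeeping with illustrative names:

class OptionPanel(object):
    def __init__(self, name):
        self.name = name
        self.changed = False      # flipped to True the first time the panel is shown

    def apply(self):
        print('applying ' + self.name)

panels = {'network': OptionPanel('network'), 'disk': OptionPanel('disk')}
panels['disk'].changed = True     # the user only opened the disk panel

for panel in panels.values():
    if panel.changed:
        panel.apply()             # only 'disk' is applied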
outerbox.Add(buttonbox, 0, wx.ALIGN_RIGHT) + + # Add events + ########################### + self.Bind(wx.EVT_BUTTON, self.onOK, okbtn) + self.Bind(wx.EVT_BUTTON, self.onApply, applybtn) + self.Bind(wx.EVT_BUTTON, self.onCloseGlobalPref, cancelbtn) + self.Bind(wx.EVT_CLOSE, self.onCloseGlobalPref) + + defaultPanel = self.triblerPanel + + self.splitter.SplitVertically(self.tree, defaultPanel, split) + defaultPanel.changed = True + self.splitter.SetMinimumPaneSize(50) + + for key in self.tree.treeMap: + panel = self.tree.treeMap[key] + panel.Show(False) + + defaultPanel.Show(True) + defaultPanel.Fit() + + self.SetSizer(outerbox) +# self.Fit() + + self.closing = False + if openname is not None: + self.tree.open(openname) + + treeitem = [k for (k,v) in self.tree.treeMap.iteritems() if v == defaultPanel][0] + self.tree.SelectItem( treeitem, True ) + + def getWindowSettings(self): + width = self.utility.config.Read("prefwindow_width", "int") + height = self.utility.config.Read("prefwindow_height", "int") + split = self.utility.config.Read("prefwindow_split", "int") + + return wx.Size(width, height), split + + def saveWindowSettings(self): + width, height = self.GetSizeTuple() + self.utility.config.Write("prefwindow_width", width) + self.utility.config.Write("prefwindow_height", height) + self.utility.config.Write("prefwindow_split", self.splitter.GetSashPosition()) + self.utility.config.Flush() + + def onCloseGlobalPref(self, event = None): + self.closing = True + + self.saveWindowSettings() + + self.EndModal(wx.ID_CANCEL) + + def onApply(self, event = None): + # Set new value to parameters + ############################## + + # Only apply changes for panels that the user has viewed + for key in self.tree.treeMap: + panel = self.tree.treeMap[key] + if panel.changed: + panel.apply() + + # write current changes to disk + self.utility.config.Flush() + + return True + + def onOK(self, event = None): + if self.onApply(): + self.closing = True + self.saveWindowSettings() + + self.EndModal(wx.ID_OK) + + + + diff --git a/tribler-mod/Tribler/Main/Dialogs/aboutme.py b/tribler-mod/Tribler/Main/Dialogs/aboutme.py new file mode 100644 index 0000000..83e47be --- /dev/null +++ b/tribler-mod/Tribler/Main/Dialogs/aboutme.py @@ -0,0 +1,243 @@ +from time import localtime, strftime +import sys +import wx +import wx.html as html + +from webbrowser import open_new +from threading import Thread +from traceback import print_exc +from Tribler.Core.Utilities.timeouturlopen import urlOpenTimeout + +################################################################ +# +# Class: MyHtmlWindow +# +# Helper class to display html in a panel and handle clicking +# on urls. 
+# +################################################################ +class MyHtmlWindow(html.HtmlWindow): + def __init__(self, parent, id): + html.HtmlWindow.__init__(self, parent, id, size=(400, 300)) + self.Bind(wx.EVT_SCROLLWIN, self.OnScroll) + + def OnScroll(self, event): + event.Skip() + + def OnLinkClicked(self, linkinfo): + t = Thread(target = open_new(linkinfo.GetHref())) + t.setName( "AboutMeLinkOpen"+t.getName() ) + t.setDaemon(True) + t.start() + + +################################################################ +# +# Class: MyHtmlDialog +# +# Displays html formatted information in a dialog +# +################################################################ +class MyHtmlDialog(wx.Dialog): + def __init__(self, parent, title, content): + wx.Dialog.__init__(self, parent, -1, title) + + btn = wx.Button(self, wx.ID_OK, " OK ") + btn.SetDefault() + + color = self.GetBackgroundColour() + bgcolor = "#%02x%02x%02x" % (color.Red(), color.Green(), color.Blue()) + + about_html = "" + title + "" + \ + "" + \ + content + \ + "" + + self.html = MyHtmlWindow(self, -1) + self.html.SetPage(about_html) + + buttonbox = wx.BoxSizer(wx.HORIZONTAL) + buttonbox.Add(btn, 0, wx.ALL, 5) + + outerbox = wx.BoxSizer(wx.VERTICAL) + outerbox.Add(self.html, 0, wx.EXPAND|wx.ALL, 5) + outerbox.Add(buttonbox, 0, wx.ALIGN_CENTER) + + self.SetAutoLayout(True) + self.SetSizer(outerbox) + self.Fit() + + +################################################################ +# +# Class: VersionDialog +# +# Show information about the current version of ABC +# +################################################################ +class VersionDialog(MyHtmlDialog): + def __init__(self, parent): + self.parent = parent + self.utility = parent.utility + + content = "" + try : + nu = self.hasNewVersion() + if nu == 0: + content += "" + content += self.utility.lang.get('nonewversion') + content += "
\n" + content += "
" + elif nu == 1: + content += "" + newversion = self.utility.lang.get('hasnewversion') + content += "
" + newversion + "" + content += "
\n" + content += "
" + else: + content += "" + content = self.utility.lang.get('cantconnectwebserver') + content += "
\n" + content += "
" + except: + content = self.utility.lang.get('cantconnectwebserver') + print_exc() + + title = self.utility.lang.get('abclatestversion') + + MyHtmlDialog.__init__(self, parent, title, content) + + + def hasNewVersion(self): + my_version = self.utility.getVersion() + try: + # Arno: TODO: don't let this be done by MainThread + curr_status = urlOpenTimeout('http://tribler.org/version/',timeout=1).readlines() + line1 = curr_status[0] + if len(curr_status) > 1: + self.update_url = curr_status[1].strip() + else: + self.update_url = 'http://tribler.org/' + _curr_status = line1.split() + self.curr_version = _curr_status[0] + if self.newversion(self.curr_version, my_version): + return 1 + else: + return 0 + except: + print_exc() + return -1 + + def newversion(self, curr_version, my_version): + curr = curr_version.split('.') + my = my_version.split('.') + if len(my) >= len(curr): + nversion = len(my) + else: + nversion = len(curr) + for i in range(nversion): + if i < len(my): + my_v = int(my[i]) + else: + my_v = 0 + if i < len(curr): + curr_v = int(curr[i]) + else: + curr_v = 0 + if curr_v > my_v: + return True + elif curr_v < my_v: + return False + return False + +################################################################ +# +# Class: AboutMeDialog +# +# Display credits information about who has contributed to ABC +# along with what software modules it uses. +# +################################################################ +class AboutMeDialog(MyHtmlDialog): + def __init__(self, parent): + + self.parent = parent + self.utility = parent.utility + + bittornado_version = "0.3.13" + py2exe_version = "0.6.2" + nsis_version = "2.09" + + title = self.utility.lang.get('aboutabc') + +# # Start UI in Dialog +# ####################### +# +# btn = wx.Button(self, wx.ID_OK, " OK ") +# btn.SetDefault() +# +# color = self.GetBackgroundColour() +# bgcolor = "#%02x%02x%02x" % (color.Red(), color.Green(), color.Blue()) + + wx_version = "" + for v in wx.VERSION: + s = str(v)+"." + wx_version += s + wx_version = wx_version.strip(".") + + major, minor, micro, releaselevel, serial = sys.version_info + python_version = str(major) + "." + str(minor) + "." + str(micro) + + + content = "
" + \ + self.utility.lang.get('title') + " V" + self.utility.lang.get('version') + \ + "
" + \ + "" + \ + "

" + self.utility.lang.get('build_date') + "
" + \ + "

Tribler is an Internet TV application." + \ + "
It allows you to find, share, and consume videos." + \ + "
" + \ + "
Created by the Tribler P2P Research Team." + \ + "
This work is supported by various research grants from the Dutch Ministry of Economic Affairs, the Dutch Technology Foundation STW, and the European Union 6th and 7th Framework Programmes." + \ + "
" + \ + "

" + \ + "" + \ + "" + \ + "" + \ + "" + \ + "" + \ + "" + \ + "" + \ + "" + \ + "" + \ + "" + \ + "" + \ + "" + \ + "" + \ + "" + \ + ""\ + "" + \ + "
Contributors:Delft University of Technology
 " + \ + "(triblersoft@gmail.com)" + \ + "
Vrije Universiteit Amsterdam" + \ + "
Homepage:Tribler Homepage
Forums:Tribler Forums
" + self.utility.lang.get('translate') + "
" + \ + "

The system core is BitTornado " + bittornado_version + "" + \ + "
based on BitTorrent, coded by Bram Cohen" + \ + "
Plus the ABC code by Choopan Rattanapoka and Tim Tucker." + \ + "
Special Thanks:" + \ + "
NoirSoldats (noirsoldats@codemeu.com)" + \ + "
kratoak5" + \ + "
roee88" + \ + "
Greg Fleming (www.darkproject.com)" + \ + "
Pir4nhaX (www.clanyakuza.com)" + \ + "
Michel Hartmann (php4abc.i-networx.de)" + \ + "

Powered by Python " + python_version + ", " + \ + "wxPython " + wx_version + ", " + \ + "py2exe " + py2exe_version + ", " + \ + "NSIS " + nsis_version + "" + \ + "

Parts copyrighted (c) 2001-2002, Bram Cohen" + \ + "

Parts copyrighted (c) 2003-2004, Choopan Rattanapoka" + \ + "

Copyright (c) 2005-2008, Delft University of Technology and Vrije Universiteit Amsterdam" + \ + "" + + MyHtmlDialog.__init__(self, parent, title, content) diff --git a/tribler-mod/Tribler/Main/Dialogs/aboutme.py.bak b/tribler-mod/Tribler/Main/Dialogs/aboutme.py.bak new file mode 100644 index 0000000..db55135 --- /dev/null +++ b/tribler-mod/Tribler/Main/Dialogs/aboutme.py.bak @@ -0,0 +1,242 @@ +import sys +import wx +import wx.html as html + +from webbrowser import open_new +from threading import Thread +from traceback import print_exc +from Tribler.Core.Utilities.timeouturlopen import urlOpenTimeout + +################################################################ +# +# Class: MyHtmlWindow +# +# Helper class to display html in a panel and handle clicking +# on urls. +# +################################################################ +class MyHtmlWindow(html.HtmlWindow): + def __init__(self, parent, id): + html.HtmlWindow.__init__(self, parent, id, size=(400, 300)) + self.Bind(wx.EVT_SCROLLWIN, self.OnScroll) + + def OnScroll(self, event): + event.Skip() + + def OnLinkClicked(self, linkinfo): + t = Thread(target = open_new(linkinfo.GetHref())) + t.setName( "AboutMeLinkOpen"+t.getName() ) + t.setDaemon(True) + t.start() + + +################################################################ +# +# Class: MyHtmlDialog +# +# Displays html formatted information in a dialog +# +################################################################ +class MyHtmlDialog(wx.Dialog): + def __init__(self, parent, title, content): + wx.Dialog.__init__(self, parent, -1, title) + + btn = wx.Button(self, wx.ID_OK, " OK ") + btn.SetDefault() + + color = self.GetBackgroundColour() + bgcolor = "#%02x%02x%02x" % (color.Red(), color.Green(), color.Blue()) + + about_html = "" + title + "" + \ + "" + \ + content + \ + "" + + self.html = MyHtmlWindow(self, -1) + self.html.SetPage(about_html) + + buttonbox = wx.BoxSizer(wx.HORIZONTAL) + buttonbox.Add(btn, 0, wx.ALL, 5) + + outerbox = wx.BoxSizer(wx.VERTICAL) + outerbox.Add(self.html, 0, wx.EXPAND|wx.ALL, 5) + outerbox.Add(buttonbox, 0, wx.ALIGN_CENTER) + + self.SetAutoLayout(True) + self.SetSizer(outerbox) + self.Fit() + + +################################################################ +# +# Class: VersionDialog +# +# Show information about the current version of ABC +# +################################################################ +class VersionDialog(MyHtmlDialog): + def __init__(self, parent): + self.parent = parent + self.utility = parent.utility + + content = "" + try : + nu = self.hasNewVersion() + if nu == 0: + content += "" + content += self.utility.lang.get('nonewversion') + content += "
\n" + content += "
" + elif nu == 1: + content += "" + newversion = self.utility.lang.get('hasnewversion') + content += "" + newversion + "" + content += "
\n" + content += "
" + else: + content += "" + content = self.utility.lang.get('cantconnectwebserver') + content += "
\n" + content += "
" + except: + content = self.utility.lang.get('cantconnectwebserver') + print_exc() + + title = self.utility.lang.get('abclatestversion') + + MyHtmlDialog.__init__(self, parent, title, content) + + + def hasNewVersion(self): + my_version = self.utility.getVersion() + try: + # Arno: TODO: don't let this be done by MainThread + curr_status = urlOpenTimeout('http://tribler.org/version/',timeout=1).readlines() + line1 = curr_status[0] + if len(curr_status) > 1: + self.update_url = curr_status[1].strip() + else: + self.update_url = 'http://tribler.org/' + _curr_status = line1.split() + self.curr_version = _curr_status[0] + if self.newversion(self.curr_version, my_version): + return 1 + else: + return 0 + except: + print_exc() + return -1 + + def newversion(self, curr_version, my_version): + curr = curr_version.split('.') + my = my_version.split('.') + if len(my) >= len(curr): + nversion = len(my) + else: + nversion = len(curr) + for i in range(nversion): + if i < len(my): + my_v = int(my[i]) + else: + my_v = 0 + if i < len(curr): + curr_v = int(curr[i]) + else: + curr_v = 0 + if curr_v > my_v: + return True + elif curr_v < my_v: + return False + return False + +################################################################ +# +# Class: AboutMeDialog +# +# Display credits information about who has contributed to ABC +# along with what software modules it uses. +# +################################################################ +class AboutMeDialog(MyHtmlDialog): + def __init__(self, parent): + + self.parent = parent + self.utility = parent.utility + + bittornado_version = "0.3.13" + py2exe_version = "0.6.2" + nsis_version = "2.09" + + title = self.utility.lang.get('aboutabc') + +# # Start UI in Dialog +# ####################### +# +# btn = wx.Button(self, wx.ID_OK, " OK ") +# btn.SetDefault() +# +# color = self.GetBackgroundColour() +# bgcolor = "#%02x%02x%02x" % (color.Red(), color.Green(), color.Blue()) + + wx_version = "" + for v in wx.VERSION: + s = str(v)+"." + wx_version += s + wx_version = wx_version.strip(".") + + major, minor, micro, releaselevel, serial = sys.version_info + python_version = str(major) + "." + str(minor) + "." + str(micro) + + + content = "

" + \ + self.utility.lang.get('title') + " V" + self.utility.lang.get('version') + \ + "
" + \ + "" + \ + "

" + self.utility.lang.get('build_date') + "
" + \ + "

Tribler is an Internet TV application." + \ + "
It allows you to find, share, and consume videos." + \ + "
" + \ + "
Created by the Tribler P2P Research Team." + \ + "
This work is supported by various research grants from the Dutch Ministry of Economic Affairs, the Dutch Technology Foundation STW, and the European Union 6th and 7th Framework Programmes." + \ + "
" + \ + "

" + \ + "" + \ + "" + \ + "" + \ + "" + \ + "" + \ + "" + \ + "" + \ + "" + \ + "" + \ + "" + \ + "" + \ + "" + \ + "" + \ + "" + \ + ""\ + "" + \ + "
Contributors:Delft University of Technology
 " + \ + "(triblersoft@gmail.com)" + \ + "
Vrije Universiteit Amsterdam" + \ + "
Homepage:Tribler Homepage
Forums:Tribler Forums
" + self.utility.lang.get('translate') + "
" + \ + "

The system core is BitTornado " + bittornado_version + "" + \ + "
based on BitTorrent, coded by Bram Cohen" + \ + "
Plus the ABC code by Choopan Rattanapoka and Tim Tucker." + \ + "
Special Thanks:" + \ + "
NoirSoldats (noirsoldats@codemeu.com)" + \ + "
kratoak5" + \ + "
roee88" + \ + "
Greg Fleming (www.darkproject.com)" + \ + "
Pir4nhaX (www.clanyakuza.com)" + \ + "
Michel Hartmann (php4abc.i-networx.de)" + \ + "

Powered by Python " + python_version + ", " + \ + "wxPython " + wx_version + ", " + \ + "py2exe " + py2exe_version + ", " + \ + "NSIS " + nsis_version + "" + \ + "

Parts copyrighted (c) 2001-2002, Bram Cohen" + \ + "

Parts copyrighted (c) 2003-2004, Choopan Rattanapoka" + \ + "

Copyright (c) 2005-2008, Delft University of Technology and Vrije Universiteit Amsterdam" + \ + "" + + MyHtmlDialog.__init__(self, parent, title, content) diff --git a/tribler-mod/Tribler/Main/Dialogs/common.py b/tribler-mod/Tribler/Main/Dialogs/common.py new file mode 100644 index 0000000..4f91dc2 --- /dev/null +++ b/tribler-mod/Tribler/Main/Dialogs/common.py @@ -0,0 +1,245 @@ +from time import localtime, strftime +# Written by Jie Yang +# see LICENSE.txt for license information + +import wx +from ABC.GUI.list import ManagedList + +def sort_dictlist(dict_list, key, order='increase'): + + aux = [(dict_list[i][key], i) for i in xrange(len(dict_list))] + try: + aux.sort() + except UnicodeDecodeError: + # Arno: there are unicode strings and non-unicode strings in the data. + # One of the non-unicode strings contains data that cannot be + # decoded into a unicode string for comparison to the other unicode + # strings by the default 'ascii' codec. See + # http://downloads.egenix.com/python/Unicode-EPC2002-Talk.pdf + # + # This is a legacy problem, as the new code will store everything as + # unicode in the database. I therefore chose a dirty solution, don't + # sort + pass + if order == 'decrease' or order == 1: # 0 - increase, 1 - decrease + aux.reverse() + return [dict_list[i] for x, i in aux] + + +class CommonTriblerList(ManagedList): + """ + 0. Give a unique prefix + 1. IDs in rightalign and centeralign must be set in Utility.constants; + 2. Column labels must be set in the language file; + 3. To set default values, modify Utility.utility.setupConfig() + + WARNING: this constructor is called after the subclass already initialized + itself, so anything you do here will override the subclass, not initialize it. + """ + def __init__(self, parent, style, prefix, minid, maxid, exclude = [], rightalign = [], centeralign = []): + self.parent = parent + self.utility = parent.utility + self.prefix = prefix + ManagedList.__init__(self, parent, style, prefix, minid, maxid, exclude, rightalign, centeralign) + + self.data = [] + self.lastcolumnsorted, self.reversesort = self.columns.getSortedColumn() + self.info_dict = {} # use infohash as the key, used for update + self.num = self.getMaxNum() # max num of lines to show + self.curr_pos = -1 + + self.Bind(wx.EVT_LIST_ITEM_RIGHT_CLICK, self.OnRightClick) + self.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.OnActivated) + self.Bind(wx.EVT_LIST_COL_CLICK, self.OnColClick) + self.Bind(wx.EVT_KEY_DOWN, self.onKeyDown) + + # for search + self.Bind(wx.EVT_FIND, self.OnFind) + self.Bind(wx.EVT_FIND_NEXT, self.OnFind) + self.Bind(wx.EVT_FIND_CLOSE, self.OnFindClose) + + #self.loadList() + self.DeleteAllItems() + self.loading() + + def loading(self): # display "loading ..." 
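
The sort_dictlist helper above sorts a list of dicts on one key with the decorate-sort-undecorate idiom and deliberately gives up when mixed unicode and byte-string values cannot be compared. A minimal sketch of the same policy using key-based sorting; the function and parameter names here are illustrative and not part of the patch:

from operator import itemgetter

def sort_dictlist_sketch(dict_list, key, reverse=False):
    # Sort a list of dicts on dict[key]; if mixed unicode/byte-string
    # values turn out to be incomparable, fall back to the original
    # order, mirroring the try/except in sort_dictlist above.
    try:
        return sorted(dict_list, key=itemgetter(key), reverse=reverse)
    except (UnicodeDecodeError, TypeError):
        return list(dict_list)

For example, sort_dictlist_sketch(peers, 'name', reverse=True) corresponds to the 'decrease' ordering used by the list controls in this file.
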
+ self.InsertStringItem(0, self.utility.lang.get('loading')) + + def getMaxNum(self): + return self.utility.config.Read(self.prefix + "_num", "int") + + def OnRightClick(self, event): + print "right click", self.getSelectedItems() + + def onKeyDown(self, event): + keycode = event.GetKeyCode() + if event.CmdDown(): + if keycode == ord('a') or keycode == ord('A'): + # Select all files (CTRL-A) + self.selectAll() + elif keycode == ord('x') or keycode == ord('X'): + # Invert file selection (CTRL-X) + self.invertSelection() + elif keycode == ord('f') or keycode == ord('F'): + self.OnShowFind(event) + elif keycode == 399: + # Open right-click menu (windows menu key) + self.OnRightClick(event) + event.Skip() + + def OnShowFind(self, evt): + data = wx.FindReplaceData() + data.SetFlags(1) + dlg = wx.FindReplaceDialog(self, data, "Find") + dlg.data = data # save a reference to it... + dlg.Show(True) + + def OnFindClose(self, evt): + evt.GetDialog().Destroy() + + def OnFind(self, evt): +# if self.search_key not in self.keys: +# return + et = evt.GetEventType() + flag = evt.GetFlags() # 1: down, 2: mach whole word only, 4: match case, 6:4+2 + if not et in (wx.wxEVT_COMMAND_FIND, wx.wxEVT_COMMAND_FIND_NEXT): + return + if et == wx.wxEVT_COMMAND_FIND: + selected = self.getSelectedItems() + if selected: + self.curr_pos = selected[0] + else: + self.curr_pos = -1 + find_str = evt.GetFindString() + self.curr_pos = self.findAnItem(find_str, flag) + if self.curr_pos == -1: + dlg = wx.MessageDialog(self, 'Passed the end of the list!', + 'Search Stop', + wx.OK | wx.ICON_INFORMATION + ) + dlg.ShowModal() + dlg.Destroy() + pass + else: + #print "found", self.curr_pos + #item = self.GetItem(index) + self.SetItemState(self.curr_pos, wx.LIST_STATE_SELECTED, wx.LIST_STATE_SELECTED) + self.SetItemState(self.curr_pos, wx.LIST_STATE_FOCUSED, wx.LIST_STATE_FOCUSED) + + def findAnItem(self, find_str, flag): + def match(text, find_str, flag): + if flag&2: # mach whole word only + str_list = text.split() + else: + str_list = [text] + if not flag&4: # don't match case + find_str = find_str.lower() + for i in range(len(str_list)): + str_list[i] = str_list[i].lower() + for s in str_list: + if s.find(find_str) != -1: + return True + return False + + #print "find an item", find_str, flag, self.curr_pos + if flag&1: + begin = self.curr_pos+1 + end = len(self.data) + step = 1 + else: + if self.curr_pos == -1: + begin = len(self.data) -1 + else: + begin = self.curr_pos - 1 + end = -1 + step = -1 + datalist = range(begin, end, step) + #print "step:", begin, end, step, datalist + for row in datalist: + text = self.data[row][self.search_key] + text=text.replace('.', ' ') + text=text.replace('_', ' ') + text=text.replace('-', ' ') + if match(text, find_str, flag): + return row + return -1 # not found + + def getSelectedItems(self): + item = -1 + itemList = [] + while 1: + item = self.GetNextItem(item,wx.LIST_NEXT_ALL,wx.LIST_STATE_SELECTED) + if item == -1: + break + else: + itemList.append(item) + itemList.sort() + return itemList + + def OnActivated(self, event): + self.curr_idx = event.m_itemIndex + #print "actived", self.curr_idx + + def OnColClick(self, event): + col = event.m_col + active_columns = self.columns.active + if col >= len(active_columns) or col < 0: + return + else: + col = active_columns[col][0] # the real position + if self.lastcolumnsorted == col: + self.reversesort = 1 - self.reversesort + else: + self.reversesort = 0 + self.lastcolumnsorted = col + self.columns.writeSortedColumn(self.lastcolumnsorted, 
self.reversesort) + self.loadList(reload=False, sorted=True) + + def reloadData(self): + raise + + def getText(self, data, row, col): + raise + + def loadList(self, reload=True, sorted=True): + self.DeleteAllItems() + self.loading() + + active_columns = self.columns.active + if not active_columns: + return + + if reload: + self.reloadData() + + if sorted: + key = self.keys[self.lastcolumnsorted] + self.data = sort_dictlist(self.data, key, self.reversesort) + + num = len(self.data) + if self.num > 0 and self.num < num: + num = self.num + + first_col = active_columns[0][0] + # Delete the "Loading... entry before adding the real stuff + self.DeleteAllItems() + for i in xrange(num): + self.InsertStringItem(i, self.getText(self.data, i, first_col)) + for col,rank in active_columns[1:]: + txt = self.getText(self.data, i, col) + self.SetStringItem(i, rank, txt) + + self.Show(True) + +class MainWindow(wx.Frame): + def __init__(self,parent,id, title): + wx.Frame.__init__(self,parent,wx.ID_ANY,title, + style=wx.DEFAULT_FRAME_STYLE|wx.NO_FULL_REPAINT_ON_RESIZE) + self.control = CommonTriblerList(self, wx.Size(500, 100)) + self.Fit() + self.Show(True) + +if __name__ == '__main__': + app = wx.App() + frame=MainWindow(None,-1,'Demo') + app.MainLoop() diff --git a/tribler-mod/Tribler/Main/Dialogs/common.py.bak b/tribler-mod/Tribler/Main/Dialogs/common.py.bak new file mode 100644 index 0000000..23617d1 --- /dev/null +++ b/tribler-mod/Tribler/Main/Dialogs/common.py.bak @@ -0,0 +1,244 @@ +# Written by Jie Yang +# see LICENSE.txt for license information + +import wx +from ABC.GUI.list import ManagedList + +def sort_dictlist(dict_list, key, order='increase'): + + aux = [(dict_list[i][key], i) for i in xrange(len(dict_list))] + try: + aux.sort() + except UnicodeDecodeError: + # Arno: there are unicode strings and non-unicode strings in the data. + # One of the non-unicode strings contains data that cannot be + # decoded into a unicode string for comparison to the other unicode + # strings by the default 'ascii' codec. See + # http://downloads.egenix.com/python/Unicode-EPC2002-Talk.pdf + # + # This is a legacy problem, as the new code will store everything as + # unicode in the database. I therefore chose a dirty solution, don't + # sort + pass + if order == 'decrease' or order == 1: # 0 - increase, 1 - decrease + aux.reverse() + return [dict_list[i] for x, i in aux] + + +class CommonTriblerList(ManagedList): + """ + 0. Give a unique prefix + 1. IDs in rightalign and centeralign must be set in Utility.constants; + 2. Column labels must be set in the language file; + 3. To set default values, modify Utility.utility.setupConfig() + + WARNING: this constructor is called after the subclass already initialized + itself, so anything you do here will override the subclass, not initialize it. 
+ """ + def __init__(self, parent, style, prefix, minid, maxid, exclude = [], rightalign = [], centeralign = []): + self.parent = parent + self.utility = parent.utility + self.prefix = prefix + ManagedList.__init__(self, parent, style, prefix, minid, maxid, exclude, rightalign, centeralign) + + self.data = [] + self.lastcolumnsorted, self.reversesort = self.columns.getSortedColumn() + self.info_dict = {} # use infohash as the key, used for update + self.num = self.getMaxNum() # max num of lines to show + self.curr_pos = -1 + + self.Bind(wx.EVT_LIST_ITEM_RIGHT_CLICK, self.OnRightClick) + self.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.OnActivated) + self.Bind(wx.EVT_LIST_COL_CLICK, self.OnColClick) + self.Bind(wx.EVT_KEY_DOWN, self.onKeyDown) + + # for search + self.Bind(wx.EVT_FIND, self.OnFind) + self.Bind(wx.EVT_FIND_NEXT, self.OnFind) + self.Bind(wx.EVT_FIND_CLOSE, self.OnFindClose) + + #self.loadList() + self.DeleteAllItems() + self.loading() + + def loading(self): # display "loading ..." + self.InsertStringItem(0, self.utility.lang.get('loading')) + + def getMaxNum(self): + return self.utility.config.Read(self.prefix + "_num", "int") + + def OnRightClick(self, event): + print "right click", self.getSelectedItems() + + def onKeyDown(self, event): + keycode = event.GetKeyCode() + if event.CmdDown(): + if keycode == ord('a') or keycode == ord('A'): + # Select all files (CTRL-A) + self.selectAll() + elif keycode == ord('x') or keycode == ord('X'): + # Invert file selection (CTRL-X) + self.invertSelection() + elif keycode == ord('f') or keycode == ord('F'): + self.OnShowFind(event) + elif keycode == 399: + # Open right-click menu (windows menu key) + self.OnRightClick(event) + event.Skip() + + def OnShowFind(self, evt): + data = wx.FindReplaceData() + data.SetFlags(1) + dlg = wx.FindReplaceDialog(self, data, "Find") + dlg.data = data # save a reference to it... 
+ dlg.Show(True) + + def OnFindClose(self, evt): + evt.GetDialog().Destroy() + + def OnFind(self, evt): +# if self.search_key not in self.keys: +# return + et = evt.GetEventType() + flag = evt.GetFlags() # 1: down, 2: mach whole word only, 4: match case, 6:4+2 + if not et in (wx.wxEVT_COMMAND_FIND, wx.wxEVT_COMMAND_FIND_NEXT): + return + if et == wx.wxEVT_COMMAND_FIND: + selected = self.getSelectedItems() + if selected: + self.curr_pos = selected[0] + else: + self.curr_pos = -1 + find_str = evt.GetFindString() + self.curr_pos = self.findAnItem(find_str, flag) + if self.curr_pos == -1: + dlg = wx.MessageDialog(self, 'Passed the end of the list!', + 'Search Stop', + wx.OK | wx.ICON_INFORMATION + ) + dlg.ShowModal() + dlg.Destroy() + pass + else: + #print "found", self.curr_pos + #item = self.GetItem(index) + self.SetItemState(self.curr_pos, wx.LIST_STATE_SELECTED, wx.LIST_STATE_SELECTED) + self.SetItemState(self.curr_pos, wx.LIST_STATE_FOCUSED, wx.LIST_STATE_FOCUSED) + + def findAnItem(self, find_str, flag): + def match(text, find_str, flag): + if flag&2: # mach whole word only + str_list = text.split() + else: + str_list = [text] + if not flag&4: # don't match case + find_str = find_str.lower() + for i in range(len(str_list)): + str_list[i] = str_list[i].lower() + for s in str_list: + if s.find(find_str) != -1: + return True + return False + + #print "find an item", find_str, flag, self.curr_pos + if flag&1: + begin = self.curr_pos+1 + end = len(self.data) + step = 1 + else: + if self.curr_pos == -1: + begin = len(self.data) -1 + else: + begin = self.curr_pos - 1 + end = -1 + step = -1 + datalist = range(begin, end, step) + #print "step:", begin, end, step, datalist + for row in datalist: + text = self.data[row][self.search_key] + text=text.replace('.', ' ') + text=text.replace('_', ' ') + text=text.replace('-', ' ') + if match(text, find_str, flag): + return row + return -1 # not found + + def getSelectedItems(self): + item = -1 + itemList = [] + while 1: + item = self.GetNextItem(item,wx.LIST_NEXT_ALL,wx.LIST_STATE_SELECTED) + if item == -1: + break + else: + itemList.append(item) + itemList.sort() + return itemList + + def OnActivated(self, event): + self.curr_idx = event.m_itemIndex + #print "actived", self.curr_idx + + def OnColClick(self, event): + col = event.m_col + active_columns = self.columns.active + if col >= len(active_columns) or col < 0: + return + else: + col = active_columns[col][0] # the real position + if self.lastcolumnsorted == col: + self.reversesort = 1 - self.reversesort + else: + self.reversesort = 0 + self.lastcolumnsorted = col + self.columns.writeSortedColumn(self.lastcolumnsorted, self.reversesort) + self.loadList(reload=False, sorted=True) + + def reloadData(self): + raise + + def getText(self, data, row, col): + raise + + def loadList(self, reload=True, sorted=True): + self.DeleteAllItems() + self.loading() + + active_columns = self.columns.active + if not active_columns: + return + + if reload: + self.reloadData() + + if sorted: + key = self.keys[self.lastcolumnsorted] + self.data = sort_dictlist(self.data, key, self.reversesort) + + num = len(self.data) + if self.num > 0 and self.num < num: + num = self.num + + first_col = active_columns[0][0] + # Delete the "Loading... 
entry before adding the real stuff + self.DeleteAllItems() + for i in xrange(num): + self.InsertStringItem(i, self.getText(self.data, i, first_col)) + for col,rank in active_columns[1:]: + txt = self.getText(self.data, i, col) + self.SetStringItem(i, rank, txt) + + self.Show(True) + +class MainWindow(wx.Frame): + def __init__(self,parent,id, title): + wx.Frame.__init__(self,parent,wx.ID_ANY,title, + style=wx.DEFAULT_FRAME_STYLE|wx.NO_FULL_REPAINT_ON_RESIZE) + self.control = CommonTriblerList(self, wx.Size(500, 100)) + self.Fit() + self.Show(True) + +if __name__ == '__main__': + app = wx.App() + frame=MainWindow(None,-1,'Demo') + app.MainLoop() diff --git a/tribler-mod/Tribler/Main/Dialogs/dlhelperframe.py b/tribler-mod/Tribler/Main/Dialogs/dlhelperframe.py new file mode 100644 index 0000000..c93fb2e --- /dev/null +++ b/tribler-mod/Tribler/Main/Dialogs/dlhelperframe.py @@ -0,0 +1,379 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information + +import wx +import os +import sys +from traceback import print_exc +from threading import Lock + +from Tribler.Core.Utilities.utilities import show_permid_short +#from managefriends import createImageList +from Tribler.Main.vwxGUI.IconsManager import IconsManager +from Tribler.Core.Utilities.utilities import show_permid_shorter +from Tribler.Core.simpledefs import * + +DEBUG = True + +################################################################ +# +# Class: DownloadHelperPanel +# +# Panel for coordinating the help of friends in downloading +# a torrent +# +################################################################ + + +class DownloadHelperFrame(wx.Frame): + + def __init__(self,parent,utility,download_state): + self.utility = utility + wx.Frame.__init__(self, None, -1, self.utility.lang.get('tb_dlhelp_short'), + size=(640,520)) + + main_panel = wx.Panel(self) + self.downloadHelperPanel = self.createMainPanel(main_panel,download_state) + bot_box = self.createBottomBoxer(main_panel) + + mainbox = wx.BoxSizer(wx.VERTICAL) + mainbox.Add(self.downloadHelperPanel, 1, wx.EXPAND|wx.ALL, 5) + mainbox.Add(bot_box, 0, wx.ALIGN_CENTER_HORIZONTAL|wx.ALL, 5) + main_panel.SetSizer(mainbox) + + + iconpath = os.path.join(self.utility.session.get_tracker_favicon()) + # Giving it the whole bundle throws an exception about image 6 + self.icons = wx.IconBundle() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", iconpath + if iconpath: + self.icons.AddIconFromFile(iconpath,wx.BITMAP_TYPE_ICO) + self.SetIcons(self.icons) + + self.Bind(wx.EVT_CLOSE, self.OnCloseWindow) + self.Show() + + def createMainPanel(self,main_panel,download_state): + return DownloadHelperPanel(main_panel,self.utility,download_state) + + def createBottomBoxer(self, main_panel): + bot_box = wx.BoxSizer(wx.HORIZONTAL) + button = wx.Button(main_panel, -1, self.utility.lang.get('close'), style = wx.BU_EXACTFIT) + self.Bind(wx.EVT_BUTTON, self.OnCloseWindow, button) + bot_box.Add(button, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 3) + return bot_box + + + def OnCloseWindow(self, event = None): + self.Destroy() + + + +class DownloadHelperPanel(wx.Panel): + def __init__(self, parent, utility, download_state): + wx.Panel.__init__(self, parent, -1) + + self.utility = utility + self.download_state = download_state + self.lock = Lock() + + # If the torrent is stopped, don't allow helping + dlstopped = self.download_state is None or self.download_state.get_status() != DLSTATUS_DOWNLOADING + dlhelper = 
self.download_state.get_download().get_coopdl_role() == COOPDL_ROLE_HELPER + + if dlstopped or dlhelper: + if dlstopped: + msg = self.utility.lang.get('dlhelpdisabledstop') + elif dlhelper: + msg = self.utility.lang.get('dlhelpdisabledhelper') + mainbox = wx.BoxSizer(wx.VERTICAL) + mainbox.Add(wx.StaticText(self, -1, msg), 0, wx.EXPAND|wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + self.SetSizerAndFit(mainbox) + return + + # 0. Read friends from DB, and figure out who's already helping + # for this torrent + peer_db = self.utility.session.open_dbhandler(NTFY_PEERS) + friends = peer_db.getGUIPeers(category_name = 'friends', + sort = 'name', reverse = False + ) + helpingFriends = peer_db.getPeers(self.download_state.get_coopdl_helpers(), ['permid', 'name', 'ip']) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlhelperframe: friends is",[a['name'] for a in friends] + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlhelperframe: helping friends is",[a['name'] for a in helpingFriends] + + # 1. Create list of images of all friends + type = wx.LC_LIST + #type = wx.LC_REPORT + + imgList = None + if type != wx.LC_REPORT: + try: + #imgList = createImageList(self.utility,friends) + im = IconsManager.getInstance() + imgList = im.create_wxImageList(friends,setindex=True) + except: + print_exc() + # disable icons + type = wx.LC_REPORT + + # 2. Filter out friends already helping for left window + self.remainingFriends = [] + for index, friend in enumerate(friends): + + if friend['name'] == '': + friend['name']= 'peer %s' % show_permid_shorter(friend['permid']) + + flag = 0 + for helper in helpingFriends: + if friend['permid'] == helper['permid']: + helper['tempiconindex'] = index + flag = 1 + break + if flag: + continue + #friend['tempiconindex'] = index + self.remainingFriends.append(friend) + + + # 3. TODO: remove entries from helpingFriends that are no longer friends + + # 4. Build GUI + mainbox = wx.BoxSizer(wx.VERTICAL) + topbox = wx.BoxSizer(wx.HORIZONTAL) + botbox1 = wx.BoxSizer(wx.HORIZONTAL) + botbox2 = wx.BoxSizer(wx.HORIZONTAL) + + # 4a. Friends in left window + friendsbox = wx.BoxSizer(wx.VERTICAL) + friendsbox.Add(wx.StaticText(self, -1, self.utility.lang.get('availcandidates')), 0, wx.EXPAND|wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + self.leftListCtl = FriendList(self,self.remainingFriends,type,imgList) + #self.leftListCtl.SetToolTipString(self.utility.lang.get('multiannouncehelp')) + + friendsbox.Add(self.leftListCtl, 1, wx.EXPAND|wx.TOP, 5) + topbox.Add(friendsbox, 1, wx.EXPAND) + + # 4b. +/- buttons in between + operatorbox = wx.BoxSizer(wx.VERTICAL) + + button = wx.Button(self, -1, self.utility.lang.get('requestdlhelp'), style = wx.BU_EXACTFIT) + button.SetToolTipString(self.utility.lang.get('requestdlhelp_help')) + wx.EVT_BUTTON(self, button.GetId(), self.add_helper) + operatorbox.Add(button, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 3) + + button2 = wx.Button(self, -1, self.utility.lang.get('stopdlhelp'), style = wx.BU_EXACTFIT) + button2.SetToolTipString(self.utility.lang.get('stopdlhelp_help')) + wx.EVT_BUTTON(self, button2.GetId(), self.remove_helper) + operatorbox.Add(button2, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 3) + + topbox.Add(operatorbox, 0, wx.ALIGN_CENTER_VERTICAL) + + # 4c. 
Selected helpers in right window + helperbox = wx.BoxSizer(wx.VERTICAL) + helperbox.Add(wx.StaticText(self, -1, self.utility.lang.get('helpers')), 0, wx.EXPAND|wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + self.rightListCtl = FriendList(self,helpingFriends,type,imgList) + #self.rightListCtl.SetToolTipString(self.utility.lang.get('httpseedshelp')) + helperbox.Add(self.rightListCtl, 1, wx.EXPAND|wx.ALL, 5) + topbox.Add(helperbox, 1, wx.EXPAND) + + # Keep helpers up-to-date + #self.timer = wx.Timer(self) + #self.Bind(wx.EVT_TIMER, self.OnTimer) + #self.timer.Start(4000) + self.init_download_state_update() + + howtotext1 = wx.StaticText(self, -1, self.utility.lang.get('dlhelphowto1')) + howtotext1.Wrap(500) + botbox1.Add(howtotext1, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL|wx.EXPAND, 5) + + howtotext2 = wx.StaticText(self, -1, self.utility.lang.get('dlhelphowto2')) + howtotext2.Wrap(500) + botbox2.Add(howtotext2, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL|wx.EXPAND, 5) + + + # 5. Show GUI + mainbox.Add(botbox1, 0, wx.ALIGN_CENTER_HORIZONTAL|wx.ALL, 5) + mainbox.Add(topbox, 0, wx.EXPAND|wx.ALL) + mainbox.Add(botbox2, 0, wx.ALIGN_CENTER_HORIZONTAL|wx.ALL, 5) + + #self.SetSizerAndFit(mainbox) + self.SetSizer(mainbox) + self.SetAutoLayout(True) + + + def add_helper(self, event = None): + changed_list = self.add_friends(self.leftListCtl,self.rightListCtl) + self.make_it_so(True,changed_list) + + def remove_helper(self, event = None): + changed_list = self.add_friends(self.rightListCtl,self.leftListCtl) + self.make_it_so(False,changed_list) + + def add_friends(self,left,right): + item = -1 + itemList = [] + while 1: + item = left.GetNextItem(item,wx.LIST_NEXT_ALL,wx.LIST_STATE_SELECTED) + if item == -1: + break + else: + itemList.append(item) + if len(itemList) > 0: + friendsList = left.removeFriends(itemList) + right.addFriends(friendsList) + return friendsList + else: + return [] + + def make_it_so(self, add, changed_list): + helpingFriends = self.rightListCtl.getFriends() + remainingFriends = self.leftListCtl.getFriends() + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlhelperframe: before exec: remaining friends is",[a['name'] for a in remainingFriends] + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlhelperframe: before exec: helping friends is",[a['name'] for a in helpingFriends] + + self.lock.acquire() + self.download_state.get_download().stop_coopdl_helpers([a['permid'] for a in remainingFriends]) + self.download_state.get_download().ask_coopdl_helpers([a['permid'] for a in helpingFriends]) + self.lock.release() + + def OnActivated(self, event = None): + pass + + def OnTimer(self, event = None): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlhelperframe: ON TIMER" + + peer_db = self.utility.session.open_dbhandler(NTFY_PEERS) + self.lock.acquire() + realhelpers = peer_db.getPeers(self.download_state.get_coopdl_helpers(), ['permid', 'name', 'ip']) + self.lock.release() + shownhelpers = self.rightListCtl.getFriends() + + + removehelpers = [] + removeitems = [] + for i, shown in enumerate(shownhelpers): + found = False + for real in realhelpers: + if real['permid'] == shown['permid']: + found = True + break + if not found: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlhelperframe: Helper",shown['name'],show_permid_short(shown['permid']),"could not be reached apparently, removing" + removehelpers.append(shown) + removeitems.append(i) + self.rightListCtl.removeFriends(removeitems) + 
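
The OnTimer method above keeps the right-hand helper list honest by diffing what the UI shows against what the peer database reports, keyed on permid. That reconciliation can be expressed as a small helper; a sketch under the assumption that both lists are lists of dicts carrying a 'permid' entry (the helper name is illustrative):

def stale_rows(shown, real, key='permid'):
    # Return (row_index, entry) pairs for UI entries whose key no
    # longer appears in the authoritative list, lowest index first.
    real_keys = set(entry[key] for entry in real)
    return [(i, entry) for i, entry in enumerate(shown)
            if entry[key] not in real_keys]

OnTimer then feeds the indices to removeFriends and hands the removed entries back to the left-hand list via addFriends, which is exactly what the surrounding code does with removeitems and removehelpers.
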
self.leftListCtl.addFriends(removehelpers) + + def init_download_state_update(self): + dl = self.download_state.get_download() + dl.set_state_callback(self.download_state_update) + + def download_state_update(self, ds): + try: + self.lock.acquire() + self.download_state = ds + self.lock.release() + wx.CallAfter(self.OnTimer) + if not self.IsShown(): # do not call me again + return -1, False + else: + return 4.0, False + except wx.PyDeadObjectError: + return -1, False + + +################################################################ +# +# Class: FriendList +# +# ListCtrl for managing friends +# +################################################################ +class FriendList(wx.ListCtrl): + def __init__(self, parent, friends, type, imgList): + + self.type = type + self.imgList = imgList + style = wx.VSCROLL|wx.SIMPLE_BORDER|self.type|wx.LC_VRULES|wx.CLIP_CHILDREN + if (sys.platform == 'win32'): + style |= wx.LC_ALIGN_TOP + wx.ListCtrl.__init__(self, parent, -1, style=style) + self.SetMinSize(wx.Size(200, 300)) + + self.parent = parent + self.friends = friends + self.utility = parent.utility + + self.SetImageList(imgList,wx.IMAGE_LIST_SMALL) + self.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.OnActivated) + self.loadList() + + def loadList(self): + if self.type == wx.LC_REPORT: + try: # get system font width + fw = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT).GetPointSize()+1 + except: + fw = wx.SystemSettings_GetFont(wx.SYS_SYSTEM_FONT).GetPointSize()+1 + + self.InsertColumn(0, self.utility.lang.get('name'), format=wx.LIST_FORMAT_CENTER, width=fw*6) + + self.updateAll() + self.Show(True) + + def updateAll(self): + self.DeleteAllItems() + i = 0; + for friend in self.friends: + self.addItem(i,friend) + i += 1 + + def OnActivated(self, event): + self.parent.OnActivated(event) + + def addItem(self,i,friend): + if self.type != wx.LC_REPORT: + label = friend['name'] + if not label: + label = friend['ip'] + self.InsertImageStringItem(i,label,friend['tempiconindex']) + else: + self.InsertStringItem(i, friend['name']) + + def removeFriends(self,itemList): + # Assumption: friends in list are in insert-order, i.e., not sorted afterwards! 
+ friendList = [] + # Make sure item ids stay the same during delete + itemList.sort() + itemList.reverse() + for item in itemList: + friend = self.friends[item] + friendList.append(friend) + del self.friends[item] + self.DeleteItem(item) + return friendList + + def addFriends(self,friendList): + flag = 0 + i = self.GetItemCount() + for friend in friendList: + for chum in self.friends: + if friend['permid'] == chum['permid']: # here we must use 'permid' to distinguish + flag = 1 + break + if flag: + continue + self.friends.append(friend) + self.addItem(i,friend) + i += 1 + + def getFriends(self): + return self.friends + diff --git a/tribler-mod/Tribler/Main/Dialogs/dlhelperframe.py.bak b/tribler-mod/Tribler/Main/Dialogs/dlhelperframe.py.bak new file mode 100644 index 0000000..d92067b --- /dev/null +++ b/tribler-mod/Tribler/Main/Dialogs/dlhelperframe.py.bak @@ -0,0 +1,378 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information + +import wx +import os +import sys +from traceback import print_exc +from threading import Lock + +from Tribler.Core.Utilities.utilities import show_permid_short +#from managefriends import createImageList +from Tribler.Main.vwxGUI.IconsManager import IconsManager +from Tribler.Core.Utilities.utilities import show_permid_shorter +from Tribler.Core.simpledefs import * + +DEBUG = True + +################################################################ +# +# Class: DownloadHelperPanel +# +# Panel for coordinating the help of friends in downloading +# a torrent +# +################################################################ + + +class DownloadHelperFrame(wx.Frame): + + def __init__(self,parent,utility,download_state): + self.utility = utility + wx.Frame.__init__(self, None, -1, self.utility.lang.get('tb_dlhelp_short'), + size=(640,520)) + + main_panel = wx.Panel(self) + self.downloadHelperPanel = self.createMainPanel(main_panel,download_state) + bot_box = self.createBottomBoxer(main_panel) + + mainbox = wx.BoxSizer(wx.VERTICAL) + mainbox.Add(self.downloadHelperPanel, 1, wx.EXPAND|wx.ALL, 5) + mainbox.Add(bot_box, 0, wx.ALIGN_CENTER_HORIZONTAL|wx.ALL, 5) + main_panel.SetSizer(mainbox) + + + iconpath = os.path.join(self.utility.session.get_tracker_favicon()) + # Giving it the whole bundle throws an exception about image 6 + self.icons = wx.IconBundle() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", iconpath + if iconpath: + self.icons.AddIconFromFile(iconpath,wx.BITMAP_TYPE_ICO) + self.SetIcons(self.icons) + + self.Bind(wx.EVT_CLOSE, self.OnCloseWindow) + self.Show() + + def createMainPanel(self,main_panel,download_state): + return DownloadHelperPanel(main_panel,self.utility,download_state) + + def createBottomBoxer(self, main_panel): + bot_box = wx.BoxSizer(wx.HORIZONTAL) + button = wx.Button(main_panel, -1, self.utility.lang.get('close'), style = wx.BU_EXACTFIT) + self.Bind(wx.EVT_BUTTON, self.OnCloseWindow, button) + bot_box.Add(button, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 3) + return bot_box + + + def OnCloseWindow(self, event = None): + self.Destroy() + + + +class DownloadHelperPanel(wx.Panel): + def __init__(self, parent, utility, download_state): + wx.Panel.__init__(self, parent, -1) + + self.utility = utility + self.download_state = download_state + self.lock = Lock() + + # If the torrent is stopped, don't allow helping + dlstopped = self.download_state is None or self.download_state.get_status() != DLSTATUS_DOWNLOADING + dlhelper = self.download_state.get_download().get_coopdl_role() == COOPDL_ROLE_HELPER + + if dlstopped 
or dlhelper: + if dlstopped: + msg = self.utility.lang.get('dlhelpdisabledstop') + elif dlhelper: + msg = self.utility.lang.get('dlhelpdisabledhelper') + mainbox = wx.BoxSizer(wx.VERTICAL) + mainbox.Add(wx.StaticText(self, -1, msg), 0, wx.EXPAND|wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + self.SetSizerAndFit(mainbox) + return + + # 0. Read friends from DB, and figure out who's already helping + # for this torrent + peer_db = self.utility.session.open_dbhandler(NTFY_PEERS) + friends = peer_db.getGUIPeers(category_name = 'friends', + sort = 'name', reverse = False + ) + helpingFriends = peer_db.getPeers(self.download_state.get_coopdl_helpers(), ['permid', 'name', 'ip']) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlhelperframe: friends is",[a['name'] for a in friends] + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlhelperframe: helping friends is",[a['name'] for a in helpingFriends] + + # 1. Create list of images of all friends + type = wx.LC_LIST + #type = wx.LC_REPORT + + imgList = None + if type != wx.LC_REPORT: + try: + #imgList = createImageList(self.utility,friends) + im = IconsManager.getInstance() + imgList = im.create_wxImageList(friends,setindex=True) + except: + print_exc() + # disable icons + type = wx.LC_REPORT + + # 2. Filter out friends already helping for left window + self.remainingFriends = [] + for index, friend in enumerate(friends): + + if friend['name'] == '': + friend['name']= 'peer %s' % show_permid_shorter(friend['permid']) + + flag = 0 + for helper in helpingFriends: + if friend['permid'] == helper['permid']: + helper['tempiconindex'] = index + flag = 1 + break + if flag: + continue + #friend['tempiconindex'] = index + self.remainingFriends.append(friend) + + + # 3. TODO: remove entries from helpingFriends that are no longer friends + + # 4. Build GUI + mainbox = wx.BoxSizer(wx.VERTICAL) + topbox = wx.BoxSizer(wx.HORIZONTAL) + botbox1 = wx.BoxSizer(wx.HORIZONTAL) + botbox2 = wx.BoxSizer(wx.HORIZONTAL) + + # 4a. Friends in left window + friendsbox = wx.BoxSizer(wx.VERTICAL) + friendsbox.Add(wx.StaticText(self, -1, self.utility.lang.get('availcandidates')), 0, wx.EXPAND|wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + self.leftListCtl = FriendList(self,self.remainingFriends,type,imgList) + #self.leftListCtl.SetToolTipString(self.utility.lang.get('multiannouncehelp')) + + friendsbox.Add(self.leftListCtl, 1, wx.EXPAND|wx.TOP, 5) + topbox.Add(friendsbox, 1, wx.EXPAND) + + # 4b. +/- buttons in between + operatorbox = wx.BoxSizer(wx.VERTICAL) + + button = wx.Button(self, -1, self.utility.lang.get('requestdlhelp'), style = wx.BU_EXACTFIT) + button.SetToolTipString(self.utility.lang.get('requestdlhelp_help')) + wx.EVT_BUTTON(self, button.GetId(), self.add_helper) + operatorbox.Add(button, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 3) + + button2 = wx.Button(self, -1, self.utility.lang.get('stopdlhelp'), style = wx.BU_EXACTFIT) + button2.SetToolTipString(self.utility.lang.get('stopdlhelp_help')) + wx.EVT_BUTTON(self, button2.GetId(), self.remove_helper) + operatorbox.Add(button2, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 3) + + topbox.Add(operatorbox, 0, wx.ALIGN_CENTER_VERTICAL) + + # 4c. 
Selected helpers in right window + helperbox = wx.BoxSizer(wx.VERTICAL) + helperbox.Add(wx.StaticText(self, -1, self.utility.lang.get('helpers')), 0, wx.EXPAND|wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + self.rightListCtl = FriendList(self,helpingFriends,type,imgList) + #self.rightListCtl.SetToolTipString(self.utility.lang.get('httpseedshelp')) + helperbox.Add(self.rightListCtl, 1, wx.EXPAND|wx.ALL, 5) + topbox.Add(helperbox, 1, wx.EXPAND) + + # Keep helpers up-to-date + #self.timer = wx.Timer(self) + #self.Bind(wx.EVT_TIMER, self.OnTimer) + #self.timer.Start(4000) + self.init_download_state_update() + + howtotext1 = wx.StaticText(self, -1, self.utility.lang.get('dlhelphowto1')) + howtotext1.Wrap(500) + botbox1.Add(howtotext1, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL|wx.EXPAND, 5) + + howtotext2 = wx.StaticText(self, -1, self.utility.lang.get('dlhelphowto2')) + howtotext2.Wrap(500) + botbox2.Add(howtotext2, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL|wx.EXPAND, 5) + + + # 5. Show GUI + mainbox.Add(botbox1, 0, wx.ALIGN_CENTER_HORIZONTAL|wx.ALL, 5) + mainbox.Add(topbox, 0, wx.EXPAND|wx.ALL) + mainbox.Add(botbox2, 0, wx.ALIGN_CENTER_HORIZONTAL|wx.ALL, 5) + + #self.SetSizerAndFit(mainbox) + self.SetSizer(mainbox) + self.SetAutoLayout(True) + + + def add_helper(self, event = None): + changed_list = self.add_friends(self.leftListCtl,self.rightListCtl) + self.make_it_so(True,changed_list) + + def remove_helper(self, event = None): + changed_list = self.add_friends(self.rightListCtl,self.leftListCtl) + self.make_it_so(False,changed_list) + + def add_friends(self,left,right): + item = -1 + itemList = [] + while 1: + item = left.GetNextItem(item,wx.LIST_NEXT_ALL,wx.LIST_STATE_SELECTED) + if item == -1: + break + else: + itemList.append(item) + if len(itemList) > 0: + friendsList = left.removeFriends(itemList) + right.addFriends(friendsList) + return friendsList + else: + return [] + + def make_it_so(self, add, changed_list): + helpingFriends = self.rightListCtl.getFriends() + remainingFriends = self.leftListCtl.getFriends() + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlhelperframe: before exec: remaining friends is",[a['name'] for a in remainingFriends] + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlhelperframe: before exec: helping friends is",[a['name'] for a in helpingFriends] + + self.lock.acquire() + self.download_state.get_download().stop_coopdl_helpers([a['permid'] for a in remainingFriends]) + self.download_state.get_download().ask_coopdl_helpers([a['permid'] for a in helpingFriends]) + self.lock.release() + + def OnActivated(self, event = None): + pass + + def OnTimer(self, event = None): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlhelperframe: ON TIMER" + + peer_db = self.utility.session.open_dbhandler(NTFY_PEERS) + self.lock.acquire() + realhelpers = peer_db.getPeers(self.download_state.get_coopdl_helpers(), ['permid', 'name', 'ip']) + self.lock.release() + shownhelpers = self.rightListCtl.getFriends() + + + removehelpers = [] + removeitems = [] + for i, shown in enumerate(shownhelpers): + found = False + for real in realhelpers: + if real['permid'] == shown['permid']: + found = True + break + if not found: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","dlhelperframe: Helper",shown['name'],show_permid_short(shown['permid']),"could not be reached apparently, removing" + removehelpers.append(shown) + removeitems.append(i) + self.rightListCtl.removeFriends(removeitems) + 
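
The download_state_update method defined a little further down in this file (and in the copy above) relies on the convention, visible in its return values, that a callback registered with set_state_callback returns a pair: a positive first element asks to be called again after roughly that many seconds, -1 stops the callbacks, and the second element is simply returned as False here. A minimal sketch of a polling callback under that assumed convention; the refresh parameter is an illustrative stand-in for the GUI update:

import wx

def poll_download_state(ds, window, refresh):
    # Hand the fresh DownloadState to the GUI thread, then either
    # reschedule in ~4 seconds or stop (-1) once the window is gone,
    # mirroring download_state_update below.
    wx.CallAfter(refresh, ds)
    if not window.IsShown():
        return -1, False
    return 4.0, False
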
self.leftListCtl.addFriends(removehelpers) + + def init_download_state_update(self): + dl = self.download_state.get_download() + dl.set_state_callback(self.download_state_update) + + def download_state_update(self, ds): + try: + self.lock.acquire() + self.download_state = ds + self.lock.release() + wx.CallAfter(self.OnTimer) + if not self.IsShown(): # do not call me again + return -1, False + else: + return 4.0, False + except wx.PyDeadObjectError: + return -1, False + + +################################################################ +# +# Class: FriendList +# +# ListCtrl for managing friends +# +################################################################ +class FriendList(wx.ListCtrl): + def __init__(self, parent, friends, type, imgList): + + self.type = type + self.imgList = imgList + style = wx.VSCROLL|wx.SIMPLE_BORDER|self.type|wx.LC_VRULES|wx.CLIP_CHILDREN + if (sys.platform == 'win32'): + style |= wx.LC_ALIGN_TOP + wx.ListCtrl.__init__(self, parent, -1, style=style) + self.SetMinSize(wx.Size(200, 300)) + + self.parent = parent + self.friends = friends + self.utility = parent.utility + + self.SetImageList(imgList,wx.IMAGE_LIST_SMALL) + self.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.OnActivated) + self.loadList() + + def loadList(self): + if self.type == wx.LC_REPORT: + try: # get system font width + fw = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT).GetPointSize()+1 + except: + fw = wx.SystemSettings_GetFont(wx.SYS_SYSTEM_FONT).GetPointSize()+1 + + self.InsertColumn(0, self.utility.lang.get('name'), format=wx.LIST_FORMAT_CENTER, width=fw*6) + + self.updateAll() + self.Show(True) + + def updateAll(self): + self.DeleteAllItems() + i = 0; + for friend in self.friends: + self.addItem(i,friend) + i += 1 + + def OnActivated(self, event): + self.parent.OnActivated(event) + + def addItem(self,i,friend): + if self.type != wx.LC_REPORT: + label = friend['name'] + if not label: + label = friend['ip'] + self.InsertImageStringItem(i,label,friend['tempiconindex']) + else: + self.InsertStringItem(i, friend['name']) + + def removeFriends(self,itemList): + # Assumption: friends in list are in insert-order, i.e., not sorted afterwards! 
+ friendList = [] + # Make sure item ids stay the same during delete + itemList.sort() + itemList.reverse() + for item in itemList: + friend = self.friends[item] + friendList.append(friend) + del self.friends[item] + self.DeleteItem(item) + return friendList + + def addFriends(self,friendList): + flag = 0 + i = self.GetItemCount() + for friend in friendList: + for chum in self.friends: + if friend['permid'] == chum['permid']: # here we must use 'permid' to distinguish + flag = 1 + break + if flag: + continue + self.friends.append(friend) + self.addItem(i,friend) + i += 1 + + def getFriends(self): + return self.friends + diff --git a/tribler-mod/Tribler/Main/Dialogs/makefriends.py b/tribler-mod/Tribler/Main/Dialogs/makefriends.py new file mode 100644 index 0000000..e54db66 --- /dev/null +++ b/tribler-mod/Tribler/Main/Dialogs/makefriends.py @@ -0,0 +1,269 @@ +from time import localtime, strftime +# Written by Jie Yang, Arno Bakker +# see LICENSE.txt for license information + +import os +import base64 +from traceback import print_exc +import wx +import wx.lib.imagebrowser as ib + +from Tribler.Core.simpledefs import * +from Tribler.Core.Utilities.utilities import show_permid +import wx.lib.editor as editor + +DEBUG = False + + +class InviteFriendsDialog(wx.Frame): + def __init__(self, text): + size = wx.Size(600, 250) + wx.Frame.__init__(self, None, -1, size=size) + win = wx.Panel(self, -1) + ed = editor.Editor(win, -1, style=wx.SUNKEN_BORDER) + box = wx.BoxSizer(wx.VERTICAL) + box.Add(ed, 1, wx.ALL|wx.GROW, 1) + win.SetSizer(box) + win.SetAutoLayout(True) + + ed.SetText(text) + self.Show() + #TODO: add two buttons: copy to clipboard, close + +class MakeFriendsDialog(wx.Dialog): + def __init__(self, parent, utility, editfriend = None): + #provider = wx.SimpleHelpProvider() + #wx.HelpProvider_Set(provider) + + self.utility = utility + self.editfriend = editfriend + + style = wx.DEFAULT_DIALOG_STYLE + #| wx.RESIZE_BORDER + pos = wx.DefaultPosition + size = wx.Size(600, 200) + #size, split = self.getWindowSettings() + + if editfriend is None: + title = self.utility.lang.get('addfriend') + else: + title = self.utility.lang.get('editfriend') + wx.Dialog.__init__(self, parent, -1, title, size = size, style = style) + pre = wx.PreDialog() + pre.SetExtraStyle(wx.DIALOG_EX_CONTEXTHELP) + pre.Create(parent, -1, title, pos, size, style) + self.PostCreate(pre) + + sizer = wx.BoxSizer(wx.VERTICAL) + + label = wx.StaticText(self, -1, title) + sizer.Add(label, 0, wx.ALIGN_CENTRE|wx.ALL, 5) + + # name + box = wx.BoxSizer(wx.HORIZONTAL) + + label = wx.StaticText(self, -1, self.utility.lang.get('name')+':',wx.DefaultPosition,wx.Size(40,18)) + #label.SetHelpText("") + box.Add(label, 0, wx.ALIGN_CENTRE|wx.ALL, 5) + + if editfriend is not None: + name = editfriend['name'] + else: + name = '' + self.name_text = wx.TextCtrl(self, -1, name, size=(140,-1)) + ##self.name_text.SetHelpText(self.utility.lang.get('nickname_help')) + box.Add(self.name_text, 0, wx.ALIGN_CENTRE|wx.ALL, 5) + + sizer.Add(box, 0, wx.GROW|wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + # text about e-mail invitation + label = wx.StaticText(self, -1, self.utility.lang.get('pasteinvitationemail'),wx.DefaultPosition) + label.Wrap( 500 ) + sizer.Add(label, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + # ip + box = wx.BoxSizer(wx.HORIZONTAL) + + label = wx.StaticText(self, -1, self.utility.lang.get('ipaddress')+':',wx.DefaultPosition,wx.Size(40,18)) + #label.SetHelpText("") + box.Add(label, 0, wx.ALIGN_CENTRE|wx.ALL, 5) + + if editfriend is not None: + ip = 
editfriend['ip'] + else: + ip = '' + self.ip_text = wx.TextCtrl(self, -1, ip, size=(140,-1)) + ##self.ip_text.SetHelpText(self.utility.lang.get('friendsipaddr_help')) + box.Add(self.ip_text, 0, wx.ALIGN_CENTRE|wx.ALL, 5) + + sizer.Add(box, 0, wx.GROW|wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT|wx.BOTTOM, 5) + + # port + box = wx.BoxSizer(wx.HORIZONTAL) + + label = wx.StaticText(self, -1, self.utility.lang.get('portnumber'),wx.DefaultPosition,wx.Size(40,18)) + #label.SetHelpText("") + box.Add(label, 0, wx.ALIGN_CENTRE|wx.ALL, 5) + + if editfriend is not None: + port_str = str(editfriend['port']) + else: + port_str = '' + self.port_text = wx.TextCtrl(self, -1, port_str, size=(140,-1)) + ##self.port_text.SetHelpText(self.utility.lang.get('friendsport_help')) + box.Add(self.port_text, 0, wx.ALIGN_CENTRE|wx.ALL, 5) + + sizer.Add(box, 0, wx.GROW|wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT|wx.BOTTOM, 5) + + # permid + box = wx.BoxSizer(wx.HORIZONTAL) + + label = wx.StaticText(self, -1, self.utility.lang.get('permid')+':',wx.DefaultPosition,wx.Size(40,18)) + #label.SetHelpText("") + box.Add(label, 0, wx.ALIGN_CENTRE|wx.ALL, 5) + + if editfriend is not None: + permid = show_permid(editfriend['permid']) + else: + permid = '' + self.permid_text = wx.TextCtrl(self, -1, permid, size=(300,-1)) + ## self.permid_text.SetHelpText(self.utility.lang.get('friendspermid_help')) + box.Add(self.permid_text, 0, wx.ALIGN_CENTRE|wx.ALL, 5) + + sizer.Add(box, 0, wx.GROW|wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + """ + # picture + box = wx.BoxSizer(wx.HORIZONTAL) + + label = wx.StaticText(self, -1, self.utility.lang.get('icon')) + #label.SetHelpText("") + box.Add(label, 0, wx.ALIGN_CENTRE|wx.ALL, 5) + + if editfriend is not None and editfriend.has_key('icon'): + icon = str(editfriend['icon']) + else: + icon = '' + self.icon_path = wx.TextCtrl(self, -1, icon, size=(80,-1)) + ## self.icon_path.SetHelpText(self.utility.lang.get('friendsicon_help')) + box.Add(self.icon_path, 3, wx.ALIGN_CENTRE|wx.ALL, 5) + + iconbtn = wx.Button(self, -1, label=self.utility.lang.get('browsebtn')) + box.Add(iconbtn, 1, wx.ALIGN_CENTRE|wx.ALL, 5) + self.Bind(wx.EVT_BUTTON, self.OnIconButton, iconbtn) + + sizer.Add(box, 0, wx.GROW|wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + """ + + box = wx.BoxSizer(wx.HORIZONTAL) + line = wx.StaticLine(self, -1, size=(20,-1), style=wx.LI_HORIZONTAL) + sizer.Add(line, 0, wx.GROW|wx.ALIGN_CENTER_VERTICAL|wx.RIGHT|wx.TOP, 5) + btnsizer = wx.StdDialogButtonSizer() + + ##if (sys.platform != 'win32'): + ## btn = wx.ContextHelpButton(self) + ## btnsizer.AddButton(btn) + + if editfriend is None: + lbl = self.utility.lang.get('buttons_add') + else: + lbl = self.utility.lang.get('buttons_update') + btn = wx.Button(self, wx.ID_OK, label=lbl) + btn.SetDefault() + btnsizer.AddButton(btn) + self.Bind(wx.EVT_BUTTON, self.OnAddEditFriend, btn) + + btn = wx.Button(self, wx.ID_CANCEL) + btnsizer.AddButton(btn) + btnsizer.Realize() + + #sizer.Add(btnsizer, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + sizer.Add(btnsizer, 0, wx.ALIGN_CENTER|wx.ALL, 5) + + + + + self.SetSizer(sizer) + sizer.Fit(self) + + def OnAddEditFriend(self, event): + name = self.name_text.GetValue() + ip = str(self.ip_text.GetValue()) + b64permid = str(self.permid_text.GetValue()) + try: + permid = base64.decodestring( b64permid+'\n' ) + except: + print_exc() + permid = '' + #icon = self.icon_path.GetValue() + try: + port = int(self.port_text.GetValue()) + except: + port = 0 + + if len(name) == 0: + self.show_inputerror(self.utility.lang.get('nicknameempty_error')) + elif 
len(permid) == 0: + self.show_inputerror(self.utility.lang.get('friendspermid_error')) + elif port == 0: + self.show_inputerror(self.utility.lang.get('friendsport_error')) + else: + fdb = self.utility.session.open_dbhandler(NTFY_FRIENDS) + pdb = self.utility.session.open_dbhandler(NTFY_PEERS) + + #friend = {'permid':permid, 'ip':ip, 'port':port, 'name':name, 'icon':newiconfilename} + #friend = {'permid':permid, 'ip':ip, 'port':port, 'name':name} + friend = {'ip':ip, 'port':port, 'name':name} + if self.editfriend is not None: + if self.editfriend['permid'] != permid: + fdb.deleteFriend(self.editfriend['permid']) + pdb.deletePeer(self.editfriend['permid']) + + #fdb.addExternalFriend(friend) + pdb.addPeer(permid,friend) + #fdb.setFriend(permid) + # also sets friendstate to I_INVITED + self.utility.session.send_friendship_message(permid,F_REQUEST_MSG) + + event.Skip() # must be done, otherwise ShowModal() returns wrong error + self.Destroy() + + """ + elif icon != '' and not os.path.exists(icon): + self.show_inputerror(self.utility.lang.get('fiendsiconnotfound_error')) + else: + newiconfilename = '' + if icon != '': + ret = self.mm.create_from_file(permid,icon) + if not ret: + self.show_inputerror(self.utility.lang.get('friendsiconnotbmp_error')) + return + """ + + + def OnIconButton(self, evt): + # get current working directory + # TODO: record the last opened path in config file + try: + path = os.path.join(os.getcwd(), 'icons') + path = os.path.join(path, 'mugshots') + except Exception, msg: + path = '' + + # open the image browser dialog + dlg = ib.ImageDialog(self, path) + + dlg.Centre() + + if dlg.ShowModal() == wx.ID_OK: + self.icon_path.SetValue(dlg.GetFile()) + else: + pass + + dlg.Destroy() + + def show_inputerror(self,txt): + dlg = wx.MessageDialog(self, txt, 'Invalid Input', wx.OK | wx.ICON_INFORMATION) + dlg.ShowModal() + dlg.Destroy() + diff --git a/tribler-mod/Tribler/Main/Dialogs/makefriends.py.bak b/tribler-mod/Tribler/Main/Dialogs/makefriends.py.bak new file mode 100644 index 0000000..08b8111 --- /dev/null +++ b/tribler-mod/Tribler/Main/Dialogs/makefriends.py.bak @@ -0,0 +1,268 @@ +# Written by Jie Yang, Arno Bakker +# see LICENSE.txt for license information + +import os +import base64 +from traceback import print_exc +import wx +import wx.lib.imagebrowser as ib + +from Tribler.Core.simpledefs import * +from Tribler.Core.Utilities.utilities import show_permid +import wx.lib.editor as editor + +DEBUG = False + + +class InviteFriendsDialog(wx.Frame): + def __init__(self, text): + size = wx.Size(600, 250) + wx.Frame.__init__(self, None, -1, size=size) + win = wx.Panel(self, -1) + ed = editor.Editor(win, -1, style=wx.SUNKEN_BORDER) + box = wx.BoxSizer(wx.VERTICAL) + box.Add(ed, 1, wx.ALL|wx.GROW, 1) + win.SetSizer(box) + win.SetAutoLayout(True) + + ed.SetText(text) + self.Show() + #TODO: add two buttons: copy to clipboard, close + +class MakeFriendsDialog(wx.Dialog): + def __init__(self, parent, utility, editfriend = None): + #provider = wx.SimpleHelpProvider() + #wx.HelpProvider_Set(provider) + + self.utility = utility + self.editfriend = editfriend + + style = wx.DEFAULT_DIALOG_STYLE + #| wx.RESIZE_BORDER + pos = wx.DefaultPosition + size = wx.Size(600, 200) + #size, split = self.getWindowSettings() + + if editfriend is None: + title = self.utility.lang.get('addfriend') + else: + title = self.utility.lang.get('editfriend') + wx.Dialog.__init__(self, parent, -1, title, size = size, style = style) + pre = wx.PreDialog() + pre.SetExtraStyle(wx.DIALOG_EX_CONTEXTHELP) + 
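
OnAddEditFriend above takes the friend's PermID as base64 text pasted from an invitation e-mail, decodes it, and treats a decode failure or an empty result as invalid input. A sketch of that validation step using base64.b64decode; the function name is illustrative, and the real dialog also checks the nickname and port before touching the database:

import base64

def decode_permid(b64permid):
    # Decode a pasted base64 PermID; return the raw key bytes, or
    # None when the text is empty or not valid base64, so the caller
    # can show the 'friendspermid_error' message as the dialog does.
    try:
        permid = base64.b64decode(b64permid.strip())
    except (TypeError, ValueError):
        return None
    return permid or None
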
pre.Create(parent, -1, title, pos, size, style) + self.PostCreate(pre) + + sizer = wx.BoxSizer(wx.VERTICAL) + + label = wx.StaticText(self, -1, title) + sizer.Add(label, 0, wx.ALIGN_CENTRE|wx.ALL, 5) + + # name + box = wx.BoxSizer(wx.HORIZONTAL) + + label = wx.StaticText(self, -1, self.utility.lang.get('name')+':',wx.DefaultPosition,wx.Size(40,18)) + #label.SetHelpText("") + box.Add(label, 0, wx.ALIGN_CENTRE|wx.ALL, 5) + + if editfriend is not None: + name = editfriend['name'] + else: + name = '' + self.name_text = wx.TextCtrl(self, -1, name, size=(140,-1)) + ##self.name_text.SetHelpText(self.utility.lang.get('nickname_help')) + box.Add(self.name_text, 0, wx.ALIGN_CENTRE|wx.ALL, 5) + + sizer.Add(box, 0, wx.GROW|wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + # text about e-mail invitation + label = wx.StaticText(self, -1, self.utility.lang.get('pasteinvitationemail'),wx.DefaultPosition) + label.Wrap( 500 ) + sizer.Add(label, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + # ip + box = wx.BoxSizer(wx.HORIZONTAL) + + label = wx.StaticText(self, -1, self.utility.lang.get('ipaddress')+':',wx.DefaultPosition,wx.Size(40,18)) + #label.SetHelpText("") + box.Add(label, 0, wx.ALIGN_CENTRE|wx.ALL, 5) + + if editfriend is not None: + ip = editfriend['ip'] + else: + ip = '' + self.ip_text = wx.TextCtrl(self, -1, ip, size=(140,-1)) + ##self.ip_text.SetHelpText(self.utility.lang.get('friendsipaddr_help')) + box.Add(self.ip_text, 0, wx.ALIGN_CENTRE|wx.ALL, 5) + + sizer.Add(box, 0, wx.GROW|wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT|wx.BOTTOM, 5) + + # port + box = wx.BoxSizer(wx.HORIZONTAL) + + label = wx.StaticText(self, -1, self.utility.lang.get('portnumber'),wx.DefaultPosition,wx.Size(40,18)) + #label.SetHelpText("") + box.Add(label, 0, wx.ALIGN_CENTRE|wx.ALL, 5) + + if editfriend is not None: + port_str = str(editfriend['port']) + else: + port_str = '' + self.port_text = wx.TextCtrl(self, -1, port_str, size=(140,-1)) + ##self.port_text.SetHelpText(self.utility.lang.get('friendsport_help')) + box.Add(self.port_text, 0, wx.ALIGN_CENTRE|wx.ALL, 5) + + sizer.Add(box, 0, wx.GROW|wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT|wx.BOTTOM, 5) + + # permid + box = wx.BoxSizer(wx.HORIZONTAL) + + label = wx.StaticText(self, -1, self.utility.lang.get('permid')+':',wx.DefaultPosition,wx.Size(40,18)) + #label.SetHelpText("") + box.Add(label, 0, wx.ALIGN_CENTRE|wx.ALL, 5) + + if editfriend is not None: + permid = show_permid(editfriend['permid']) + else: + permid = '' + self.permid_text = wx.TextCtrl(self, -1, permid, size=(300,-1)) + ## self.permid_text.SetHelpText(self.utility.lang.get('friendspermid_help')) + box.Add(self.permid_text, 0, wx.ALIGN_CENTRE|wx.ALL, 5) + + sizer.Add(box, 0, wx.GROW|wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + """ + # picture + box = wx.BoxSizer(wx.HORIZONTAL) + + label = wx.StaticText(self, -1, self.utility.lang.get('icon')) + #label.SetHelpText("") + box.Add(label, 0, wx.ALIGN_CENTRE|wx.ALL, 5) + + if editfriend is not None and editfriend.has_key('icon'): + icon = str(editfriend['icon']) + else: + icon = '' + self.icon_path = wx.TextCtrl(self, -1, icon, size=(80,-1)) + ## self.icon_path.SetHelpText(self.utility.lang.get('friendsicon_help')) + box.Add(self.icon_path, 3, wx.ALIGN_CENTRE|wx.ALL, 5) + + iconbtn = wx.Button(self, -1, label=self.utility.lang.get('browsebtn')) + box.Add(iconbtn, 1, wx.ALIGN_CENTRE|wx.ALL, 5) + self.Bind(wx.EVT_BUTTON, self.OnIconButton, iconbtn) + + sizer.Add(box, 0, wx.GROW|wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + """ + + box = wx.BoxSizer(wx.HORIZONTAL) + line = wx.StaticLine(self, 
-1, size=(20,-1), style=wx.LI_HORIZONTAL) + sizer.Add(line, 0, wx.GROW|wx.ALIGN_CENTER_VERTICAL|wx.RIGHT|wx.TOP, 5) + btnsizer = wx.StdDialogButtonSizer() + + ##if (sys.platform != 'win32'): + ## btn = wx.ContextHelpButton(self) + ## btnsizer.AddButton(btn) + + if editfriend is None: + lbl = self.utility.lang.get('buttons_add') + else: + lbl = self.utility.lang.get('buttons_update') + btn = wx.Button(self, wx.ID_OK, label=lbl) + btn.SetDefault() + btnsizer.AddButton(btn) + self.Bind(wx.EVT_BUTTON, self.OnAddEditFriend, btn) + + btn = wx.Button(self, wx.ID_CANCEL) + btnsizer.AddButton(btn) + btnsizer.Realize() + + #sizer.Add(btnsizer, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + sizer.Add(btnsizer, 0, wx.ALIGN_CENTER|wx.ALL, 5) + + + + + self.SetSizer(sizer) + sizer.Fit(self) + + def OnAddEditFriend(self, event): + name = self.name_text.GetValue() + ip = str(self.ip_text.GetValue()) + b64permid = str(self.permid_text.GetValue()) + try: + permid = base64.decodestring( b64permid+'\n' ) + except: + print_exc() + permid = '' + #icon = self.icon_path.GetValue() + try: + port = int(self.port_text.GetValue()) + except: + port = 0 + + if len(name) == 0: + self.show_inputerror(self.utility.lang.get('nicknameempty_error')) + elif len(permid) == 0: + self.show_inputerror(self.utility.lang.get('friendspermid_error')) + elif port == 0: + self.show_inputerror(self.utility.lang.get('friendsport_error')) + else: + fdb = self.utility.session.open_dbhandler(NTFY_FRIENDS) + pdb = self.utility.session.open_dbhandler(NTFY_PEERS) + + #friend = {'permid':permid, 'ip':ip, 'port':port, 'name':name, 'icon':newiconfilename} + #friend = {'permid':permid, 'ip':ip, 'port':port, 'name':name} + friend = {'ip':ip, 'port':port, 'name':name} + if self.editfriend is not None: + if self.editfriend['permid'] != permid: + fdb.deleteFriend(self.editfriend['permid']) + pdb.deletePeer(self.editfriend['permid']) + + #fdb.addExternalFriend(friend) + pdb.addPeer(permid,friend) + #fdb.setFriend(permid) + # also sets friendstate to I_INVITED + self.utility.session.send_friendship_message(permid,F_REQUEST_MSG) + + event.Skip() # must be done, otherwise ShowModal() returns wrong error + self.Destroy() + + """ + elif icon != '' and not os.path.exists(icon): + self.show_inputerror(self.utility.lang.get('fiendsiconnotfound_error')) + else: + newiconfilename = '' + if icon != '': + ret = self.mm.create_from_file(permid,icon) + if not ret: + self.show_inputerror(self.utility.lang.get('friendsiconnotbmp_error')) + return + """ + + + def OnIconButton(self, evt): + # get current working directory + # TODO: record the last opened path in config file + try: + path = os.path.join(os.getcwd(), 'icons') + path = os.path.join(path, 'mugshots') + except Exception, msg: + path = '' + + # open the image browser dialog + dlg = ib.ImageDialog(self, path) + + dlg.Centre() + + if dlg.ShowModal() == wx.ID_OK: + self.icon_path.SetValue(dlg.GetFile()) + else: + pass + + dlg.Destroy() + + def show_inputerror(self,txt): + dlg = wx.MessageDialog(self, txt, 'Invalid Input', wx.OK | wx.ICON_INFORMATION) + dlg.ShowModal() + dlg.Destroy() + diff --git a/tribler-mod/Tribler/Main/Dialogs/regdialog.py b/tribler-mod/Tribler/Main/Dialogs/regdialog.py new file mode 100644 index 0000000..5d83513 --- /dev/null +++ b/tribler-mod/Tribler/Main/Dialogs/regdialog.py @@ -0,0 +1,69 @@ +from time import localtime, strftime +# Written by Tim Tucker +# see LICENSE.txt for license information +######################################################################### +# Description : Ask 
whether or not to associate ABC with torrents +######################################################################### +import wx + + +################################################################ +# +# Class: RegCheckDialog +# +# Prompts to associate ABC with .torrent files if it is not +# already associated with them +# +################################################################ +class RegCheckDialog(wx.Dialog): + def __init__(self, parent): + self.utility = parent.utility + + title = self.utility.lang.get('associate') + + pre = wx.PreDialog() + pre.Create(parent, -1, title) + self.this = pre.this + + outerbox = wx.BoxSizer( wx.VERTICAL ) + + outerbox.Add(wx.StaticText(self, -1, self.utility.lang.get('notassociated')), 0, wx.ALIGN_LEFT|wx.ALL, 5) + + self.yesbtn = wx.Button(self, -1, self.utility.lang.get('yes')) + self.Bind(wx.EVT_BUTTON, self.onYES, self.yesbtn) + + self.nobtn = wx.Button(self, -1, self.utility.lang.get('no')) + self.Bind(wx.EVT_BUTTON, self.onNO, self.nobtn) + + self.cancelbtn = wx.Button(self, wx.ID_CANCEL, self.utility.lang.get('cancel')) + + buttonbox = wx.BoxSizer( wx.HORIZONTAL ) + buttonbox.Add(self.yesbtn, 0, wx.ALL, 5) + buttonbox.Add(self.nobtn, 0, wx.ALL, 5) + buttonbox.Add(self.cancelbtn, 0, wx.ALL, 5) + + outerbox.Add( buttonbox, 0, wx.ALIGN_CENTER) + + self.SetAutoLayout( True ) + self.SetSizer( outerbox ) + self.Fit() + + def onYES(self, event = None): + self.apply(True) + self.EndModal(wx.ID_YES) + + def onNO(self, event = None): + self.apply(False) + self.EndModal(wx.ID_NO) + + def apply(self, register): + try: + self.utility.regchecker.updateRegistry(register) + except: + dlg = wx.MessageDialog(self, self.utility.lang.get('errorassociating'), self.utility.lang.get('error'), wx.OK | wx.ICON_INFORMATION) + dlg.ShowModal() + dlg.Destroy() + register=False + + self.utility.config.Write('associate', register, "boolean") + self.utility.config.Flush() diff --git a/tribler-mod/Tribler/Main/Dialogs/regdialog.py.bak b/tribler-mod/Tribler/Main/Dialogs/regdialog.py.bak new file mode 100644 index 0000000..455d2a2 --- /dev/null +++ b/tribler-mod/Tribler/Main/Dialogs/regdialog.py.bak @@ -0,0 +1,68 @@ +# Written by Tim Tucker +# see LICENSE.txt for license information +######################################################################### +# Description : Ask whether or not to associate ABC with torrents +######################################################################### +import wx + + +################################################################ +# +# Class: RegCheckDialog +# +# Prompts to associate ABC with .torrent files if it is not +# already associated with them +# +################################################################ +class RegCheckDialog(wx.Dialog): + def __init__(self, parent): + self.utility = parent.utility + + title = self.utility.lang.get('associate') + + pre = wx.PreDialog() + pre.Create(parent, -1, title) + self.this = pre.this + + outerbox = wx.BoxSizer( wx.VERTICAL ) + + outerbox.Add(wx.StaticText(self, -1, self.utility.lang.get('notassociated')), 0, wx.ALIGN_LEFT|wx.ALL, 5) + + self.yesbtn = wx.Button(self, -1, self.utility.lang.get('yes')) + self.Bind(wx.EVT_BUTTON, self.onYES, self.yesbtn) + + self.nobtn = wx.Button(self, -1, self.utility.lang.get('no')) + self.Bind(wx.EVT_BUTTON, self.onNO, self.nobtn) + + self.cancelbtn = wx.Button(self, wx.ID_CANCEL, self.utility.lang.get('cancel')) + + buttonbox = wx.BoxSizer( wx.HORIZONTAL ) + buttonbox.Add(self.yesbtn, 0, wx.ALL, 5) + buttonbox.Add(self.nobtn, 0, wx.ALL, 
5) + buttonbox.Add(self.cancelbtn, 0, wx.ALL, 5) + + outerbox.Add( buttonbox, 0, wx.ALIGN_CENTER) + + self.SetAutoLayout( True ) + self.SetSizer( outerbox ) + self.Fit() + + def onYES(self, event = None): + self.apply(True) + self.EndModal(wx.ID_YES) + + def onNO(self, event = None): + self.apply(False) + self.EndModal(wx.ID_NO) + + def apply(self, register): + try: + self.utility.regchecker.updateRegistry(register) + except: + dlg = wx.MessageDialog(self, self.utility.lang.get('errorassociating'), self.utility.lang.get('error'), wx.OK | wx.ICON_INFORMATION) + dlg.ShowModal() + dlg.Destroy() + register=False + + self.utility.config.Write('associate', register, "boolean") + self.utility.config.Flush() diff --git a/tribler-mod/Tribler/Main/Dialogs/socnetmyinfo.py b/tribler-mod/Tribler/Main/Dialogs/socnetmyinfo.py new file mode 100644 index 0000000..8a5cddd --- /dev/null +++ b/tribler-mod/Tribler/Main/Dialogs/socnetmyinfo.py @@ -0,0 +1,271 @@ +from time import localtime, strftime +# Written by Arno Bakker, Jie Yang +# see LICENSE.txt for license information + +import os +import sys +from traceback import print_exc +import tempfile + +import wx +import wx.lib.imagebrowser as ib +# Arno: I have problems importing the Wizard classes, i.e. if I do +# import wx +# x = wx.Wizard +# it don't work. This explicit import seems to: +from wx.wizard import Wizard,WizardPageSimple,EVT_WIZARD_PAGE_CHANGED,EVT_WIZARD_PAGE_CHANGING,EVT_WIZARD_CANCEL,EVT_WIZARD_FINISHED + +from Tribler.Main.vwxGUI.IconsManager import IconsManager, data2wxBitmap, ICON_MAX_DIM +#from common import CommonTriblerList +from Tribler.Main.Utility.constants import * +from Tribler.Core.SessionConfig import SessionStartupConfig + +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility + + +SERVICETYPES = [] + +DEBUG = False + +################################################################ +# +# Class: MyInfoDialog +# +# Dialog with user's public info +# +################################################################ + +class MyInfoWizard(Wizard): + + def __init__(self,parent): + + self.parent = parent + self.utility = parent.utility + + title = self.utility.lang.get('myinfo') + # TODO: bitmap? + Wizard.__init__(self,parent, -1, title, style = wx.DEFAULT_DIALOG_STYLE) + + self.page1 = NameIconWizardPage(self,type) + #self.page2 = RWIDsWizardPage(self,type) + #self.page1.Chain(self.page1,self.page2) + self.GetPageAreaSizer().Add(self.page1) + #self.GetPageAreaSizer().Add(self.page2) + + self.Bind(EVT_WIZARD_PAGE_CHANGED,self.OnPageChanged) + self.Bind(EVT_WIZARD_PAGE_CHANGING,self.OnPageChanging) + self.Bind(EVT_WIZARD_CANCEL,self.OnCancel) + self.Bind(EVT_WIZARD_FINISHED,self.OnFinished) + + self.guiUtility = GUIUtility.getInstance() + + def OnPageChanged(self,event=None): + pass + + def OnPageChanging(self,event=None): + if event is not None: + if event.GetDirection(): + if self.GetCurrentPage() == self.page1: + if not self.page1.IsFilledIn(): + event.Veto() + + def OnCancel(self,event=None): + pass + + def OnFinished(self,event=None): + (name,icondata, iconmime) = self.page1.getNameIconData() + + # write changes to the pickled config file, because on shutdown, changes are not pickled! + # this is done to spare the mypreferences-changes. 
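+        # The code below loads the saved SessionStartupConfig from disk, applies the new nickname and mugshot to both that config and the running session, and writes the config back so the change survives a restart.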
+ + state_dir = self.utility.session.get_state_dir() + cfgfilename = self.utility.session.get_default_config_filename(state_dir) + scfg = SessionStartupConfig.load(cfgfilename) + + for target in [scfg,self.utility.session]: + try: + target.set_nickname(name) + target.set_mugshot(icondata, mime=iconmime) + except: + print_exc() + + scfg.save(cfgfilename) + + self.parent.WizardFinished(self) + + def getFirstPage(self): + return self.page1 + + + +class NameIconWizardPage(WizardPageSimple): + """ Ask user for public name and icon """ + + def __init__(self,parent,type): + WizardPageSimple.__init__(self,parent) + self.utility = parent.utility + + # 0. mainbox + mainbox = wx.BoxSizer(wx.VERTICAL) + + # 1. topbox + topbox = wx.BoxSizer(wx.VERTICAL) + + # Ask public name + name = self.utility.session.get_nickname() + + name_box = wx.BoxSizer(wx.HORIZONTAL) + self.myname = wx.TextCtrl(self, -1, name) + name_box.Add(wx.StaticText(self, -1, self.utility.lang.get('myname')), 0, wx.ALIGN_CENTER_VERTICAL) + name_box.Add(self.myname, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + topbox.Add(name_box, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + + # Ask public user icon / avatar + icon_box = wx.BoxSizer(wx.HORIZONTAL) + icon_box.Add(wx.StaticText(self, -1, self.utility.lang.get('myicon')), 0, wx.ALIGN_CENTER_VERTICAL) + + ## TODO: integrate this code with makefriends.py, especially checking code + self.iconbtn = None + self.iconmime, self.icondata = self.utility.session.get_mugshot() + if self.icondata: + bm = data2wxBitmap(self.iconmime, self.icondata) + else: + im = IconsManager.getInstance() + bm = im.get_default('personsMode','DEFAULT_THUMB') + + if sys.platform != 'darwin': + self.iconbtn = wx.BitmapButton(self, -1, bm) + icon_box.Add(self.iconbtn, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + #label = wx.StaticText(self, -1, self.utility.lang.get('obligiconformat')) + #icon_box.Add(label, 0, wx.ALIGN_CENTRE|wx.ALL, 5) + self.Bind(wx.EVT_BUTTON, self.OnIconButton, self.iconbtn) + else: + path = os.path.expandvars('$HOME') + self.iconbtn = wx.FilePickerCtrl(self, -1, path) + self.Bind(wx.EVT_FILEPICKER_CHANGED,self.OnIconSelected,id=self.iconbtn.GetId()) + icon_box.Add(self.iconbtn, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + + topbox.Add(icon_box, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + + + mainbox.Add(topbox, 0, wx.EXPAND) + self.SetSizerAndFit(mainbox) + + def OnIconButton(self, evt): + try: + if sys.platform == 'win32': + # Arno goes win32, find location of "My Pictures" + # see http://www.mvps.org/access/api/api0054.htm + from win32com.shell import shell + pidl = shell.SHGetSpecialFolderLocation(0,0x27) + path = shell.SHGetPathFromIDList(pidl) + else: + path = os.path.expandvars('$HOME') + except Exception, msg: + path = '' + print_exc() + + # open the image browser dialog + dlg = ib.ImageDialog(self, path) + dlg.Centre() + if dlg.ShowModal() == wx.ID_OK: + self.iconpath = dlg.GetFile() + self.process_input() + else: + pass + + dlg.Destroy() + + + def OnIconSelected(self,event=None): + self.iconpath = self.iconbtn.GetPath() + self.process_input() + + def process_input(self): + try: + im = wx.Image(self.iconpath) + if im is None: + self.show_inputerror(self.utility.lang.get('cantopenfile')) + else: + if sys.platform != 'darwin': + bm = wx.BitmapFromImage(im.Scale(64,64),-1) + self.iconbtn.SetBitmapLabel(bm) + + # Arno, 2008-10-21: scale image! 
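+                    # Scale the chosen image to ICON_MAX_DIM x ICON_MAX_DIM, save it to a temporary JPEG file, read the bytes back in as the mugshot data, then delete the temporary file.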
+ sim = im.Scale(ICON_MAX_DIM,ICON_MAX_DIM) + [thumbhandle,thumbfilename] = tempfile.mkstemp("user-thumb") + os.close(thumbhandle) + sim.SaveFile(thumbfilename,wx.BITMAP_TYPE_JPEG) + + self.iconmime = 'image/jpeg' + f = open(thumbfilename,"rb") + self.icondata = f.read() + f.close() + os.remove(thumbfilename) + except: + print_exc() + self.show_inputerror(self.utility.lang.get('iconbadformat')) + + + + def show_inputerror(self,txt): + dlg = wx.MessageDialog(self, txt, self.utility.lang.get('invalidinput'), wx.OK | wx.ICON_INFORMATION) + dlg.ShowModal() + dlg.Destroy() + + def IsFilledIn(self): + (name,_,_) = self.getNameIconData() + #print "ICONPATH IS",iconpath + return len(name) != 0 #and icondata is not None + + def getNameIconData(self): + name = self.myname.GetValue() + return (name,self.icondata, self.iconmime) + + +class RWIDsWizardPage(WizardPageSimple): + """ Ask user for his real-world identifiers """ + + def __init__(self,parent,type): + WizardPageSimple.__init__(self,parent) + self.parent = parent + self.utility = parent.utility + + mainbox = wx.BoxSizer(wx.VERTICAL) + text = wx.StaticText(self, -1, self.utility.lang.get('rwid_explanation')) + text.Wrap(400) + mainbox.Add(text, 0, wx.ALIGN_CENTER_HORIZONTAL|wx.ALL, 5) + + # Real-World Identifiers + rwidbox = wx.BoxSizer(wx.VERTICAL) + self.rwidlist = RWIDList(self) + rwidbox.Add(self.rwidlist, 1, wx.EXPAND|wx.ALL, 5) + + rwidbtnbox = wx.BoxSizer(wx.HORIZONTAL) + + button = wx.Button(self, -1, self.utility.lang.get('addrwid'), style = wx.BU_EXACTFIT) + self.Bind(wx.EVT_BUTTON, self.OnAddRWID, button) + rwidbtnbox.Add(button, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 3) + + button = wx.Button(self, -1, self.utility.lang.get('remrwid'), style = wx.BU_EXACTFIT) + self.Bind(wx.EVT_BUTTON, self.OnRemoveRWID, button) + rwidbtnbox.Add(button, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 3) + rwidbox.Add(rwidbtnbox, 0, wx.EXPAND) + mainbox.Add(rwidbox, 0, wx.EXPAND) + + self.SetSizerAndFit(mainbox) + + self.rwidlist.loadList() + + + def OnAddRWID(self,event=None): + dlg = RWIDDialog(self) + dlg.ShowModal() + dlg.Destroy() + + def OnRemoveRWID(self,event=None): + self.rwidlist.remove() + + def add(self,service,id): + self.rwidlist.add(service,id) + diff --git a/tribler-mod/Tribler/Main/Dialogs/socnetmyinfo.py.bak b/tribler-mod/Tribler/Main/Dialogs/socnetmyinfo.py.bak new file mode 100644 index 0000000..25f72ef --- /dev/null +++ b/tribler-mod/Tribler/Main/Dialogs/socnetmyinfo.py.bak @@ -0,0 +1,270 @@ +# Written by Arno Bakker, Jie Yang +# see LICENSE.txt for license information + +import os +import sys +from traceback import print_exc +import tempfile + +import wx +import wx.lib.imagebrowser as ib +# Arno: I have problems importing the Wizard classes, i.e. if I do +# import wx +# x = wx.Wizard +# it don't work. 
This explicit import seems to: +from wx.wizard import Wizard,WizardPageSimple,EVT_WIZARD_PAGE_CHANGED,EVT_WIZARD_PAGE_CHANGING,EVT_WIZARD_CANCEL,EVT_WIZARD_FINISHED + +from Tribler.Main.vwxGUI.IconsManager import IconsManager, data2wxBitmap, ICON_MAX_DIM +#from common import CommonTriblerList +from Tribler.Main.Utility.constants import * +from Tribler.Core.SessionConfig import SessionStartupConfig + +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility + + +SERVICETYPES = [] + +DEBUG = False + +################################################################ +# +# Class: MyInfoDialog +# +# Dialog with user's public info +# +################################################################ + +class MyInfoWizard(Wizard): + + def __init__(self,parent): + + self.parent = parent + self.utility = parent.utility + + title = self.utility.lang.get('myinfo') + # TODO: bitmap? + Wizard.__init__(self,parent, -1, title, style = wx.DEFAULT_DIALOG_STYLE) + + self.page1 = NameIconWizardPage(self,type) + #self.page2 = RWIDsWizardPage(self,type) + #self.page1.Chain(self.page1,self.page2) + self.GetPageAreaSizer().Add(self.page1) + #self.GetPageAreaSizer().Add(self.page2) + + self.Bind(EVT_WIZARD_PAGE_CHANGED,self.OnPageChanged) + self.Bind(EVT_WIZARD_PAGE_CHANGING,self.OnPageChanging) + self.Bind(EVT_WIZARD_CANCEL,self.OnCancel) + self.Bind(EVT_WIZARD_FINISHED,self.OnFinished) + + self.guiUtility = GUIUtility.getInstance() + + def OnPageChanged(self,event=None): + pass + + def OnPageChanging(self,event=None): + if event is not None: + if event.GetDirection(): + if self.GetCurrentPage() == self.page1: + if not self.page1.IsFilledIn(): + event.Veto() + + def OnCancel(self,event=None): + pass + + def OnFinished(self,event=None): + (name,icondata, iconmime) = self.page1.getNameIconData() + + # write changes to the pickled config file, because on shutdown, changes are not pickled! + # this is done to spare the mypreferences-changes. + + state_dir = self.utility.session.get_state_dir() + cfgfilename = self.utility.session.get_default_config_filename(state_dir) + scfg = SessionStartupConfig.load(cfgfilename) + + for target in [scfg,self.utility.session]: + try: + target.set_nickname(name) + target.set_mugshot(icondata, mime=iconmime) + except: + print_exc() + + scfg.save(cfgfilename) + + self.parent.WizardFinished(self) + + def getFirstPage(self): + return self.page1 + + + +class NameIconWizardPage(WizardPageSimple): + """ Ask user for public name and icon """ + + def __init__(self,parent,type): + WizardPageSimple.__init__(self,parent) + self.utility = parent.utility + + # 0. mainbox + mainbox = wx.BoxSizer(wx.VERTICAL) + + # 1. 
topbox + topbox = wx.BoxSizer(wx.VERTICAL) + + # Ask public name + name = self.utility.session.get_nickname() + + name_box = wx.BoxSizer(wx.HORIZONTAL) + self.myname = wx.TextCtrl(self, -1, name) + name_box.Add(wx.StaticText(self, -1, self.utility.lang.get('myname')), 0, wx.ALIGN_CENTER_VERTICAL) + name_box.Add(self.myname, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + topbox.Add(name_box, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + + # Ask public user icon / avatar + icon_box = wx.BoxSizer(wx.HORIZONTAL) + icon_box.Add(wx.StaticText(self, -1, self.utility.lang.get('myicon')), 0, wx.ALIGN_CENTER_VERTICAL) + + ## TODO: integrate this code with makefriends.py, especially checking code + self.iconbtn = None + self.iconmime, self.icondata = self.utility.session.get_mugshot() + if self.icondata: + bm = data2wxBitmap(self.iconmime, self.icondata) + else: + im = IconsManager.getInstance() + bm = im.get_default('personsMode','DEFAULT_THUMB') + + if sys.platform != 'darwin': + self.iconbtn = wx.BitmapButton(self, -1, bm) + icon_box.Add(self.iconbtn, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + #label = wx.StaticText(self, -1, self.utility.lang.get('obligiconformat')) + #icon_box.Add(label, 0, wx.ALIGN_CENTRE|wx.ALL, 5) + self.Bind(wx.EVT_BUTTON, self.OnIconButton, self.iconbtn) + else: + path = os.path.expandvars('$HOME') + self.iconbtn = wx.FilePickerCtrl(self, -1, path) + self.Bind(wx.EVT_FILEPICKER_CHANGED,self.OnIconSelected,id=self.iconbtn.GetId()) + icon_box.Add(self.iconbtn, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + + topbox.Add(icon_box, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5) + + + mainbox.Add(topbox, 0, wx.EXPAND) + self.SetSizerAndFit(mainbox) + + def OnIconButton(self, evt): + try: + if sys.platform == 'win32': + # Arno goes win32, find location of "My Pictures" + # see http://www.mvps.org/access/api/api0054.htm + from win32com.shell import shell + pidl = shell.SHGetSpecialFolderLocation(0,0x27) + path = shell.SHGetPathFromIDList(pidl) + else: + path = os.path.expandvars('$HOME') + except Exception, msg: + path = '' + print_exc() + + # open the image browser dialog + dlg = ib.ImageDialog(self, path) + dlg.Centre() + if dlg.ShowModal() == wx.ID_OK: + self.iconpath = dlg.GetFile() + self.process_input() + else: + pass + + dlg.Destroy() + + + def OnIconSelected(self,event=None): + self.iconpath = self.iconbtn.GetPath() + self.process_input() + + def process_input(self): + try: + im = wx.Image(self.iconpath) + if im is None: + self.show_inputerror(self.utility.lang.get('cantopenfile')) + else: + if sys.platform != 'darwin': + bm = wx.BitmapFromImage(im.Scale(64,64),-1) + self.iconbtn.SetBitmapLabel(bm) + + # Arno, 2008-10-21: scale image! 
+ sim = im.Scale(ICON_MAX_DIM,ICON_MAX_DIM) + [thumbhandle,thumbfilename] = tempfile.mkstemp("user-thumb") + os.close(thumbhandle) + sim.SaveFile(thumbfilename,wx.BITMAP_TYPE_JPEG) + + self.iconmime = 'image/jpeg' + f = open(thumbfilename,"rb") + self.icondata = f.read() + f.close() + os.remove(thumbfilename) + except: + print_exc() + self.show_inputerror(self.utility.lang.get('iconbadformat')) + + + + def show_inputerror(self,txt): + dlg = wx.MessageDialog(self, txt, self.utility.lang.get('invalidinput'), wx.OK | wx.ICON_INFORMATION) + dlg.ShowModal() + dlg.Destroy() + + def IsFilledIn(self): + (name,_,_) = self.getNameIconData() + #print "ICONPATH IS",iconpath + return len(name) != 0 #and icondata is not None + + def getNameIconData(self): + name = self.myname.GetValue() + return (name,self.icondata, self.iconmime) + + +class RWIDsWizardPage(WizardPageSimple): + """ Ask user for his real-world identifiers """ + + def __init__(self,parent,type): + WizardPageSimple.__init__(self,parent) + self.parent = parent + self.utility = parent.utility + + mainbox = wx.BoxSizer(wx.VERTICAL) + text = wx.StaticText(self, -1, self.utility.lang.get('rwid_explanation')) + text.Wrap(400) + mainbox.Add(text, 0, wx.ALIGN_CENTER_HORIZONTAL|wx.ALL, 5) + + # Real-World Identifiers + rwidbox = wx.BoxSizer(wx.VERTICAL) + self.rwidlist = RWIDList(self) + rwidbox.Add(self.rwidlist, 1, wx.EXPAND|wx.ALL, 5) + + rwidbtnbox = wx.BoxSizer(wx.HORIZONTAL) + + button = wx.Button(self, -1, self.utility.lang.get('addrwid'), style = wx.BU_EXACTFIT) + self.Bind(wx.EVT_BUTTON, self.OnAddRWID, button) + rwidbtnbox.Add(button, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 3) + + button = wx.Button(self, -1, self.utility.lang.get('remrwid'), style = wx.BU_EXACTFIT) + self.Bind(wx.EVT_BUTTON, self.OnRemoveRWID, button) + rwidbtnbox.Add(button, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 3) + rwidbox.Add(rwidbtnbox, 0, wx.EXPAND) + mainbox.Add(rwidbox, 0, wx.EXPAND) + + self.SetSizerAndFit(mainbox) + + self.rwidlist.loadList() + + + def OnAddRWID(self,event=None): + dlg = RWIDDialog(self) + dlg.ShowModal() + dlg.Destroy() + + def OnRemoveRWID(self,event=None): + self.rwidlist.remove() + + def add(self,service,id): + self.rwidlist.add(service,id) + diff --git a/tribler-mod/Tribler/Main/Dialogs/systray.py b/tribler-mod/Tribler/Main/Dialogs/systray.py new file mode 100644 index 0000000..2b35b95 --- /dev/null +++ b/tribler-mod/Tribler/Main/Dialogs/systray.py @@ -0,0 +1,73 @@ +from time import localtime, strftime +# Author : Choopan RATTANAPOKA, Jie Yang, Arno Bakker +# see LICENSE.txt for license information +import sys +import os +import wx +from traceback import print_exc + +############################################################## +# +# Class : ABCTaskBarIcon +# +# Task Bar Icon +# +############################################################## +class ABCTaskBarIcon(wx.TaskBarIcon): + def __init__(self, parent): + wx.TaskBarIcon.__init__(self) + + self.parent = parent + self.utility = parent.utility + + self.TBMENU_RESTORE = wx.NewId() + + # setup a taskbar icon, and catch some events from it + self.Bind(wx.EVT_TASKBAR_LEFT_DCLICK, parent.onTaskBarActivate) + self.Bind(wx.EVT_MENU, parent.onTaskBarActivate, id = self.TBMENU_RESTORE) + + self.updateIcon(False) + + def updateIcon(self,iconifying = False): + remove = True + + mintray = self.utility.config.Read('mintray', "int") + if (mintray >= 2) or ((mintray >= 1) and iconifying): + remove = False + + if remove and self.IsIconInstalled(): + self.RemoveIcon() + elif not remove and not 
self.IsIconInstalled(): + self.SetIcon(self.utility.icon, "Tribler") + + def CreatePopupMenu(self): + menu = wx.Menu() + + mi = menu.Append(-1,self.utility.lang.get('stopall')) + self.Bind(wx.EVT_MENU, self.OnStopAll, id=mi.GetId()) + menu.AppendSeparator() + mi = menu.Append(-1,self.utility.lang.get('restartall')) + self.Bind(wx.EVT_MENU, self.OnRestartAll, id=mi.GetId()) + menu.AppendSeparator() + mi = menu.Append(-1,self.utility.lang.get('menuexit')) + self.Bind(wx.EVT_MENU, self.OnExitClient, id=mi.GetId()) + return menu + + def OnStopAll(self,event=None): + dlist = self.utility.session.get_downloads() + for d in dlist: + try: + d.stop() + except: + print_exc() + + def OnRestartAll(self,event=None): + dlist = self.utility.session.get_downloads() + for d in dlist: + try: + d.restart() + except: + print_exc() + + def OnExitClient(self,event=None): + self.parent.quit() diff --git a/tribler-mod/Tribler/Main/Dialogs/systray.py.bak b/tribler-mod/Tribler/Main/Dialogs/systray.py.bak new file mode 100644 index 0000000..56237cd --- /dev/null +++ b/tribler-mod/Tribler/Main/Dialogs/systray.py.bak @@ -0,0 +1,72 @@ +# Author : Choopan RATTANAPOKA, Jie Yang, Arno Bakker +# see LICENSE.txt for license information +import sys +import os +import wx +from traceback import print_exc + +############################################################## +# +# Class : ABCTaskBarIcon +# +# Task Bar Icon +# +############################################################## +class ABCTaskBarIcon(wx.TaskBarIcon): + def __init__(self, parent): + wx.TaskBarIcon.__init__(self) + + self.parent = parent + self.utility = parent.utility + + self.TBMENU_RESTORE = wx.NewId() + + # setup a taskbar icon, and catch some events from it + self.Bind(wx.EVT_TASKBAR_LEFT_DCLICK, parent.onTaskBarActivate) + self.Bind(wx.EVT_MENU, parent.onTaskBarActivate, id = self.TBMENU_RESTORE) + + self.updateIcon(False) + + def updateIcon(self,iconifying = False): + remove = True + + mintray = self.utility.config.Read('mintray', "int") + if (mintray >= 2) or ((mintray >= 1) and iconifying): + remove = False + + if remove and self.IsIconInstalled(): + self.RemoveIcon() + elif not remove and not self.IsIconInstalled(): + self.SetIcon(self.utility.icon, "Tribler") + + def CreatePopupMenu(self): + menu = wx.Menu() + + mi = menu.Append(-1,self.utility.lang.get('stopall')) + self.Bind(wx.EVT_MENU, self.OnStopAll, id=mi.GetId()) + menu.AppendSeparator() + mi = menu.Append(-1,self.utility.lang.get('restartall')) + self.Bind(wx.EVT_MENU, self.OnRestartAll, id=mi.GetId()) + menu.AppendSeparator() + mi = menu.Append(-1,self.utility.lang.get('menuexit')) + self.Bind(wx.EVT_MENU, self.OnExitClient, id=mi.GetId()) + return menu + + def OnStopAll(self,event=None): + dlist = self.utility.session.get_downloads() + for d in dlist: + try: + d.stop() + except: + print_exc() + + def OnRestartAll(self,event=None): + dlist = self.utility.session.get_downloads() + for d in dlist: + try: + d.restart() + except: + print_exc() + + def OnExitClient(self,event=None): + self.parent.quit() diff --git a/tribler-mod/Tribler/Main/Utility/__init__.py b/tribler-mod/Tribler/Main/Utility/__init__.py new file mode 100644 index 0000000..496df60 --- /dev/null +++ b/tribler-mod/Tribler/Main/Utility/__init__.py @@ -0,0 +1,4 @@ +from time import localtime, strftime +# Written by ABC authors +# see LICENSE.txt for license information + diff --git a/tribler-mod/Tribler/Main/Utility/__init__.py.bak b/tribler-mod/Tribler/Main/Utility/__init__.py.bak new file mode 100644 index 0000000..604e0aa --- 
/dev/null +++ b/tribler-mod/Tribler/Main/Utility/__init__.py.bak @@ -0,0 +1,3 @@ +# Written by ABC authors +# see LICENSE.txt for license information + diff --git a/tribler-mod/Tribler/Main/Utility/compat.py b/tribler-mod/Tribler/Main/Utility/compat.py new file mode 100644 index 0000000..8a97b42 --- /dev/null +++ b/tribler-mod/Tribler/Main/Utility/compat.py @@ -0,0 +1,307 @@ +from time import localtime, strftime +# Written by ABC authors and Arno Bakker +# see LICENSE.txt for license information + +########################## +# +# Things to handle backward compatability for the old-style +# torrent.lst and abc.ini +# +########################## + +import os +import sys + +from shutil import move, copy2 + +from Tribler.Utilities.configreader import ConfigReader +from Tribler.Main.Utility.helpers import existsAndIsReadable + +def moveOldConfigFiles(utility): + oldpath = utility.getPath() + newpath = utility.getConfigPath() + + files = ["torrent.lst", + "torrent.list", + "torrent.list.backup1", + "torrent.list.backup2", + "torrent.list.backup3", + "torrent.list.backup4", + "abc.ini", + "abc.conf", + "webservice.conf", + "maker.conf", + "torrent", + "torrentinfo"] + + for name in files: + oldname = os.path.join(oldpath, name) + if existsAndIsReadable(oldname): + newname = os.path.join(newpath, name) + try: + move(oldname, newname) + except: +# data = StringIO() +# print_exc(file = data) +# sys.stderr.write(data.getvalue()) + pass + + # Special case: move lang\user.lang to configdir\user.lang + oldname = os.path.join(oldpath, "lang", "user.lang") + if existsAndIsReadable(oldname): + newname = os.path.join(newpath, "user.lang") + try: + move(oldname, newname) + except: + pass + +def convertOldList(utility): + convertOldList1(utility) + convertOldList2(utility) + +# +# Convert the torrent.lst file to the new torrent.list +# format the first time ABC is run (if necessary) +# +def convertOldList1(utility): + # Only continue if torrent.lst exists + filename = os.path.join(utility.getConfigPath(), "torrent.lst") + if not existsAndIsReadable(filename): + return + + torrentconfig = utility.torrentconfig + + # Don't continue unless torrent.list is empty + try: + if torrentconfig.has_section("0"): + return + except: + return + + oldconfig = open(filename, "r+") + + configline = oldconfig.readline() + index = 0 + while configline != "" and configline != "\n": + try: + configmap = configline.split('|') + + torrentconfig.setSection(str(index)) + + torrentconfig.Write("src", configmap[1]) + torrentconfig.Write("dest", configmap[2]) + + # Write status information + torrentconfig.Write("status", configmap[3]) + torrentconfig.Write("prio", configmap[4]) + + # Write progress information + torrentconfig.Write("downsize", configmap[5]) + torrentconfig.Write("upsize", configmap[6]) + if (len(configmap) <= 7) or (configmap[7] == '?\n'): + progress = "0.0" + else: + progress = configmap[7] + torrentconfig.Write("progress", str(progress)) + except: +# data = StringIO() +# print_exc(file = data) +# sys.stderr.write(data.getvalue()) # report exception here too + pass + + configline = oldconfig.readline() + index += 1 + + oldconfig.close() + torrentconfig.Flush() + + # Rename the old list file + move(filename, filename + ".old") + +# +# Convert list to new format +# (only src stored in list, everything else stored in torrentinfo) +# +def convertOldList2(utility): + index = 0 + while convertOldList2B(utility, index): + index += 1 + utility.torrentconfig.Flush() + +def convertOldList2B(utility, indexval): + torrentconfig = 
utility.torrentconfig + + index = str(indexval) + + try: + if not torrentconfig.has_section(index): + return False + except: + return False + + if indexval == 0: + # backup the old file + oldconfigname = os.path.join(utility.getConfigPath(), "torrent.list") + if existsAndIsReadable(oldconfigname): + try: + copy2(oldconfigname, oldconfigname + ".old") + except: + pass + + # Torrent information + filename = torrentconfig.Read("src", section = index) + # Format from earlier 2.7.0 test builds: + if not filename: + # If the src is missing, then we should not try to add the torrent + sys.stdout.write("Filename is empty for index: " + str(index) + "!\n") + return False + elif filename.startswith(utility.getPath()): + src = filename + else: + src = os.path.join(utility.getConfigPath(), "torrent", filename) + + filename = os.path.split(src)[1] + newsrc = os.path.join(utility.getConfigPath(), "torrent", filename) + + configpath = os.path.join(utility.getConfigPath(), "torrentinfo", filename + ".info") + config = ConfigReader(configpath, "TorrentInfo") + + for name, value in torrentconfig.Items(index): + if name != "src" and value != "": + config.Write(name, value) + + config.Flush() + + torrentconfig.DeleteGroup(index) + torrentconfig.Write(index, newsrc) + + return True + +# Get settings from the old abc.ini file +def convertINI(utility): + # Only continue if abc.ini exists + filename = os.path.join(utility.getConfigPath(), "abc.ini") + if not existsAndIsReadable(filename): + return + + config = utility.config + lang = utility.lang + + # We'll ignore anything that was set to the defaults + # from the previous version + olddefaults = { 0: [-1, "abc_width", 710], + 1: [-1, "abc_height", 400], + 2: [-1, "detailwin_width", 610], + 3: [-1, "detailwin_height", 500], + 4: [0, "Title", 150], + 5: [1, "Progress", 60], + 6: [2, "BT Status", 100], + 7: [8, "Priority", 50], + 8: [5, "ETA", 85], + 9: [6, "Size", 75], + 10: [3, "DL Speed", 65], + 11: [4, "UL Speed", 60], + 12: [7, "%U/D Size", 65], + 13: [9, "Error Message", 200], + 14: [-1, "#Connected Seed", 60], + 15: [-1, "#Connected Peer", 60], + 16: [-1, "#Seeing Copies", 60], + 17: [-1, "Peer Avg Progress", 60], + 18: [-1, "Download Size", 75], + 19: [-1, "Upload Size", 75], + 20: [-1, "Total Speed", 80], + 21: [-1, "Torrent Name", 150] } + + oldconfig = open(filename, "r+") + + configline = oldconfig.readline() + while configline != "" and configline != "\n": + try: + configmap = configline.split("|") + + colid = int(configmap[0]) + + # Main window - width + if colid == 0: + if not config.Exists("window_width"): + try: + width = int(configmap[3]) + if width != olddefaults[colid][2]: + config.Write("window_width", width) + except: + pass + + # Main window - height + elif colid == 1: + if not config.Exists("window_height"): + try: + height = int(configmap[3]) + if height != olddefaults[colid][2]: + config.Write("window_height", height) + except: + pass + + # Advanced details - width + elif colid == 2: + if not config.Exists("detailwindow_width"): + try: + width = int(configmap[3]) + if width != olddefaults[colid][2]: + config.Write("detailwindow_width", width) + except: + pass + + # Advanced details - height + elif colid == 3: + if not config.Exists("detailwindow_height"): + try: + height = int(configmap[3]) + if height != olddefaults[colid][2]: + config.Write("detailwindow_height", height) + except: + pass + + # Column information + elif colid >= utility.list.columns.minid and colid < utility.list.columns.maxid: + # Column RankQ + if not 
config.Exists("column" + colid + "_rank"): + try: + rank = int(configmap[1]) + if rank != olddefaults[colid][0]: + config.Write("column" + colid + "_rank", rank) + except: + pass + + # Column title + if not lang.user_lang.Exists("column" + colid + "_text"): + try: + title = configmap[2] + if title != olddefaults[colid][1]: + lang.writeUser("column" + colid + "_text", title) + except: + pass + + # Column width + if not config.Exists("column" + colid + "_width"): + try: + width = int(configmap[3]) + if width != olddefaults[colid][2]: + config.Write("column" + colid + "_width", width) + except: + pass + except: + pass + + configline = oldconfig.readline() + + oldconfig.close() + + # Add in code to process things later + + lang.flush() + config.Flush() + + # Rename the old ini file + # (uncomment this out after we actually include something to process things) + move(filename, filename + ".old") + \ No newline at end of file diff --git a/tribler-mod/Tribler/Main/Utility/compat.py.bak b/tribler-mod/Tribler/Main/Utility/compat.py.bak new file mode 100644 index 0000000..b24db14 --- /dev/null +++ b/tribler-mod/Tribler/Main/Utility/compat.py.bak @@ -0,0 +1,306 @@ +# Written by ABC authors and Arno Bakker +# see LICENSE.txt for license information + +########################## +# +# Things to handle backward compatability for the old-style +# torrent.lst and abc.ini +# +########################## + +import os +import sys + +from shutil import move, copy2 + +from Tribler.Utilities.configreader import ConfigReader +from Tribler.Main.Utility.helpers import existsAndIsReadable + +def moveOldConfigFiles(utility): + oldpath = utility.getPath() + newpath = utility.getConfigPath() + + files = ["torrent.lst", + "torrent.list", + "torrent.list.backup1", + "torrent.list.backup2", + "torrent.list.backup3", + "torrent.list.backup4", + "abc.ini", + "abc.conf", + "webservice.conf", + "maker.conf", + "torrent", + "torrentinfo"] + + for name in files: + oldname = os.path.join(oldpath, name) + if existsAndIsReadable(oldname): + newname = os.path.join(newpath, name) + try: + move(oldname, newname) + except: +# data = StringIO() +# print_exc(file = data) +# sys.stderr.write(data.getvalue()) + pass + + # Special case: move lang\user.lang to configdir\user.lang + oldname = os.path.join(oldpath, "lang", "user.lang") + if existsAndIsReadable(oldname): + newname = os.path.join(newpath, "user.lang") + try: + move(oldname, newname) + except: + pass + +def convertOldList(utility): + convertOldList1(utility) + convertOldList2(utility) + +# +# Convert the torrent.lst file to the new torrent.list +# format the first time ABC is run (if necessary) +# +def convertOldList1(utility): + # Only continue if torrent.lst exists + filename = os.path.join(utility.getConfigPath(), "torrent.lst") + if not existsAndIsReadable(filename): + return + + torrentconfig = utility.torrentconfig + + # Don't continue unless torrent.list is empty + try: + if torrentconfig.has_section("0"): + return + except: + return + + oldconfig = open(filename, "r+") + + configline = oldconfig.readline() + index = 0 + while configline != "" and configline != "\n": + try: + configmap = configline.split('|') + + torrentconfig.setSection(str(index)) + + torrentconfig.Write("src", configmap[1]) + torrentconfig.Write("dest", configmap[2]) + + # Write status information + torrentconfig.Write("status", configmap[3]) + torrentconfig.Write("prio", configmap[4]) + + # Write progress information + torrentconfig.Write("downsize", configmap[5]) + torrentconfig.Write("upsize", 
configmap[6]) + if (len(configmap) <= 7) or (configmap[7] == '?\n'): + progress = "0.0" + else: + progress = configmap[7] + torrentconfig.Write("progress", str(progress)) + except: +# data = StringIO() +# print_exc(file = data) +# sys.stderr.write(data.getvalue()) # report exception here too + pass + + configline = oldconfig.readline() + index += 1 + + oldconfig.close() + torrentconfig.Flush() + + # Rename the old list file + move(filename, filename + ".old") + +# +# Convert list to new format +# (only src stored in list, everything else stored in torrentinfo) +# +def convertOldList2(utility): + index = 0 + while convertOldList2B(utility, index): + index += 1 + utility.torrentconfig.Flush() + +def convertOldList2B(utility, indexval): + torrentconfig = utility.torrentconfig + + index = str(indexval) + + try: + if not torrentconfig.has_section(index): + return False + except: + return False + + if indexval == 0: + # backup the old file + oldconfigname = os.path.join(utility.getConfigPath(), "torrent.list") + if existsAndIsReadable(oldconfigname): + try: + copy2(oldconfigname, oldconfigname + ".old") + except: + pass + + # Torrent information + filename = torrentconfig.Read("src", section = index) + # Format from earlier 2.7.0 test builds: + if not filename: + # If the src is missing, then we should not try to add the torrent + sys.stdout.write("Filename is empty for index: " + str(index) + "!\n") + return False + elif filename.startswith(utility.getPath()): + src = filename + else: + src = os.path.join(utility.getConfigPath(), "torrent", filename) + + filename = os.path.split(src)[1] + newsrc = os.path.join(utility.getConfigPath(), "torrent", filename) + + configpath = os.path.join(utility.getConfigPath(), "torrentinfo", filename + ".info") + config = ConfigReader(configpath, "TorrentInfo") + + for name, value in torrentconfig.Items(index): + if name != "src" and value != "": + config.Write(name, value) + + config.Flush() + + torrentconfig.DeleteGroup(index) + torrentconfig.Write(index, newsrc) + + return True + +# Get settings from the old abc.ini file +def convertINI(utility): + # Only continue if abc.ini exists + filename = os.path.join(utility.getConfigPath(), "abc.ini") + if not existsAndIsReadable(filename): + return + + config = utility.config + lang = utility.lang + + # We'll ignore anything that was set to the defaults + # from the previous version + olddefaults = { 0: [-1, "abc_width", 710], + 1: [-1, "abc_height", 400], + 2: [-1, "detailwin_width", 610], + 3: [-1, "detailwin_height", 500], + 4: [0, "Title", 150], + 5: [1, "Progress", 60], + 6: [2, "BT Status", 100], + 7: [8, "Priority", 50], + 8: [5, "ETA", 85], + 9: [6, "Size", 75], + 10: [3, "DL Speed", 65], + 11: [4, "UL Speed", 60], + 12: [7, "%U/D Size", 65], + 13: [9, "Error Message", 200], + 14: [-1, "#Connected Seed", 60], + 15: [-1, "#Connected Peer", 60], + 16: [-1, "#Seeing Copies", 60], + 17: [-1, "Peer Avg Progress", 60], + 18: [-1, "Download Size", 75], + 19: [-1, "Upload Size", 75], + 20: [-1, "Total Speed", 80], + 21: [-1, "Torrent Name", 150] } + + oldconfig = open(filename, "r+") + + configline = oldconfig.readline() + while configline != "" and configline != "\n": + try: + configmap = configline.split("|") + + colid = int(configmap[0]) + + # Main window - width + if colid == 0: + if not config.Exists("window_width"): + try: + width = int(configmap[3]) + if width != olddefaults[colid][2]: + config.Write("window_width", width) + except: + pass + + # Main window - height + elif colid == 1: + if not 
config.Exists("window_height"): + try: + height = int(configmap[3]) + if height != olddefaults[colid][2]: + config.Write("window_height", height) + except: + pass + + # Advanced details - width + elif colid == 2: + if not config.Exists("detailwindow_width"): + try: + width = int(configmap[3]) + if width != olddefaults[colid][2]: + config.Write("detailwindow_width", width) + except: + pass + + # Advanced details - height + elif colid == 3: + if not config.Exists("detailwindow_height"): + try: + height = int(configmap[3]) + if height != olddefaults[colid][2]: + config.Write("detailwindow_height", height) + except: + pass + + # Column information + elif colid >= utility.list.columns.minid and colid < utility.list.columns.maxid: + # Column RankQ + if not config.Exists("column" + colid + "_rank"): + try: + rank = int(configmap[1]) + if rank != olddefaults[colid][0]: + config.Write("column" + colid + "_rank", rank) + except: + pass + + # Column title + if not lang.user_lang.Exists("column" + colid + "_text"): + try: + title = configmap[2] + if title != olddefaults[colid][1]: + lang.writeUser("column" + colid + "_text", title) + except: + pass + + # Column width + if not config.Exists("column" + colid + "_width"): + try: + width = int(configmap[3]) + if width != olddefaults[colid][2]: + config.Write("column" + colid + "_width", width) + except: + pass + except: + pass + + configline = oldconfig.readline() + + oldconfig.close() + + # Add in code to process things later + + lang.flush() + config.Flush() + + # Rename the old ini file + # (uncomment this out after we actually include something to process things) + move(filename, filename + ".old") + \ No newline at end of file diff --git a/tribler-mod/Tribler/Main/Utility/constants.py b/tribler-mod/Tribler/Main/Utility/constants.py new file mode 100644 index 0000000..39af73e --- /dev/null +++ b/tribler-mod/Tribler/Main/Utility/constants.py @@ -0,0 +1,194 @@ +from time import localtime, strftime +# Written by ABC authors and Arno Bakker +# see LICENSE.txt for license information + +# Various constants + +# +# Constants used for keeping track of a torrent's status +# +STATUS_QUEUE = 0 +STATUS_STOP = 200 +STATUS_ACTIVE = 100 +STATUS_HASHCHECK = 101 +STATUS_PAUSE = 102 +STATUS_SUPERSEED = 103 +STATUS_FINISHED = 300 + +# +# Constants for the column headings in the torrent list +# +COL_TITLE = 4 +COL_PROGRESS = 5 +COL_BTSTATUS = 6 +COL_PRIO = 7 +COL_ETA = 8 +COL_SIZE = 9 +COL_DLSPEED = 10 +COL_ULSPEED = 11 +COL_RATIO = 12 +COL_MESSAGE = 13 +COL_SEEDS = 14 +COL_PEERS = 15 +COL_COPIES = 16 +COL_PEERPROGRESS = 17 +COL_DLSIZE = 18 +COL_ULSIZE = 19 +COL_TOTALSPEED = 20 +COL_NAME = 21 +COL_DEST = 22 +COL_SEEDTIME = 23 +COL_CONNECTIONS = 24 +COL_SEEDOPTION = 25 +# only for tribler GUI: +COL_DLANDTOTALSIZE = 30 + + +# +# Constants used for the column headings in the spew list +# +SPEW_UNCHOKE = 0 +SPEW_IP = 1 +SPEW_LR = 2 +SPEW_UP = 3 +SPEW_INTERESTED = 4 +SPEW_CHOKING = 5 +SPEW_DOWN = 6 +SPEW_INTERESTING = 7 +SPEW_CHOKED = 8 +SPEW_SNUBBED = 9 +SPEW_DLSIZE = 10 +SPEW_ULSIZE = 11 +SPEW_PEERPROGRESS = 12 +SPEW_PEERSPEED = 13 +SPEW_PERMID = 14 + +# +# Constants used for headings in the file info list +# +FILEINFO_FILENAME = 0 +FILEINFO_SIZE = 1 +FILEINFO_PROGRESS = 2 +FILEINFO_MD5 = 3 +FILEINFO_CRC32 = 4 +FILEINFO_SHA1 = 5 +FILEINFO_ED2K = 6 + +ACTION_SEPARATOR = -1 # Just to represent a separator within a menu/toolbar +ACTION_MOVEUP = 0 +ACTION_MOVEDOWN = 1 +ACTION_MOVETOP = 2 +ACTION_MOVEBOTTOM = 3 +ACTION_CLEARCOMPLETED = 4 +ACTION_PAUSEALL = 5 +ACTION_STOPALL = 6 
+ACTION_UNSTOPALL = 7 +ACTION_WEBSERVICE = 8 +ACTION_ADDTORRENT = 9 +ACTION_ADDTORRENTNONDEFAULT = 10 +ACTION_ADDTORRENTURL = 11 +ACTION_RESUME = 12 +ACTION_PLAY = 13 +#ACTION_RESEEDRESUME = 13 +ACTION_PAUSE = 14 +ACTION_STOP = 15 +ACTION_QUEUE = 16 +ACTION_REMOVE = 17 +ACTION_REMOVEFILE = 18 +ACTION_SCRAPE = 19 +ACTION_DETAILS = 20 +ACTION_SUPERSEED = 21 +ACTION_HASHCHECK = 22 +ACTION_CLEARMESSAGE = 23 +ACTION_LOCALUPLOAD = 24 +ACTION_OPENDEST = 25 +ACTION_OPENFILEDEST = 26 +ACTION_PREFERENCES = 27 +ACTION_ABOUT = 28 +ACTION_CHECKVERSION = 29 +ACTION_MAKETORRENT = 30 +ACTION_WEBPREFERENCES = 31 +ACTION_EXTRACTFROMLIST = 32 +ACTION_COPYFROMLIST = 33 +ACTION_MANUALANNOUNCE = 34 +ACTION_EXTERNALANNOUNCE = 35 +ACTION_CHANGEDEST = 36 +ACTION_CHANGEPRIO = 37 +ACTION_EXPORTMENU = 38 +ACTION_TORRENTACTIONMENU = 39 +ACTION_FILEMENU = 40 +ACTION_VERSIONMENU = 41 +ACTION_TOOLSMENU = 42 +ACTION_ADDTORRENTMENU = 43 +ACTION_EXIT = 44 +ACTION_BUDDIES = 45 # Tribler +ACTION_FILES = 46 # Tribler +ACTION_MYINFO = 47 # Tribler +SPINNER_NUMSIM = 1 + +# +# Constants used for recommended torrent list +# +TORRENT_TORRENTNAME = 0 +TORRENT_CONTENTNAME = 1 +TORRENT_RECOMMENDATION = 2 +TORRENT_SOURCES = 3 +TORRENT_NLEECHERS = 4 +TORRENT_NSEEDERS = 5 +TORRENT_INJECTED = 6 +TORRENT_SIZE = 7 +TORRENT_NFILES = 8 +TORRENT_TRACKER = 9 +TORRENT_CATEGORY = 10 + +# +# Constants used for my preference list +# +MYPREF_TORRENTNAME = 0 +MYPREF_CONTENTNAME = 1 +MYPREF_RANK = 2 +MYPREF_SIZE = 3 +MYPREF_LASTSEEN = 4 + +# +# Constants used for taste buddy list +# +BUDDY_FRIEND = 0 +BUDDY_NAME = 1 +BUDDY_IP = 2 +BUDDY_SIM = 3 +BUDDY_LASTSEEN = 4 +BUDDY_NPREF = 5 +BUDDY_NCONN = 6 +BUDDY_NEXNG = 7 + + +# +# Constants used for my download history list +# +HISTORY_TORRENTNAME = 0 +HISTORY_CONTENTNAME = 1 +HISTORY_RANK = 2 +HISTORY_SIZE = 3 +HISTORY_LASTSEEN =4 + +# +# Constants used for encountered peer list +# +PEER_FRIEND = 0 +PEER_NAME = 1 +PEER_IP = 2 +PEER_SIMILARITY = 3 +PEER_LASTSEEN = 4 +PEER_PREFERENCES = 5 +PEER_CONNECTED = 6 +PEER_EXCHANGED = 7 + +CALLER_ARGV = 'argv' + +# +# Constants used for currently searching imported friends list +# +IMPORT_SERVICE = 0 +IMPORT_ID = 1 +IMPORT_NAME = 2 diff --git a/tribler-mod/Tribler/Main/Utility/constants.py.bak b/tribler-mod/Tribler/Main/Utility/constants.py.bak new file mode 100644 index 0000000..3d74945 --- /dev/null +++ b/tribler-mod/Tribler/Main/Utility/constants.py.bak @@ -0,0 +1,193 @@ +# Written by ABC authors and Arno Bakker +# see LICENSE.txt for license information + +# Various constants + +# +# Constants used for keeping track of a torrent's status +# +STATUS_QUEUE = 0 +STATUS_STOP = 200 +STATUS_ACTIVE = 100 +STATUS_HASHCHECK = 101 +STATUS_PAUSE = 102 +STATUS_SUPERSEED = 103 +STATUS_FINISHED = 300 + +# +# Constants for the column headings in the torrent list +# +COL_TITLE = 4 +COL_PROGRESS = 5 +COL_BTSTATUS = 6 +COL_PRIO = 7 +COL_ETA = 8 +COL_SIZE = 9 +COL_DLSPEED = 10 +COL_ULSPEED = 11 +COL_RATIO = 12 +COL_MESSAGE = 13 +COL_SEEDS = 14 +COL_PEERS = 15 +COL_COPIES = 16 +COL_PEERPROGRESS = 17 +COL_DLSIZE = 18 +COL_ULSIZE = 19 +COL_TOTALSPEED = 20 +COL_NAME = 21 +COL_DEST = 22 +COL_SEEDTIME = 23 +COL_CONNECTIONS = 24 +COL_SEEDOPTION = 25 +# only for tribler GUI: +COL_DLANDTOTALSIZE = 30 + + +# +# Constants used for the column headings in the spew list +# +SPEW_UNCHOKE = 0 +SPEW_IP = 1 +SPEW_LR = 2 +SPEW_UP = 3 +SPEW_INTERESTED = 4 +SPEW_CHOKING = 5 +SPEW_DOWN = 6 +SPEW_INTERESTING = 7 +SPEW_CHOKED = 8 +SPEW_SNUBBED = 9 +SPEW_DLSIZE = 10 +SPEW_ULSIZE = 11 +SPEW_PEERPROGRESS = 12 
+SPEW_PEERSPEED = 13 +SPEW_PERMID = 14 + +# +# Constants used for headings in the file info list +# +FILEINFO_FILENAME = 0 +FILEINFO_SIZE = 1 +FILEINFO_PROGRESS = 2 +FILEINFO_MD5 = 3 +FILEINFO_CRC32 = 4 +FILEINFO_SHA1 = 5 +FILEINFO_ED2K = 6 + +ACTION_SEPARATOR = -1 # Just to represent a separator within a menu/toolbar +ACTION_MOVEUP = 0 +ACTION_MOVEDOWN = 1 +ACTION_MOVETOP = 2 +ACTION_MOVEBOTTOM = 3 +ACTION_CLEARCOMPLETED = 4 +ACTION_PAUSEALL = 5 +ACTION_STOPALL = 6 +ACTION_UNSTOPALL = 7 +ACTION_WEBSERVICE = 8 +ACTION_ADDTORRENT = 9 +ACTION_ADDTORRENTNONDEFAULT = 10 +ACTION_ADDTORRENTURL = 11 +ACTION_RESUME = 12 +ACTION_PLAY = 13 +#ACTION_RESEEDRESUME = 13 +ACTION_PAUSE = 14 +ACTION_STOP = 15 +ACTION_QUEUE = 16 +ACTION_REMOVE = 17 +ACTION_REMOVEFILE = 18 +ACTION_SCRAPE = 19 +ACTION_DETAILS = 20 +ACTION_SUPERSEED = 21 +ACTION_HASHCHECK = 22 +ACTION_CLEARMESSAGE = 23 +ACTION_LOCALUPLOAD = 24 +ACTION_OPENDEST = 25 +ACTION_OPENFILEDEST = 26 +ACTION_PREFERENCES = 27 +ACTION_ABOUT = 28 +ACTION_CHECKVERSION = 29 +ACTION_MAKETORRENT = 30 +ACTION_WEBPREFERENCES = 31 +ACTION_EXTRACTFROMLIST = 32 +ACTION_COPYFROMLIST = 33 +ACTION_MANUALANNOUNCE = 34 +ACTION_EXTERNALANNOUNCE = 35 +ACTION_CHANGEDEST = 36 +ACTION_CHANGEPRIO = 37 +ACTION_EXPORTMENU = 38 +ACTION_TORRENTACTIONMENU = 39 +ACTION_FILEMENU = 40 +ACTION_VERSIONMENU = 41 +ACTION_TOOLSMENU = 42 +ACTION_ADDTORRENTMENU = 43 +ACTION_EXIT = 44 +ACTION_BUDDIES = 45 # Tribler +ACTION_FILES = 46 # Tribler +ACTION_MYINFO = 47 # Tribler +SPINNER_NUMSIM = 1 + +# +# Constants used for recommended torrent list +# +TORRENT_TORRENTNAME = 0 +TORRENT_CONTENTNAME = 1 +TORRENT_RECOMMENDATION = 2 +TORRENT_SOURCES = 3 +TORRENT_NLEECHERS = 4 +TORRENT_NSEEDERS = 5 +TORRENT_INJECTED = 6 +TORRENT_SIZE = 7 +TORRENT_NFILES = 8 +TORRENT_TRACKER = 9 +TORRENT_CATEGORY = 10 + +# +# Constants used for my preference list +# +MYPREF_TORRENTNAME = 0 +MYPREF_CONTENTNAME = 1 +MYPREF_RANK = 2 +MYPREF_SIZE = 3 +MYPREF_LASTSEEN = 4 + +# +# Constants used for taste buddy list +# +BUDDY_FRIEND = 0 +BUDDY_NAME = 1 +BUDDY_IP = 2 +BUDDY_SIM = 3 +BUDDY_LASTSEEN = 4 +BUDDY_NPREF = 5 +BUDDY_NCONN = 6 +BUDDY_NEXNG = 7 + + +# +# Constants used for my download history list +# +HISTORY_TORRENTNAME = 0 +HISTORY_CONTENTNAME = 1 +HISTORY_RANK = 2 +HISTORY_SIZE = 3 +HISTORY_LASTSEEN =4 + +# +# Constants used for encountered peer list +# +PEER_FRIEND = 0 +PEER_NAME = 1 +PEER_IP = 2 +PEER_SIMILARITY = 3 +PEER_LASTSEEN = 4 +PEER_PREFERENCES = 5 +PEER_CONNECTED = 6 +PEER_EXCHANGED = 7 + +CALLER_ARGV = 'argv' + +# +# Constants used for currently searching imported friends list +# +IMPORT_SERVICE = 0 +IMPORT_ID = 1 +IMPORT_NAME = 2 diff --git a/tribler-mod/Tribler/Main/Utility/getscrapedata.py b/tribler-mod/Tribler/Main/Utility/getscrapedata.py new file mode 100644 index 0000000..f53e7c7 --- /dev/null +++ b/tribler-mod/Tribler/Main/Utility/getscrapedata.py @@ -0,0 +1,151 @@ +from time import localtime, strftime +# Written by ABC authors +# see LICENSE.txt for license information + +import re +import binascii +import sys +from threading import Thread,Event + +# The ScrapeThread calls ABCTorrent to update the info. As that updates +# the GUI, those updates must be done by the MainThread and not this +# scraping thread itself. + + +DEBUG = False + +################################################################ +# +# Class: ScrapeThread +# +# Retrieves scrape data from a tracker. 
+# +################################################################ +class ScrapeThread(Thread): + + def __init__(self, utility, torrent, manualscrape = False): + Thread.__init__(self, None, None, None) + + self.torrent = torrent + self.utility = utility + self.manualscrape = manualscrape + self.status = self.utility.lang.get('scraping') + self.currentseed = "?" + self.currentpeer = "?" + + self.setName( "Scrape"+self.getName() ) + self.setDaemon(True) + + + def run(self): + self.GetScrapeData() + + def GetScrapeData(self): + if DEBUG: + print "scrapethread: scraping..." + + # connect scrape at tracker and get data + # save at self.currentpeer, self.currentseed + # if error put '?' + + # The thread itself will update the list for its scraping infos + self.updateTorrent() + + metainfo = self.torrent.metainfo + + if metainfo is None: + self.status = self.utility.lang.get('cantreadmetainfo') + self.updateTorrent() + return + + if DEBUG: + print "scrapethread: got metainfo" + + announce = None + if 'announce' in metainfo: + announce = metainfo['announce'] + elif 'announce-list' in metainfo: + announce_list = metainfo['announce-list'] + announce = announce_list[0][0] + + if announce is None: + self.status = self.utility.lang.get('noannouncetrackerinmeta') + self.updateTorrent() + return + + if DEBUG: + print "scrapethread: got announce" + +# sys.stdout.write('Announce URL: ' + announce + '\n'); + + # Does tracker support scraping? + ix = announce.rfind('/') + if ((ix == -1) or (announce.rfind("/announce") != ix)): + # Tracker doesn't support scraping + self.status = self.utility.lang.get('trackernoscrape') + self.updateTorrent() + return + + p = re.compile('(.*/)[^/]+') + surl = p.sub(r'\1', announce) + #sys.stdout.write('sURL1: ' + surl + '\n') + #Fix this to comply with scrape standards. + ix = announce.rindex('/') + #tmp = 'ix: '.join(ix) + #sys.stdout.write('ix: ' + str(ix) + '\n') + if (ix + 9) > len(announce): + ix2 = len(announce) + else: + ix2 = ix + 9 + #sys.stdout.write('ix: ' + announce[(ix + 1):(ix2)] + '\n') + if announce[(ix + 1):(ix2)].endswith("announce", 0): + #sys.stdout.write('!!!VALID SCRAPE URL!!!' + '\n') + #sys.stdout.write('sURLTrue: ' + surl + 'scrape' + announce[(ix2):] + '\n'); + # fix for some private trackers (change ? to &): + if '?' in announce[ix2:]: + infohashprefix = '&' + else: + infohashprefix = '?' 
+ surl = surl + 'scrape' + announce[ix2:] + infohashprefix + 'info_hash=' + #end new Scrape URL Code + info_hash_hex = self.torrent.infohash + hashlen = len(info_hash_hex) + for i in range(0, hashlen): + if (i % 2 == 0): + surl = surl + "%" + surl = surl + info_hash_hex[i] + + if DEBUG: + print "scrapethread: tring to scrape" + + # connect scrape URL + scrapedata = self.utility.getMetainfo(surl, style = "url") + + if scrapedata is None or not 'files' in scrapedata: + self.status = self.utility.lang.get('cantgetdatafromtracker') + else: + scrapedata = scrapedata['files'] + for i in scrapedata.keys(): + if binascii.b2a_hex(i) == info_hash_hex: + self.currentpeer = str(scrapedata[i]['incomplete']) + self.currentseed = str(scrapedata[i]['complete']) + self.status = self.utility.lang.get('scrapingdone') + + self.updateTorrent() + + if DEBUG: + print "scrapethread: done scraping" + + def updateTorrent(self): + wx.CallAfter(self.OnUpdateTorrent) + + + def OnUpdateTorrent(self): + if not self.manualscrape: + # Don't update status information if doing an automatic scrape + status = "" + else: + status = self.status + + # The thread itself will update the list for its scraping infos + self.torrent.updateScrapeData(self.currentpeer, self.currentseed, status) \ No newline at end of file diff --git a/tribler-mod/Tribler/Main/Utility/getscrapedata.py.bak b/tribler-mod/Tribler/Main/Utility/getscrapedata.py.bak new file mode 100644 index 0000000..6425377 --- /dev/null +++ b/tribler-mod/Tribler/Main/Utility/getscrapedata.py.bak @@ -0,0 +1,150 @@ +# Written by ABC authors +# see LICENSE.txt for license information + +import re +import binascii +import sys +from threading import Thread,Event + +# The ScrapeThread calls ABCTorrent to update the info. As that updates +# the GUI, those updates must be done by the MainThread and not this +# scraping thread itself. + + +DEBUG = False + +################################################################ +# +# Class: ScrapeThread +# +# Retrieves scrape data from a tracker. +# +################################################################ +class ScrapeThread(Thread): + + def __init__(self, utility, torrent, manualscrape = False): + Thread.__init__(self, None, None, None) + + self.torrent = torrent + self.utility = utility + self.manualscrape = manualscrape + self.status = self.utility.lang.get('scraping') + self.currentseed = "?" + self.currentpeer = "?" + + self.setName( "Scrape"+self.getName() ) + self.setDaemon(True) + + + def run(self): + self.GetScrapeData() + + def GetScrapeData(self): + if DEBUG: + print "scrapethread: scraping..." + + # connect scrape at tracker and get data + # save at self.currentpeer, self.currentseed + # if error put '?' + + # The thread itself will update the list for its scraping infos + self.updateTorrent() + + metainfo = self.torrent.metainfo + + if metainfo is None: + self.status = self.utility.lang.get('cantreadmetainfo') + self.updateTorrent() + return + + if DEBUG: + print "scrapethread: got metainfo" + + announce = None + if 'announce' in metainfo: + announce = metainfo['announce'] + elif 'announce-list' in metainfo: + announce_list = metainfo['announce-list'] + announce = announce_list[0][0] + + if announce is None: + self.status = self.utility.lang.get('noannouncetrackerinmeta') + self.updateTorrent() + return + + if DEBUG: + print "scrapethread: got announce" + +# sys.stdout.write('Announce URL: ' + announce + '\n'); + + # Does tracker support scraping? 
+ ix = announce.rfind('/') + if ((ix == -1) or (announce.rfind("/announce") != ix)): + # Tracker doesn't support scraping + self.status = self.utility.lang.get('trackernoscrape') + self.updateTorrent() + return + + p = re.compile('(.*/)[^/]+') + surl = p.sub(r'\1', announce) + #sys.stdout.write('sURL1: ' + surl + '\n') + #Fix this to comply with scrape standards. + ix = announce.rindex('/') + #tmp = 'ix: '.join(ix) + #sys.stdout.write('ix: ' + str(ix) + '\n') + if (ix + 9) > len(announce): + ix2 = len(announce) + else: + ix2 = ix + 9 + #sys.stdout.write('ix: ' + announce[(ix + 1):(ix2)] + '\n') + if announce[(ix + 1):(ix2)].endswith("announce", 0): + #sys.stdout.write('!!!VALID SCRAPE URL!!!' + '\n') + #sys.stdout.write('sURLTrue: ' + surl + 'scrape' + announce[(ix2):] + '\n'); + # fix for some private trackers (change ? to &): + if '?' in announce[ix2:]: + infohashprefix = '&' + else: + infohashprefix = '?' + surl = surl + 'scrape' + announce[ix2:] + infohashprefix + 'info_hash=' + #end new Scrape URL Code + info_hash_hex = self.torrent.infohash + hashlen = len(info_hash_hex) + for i in range(0, hashlen): + if (i % 2 == 0): + surl = surl + "%" + surl = surl + info_hash_hex[i] + + if DEBUG: + print "scrapethread: tring to scrape" + + # connect scrape URL + scrapedata = self.utility.getMetainfo(surl, style = "url") + + if scrapedata is None or not 'files' in scrapedata: + self.status = self.utility.lang.get('cantgetdatafromtracker') + else: + scrapedata = scrapedata['files'] + for i in scrapedata.keys(): + if binascii.b2a_hex(i) == info_hash_hex: + self.currentpeer = str(scrapedata[i]['incomplete']) + self.currentseed = str(scrapedata[i]['complete']) + self.status = self.utility.lang.get('scrapingdone') + + self.updateTorrent() + + if DEBUG: + print "scrapethread: done scraping" + + def updateTorrent(self): + wx.CallAfter(self.OnUpdateTorrent) + + + def OnUpdateTorrent(self): + if not self.manualscrape: + # Don't update status information if doing an automatic scrape + status = "" + else: + status = self.status + + # The thread itself will update the list for its scraping infos + self.torrent.updateScrapeData(self.currentpeer, self.currentseed, status) \ No newline at end of file diff --git a/tribler-mod/Tribler/Main/Utility/helpers.py b/tribler-mod/Tribler/Main/Utility/helpers.py new file mode 100644 index 0000000..708a3d0 --- /dev/null +++ b/tribler-mod/Tribler/Main/Utility/helpers.py @@ -0,0 +1,202 @@ +from time import localtime, strftime +# Written by ABC authors +# see LICENSE.txt for license information + +import sys +import os +import socket + +from threading import Event, Semaphore +from time import sleep +from traceback import print_exc +#from cStringIO import StringIO + +from Tribler.Core.BitTornado.bencode import bdecode +from Tribler.Core.defaults import dldefaults as BTDefaults +from Tribler.Core.BitTornado.parseargs import parseargs +from Tribler.Core.BitTornado.zurllib import urlopen + +DEBUG = False +################################################################ +# +# Helper methods +# +# Contains commonly used helper functions +# +################################################################ + +# +# Check to see if a file both exists and is readable +# +def existsAndIsReadable(filename): + return os.access(filename, os.F_OK) and os.access(filename, os.R_OK) + +# +# Intersection of two lists (or dictionaries) +# +def intersection(list1, list2): + if list1 is None or list2 is None: + return [] + + # (Order matters slightly so that has_key is called fewer times) + if 
len(list1) < len(list2): + smaller = list1 + bigger = list2 + else: + smaller = list2 + bigger = list1 + + int_dict = {} + if isinstance(bigger, dict): + bigger_dict = bigger + else: + bigger_dict = {} + for e in bigger: + bigger_dict[e] = 1 + for e in smaller: + if e in bigger_dict: + int_dict[e] = bigger_dict[e] + return int_dict.keys() + +# +# Union of two lists (or dictionaries) +# +def union(list1, list2): + if list1 is None: + list1 = {} + if list2 is None: + list2 = {} + + # (Order matters slightly so that has_key is called fewer times) + if len(list1) < len(list2): + smaller = list1 + bigger = list2 + else: + smaller = list2 + bigger = list1 + + if isinstance(bigger, dict): + union_dict = bigger + else: + union_dict = {} + for e in bigger: + union_dict[e] = bigger[e] + for e in smaller: + union_dict[e] = smaller[e] + return union_dict + +# +# Difference of two dictionaries +# (A - B) +# +def difference(list1, list2): + if list2 is None: + return list1 + if list1 is None: + return {} + + diff_dict = list1.copy() + for e in list2: + if e in diff_dict: + del diff_dict[e] + return diff_dict + +# +# Get a socket to send on +# +def getClientSocket(host, port): + s = None + if DEBUG: + print 'getClientSocket(%s, %d)' % (host, port) + for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM): + af, socktype, proto, canonname, sa = res + try: + s = socket.socket(af, socktype, proto) + except socket.error: + s = None + continue + + try: + s.connect(sa) + except socket.error: + print_exc() + s.close() + s = None + continue + break + + return s + +# +# Get a socket to listen on +# +def getServerSocket(host, port): + s = None + + for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, socket.AI_PASSIVE): + af, socktype, proto, canonname, sa = res + try: + s = socket.socket(af, socktype, proto) + except socket.error: + print_exc() + s = None + continue + try: + s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + s.bind(sa) + s.listen(1) + except socket.error: + print_exc() + s.close() + s = None + continue + break + + return s + +# +# Get a socket (either client or server) +# Will make up to 5 attempts to get the socket +# +def getSocket(host, port, sockettype = "client", attempt = 5): + s = None + + tries = 0 + + while s is None and tries < attempt: + try: + if sockettype == "server": + s = getServerSocket(host, port) + else: + s = getClientSocket(host, port) + except: + s = None + + if s is None: + # Try several times, increase in time each try + sleep(0.01 * tries) + tries += 1 + + return s + + +def stopTorrentsIfNeeded(torrentlist): + # Error : all selected torrents must be inactive to get extracted + showDialog = True + + # See which torrents are active + activetorrents = [ABCTorrentTemp for ABCTorrentTemp in torrentlist if ABCTorrentTemp.status.isActive()] + + # Ask to stop other torrents if necessary + if activetorrents > 0: + singleTorrent = len(activetorrents) == 1 + for ABCTorrentTemp in activetorrents: + if ABCTorrentTemp.dialogs.stopIfNeeded(showDialog, singleTorrent): + # Torrent was stopped, don't show the dialog anymore + showDialog = False + else: + # Selected not to stop the torrent, return False + return False + + # At this point all selected torrents should be stopped + return True diff --git a/tribler-mod/Tribler/Main/Utility/helpers.py.bak b/tribler-mod/Tribler/Main/Utility/helpers.py.bak new file mode 100644 index 0000000..5ef226e --- /dev/null +++ b/tribler-mod/Tribler/Main/Utility/helpers.py.bak @@ -0,0 +1,201 @@ +# 
Written by ABC authors +# see LICENSE.txt for license information + +import sys +import os +import socket + +from threading import Event, Semaphore +from time import sleep +from traceback import print_exc +#from cStringIO import StringIO + +from Tribler.Core.BitTornado.bencode import bdecode +from Tribler.Core.defaults import dldefaults as BTDefaults +from Tribler.Core.BitTornado.parseargs import parseargs +from Tribler.Core.BitTornado.zurllib import urlopen + +DEBUG = False +################################################################ +# +# Helper methods +# +# Contains commonly used helper functions +# +################################################################ + +# +# Check to see if a file both exists and is readable +# +def existsAndIsReadable(filename): + return os.access(filename, os.F_OK) and os.access(filename, os.R_OK) + +# +# Intersection of two lists (or dictionaries) +# +def intersection(list1, list2): + if list1 is None or list2 is None: + return [] + + # (Order matters slightly so that has_key is called fewer times) + if len(list1) < len(list2): + smaller = list1 + bigger = list2 + else: + smaller = list2 + bigger = list1 + + int_dict = {} + if isinstance(bigger, dict): + bigger_dict = bigger + else: + bigger_dict = {} + for e in bigger: + bigger_dict[e] = 1 + for e in smaller: + if e in bigger_dict: + int_dict[e] = bigger_dict[e] + return int_dict.keys() + +# +# Union of two lists (or dictionaries) +# +def union(list1, list2): + if list1 is None: + list1 = {} + if list2 is None: + list2 = {} + + # (Order matters slightly so that has_key is called fewer times) + if len(list1) < len(list2): + smaller = list1 + bigger = list2 + else: + smaller = list2 + bigger = list1 + + if isinstance(bigger, dict): + union_dict = bigger + else: + union_dict = {} + for e in bigger: + union_dict[e] = bigger[e] + for e in smaller: + union_dict[e] = smaller[e] + return union_dict + +# +# Difference of two dictionaries +# (A - B) +# +def difference(list1, list2): + if list2 is None: + return list1 + if list1 is None: + return {} + + diff_dict = list1.copy() + for e in list2: + if e in diff_dict: + del diff_dict[e] + return diff_dict + +# +# Get a socket to send on +# +def getClientSocket(host, port): + s = None + if DEBUG: + print 'getClientSocket(%s, %d)' % (host, port) + for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM): + af, socktype, proto, canonname, sa = res + try: + s = socket.socket(af, socktype, proto) + except socket.error: + s = None + continue + + try: + s.connect(sa) + except socket.error: + print_exc() + s.close() + s = None + continue + break + + return s + +# +# Get a socket to listen on +# +def getServerSocket(host, port): + s = None + + for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, socket.AI_PASSIVE): + af, socktype, proto, canonname, sa = res + try: + s = socket.socket(af, socktype, proto) + except socket.error: + print_exc() + s = None + continue + try: + s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + s.bind(sa) + s.listen(1) + except socket.error: + print_exc() + s.close() + s = None + continue + break + + return s + +# +# Get a socket (either client or server) +# Will make up to 5 attempts to get the socket +# +def getSocket(host, port, sockettype = "client", attempt = 5): + s = None + + tries = 0 + + while s is None and tries < attempt: + try: + if sockettype == "server": + s = getServerSocket(host, port) + else: + s = getClientSocket(host, port) + except: + s = None + + if s is None: + 
# Try several times, increase in time each try + sleep(0.01 * tries) + tries += 1 + + return s + + +def stopTorrentsIfNeeded(torrentlist): + # Error : all selected torrents must be inactive to get extracted + showDialog = True + + # See which torrents are active + activetorrents = [ABCTorrentTemp for ABCTorrentTemp in torrentlist if ABCTorrentTemp.status.isActive()] + + # Ask to stop other torrents if necessary + if activetorrents > 0: + singleTorrent = len(activetorrents) == 1 + for ABCTorrentTemp in activetorrents: + if ABCTorrentTemp.dialogs.stopIfNeeded(showDialog, singleTorrent): + # Torrent was stopped, don't show the dialog anymore + showDialog = False + else: + # Selected not to stop the torrent, return False + return False + + # At this point all selected torrents should be stopped + return True diff --git a/tribler-mod/Tribler/Main/Utility/regchecker.py b/tribler-mod/Tribler/Main/Utility/regchecker.py new file mode 100644 index 0000000..d38e0c9 --- /dev/null +++ b/tribler-mod/Tribler/Main/Utility/regchecker.py @@ -0,0 +1,167 @@ +from time import localtime, strftime +# Written by ABC authors +# see LICENSE.txt for license information + +######################################################################## +# File ABCRegGUI.py v1.0 # +# Tool for associate/unassociate torrent file with ABC # +######################################################################## + +import sys +import os + +if (sys.platform == 'win32'): + import _winreg + + # short for PyHKEY from "_winreg" module + HKCR = _winreg.HKEY_CLASSES_ROOT + HKLM = _winreg.HKEY_LOCAL_MACHINE + HKCU = _winreg.HKEY_CURRENT_USER +else: + HKCR = 0 + HKLM = 1 + HKCU = 2 + +DEBUG = False + +################################################################ +# +# Class: RegChecker +# +# Used to check whether or not ABC is associated as the +# default BitTorrent application +# +################################################################ +class RegChecker: + def __init__(self, utility): + self.utility = utility + + if (sys.platform != 'win32'): + return + + abcpath = os.path.join(self.utility.getPath(), "tribler.exe") +# abcpath = os.path.normcase(abcpath) + iconpath = os.path.join(self.utility.getPath(), "torrenticon.ico") + + # Arno: 2007-06-18: Assuming no concurrency on TRIBLER_TORRENT_EXT + # tuple (array) with key to register + self.reg_data = [ (r".torrent", "", "bittorrent", _winreg.REG_SZ), + (r".torrent", "Content Type", r"application/x-bittorrent", _winreg.REG_SZ), + (r"MIME\Database\Content Type\application/x-bittorrent", "Extension", ".torrent", _winreg.REG_SZ), + (r"bittorrent", "", "TORRENT File", _winreg.REG_SZ), + (r"bittorrent\DefaultIcon", "", iconpath,_winreg.REG_SZ), + (r"bittorrent", "EditFlags", chr(0)+chr(0)+chr(1)+chr(0), _winreg.REG_BINARY), + (r"bittorrent\shell", "", "open", _winreg.REG_SZ), + (r"bittorrent\shell\open\command", "", "\"" + abcpath + "\" \"%1\"", _winreg.REG_SZ)] + self.reg_data_delete = [ (r"bittorrent\shell\open\ddeexec") ] + + # tuple (array) with key to delete + self.unreg_data = [ (r"bittorrent\shell\open\command"), + (r"bittorrent\shell\open"), + (r"bittorrent\shell"), + (r"bittorrent"), + (r"MIME\Database\Content Type\application/x-bittorrent"), + (r".torrent") ] + + # function that test Windows register for key & value exist + def testRegistry(self): + if (sys.platform != 'win32'): + return False + + key_name, value_name, value_data, value_type = self.reg_data[7] + + try: + # test that shell/open association with ABC exist + _abc_key = _winreg.OpenKey(HKCR, key_name, 0, 
_winreg.KEY_READ) + _value_data, _value_type = _winreg.QueryValueEx(_abc_key, value_name) + _winreg.CloseKey(_abc_key) + + _value_data = os.path.normcase(_value_data) + value_data = os.path.normcase(value_data) + + if _value_data != value_data: + # association with ABC don't exist + return False + except: + # error, test failed, key don't exist + # (could also indicate a unicode error) + return False + + # If ABC is registred, remove keys (ddeexec) that may interfere: + self.removeKeys(self.reg_data_delete) + + return True + + def updateRegistry(self, register = True): + if (sys.platform != 'win32'): + return False + + if register: + return self.registerABC() + else: + return self.unregisterABC() + + # Add a set of keys to the registry + def addKeys(self, keys): + for _key_name, _value_name, _value_data, _value_type in keys: + try: + # kreate desired key in Windows register + _abc_key = _winreg.CreateKey(HKCR, _key_name) + except EnvironmentError: + return False; + # set desired value in created Windows register key + _winreg.SetValueEx(_abc_key, _value_name, 0, _value_type, _value_data) + # close Windows register key + _winreg.CloseKey(_abc_key) + + return True + + # Remove a set of keys from the registry + def removeKeys(self, keys): + for _key_name in keys: + try: + # delete desired Windows register key + _winreg.DeleteKey(HKCR, _key_name) + except EnvironmentError: + return False; + + # function that regitered key in Windows register + def registerABC(self): + if (sys.platform != 'win32'): + return False + + # if ABC is already registered, + # we don't need to do anything + if self.testRegistry(): + return + + # "for" loop to get variable from tuple + success = self.addKeys(self.reg_data) + if not success: + return False + + # delete ddeexec key + success = self.removeKeys(self.reg_data_delete) + if not success: + return False + + return True + + # function that delete key in Windows register + def unregisterABC(self): + if (sys.platform != 'win32'): + return False + + # if ABC isn't already registered, + # we don't need to do anything + if not self.testRegistry(): + return + + # get variable for key deletion from tuple + success = self.removeKeys(self.unreg_data) + if not success: + return False + + return True + + diff --git a/tribler-mod/Tribler/Main/Utility/regchecker.py.bak b/tribler-mod/Tribler/Main/Utility/regchecker.py.bak new file mode 100644 index 0000000..b4fe5c4 --- /dev/null +++ b/tribler-mod/Tribler/Main/Utility/regchecker.py.bak @@ -0,0 +1,166 @@ +# Written by ABC authors +# see LICENSE.txt for license information + +######################################################################## +# File ABCRegGUI.py v1.0 # +# Tool for associate/unassociate torrent file with ABC # +######################################################################## + +import sys +import os + +if (sys.platform == 'win32'): + import _winreg + + # short for PyHKEY from "_winreg" module + HKCR = _winreg.HKEY_CLASSES_ROOT + HKLM = _winreg.HKEY_LOCAL_MACHINE + HKCU = _winreg.HKEY_CURRENT_USER +else: + HKCR = 0 + HKLM = 1 + HKCU = 2 + +DEBUG = False + +################################################################ +# +# Class: RegChecker +# +# Used to check whether or not ABC is associated as the +# default BitTorrent application +# +################################################################ +class RegChecker: + def __init__(self, utility): + self.utility = utility + + if (sys.platform != 'win32'): + return + + abcpath = os.path.join(self.utility.getPath(), "tribler.exe") +# abcpath = 
os.path.normcase(abcpath) + iconpath = os.path.join(self.utility.getPath(), "torrenticon.ico") + + # Arno: 2007-06-18: Assuming no concurrency on TRIBLER_TORRENT_EXT + # tuple (array) with key to register + self.reg_data = [ (r".torrent", "", "bittorrent", _winreg.REG_SZ), + (r".torrent", "Content Type", r"application/x-bittorrent", _winreg.REG_SZ), + (r"MIME\Database\Content Type\application/x-bittorrent", "Extension", ".torrent", _winreg.REG_SZ), + (r"bittorrent", "", "TORRENT File", _winreg.REG_SZ), + (r"bittorrent\DefaultIcon", "", iconpath,_winreg.REG_SZ), + (r"bittorrent", "EditFlags", chr(0)+chr(0)+chr(1)+chr(0), _winreg.REG_BINARY), + (r"bittorrent\shell", "", "open", _winreg.REG_SZ), + (r"bittorrent\shell\open\command", "", "\"" + abcpath + "\" \"%1\"", _winreg.REG_SZ)] + self.reg_data_delete = [ (r"bittorrent\shell\open\ddeexec") ] + + # tuple (array) with key to delete + self.unreg_data = [ (r"bittorrent\shell\open\command"), + (r"bittorrent\shell\open"), + (r"bittorrent\shell"), + (r"bittorrent"), + (r"MIME\Database\Content Type\application/x-bittorrent"), + (r".torrent") ] + + # function that test Windows register for key & value exist + def testRegistry(self): + if (sys.platform != 'win32'): + return False + + key_name, value_name, value_data, value_type = self.reg_data[7] + + try: + # test that shell/open association with ABC exist + _abc_key = _winreg.OpenKey(HKCR, key_name, 0, _winreg.KEY_READ) + _value_data, _value_type = _winreg.QueryValueEx(_abc_key, value_name) + _winreg.CloseKey(_abc_key) + + _value_data = os.path.normcase(_value_data) + value_data = os.path.normcase(value_data) + + if _value_data != value_data: + # association with ABC don't exist + return False + except: + # error, test failed, key don't exist + # (could also indicate a unicode error) + return False + + # If ABC is registred, remove keys (ddeexec) that may interfere: + self.removeKeys(self.reg_data_delete) + + return True + + def updateRegistry(self, register = True): + if (sys.platform != 'win32'): + return False + + if register: + return self.registerABC() + else: + return self.unregisterABC() + + # Add a set of keys to the registry + def addKeys(self, keys): + for _key_name, _value_name, _value_data, _value_type in keys: + try: + # kreate desired key in Windows register + _abc_key = _winreg.CreateKey(HKCR, _key_name) + except EnvironmentError: + return False; + # set desired value in created Windows register key + _winreg.SetValueEx(_abc_key, _value_name, 0, _value_type, _value_data) + # close Windows register key + _winreg.CloseKey(_abc_key) + + return True + + # Remove a set of keys from the registry + def removeKeys(self, keys): + for _key_name in keys: + try: + # delete desired Windows register key + _winreg.DeleteKey(HKCR, _key_name) + except EnvironmentError: + return False; + + # function that regitered key in Windows register + def registerABC(self): + if (sys.platform != 'win32'): + return False + + # if ABC is already registered, + # we don't need to do anything + if self.testRegistry(): + return + + # "for" loop to get variable from tuple + success = self.addKeys(self.reg_data) + if not success: + return False + + # delete ddeexec key + success = self.removeKeys(self.reg_data_delete) + if not success: + return False + + return True + + # function that delete key in Windows register + def unregisterABC(self): + if (sys.platform != 'win32'): + return False + + # if ABC isn't already registered, + # we don't need to do anything + if not self.testRegistry(): + return + + # get variable for 
key deletion from tuple + success = self.removeKeys(self.unreg_data) + if not success: + return False + + return True + + diff --git a/tribler-mod/Tribler/Main/Utility/utility.py b/tribler-mod/Tribler/Main/Utility/utility.py new file mode 100644 index 0000000..9704203 --- /dev/null +++ b/tribler-mod/Tribler/Main/Utility/utility.py @@ -0,0 +1,843 @@ +from time import localtime, strftime +# Written by ABC authors and Arno Bakker +# see LICENSE.txt for license information +import wx +import sys +import os +from threading import Event, Semaphore +from sha import sha +from traceback import print_exc +#from cStringIO import StringIO + +from wx.lib import masked + +from Tribler.Lang.lang import Lang +from Tribler.Core.BitTornado.bencode import bdecode +from Tribler.Core.defaults import dldefaults as BTDefaults +from Tribler.Core.defaults import DEFAULTPORT +from Tribler.Core.defaults import trackerdefaults as TrackerDefaults +from Tribler.Core.defaults import tdefdefaults as TorrentDefDefaults +from Tribler.Core.BitTornado.parseargs import parseargs +from Tribler.Core.BitTornado.zurllib import urlopen +from Tribler.Core.BitTornado.__init__ import version_id + +if sys.platform == 'win32': + from Tribler.Main.Utility.regchecker import RegChecker + +from Tribler.Utilities.configreader import ConfigReader +from Tribler.Main.Utility.compat import convertINI, moveOldConfigFiles +from Tribler.Main.Utility.constants import * #IGNORE:W0611 + +from Tribler.Core.Utilities.utilities import find_prog_in_PATH + +################################################################ +# +# Class: Utility +# +# Generic "glue" class that contains commonly used helper +# functions and helps to keep track of objects +# +################################################################ +class Utility: + def __init__(self, abcpath, configpath): + + self.version = version_id + self.abcpath = abcpath + + # Find the directory to save config files, etc. + self.dir_root = configpath + moveOldConfigFiles(self) + + self.setupConfig() + + # Setup language files + self.lang = Lang(self) + + # Convert old INI file + convertINI(self) + + # Make torrent directory (if needed) + self.MakeTorrentDir() + + self.setupTorrentMakerConfig() + + self.setupTorrentList() + + self.torrents = { "all": [], + "active": {}, + "inactive": {}, + "pause": {}, + "seeding": {}, + "downloading": {} } + + + self.accessflag = Event() + self.accessflag.set() + + self.invalidwinfilenamechar = '' + for i in range(32): + self.invalidwinfilenamechar += chr(i) + self.invalidwinfilenamechar += '"*/:<>?\\|' + + self.FILESEM = Semaphore(1) + + warned = self.config.Read('torrentassociationwarned','int') + if (sys.platform == 'win32' and not warned): + self.regchecker = RegChecker(self) + self.config.Write('torrentassociationwarned','1') + else: + self.regchecker = None + + self.lastdir = { "save" : "", + "open" : "", + "log": "" } + + # Is ABC in the process of shutting down? 
+ self.abcquitting = False +# self.abcdonequitting = False + + # Keep track of the last tab that was being viewed + self.lasttab = { "advanced" : 0, + "preferences" : 0 } + + self.languages = {} + + # Keep track of all the "ManagedList" objects in use + self.lists = {} + + self.abcfileframe = None + self.abcbuddyframe = None + + def getVersion(self): + return self.version + + + +#=============================================================================== +# def getNumPeers(self): +# return self.peer_db.getNumEncounteredPeers()#, self.peer_db.size() +# +# def getNumFiles(self): +# return self.torrent_db.getNumMetadataAndLive()#, self.torrent_db.size() +#=============================================================================== + + def getConfigPath(self): + return self.dir_root + # TODO: python 2.3.x has a bug with os.access and unicode + #return self.dir_root.decode(sys.getfilesystemencoding()) + + def setupConfig(self): + defaults = { + # MiscPanel + 'language_file': 'english.lang', + 'confirmonclose': '1', + 'associate' : '1', + # DiskPanel + 'removetorrent': '0', + 'diskfullthreshold': '1', + # RateLimitPanel + #'maxupload': '5', + 'maxuploadrate': '0', + 'maxdownloadrate': '0', + 'maxseeduploadrate': '0', + # SeedingOptionsPanel + 'uploadoption': '0', + 'uploadtimeh': '0', + 'uploadtimem': '30', + 'uploadratio': '100', + #AdvancedNetworkPanel + #AdvancedDiskPanel + #TriblerPanel + 'torrentcollectsleep':'15', # for RSS Subscriptions + # VideoPanel + 'videoplaybackmode':'0', + # Misc + 'enableweb2search':'0', + 'torrentassociationwarned':'0', + # GUI + 'window_width': '1024', + 'window_height': '670', + 'detailwindow_width': '800', + 'detailwindow_height': '500', + 'prefwindow_width': '1000', + 'prefwindow_height': '480', + 'prefwindow_split': '400', + 't4t_option': 0, # Seeding items added by Boxun + 't4t_hours': 0, + 't4t_mins': 30, + 'g2g_option': 1, + 'g2g_ratio': 75, + 'g2g_hours': 0, + 'g2g_mins': 30, + 'family_filter': 1, + 'window_x': 0, + 'window_y': 0, + } + + if sys.platform == 'win32': + defaults['mintray'] = '2' + # Don't use double quotes here, those are lost when this string is stored in the + # abc.conf file in INI-file format. The code that starts the player will add quotes + # if there is a space in this string. 
+ progfilesdir = os.path.expandvars('${PROGRAMFILES}') + #defaults['videoplayerpath'] = progfilesdir+'\\VideoLAN\\VLC\\vlc.exe' + # Path also valid on MS Vista + defaults['videoplayerpath'] = progfilesdir+'\\Windows Media Player\\wmplayer.exe' + defaults['videoanalyserpath'] = self.getPath()+'\\ffmpeg.exe' + elif sys.platform == 'darwin': + profiledir = os.path.expandvars('${HOME}') + defaults['mintray'] = '0' # tray doesn't make sense on Mac + vlcpath = find_prog_in_PATH("vlc") + if vlcpath is None: + defaults['videoplayerpath'] = "/Applications/QuickTime Player.app" + else: + defaults['videoplayerpath'] = vlcpath + ffmpegpath = find_prog_in_PATH("ffmpeg") + if ffmpegpath is None: + defaults['videoanalyserpath'] = "macbinaries/ffmpeg" + else: + defaults['videoanalyserpath'] = ffmpegpath + else: + defaults['mintray'] = '0' # Still crashes on Linux sometimes + vlcpath = find_prog_in_PATH("vlc") + if vlcpath is None: + defaults['videoplayerpath'] = "vlc" + else: + defaults['videoplayerpath'] = vlcpath + ffmpegpath = find_prog_in_PATH("ffmpeg") + if ffmpegpath is None: + defaults['videoanalyserpath'] = "ffmpeg" + else: + defaults['videoanalyserpath'] = ffmpegpath + + configfilepath = os.path.join(self.getConfigPath(), "abc.conf") + self.config = ConfigReader(configfilepath, "ABC", defaults) + + @staticmethod + def _convert__helper_4_1__4_2(abc_config, set_config_func, name, convert=lambda x:x): + if abc_config.Exists(name): + v = abc_config.Read(name) + try: + v = convert(v) + except: + pass + else: + set_config_func(v) + abc_config.DeleteEntry(name) + + def convert__presession_4_1__4_2(self, session_config): + bool_ = lambda x: x=="1" and True or False + self._convert__helper_4_1__4_2(self.config, session_config.set_buddycast, "enablerecommender", bool_) + self._convert__helper_4_1__4_2(self.config, session_config.set_buddycast_max_peers, "buddy_num", int) + self._convert__helper_4_1__4_2(self.config, session_config.set_download_help, "enabledlhelp", bool_) + self._convert__helper_4_1__4_2(self.config, session_config.set_internal_tracker_url, "internaltrackerurl") + self._convert__helper_4_1__4_2(self.config, session_config.set_listen_port, "minport", int) + self._convert__helper_4_1__4_2(self.config, session_config.set_nickname, "myname") + self._convert__helper_4_1__4_2(self.config, session_config.set_start_recommender, "startrecommender", bool_) + self._convert__helper_4_1__4_2(self.config, session_config.set_stop_collecting_threshold, "stopcollectingthreshold", int) + self._convert__helper_4_1__4_2(self.config, session_config.set_torrent_collecting, "enabledlcollecting", bool_) + self._convert__helper_4_1__4_2(self.config, session_config.set_ip_for_tracker, "ip") + self._convert__helper_4_1__4_2(self.config, session_config.set_bind_to_addresses, "bind", lambda x:[x]) + self._convert__helper_4_1__4_2(self.config, session_config.set_upnp_mode, "upnp_nat_access", int) + + def convert__postsession_4_1__4_2(self, session, default_download_config): + + # the mugshot was stored in icons/.jpg + # however... what is the permid??? 
+ safename = "%s.jpg" % sha(session.get_permid()).hexdigest() + safepath = os.path.join(self.dir_root, "icons", safename) + if os.path.exists(safepath): + session.set_mugshot(open(safepath, "r").read(), "image/jpeg") + os.remove(safepath) + + bool_ = lambda x: x=="1" and True or False + self._convert__helper_4_1__4_2(self.config, default_download_config.set_alloc_rate, "alloc_rate", int) + self._convert__helper_4_1__4_2(self.config, default_download_config.set_alloc_type, "alloc_type") + self._convert__helper_4_1__4_2(self.config, default_download_config.set_dest_dir, "defaultfolder") + self._convert__helper_4_1__4_2(self.config, default_download_config.set_double_check_writes, "double_check", bool_) + self._convert__helper_4_1__4_2(self.config, default_download_config.set_lock_files, "lock_files", bool_) + self._convert__helper_4_1__4_2(self.config, default_download_config.set_lock_while_reading, "lock_while_reading", bool_) + self._convert__helper_4_1__4_2(self.config, default_download_config.set_max_conns, "max_connections", int) + self._convert__helper_4_1__4_2(self.config, default_download_config.set_max_files_open, "max_files_open", int) + self._convert__helper_4_1__4_2(self.config, default_download_config.set_triple_check_writes, "trible_check", bool_) + + def setupTorrentMakerConfig(self): + # Arno, 2008-03-27: To keep fileformat compatible + defaults = { + 'piece_size': '0', # An index into TorrentMaker.FileInfoPanel.piece_choices + 'comment': TorrentDefDefaults['comment'], + 'created_by': TorrentDefDefaults['created by'], + 'announcedefault': TorrentDefDefaults['announce'], + 'announcehistory': '', + 'announce-list': TorrentDefDefaults['announce-list'], + 'httpseeds': TorrentDefDefaults['httpseeds'], + 'makehash_md5': str(TorrentDefDefaults['makehash_md5']), + 'makehash_crc32': str(TorrentDefDefaults['makehash_crc32']), + 'makehash_sha1': str(TorrentDefDefaults['makehash_sha1']), + 'startnow': '1', + 'savetorrent': '1', + 'createmerkletorrent': '1', + 'createtorrentsig': '0', + 'useitracker': '1', + 'manualtrackerconfig': '0' + } + + torrentmakerconfigfilepath = os.path.join(self.getConfigPath(), "maker.conf") + self.makerconfig = ConfigReader(torrentmakerconfigfilepath, "ABC/TorrentMaker", defaults) + + def setupTorrentList(self): + torrentfilepath = os.path.join(self.getConfigPath(), "torrent.list") + self.torrentconfig = ConfigReader(torrentfilepath, "list0") + + # Initialization that has to be done after the wx.App object + # has been created + def postAppInit(self,iconpath): + try: + self.icon = wx.Icon(iconpath, wx.BITMAP_TYPE_ICO) + except: + pass + + #makeActionList(self) + + def getLastDir(self, operation = "save"): + lastdir = self.lastdir[operation] + + if operation == "save": + if not os.access(lastdir, os.F_OK): + lastdir = self.config.Read('defaultfolder') + + if not os.access(lastdir, os.F_OK): + lastdir = "" + + return lastdir + + def setLastDir(self, operation, dir ): + self.lastdir[operation] = dir + + def getPath(self): + return self.abcpath + #return self.abcpath.decode(sys.getfilesystemencoding()) + + def eta_value(self, n, truncate = 3): + if n == -1: + return '' + if not n: + return '' + n = int(n) + week, r1 = divmod(n, 60 * 60 * 24 * 7) + day, r2 = divmod(r1, 60 * 60 * 24) + hour, r3 = divmod(r2, 60 * 60) + minute, sec = divmod(r3, 60) + + if week > 1000: + return '' + + weekstr = '%d' % (week) + self.lang.get('l_week') + daystr = '%d' % (day) + self.lang.get('l_day') + hourstr = '%d' % (hour) + self.lang.get('l_hour') + minutestr = '%02d' % (minute) + 
self.lang.get('l_minute') + secstr = '%02d' % (sec) + self.lang.get('l_second') + + if week > 0: + text = weekstr + if truncate > 1: + text += ":" + daystr + if truncate > 2: + text += "-" + hourstr + elif day > 0: + text = daystr + if truncate > 1: + text += "-" + hourstr + if truncate > 2: + text += ":" + minutestr + elif hour > 0: + text = hourstr + if truncate > 1: + text += ":" + minutestr + if truncate > 2: + text += ":" + secstr + else: + text = minutestr + if truncate > 1: + text += ":" + secstr + + return text + + def getMetainfo(self, src, openoptions = 'rb', style = "file"): + return getMetainfo(src,openoptions=openoptions,style=style) + + def speed_format(self, s, truncate = 1, stopearly = None): + return self.size_format(s, truncate, stopearly) + "/" + self.lang.get('l_second') + + def speed_format_new(self, s): + + if s < 102400: + text = '%2.1f KB/s' % (s/1024.0) + elif s < 1022797: + text = '%d KB/s' % (s//1024) + elif s < 104857600: + text = '%2.1f MB/s' % (s/1048576.0) + elif s < 1047527425L: + text = '%d MB/s' % (s//1048576) + elif s < 107374182400L: + text = '%2.1f GB/s' % (s/1073741824.0) + elif s < 1072668082177L: + text = '%d GB/s' % (s//1073741824) + else: + text = '%2.1f TB/s' % (s//1099511627776L) + + return text + + + def size_format(self, s, truncate = None, stopearly = None, applylabel = True, rawsize = False, showbytes = False, labelonly = False, textonly = False): + size = 0.0 + label = "" + + if truncate is None: + truncate = 2 + + if ((s < 1024) and showbytes and stopearly is None) or stopearly == "Byte": + truncate = 0 + size = s + text = "Byte" + elif ((s < 1048576) and stopearly is None) or stopearly == "KB": + size = (s/1024.0) + text = "KB" + elif ((s < 1073741824L) and stopearly is None) or stopearly == "MB": + size = (s/1048576.0) + text = "MB" + elif ((s < 1099511627776L) and stopearly is None) or stopearly == "GB": + size = (s/1073741824.0) + text = "GB" + else: + size = (s/1099511627776.0) + text = "TB" + + if textonly: + return text + + label = self.lang.get(text) + if labelonly: + return label + + if rawsize: + return size + + # At this point, only accepting 0, 1, or 2 + if truncate == 0: + text = ('%.0f' % size) + elif truncate == 1: + text = ('%.1f' % size) + else: + text = ('%.2f' % size) + + if applylabel: + text += ' ' + label + + return text + + def makeNumCtrl(self, parent, value, integerWidth = 6, fractionWidth = 0, min = 0, max = None, size = wx.DefaultSize): + if size != wx.DefaultSize: + autoSize = False + else: + autoSize = True + return masked.NumCtrl(parent, + value = value, + size = size, + integerWidth = integerWidth, + fractionWidth = fractionWidth, + allowNegative = False, + min = min, + max = max, + groupDigits = False, + useFixedWidthFont = False, + autoSize = autoSize) + + def MakeTorrentDir(self): + torrentpath = os.path.join(self.getConfigPath(), "torrent") + pathexists = os.access(torrentpath, os.F_OK) + # If the torrent directory doesn't exist, create it now + if not pathexists: + os.mkdir(torrentpath) + + def RemoveEmptyDir(self, basedir, removesubdirs = True): + # remove subdirectories + if removesubdirs: + for root, dirs, files in os.walk(basedir, topdown = False): + for name in dirs: + dirname = os.path.join(root, name) + + # Only try to delete if it exists + if os.access(dirname, os.F_OK): + if not os.listdir(dirname): + os.rmdir(dirname) + #remove folder + if os.access(basedir, os.F_OK): + if not os.listdir(basedir): + os.rmdir(basedir) + + def makeBitmap(self, bitmap, trans_color = wx.Colour(200, 200, 200)): + 
button_bmp = wx.Bitmap(os.path.join(self.getPath(), 'icons', bitmap), wx.BITMAP_TYPE_BMP) + button_mask = wx.Mask(button_bmp, trans_color) + button_bmp.SetMask(button_mask) + return button_bmp + + def makeBitmapButton(self, parent, bitmap, tooltip, event, trans_color = wx.Colour(200, 200, 200), padx=18, pady=4): + tooltiptext = self.lang.get(tooltip) + + button_bmp = self.makeBitmap(bitmap, trans_color) + + ID_BUTTON = wx.NewId() + button_btn = wx.BitmapButton(parent, ID_BUTTON, button_bmp, size=wx.Size(button_bmp.GetWidth()+padx, button_bmp.GetHeight()+pady)) + button_btn.SetToolTipString(tooltiptext) + parent.Bind(wx.EVT_BUTTON, event, button_btn) + return button_btn + + def makeBitmapButtonFit(self, parent, bitmap, tooltip, event, trans_color = wx.Colour(200, 200, 200)): + tooltiptext = self.lang.get(tooltip) + + button_bmp = self.makeBitmap(bitmap, trans_color) + + ID_BUTTON = wx.NewId() + button_btn = wx.BitmapButton(parent, ID_BUTTON, button_bmp, size=wx.Size(button_bmp.GetWidth(), button_bmp.GetHeight())) + button_btn.SetToolTipString(tooltiptext) + parent.Bind(wx.EVT_BUTTON, event, button_btn) + return button_btn + + def getBTParams(self, skipcheck = False): + # Construct BT params + ########################### + btparams = [] + + btparams.append("--display_interval") + btparams.append(self.config.Read('display_interval')) + + # Use single port only + btparams.append("--minport") + btparams.append(self.config.Read('minport')) + btparams.append("--maxport") + btparams.append(self.config.Read('minport')) + +# btparams.append("--random_port") +# btparams.append(self.config.Read('randomport')) + + #if self.config.Read('ipv6') == "1": + # btparams.append("--ipv6_enable") + # btparams.append(self.config.Read('ipv6')) + # btparams.append("--ipv6_binds_v4") + # btparams.append(self.config.Read('ipv6_binds_v4')) + + # Fast resume + btparams.append("--selector_enabled") + btparams.append(self.config.Read('fastresume')) + + btparams.append("--auto_kick") + btparams.append(self.config.Read('kickban')) + btparams.append("--security") + btparams.append(self.config.Read('notsameip')) + + btparams.append("--max_upload_rate") + btparams.append("0") + + paramlist = [ "ip", + "bind", + "alloc_rate", + "alloc_type", + "double_check", + "triple_check", + "lock_while_reading", + "lock_files", + "min_peers", + "max_files_open", + "max_connections", + "upnp_nat_access", + "auto_flush", + "ut_pex_max_addrs_from_peer"] + + for param in paramlist: + value = self.config.Read(param) + if value != "": + btparams.append("--" + param) + btparams.append(value) + + config, args = parseargs(btparams, BTDefaults) + + return config + + def getTrackerParams(self): + tconfig = {} + for k,v,expl in TrackerDefaults: + tconfig[k] = v + + tconfig['port'] = DEFAULTPORT + dir = os.path.join(self.getConfigPath(),'itracker') + dfile = os.path.join(dir,'tracker.db') + tconfig['dfile'] = dfile + tconfig['allowed_dir'] = dir + tconfig['favicon'] = os.path.join(self.getPath(),'tribler.ico') + #tconfig['save_dfile_interval'] = 20 + tconfig['dfile_format'] = 'pickle' # We use unicode filenames, so bencode won't work + + return tconfig + + + + # Check if str is a valid Windows file name (or unit name if unit is true) + # If the filename isn't valid: returns a fixed name + # If the filename is valid: returns an empty string + def fixWindowsName(self, name, unit = False): + if unit and (len(name) != 2 or name[1] != ':'): + return 'c:' + if not name or name == '.' 
or name == '..': + return '_' + if unit: + name = name[0] + fixed = False + if len(name) > 250: + name = name[:250] + fixed = True + fixedname = '' + spaces = 0 + for c in name: + if c in self.invalidwinfilenamechar: + fixedname += '_' + fixed = True + else: + fixedname += c + if c == ' ': + spaces += 1 + if fixed: + return fixedname + elif spaces == len(name): + # contains only spaces + return '_' + else: + return '' + + def checkWinPath(self, parent, pathtocheck): + if pathtocheck and pathtocheck[-1] == '\\' and pathtocheck != '\\\\': + pathitems = pathtocheck[:-1].split('\\') + else: + pathitems = pathtocheck.split('\\') + nexttotest = 1 + if self.isPathRelative(pathtocheck): + # Relative path + # Empty relative path is allowed + if pathtocheck == '': + return True + fixedname = self.fixWindowsName(pathitems[0]) + if fixedname: + dlg = wx.MessageDialog(parent, + pathitems[0] + '\n' + \ + self.lang.get('invalidwinname') + '\n'+ \ + self.lang.get('suggestedname') + '\n\n' + \ + fixedname, + self.lang.get('error'), wx.ICON_ERROR) + dlg.ShowModal() + dlg.Destroy() + return False + else: + # Absolute path + # An absolute path must have at least one '\' + if not '\\' in pathtocheck: + dlg = wx.MessageDialog(parent, pathitems[0] + '\n' + self.lang.get('errorinvalidpath'), + self.lang.get('error'), wx.ICON_ERROR) + dlg.ShowModal() + dlg.Destroy() + return False + if pathtocheck[:2] != '\\\\': + # Not a network path + fixedname = self.fixWindowsName(pathitems[0], unit = True) + if fixedname: + dlg = wx.MessageDialog(parent, + pathitems[0] + '\n' + \ + self.lang.get('invalidwinname') + \ + fixedname, + self.lang.get('error'), wx.ICON_ERROR) + dlg.ShowModal() + dlg.Destroy() + return False + else: + # Network path + nexttotest = 2 + + for name in pathitems[nexttotest:]: + fixedname = self.fixWindowsName(name) + if fixedname: + dlg = wx.MessageDialog(parent, name + '\n' + self.lang.get('errorinvalidwinname') + fixedname, + self.lang.get('error'), wx.ICON_ERROR) + dlg.ShowModal() + dlg.Destroy() + return False + + return True + + def isPathRelative(self, path): + if len(path) < 2 or path[1] != ':' and path[:2] != '\\\\': + return True + return False + + # Get a dictionary with information about a font + def getInfoFromFont(self, font): + default = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT) + + try: + if font.Ok(): + font_to_use = font + else: + font_to_use = default + + fontname = font_to_use.GetFaceName() + fontsize = font_to_use.GetPointSize() + fontstyle = font_to_use.GetStyle() + fontweight = font_to_use.GetWeight() + + fontinfo = {'name': fontname, + 'size': fontsize, + 'style': fontstyle, + 'weight': fontweight } + except: + fontinfo = {'name': "", + 'size': 8, + 'style': wx.FONTSTYLE_NORMAL, + 'weight': wx.FONTWEIGHT_NORMAL } + + return fontinfo + + + def getFontFromInfo(self, fontinfo): + size = fontinfo['size'] + name = fontinfo['name'] + style = fontinfo['style'] + weight = fontinfo['weight'] + + try: + font = wx.Font(size, wx.DEFAULT, style, weight, faceName = name) + except: + font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT) + + return font + + # Make an entry for a popup menu + def makePopup(self, menu, event = None, label = "", extralabel = "", bindto = None, type="normal", status=""): + text = "" + if label != "": + text = self.lang.get(label) + text += extralabel + + newid = wx.NewId() + if event is not None: + if bindto is None: + bindto = menu + bindto.Bind(wx.EVT_MENU, event, id = newid) + + if type == "normal": + menu.Append(newid, text) + elif type == 
"checkitem": + menu.AppendCheckItem(newid, text) + if status == "active": + menu.Check(newid,True) + + if event is None: + menu.Enable(newid, False) + + return newid + + +def printTorrent(torrent, pre = ''): + for key, value in torrent.items(): + if type(value) == dict: + printTorrent(value, pre+' '+key) + elif key.lower() not in ['pieces', 'thumbnail', 'preview']: + print '%s | %s: %s' % (pre, key, value) + +def getMetainfo(src, openoptions = 'rb', style = "file"): + if src is None: + return None + + metainfo = None + try: + metainfo_file = None + # We're getting a url + if style == "rawdata": + return bdecode(src) + elif style == "url": + metainfo_file = urlopen(src) + # We're getting a file that exists + elif os.access(src, os.R_OK): + metainfo_file = open(src, openoptions) + + if metainfo_file is not None: + metainfo = bdecode(metainfo_file.read()) + metainfo_file.close() + except: + print_exc() + if metainfo_file is not None: + try: + metainfo_file.close() + except: + pass + metainfo = None + return metainfo + +def copyTorrent(torrent): + # make a copy of a torrent, to check if any of its "basic" props has been changed + # NB: only copies basic properties + basic_keys = ['infohash', 'num_seeders','num_leechers', + 'myDownloadHistory','web2', 'preview', 'simRank'] + if torrent is None: + return None + ntorrent = {} + for key in basic_keys: + value = torrent.get(key) + if not value is None: + ntorrent[key] = value + return ntorrent + +def similarTorrent(t1, t2): + # make a copy of a torrent, to check if any of its "basic" props has been changed + # NB: only copies basic properties + basic_keys = ['infohash', 'num_seeders','num_leechers', + 'myDownloadHistory','web2', 'preview', 'simRank'] + + if (t1 is None or t2 is None): + return (t1 is None and t2 is None) + + for key in basic_keys: + v1 = t1.get(key) + v2 = t2.get(key) + if v1 != v2: + return False + return True + +def copyPeer(peer): + # make a copy of a peer, to check if any of its "basic" props has been changed + # NB: only copies basic properties + basic_keys = ['permid', 'last_connected', 'simRank', 'similarity', 'name', 'friend', + 'num_peers', 'num_torrents', 'num_prefs', 'num_queries'] + if peer is None: + return None + npeer = {} + for key in basic_keys: + value = peer.get(key) + if not value is None: + npeer[key] = value + return npeer + +def similarPeer(t1, t2): + # make a copy of a peer, to check if any of its "basic" props has been changed + # NB: only copies basic properties + basic_keys = ['permid', 'last_connected', 'simRank', 'similarity', 'name', 'friend', + 'num_peers', 'num_torrents', 'num_prefs', 'num_queries'] + + if (t1 is None or t2 is None): + return (t1 is None and t2 is None) + + for key in basic_keys: + v1 = t1.get(key) + v2 = t2.get(key) + if v1 != v2: + return False + return True + + diff --git a/tribler-mod/Tribler/Main/Utility/utility.py.bak b/tribler-mod/Tribler/Main/Utility/utility.py.bak new file mode 100644 index 0000000..3a41544 --- /dev/null +++ b/tribler-mod/Tribler/Main/Utility/utility.py.bak @@ -0,0 +1,842 @@ +# Written by ABC authors and Arno Bakker +# see LICENSE.txt for license information +import wx +import sys +import os +from threading import Event, Semaphore +from sha import sha +from traceback import print_exc +#from cStringIO import StringIO + +from wx.lib import masked + +from Tribler.Lang.lang import Lang +from Tribler.Core.BitTornado.bencode import bdecode +from Tribler.Core.defaults import dldefaults as BTDefaults +from Tribler.Core.defaults import DEFAULTPORT +from 
Tribler.Core.defaults import trackerdefaults as TrackerDefaults +from Tribler.Core.defaults import tdefdefaults as TorrentDefDefaults +from Tribler.Core.BitTornado.parseargs import parseargs +from Tribler.Core.BitTornado.zurllib import urlopen +from Tribler.Core.BitTornado.__init__ import version_id + +if sys.platform == 'win32': + from Tribler.Main.Utility.regchecker import RegChecker + +from Tribler.Utilities.configreader import ConfigReader +from Tribler.Main.Utility.compat import convertINI, moveOldConfigFiles +from Tribler.Main.Utility.constants import * #IGNORE:W0611 + +from Tribler.Core.Utilities.utilities import find_prog_in_PATH + +################################################################ +# +# Class: Utility +# +# Generic "glue" class that contains commonly used helper +# functions and helps to keep track of objects +# +################################################################ +class Utility: + def __init__(self, abcpath, configpath): + + self.version = version_id + self.abcpath = abcpath + + # Find the directory to save config files, etc. + self.dir_root = configpath + moveOldConfigFiles(self) + + self.setupConfig() + + # Setup language files + self.lang = Lang(self) + + # Convert old INI file + convertINI(self) + + # Make torrent directory (if needed) + self.MakeTorrentDir() + + self.setupTorrentMakerConfig() + + self.setupTorrentList() + + self.torrents = { "all": [], + "active": {}, + "inactive": {}, + "pause": {}, + "seeding": {}, + "downloading": {} } + + + self.accessflag = Event() + self.accessflag.set() + + self.invalidwinfilenamechar = '' + for i in range(32): + self.invalidwinfilenamechar += chr(i) + self.invalidwinfilenamechar += '"*/:<>?\\|' + + self.FILESEM = Semaphore(1) + + warned = self.config.Read('torrentassociationwarned','int') + if (sys.platform == 'win32' and not warned): + self.regchecker = RegChecker(self) + self.config.Write('torrentassociationwarned','1') + else: + self.regchecker = None + + self.lastdir = { "save" : "", + "open" : "", + "log": "" } + + # Is ABC in the process of shutting down? 
+ self.abcquitting = False +# self.abcdonequitting = False + + # Keep track of the last tab that was being viewed + self.lasttab = { "advanced" : 0, + "preferences" : 0 } + + self.languages = {} + + # Keep track of all the "ManagedList" objects in use + self.lists = {} + + self.abcfileframe = None + self.abcbuddyframe = None + + def getVersion(self): + return self.version + + + +#=============================================================================== +# def getNumPeers(self): +# return self.peer_db.getNumEncounteredPeers()#, self.peer_db.size() +# +# def getNumFiles(self): +# return self.torrent_db.getNumMetadataAndLive()#, self.torrent_db.size() +#=============================================================================== + + def getConfigPath(self): + return self.dir_root + # TODO: python 2.3.x has a bug with os.access and unicode + #return self.dir_root.decode(sys.getfilesystemencoding()) + + def setupConfig(self): + defaults = { + # MiscPanel + 'language_file': 'english.lang', + 'confirmonclose': '1', + 'associate' : '1', + # DiskPanel + 'removetorrent': '0', + 'diskfullthreshold': '1', + # RateLimitPanel + #'maxupload': '5', + 'maxuploadrate': '0', + 'maxdownloadrate': '0', + 'maxseeduploadrate': '0', + # SeedingOptionsPanel + 'uploadoption': '0', + 'uploadtimeh': '0', + 'uploadtimem': '30', + 'uploadratio': '100', + #AdvancedNetworkPanel + #AdvancedDiskPanel + #TriblerPanel + 'torrentcollectsleep':'15', # for RSS Subscriptions + # VideoPanel + 'videoplaybackmode':'0', + # Misc + 'enableweb2search':'0', + 'torrentassociationwarned':'0', + # GUI + 'window_width': '1024', + 'window_height': '670', + 'detailwindow_width': '800', + 'detailwindow_height': '500', + 'prefwindow_width': '1000', + 'prefwindow_height': '480', + 'prefwindow_split': '400', + 't4t_option': 0, # Seeding items added by Boxun + 't4t_hours': 0, + 't4t_mins': 30, + 'g2g_option': 1, + 'g2g_ratio': 75, + 'g2g_hours': 0, + 'g2g_mins': 30, + 'family_filter': 1, + 'window_x': 0, + 'window_y': 0, + } + + if sys.platform == 'win32': + defaults['mintray'] = '2' + # Don't use double quotes here, those are lost when this string is stored in the + # abc.conf file in INI-file format. The code that starts the player will add quotes + # if there is a space in this string. 
+ progfilesdir = os.path.expandvars('${PROGRAMFILES}') + #defaults['videoplayerpath'] = progfilesdir+'\\VideoLAN\\VLC\\vlc.exe' + # Path also valid on MS Vista + defaults['videoplayerpath'] = progfilesdir+'\\Windows Media Player\\wmplayer.exe' + defaults['videoanalyserpath'] = self.getPath()+'\\ffmpeg.exe' + elif sys.platform == 'darwin': + profiledir = os.path.expandvars('${HOME}') + defaults['mintray'] = '0' # tray doesn't make sense on Mac + vlcpath = find_prog_in_PATH("vlc") + if vlcpath is None: + defaults['videoplayerpath'] = "/Applications/QuickTime Player.app" + else: + defaults['videoplayerpath'] = vlcpath + ffmpegpath = find_prog_in_PATH("ffmpeg") + if ffmpegpath is None: + defaults['videoanalyserpath'] = "macbinaries/ffmpeg" + else: + defaults['videoanalyserpath'] = ffmpegpath + else: + defaults['mintray'] = '0' # Still crashes on Linux sometimes + vlcpath = find_prog_in_PATH("vlc") + if vlcpath is None: + defaults['videoplayerpath'] = "vlc" + else: + defaults['videoplayerpath'] = vlcpath + ffmpegpath = find_prog_in_PATH("ffmpeg") + if ffmpegpath is None: + defaults['videoanalyserpath'] = "ffmpeg" + else: + defaults['videoanalyserpath'] = ffmpegpath + + configfilepath = os.path.join(self.getConfigPath(), "abc.conf") + self.config = ConfigReader(configfilepath, "ABC", defaults) + + @staticmethod + def _convert__helper_4_1__4_2(abc_config, set_config_func, name, convert=lambda x:x): + if abc_config.Exists(name): + v = abc_config.Read(name) + try: + v = convert(v) + except: + pass + else: + set_config_func(v) + abc_config.DeleteEntry(name) + + def convert__presession_4_1__4_2(self, session_config): + bool_ = lambda x: x=="1" and True or False + self._convert__helper_4_1__4_2(self.config, session_config.set_buddycast, "enablerecommender", bool_) + self._convert__helper_4_1__4_2(self.config, session_config.set_buddycast_max_peers, "buddy_num", int) + self._convert__helper_4_1__4_2(self.config, session_config.set_download_help, "enabledlhelp", bool_) + self._convert__helper_4_1__4_2(self.config, session_config.set_internal_tracker_url, "internaltrackerurl") + self._convert__helper_4_1__4_2(self.config, session_config.set_listen_port, "minport", int) + self._convert__helper_4_1__4_2(self.config, session_config.set_nickname, "myname") + self._convert__helper_4_1__4_2(self.config, session_config.set_start_recommender, "startrecommender", bool_) + self._convert__helper_4_1__4_2(self.config, session_config.set_stop_collecting_threshold, "stopcollectingthreshold", int) + self._convert__helper_4_1__4_2(self.config, session_config.set_torrent_collecting, "enabledlcollecting", bool_) + self._convert__helper_4_1__4_2(self.config, session_config.set_ip_for_tracker, "ip") + self._convert__helper_4_1__4_2(self.config, session_config.set_bind_to_addresses, "bind", lambda x:[x]) + self._convert__helper_4_1__4_2(self.config, session_config.set_upnp_mode, "upnp_nat_access", int) + + def convert__postsession_4_1__4_2(self, session, default_download_config): + + # the mugshot was stored in icons/.jpg + # however... what is the permid??? 
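+ # Presumably the "permid" is just the Session's PermID (its public key):
+ # the old 4.1 code appears to have saved the mugshot under
+ # icons/<sha(permid) hexdigest>.jpg, which is the filename rebuilt below.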
+ safename = "%s.jpg" % sha(session.get_permid()).hexdigest() + safepath = os.path.join(self.dir_root, "icons", safename) + if os.path.exists(safepath): + session.set_mugshot(open(safepath, "r").read(), "image/jpeg") + os.remove(safepath) + + bool_ = lambda x: x=="1" and True or False + self._convert__helper_4_1__4_2(self.config, default_download_config.set_alloc_rate, "alloc_rate", int) + self._convert__helper_4_1__4_2(self.config, default_download_config.set_alloc_type, "alloc_type") + self._convert__helper_4_1__4_2(self.config, default_download_config.set_dest_dir, "defaultfolder") + self._convert__helper_4_1__4_2(self.config, default_download_config.set_double_check_writes, "double_check", bool_) + self._convert__helper_4_1__4_2(self.config, default_download_config.set_lock_files, "lock_files", bool_) + self._convert__helper_4_1__4_2(self.config, default_download_config.set_lock_while_reading, "lock_while_reading", bool_) + self._convert__helper_4_1__4_2(self.config, default_download_config.set_max_conns, "max_connections", int) + self._convert__helper_4_1__4_2(self.config, default_download_config.set_max_files_open, "max_files_open", int) + self._convert__helper_4_1__4_2(self.config, default_download_config.set_triple_check_writes, "trible_check", bool_) + + def setupTorrentMakerConfig(self): + # Arno, 2008-03-27: To keep fileformat compatible + defaults = { + 'piece_size': '0', # An index into TorrentMaker.FileInfoPanel.piece_choices + 'comment': TorrentDefDefaults['comment'], + 'created_by': TorrentDefDefaults['created by'], + 'announcedefault': TorrentDefDefaults['announce'], + 'announcehistory': '', + 'announce-list': TorrentDefDefaults['announce-list'], + 'httpseeds': TorrentDefDefaults['httpseeds'], + 'makehash_md5': str(TorrentDefDefaults['makehash_md5']), + 'makehash_crc32': str(TorrentDefDefaults['makehash_crc32']), + 'makehash_sha1': str(TorrentDefDefaults['makehash_sha1']), + 'startnow': '1', + 'savetorrent': '1', + 'createmerkletorrent': '1', + 'createtorrentsig': '0', + 'useitracker': '1', + 'manualtrackerconfig': '0' + } + + torrentmakerconfigfilepath = os.path.join(self.getConfigPath(), "maker.conf") + self.makerconfig = ConfigReader(torrentmakerconfigfilepath, "ABC/TorrentMaker", defaults) + + def setupTorrentList(self): + torrentfilepath = os.path.join(self.getConfigPath(), "torrent.list") + self.torrentconfig = ConfigReader(torrentfilepath, "list0") + + # Initialization that has to be done after the wx.App object + # has been created + def postAppInit(self,iconpath): + try: + self.icon = wx.Icon(iconpath, wx.BITMAP_TYPE_ICO) + except: + pass + + #makeActionList(self) + + def getLastDir(self, operation = "save"): + lastdir = self.lastdir[operation] + + if operation == "save": + if not os.access(lastdir, os.F_OK): + lastdir = self.config.Read('defaultfolder') + + if not os.access(lastdir, os.F_OK): + lastdir = "" + + return lastdir + + def setLastDir(self, operation, dir ): + self.lastdir[operation] = dir + + def getPath(self): + return self.abcpath + #return self.abcpath.decode(sys.getfilesystemencoding()) + + def eta_value(self, n, truncate = 3): + if n == -1: + return '' + if not n: + return '' + n = int(n) + week, r1 = divmod(n, 60 * 60 * 24 * 7) + day, r2 = divmod(r1, 60 * 60 * 24) + hour, r3 = divmod(r2, 60 * 60) + minute, sec = divmod(r3, 60) + + if week > 1000: + return '' + + weekstr = '%d' % (week) + self.lang.get('l_week') + daystr = '%d' % (day) + self.lang.get('l_day') + hourstr = '%d' % (hour) + self.lang.get('l_hour') + minutestr = '%02d' % (minute) + 
self.lang.get('l_minute') + secstr = '%02d' % (sec) + self.lang.get('l_second') + + if week > 0: + text = weekstr + if truncate > 1: + text += ":" + daystr + if truncate > 2: + text += "-" + hourstr + elif day > 0: + text = daystr + if truncate > 1: + text += "-" + hourstr + if truncate > 2: + text += ":" + minutestr + elif hour > 0: + text = hourstr + if truncate > 1: + text += ":" + minutestr + if truncate > 2: + text += ":" + secstr + else: + text = minutestr + if truncate > 1: + text += ":" + secstr + + return text + + def getMetainfo(self, src, openoptions = 'rb', style = "file"): + return getMetainfo(src,openoptions=openoptions,style=style) + + def speed_format(self, s, truncate = 1, stopearly = None): + return self.size_format(s, truncate, stopearly) + "/" + self.lang.get('l_second') + + def speed_format_new(self, s): + + if s < 102400: + text = '%2.1f KB/s' % (s/1024.0) + elif s < 1022797: + text = '%d KB/s' % (s//1024) + elif s < 104857600: + text = '%2.1f MB/s' % (s/1048576.0) + elif s < 1047527425L: + text = '%d MB/s' % (s//1048576) + elif s < 107374182400L: + text = '%2.1f GB/s' % (s/1073741824.0) + elif s < 1072668082177L: + text = '%d GB/s' % (s//1073741824) + else: + text = '%2.1f TB/s' % (s//1099511627776L) + + return text + + + def size_format(self, s, truncate = None, stopearly = None, applylabel = True, rawsize = False, showbytes = False, labelonly = False, textonly = False): + size = 0.0 + label = "" + + if truncate is None: + truncate = 2 + + if ((s < 1024) and showbytes and stopearly is None) or stopearly == "Byte": + truncate = 0 + size = s + text = "Byte" + elif ((s < 1048576) and stopearly is None) or stopearly == "KB": + size = (s/1024.0) + text = "KB" + elif ((s < 1073741824L) and stopearly is None) or stopearly == "MB": + size = (s/1048576.0) + text = "MB" + elif ((s < 1099511627776L) and stopearly is None) or stopearly == "GB": + size = (s/1073741824.0) + text = "GB" + else: + size = (s/1099511627776.0) + text = "TB" + + if textonly: + return text + + label = self.lang.get(text) + if labelonly: + return label + + if rawsize: + return size + + # At this point, only accepting 0, 1, or 2 + if truncate == 0: + text = ('%.0f' % size) + elif truncate == 1: + text = ('%.1f' % size) + else: + text = ('%.2f' % size) + + if applylabel: + text += ' ' + label + + return text + + def makeNumCtrl(self, parent, value, integerWidth = 6, fractionWidth = 0, min = 0, max = None, size = wx.DefaultSize): + if size != wx.DefaultSize: + autoSize = False + else: + autoSize = True + return masked.NumCtrl(parent, + value = value, + size = size, + integerWidth = integerWidth, + fractionWidth = fractionWidth, + allowNegative = False, + min = min, + max = max, + groupDigits = False, + useFixedWidthFont = False, + autoSize = autoSize) + + def MakeTorrentDir(self): + torrentpath = os.path.join(self.getConfigPath(), "torrent") + pathexists = os.access(torrentpath, os.F_OK) + # If the torrent directory doesn't exist, create it now + if not pathexists: + os.mkdir(torrentpath) + + def RemoveEmptyDir(self, basedir, removesubdirs = True): + # remove subdirectories + if removesubdirs: + for root, dirs, files in os.walk(basedir, topdown = False): + for name in dirs: + dirname = os.path.join(root, name) + + # Only try to delete if it exists + if os.access(dirname, os.F_OK): + if not os.listdir(dirname): + os.rmdir(dirname) + #remove folder + if os.access(basedir, os.F_OK): + if not os.listdir(basedir): + os.rmdir(basedir) + + def makeBitmap(self, bitmap, trans_color = wx.Colour(200, 200, 200)): + 
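"""Load 'icons/<bitmap>' (a .bmp under the ABC install path) and mask out
+ trans_color so the bitmap is drawn with a transparent background."""
+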
button_bmp = wx.Bitmap(os.path.join(self.getPath(), 'icons', bitmap), wx.BITMAP_TYPE_BMP) + button_mask = wx.Mask(button_bmp, trans_color) + button_bmp.SetMask(button_mask) + return button_bmp + + def makeBitmapButton(self, parent, bitmap, tooltip, event, trans_color = wx.Colour(200, 200, 200), padx=18, pady=4): + tooltiptext = self.lang.get(tooltip) + + button_bmp = self.makeBitmap(bitmap, trans_color) + + ID_BUTTON = wx.NewId() + button_btn = wx.BitmapButton(parent, ID_BUTTON, button_bmp, size=wx.Size(button_bmp.GetWidth()+padx, button_bmp.GetHeight()+pady)) + button_btn.SetToolTipString(tooltiptext) + parent.Bind(wx.EVT_BUTTON, event, button_btn) + return button_btn + + def makeBitmapButtonFit(self, parent, bitmap, tooltip, event, trans_color = wx.Colour(200, 200, 200)): + tooltiptext = self.lang.get(tooltip) + + button_bmp = self.makeBitmap(bitmap, trans_color) + + ID_BUTTON = wx.NewId() + button_btn = wx.BitmapButton(parent, ID_BUTTON, button_bmp, size=wx.Size(button_bmp.GetWidth(), button_bmp.GetHeight())) + button_btn.SetToolTipString(tooltiptext) + parent.Bind(wx.EVT_BUTTON, event, button_btn) + return button_btn + + def getBTParams(self, skipcheck = False): + # Construct BT params + ########################### + btparams = [] + + btparams.append("--display_interval") + btparams.append(self.config.Read('display_interval')) + + # Use single port only + btparams.append("--minport") + btparams.append(self.config.Read('minport')) + btparams.append("--maxport") + btparams.append(self.config.Read('minport')) + +# btparams.append("--random_port") +# btparams.append(self.config.Read('randomport')) + + #if self.config.Read('ipv6') == "1": + # btparams.append("--ipv6_enable") + # btparams.append(self.config.Read('ipv6')) + # btparams.append("--ipv6_binds_v4") + # btparams.append(self.config.Read('ipv6_binds_v4')) + + # Fast resume + btparams.append("--selector_enabled") + btparams.append(self.config.Read('fastresume')) + + btparams.append("--auto_kick") + btparams.append(self.config.Read('kickban')) + btparams.append("--security") + btparams.append(self.config.Read('notsameip')) + + btparams.append("--max_upload_rate") + btparams.append("0") + + paramlist = [ "ip", + "bind", + "alloc_rate", + "alloc_type", + "double_check", + "triple_check", + "lock_while_reading", + "lock_files", + "min_peers", + "max_files_open", + "max_connections", + "upnp_nat_access", + "auto_flush", + "ut_pex_max_addrs_from_peer"] + + for param in paramlist: + value = self.config.Read(param) + if value != "": + btparams.append("--" + param) + btparams.append(value) + + config, args = parseargs(btparams, BTDefaults) + + return config + + def getTrackerParams(self): + tconfig = {} + for k,v,expl in TrackerDefaults: + tconfig[k] = v + + tconfig['port'] = DEFAULTPORT + dir = os.path.join(self.getConfigPath(),'itracker') + dfile = os.path.join(dir,'tracker.db') + tconfig['dfile'] = dfile + tconfig['allowed_dir'] = dir + tconfig['favicon'] = os.path.join(self.getPath(),'tribler.ico') + #tconfig['save_dfile_interval'] = 20 + tconfig['dfile_format'] = 'pickle' # We use unicode filenames, so bencode won't work + + return tconfig + + + + # Check if str is a valid Windows file name (or unit name if unit is true) + # If the filename isn't valid: returns a fixed name + # If the filename is valid: returns an empty string + def fixWindowsName(self, name, unit = False): + if unit and (len(name) != 2 or name[1] != ':'): + return 'c:' + if not name or name == '.' 
or name == '..': + return '_' + if unit: + name = name[0] + fixed = False + if len(name) > 250: + name = name[:250] + fixed = True + fixedname = '' + spaces = 0 + for c in name: + if c in self.invalidwinfilenamechar: + fixedname += '_' + fixed = True + else: + fixedname += c + if c == ' ': + spaces += 1 + if fixed: + return fixedname + elif spaces == len(name): + # contains only spaces + return '_' + else: + return '' + + def checkWinPath(self, parent, pathtocheck): + if pathtocheck and pathtocheck[-1] == '\\' and pathtocheck != '\\\\': + pathitems = pathtocheck[:-1].split('\\') + else: + pathitems = pathtocheck.split('\\') + nexttotest = 1 + if self.isPathRelative(pathtocheck): + # Relative path + # Empty relative path is allowed + if pathtocheck == '': + return True + fixedname = self.fixWindowsName(pathitems[0]) + if fixedname: + dlg = wx.MessageDialog(parent, + pathitems[0] + '\n' + \ + self.lang.get('invalidwinname') + '\n'+ \ + self.lang.get('suggestedname') + '\n\n' + \ + fixedname, + self.lang.get('error'), wx.ICON_ERROR) + dlg.ShowModal() + dlg.Destroy() + return False + else: + # Absolute path + # An absolute path must have at least one '\' + if not '\\' in pathtocheck: + dlg = wx.MessageDialog(parent, pathitems[0] + '\n' + self.lang.get('errorinvalidpath'), + self.lang.get('error'), wx.ICON_ERROR) + dlg.ShowModal() + dlg.Destroy() + return False + if pathtocheck[:2] != '\\\\': + # Not a network path + fixedname = self.fixWindowsName(pathitems[0], unit = True) + if fixedname: + dlg = wx.MessageDialog(parent, + pathitems[0] + '\n' + \ + self.lang.get('invalidwinname') + \ + fixedname, + self.lang.get('error'), wx.ICON_ERROR) + dlg.ShowModal() + dlg.Destroy() + return False + else: + # Network path + nexttotest = 2 + + for name in pathitems[nexttotest:]: + fixedname = self.fixWindowsName(name) + if fixedname: + dlg = wx.MessageDialog(parent, name + '\n' + self.lang.get('errorinvalidwinname') + fixedname, + self.lang.get('error'), wx.ICON_ERROR) + dlg.ShowModal() + dlg.Destroy() + return False + + return True + + def isPathRelative(self, path): + if len(path) < 2 or path[1] != ':' and path[:2] != '\\\\': + return True + return False + + # Get a dictionary with information about a font + def getInfoFromFont(self, font): + default = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT) + + try: + if font.Ok(): + font_to_use = font + else: + font_to_use = default + + fontname = font_to_use.GetFaceName() + fontsize = font_to_use.GetPointSize() + fontstyle = font_to_use.GetStyle() + fontweight = font_to_use.GetWeight() + + fontinfo = {'name': fontname, + 'size': fontsize, + 'style': fontstyle, + 'weight': fontweight } + except: + fontinfo = {'name': "", + 'size': 8, + 'style': wx.FONTSTYLE_NORMAL, + 'weight': wx.FONTWEIGHT_NORMAL } + + return fontinfo + + + def getFontFromInfo(self, fontinfo): + size = fontinfo['size'] + name = fontinfo['name'] + style = fontinfo['style'] + weight = fontinfo['weight'] + + try: + font = wx.Font(size, wx.DEFAULT, style, weight, faceName = name) + except: + font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT) + + return font + + # Make an entry for a popup menu + def makePopup(self, menu, event = None, label = "", extralabel = "", bindto = None, type="normal", status=""): + text = "" + if label != "": + text = self.lang.get(label) + text += extralabel + + newid = wx.NewId() + if event is not None: + if bindto is None: + bindto = menu + bindto.Bind(wx.EVT_MENU, event, id = newid) + + if type == "normal": + menu.Append(newid, text) + elif type == 
"checkitem": + menu.AppendCheckItem(newid, text) + if status == "active": + menu.Check(newid,True) + + if event is None: + menu.Enable(newid, False) + + return newid + + +def printTorrent(torrent, pre = ''): + for key, value in torrent.items(): + if type(value) == dict: + printTorrent(value, pre+' '+key) + elif key.lower() not in ['pieces', 'thumbnail', 'preview']: + print '%s | %s: %s' % (pre, key, value) + +def getMetainfo(src, openoptions = 'rb', style = "file"): + if src is None: + return None + + metainfo = None + try: + metainfo_file = None + # We're getting a url + if style == "rawdata": + return bdecode(src) + elif style == "url": + metainfo_file = urlopen(src) + # We're getting a file that exists + elif os.access(src, os.R_OK): + metainfo_file = open(src, openoptions) + + if metainfo_file is not None: + metainfo = bdecode(metainfo_file.read()) + metainfo_file.close() + except: + print_exc() + if metainfo_file is not None: + try: + metainfo_file.close() + except: + pass + metainfo = None + return metainfo + +def copyTorrent(torrent): + # make a copy of a torrent, to check if any of its "basic" props has been changed + # NB: only copies basic properties + basic_keys = ['infohash', 'num_seeders','num_leechers', + 'myDownloadHistory','web2', 'preview', 'simRank'] + if torrent is None: + return None + ntorrent = {} + for key in basic_keys: + value = torrent.get(key) + if not value is None: + ntorrent[key] = value + return ntorrent + +def similarTorrent(t1, t2): + # make a copy of a torrent, to check if any of its "basic" props has been changed + # NB: only copies basic properties + basic_keys = ['infohash', 'num_seeders','num_leechers', + 'myDownloadHistory','web2', 'preview', 'simRank'] + + if (t1 is None or t2 is None): + return (t1 is None and t2 is None) + + for key in basic_keys: + v1 = t1.get(key) + v2 = t2.get(key) + if v1 != v2: + return False + return True + +def copyPeer(peer): + # make a copy of a peer, to check if any of its "basic" props has been changed + # NB: only copies basic properties + basic_keys = ['permid', 'last_connected', 'simRank', 'similarity', 'name', 'friend', + 'num_peers', 'num_torrents', 'num_prefs', 'num_queries'] + if peer is None: + return None + npeer = {} + for key in basic_keys: + value = peer.get(key) + if not value is None: + npeer[key] = value + return npeer + +def similarPeer(t1, t2): + # make a copy of a peer, to check if any of its "basic" props has been changed + # NB: only copies basic properties + basic_keys = ['permid', 'last_connected', 'simRank', 'similarity', 'name', 'friend', + 'num_peers', 'num_torrents', 'num_prefs', 'num_queries'] + + if (t1 is None or t2 is None): + return (t1 is None and t2 is None) + + for key in basic_keys: + v1 = t1.get(key) + v2 = t2.get(key) + if v1 != v2: + return False + return True + + diff --git a/tribler-mod/Tribler/Main/__init__.py b/tribler-mod/Tribler/Main/__init__.py new file mode 100644 index 0000000..b7ef832 --- /dev/null +++ b/tribler-mod/Tribler/Main/__init__.py @@ -0,0 +1,3 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information diff --git a/tribler-mod/Tribler/Main/__init__.py.bak b/tribler-mod/Tribler/Main/__init__.py.bak new file mode 100644 index 0000000..395f8fb --- /dev/null +++ b/tribler-mod/Tribler/Main/__init__.py.bak @@ -0,0 +1,2 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information diff --git a/tribler-mod/Tribler/Main/crawler.py b/tribler-mod/Tribler/Main/crawler.py new file mode 100644 index 0000000..71f4305 
--- /dev/null +++ b/tribler-mod/Tribler/Main/crawler.py @@ -0,0 +1,90 @@ +from time import localtime, strftime +#!/usr/bin/python + +# modify the sys.stderr and sys.stdout for safe output +import Tribler.Debug.console + +from traceback import print_exc +import optparse +import os +import sys +import time +import re + +from Tribler.Core.API import * + +if __name__ == "__main__": + + command_line_parser = optparse.OptionParser() + command_line_parser.add_option("--statedir", action="store", type="string", help="Use an alternate statedir") + command_line_parser.add_option("--port", action="store", type="int", help="Listen at this port") + command_line_parser.add_option("--quick-connect", action="store", type="string", help="Immediately make an overlay connection to the supplied k.l.m.n:o address") + + # parse command-line arguments + opt, args = command_line_parser.parse_args() + + # what types of crawlers do we have? + options = ["database", "seedingstats", "friendship", "natcheck", "videoplayback"] + options.sort() + + # at least on crawler type should be started + if not filter(lambda type_:type_ in args, options): + print "Usage: python Tribler/Main/crawler.py" + print " --statedir STATEDIR (optional)" + print " --port PORT (optional)" + print " --quick-connect IPADDRESS:PORT (optional)" + for option in options: + print " %-38s (optional)" % option + sys.exit() + + print "Press Ctrl-C to stop the crawler" + + sscfg = SessionStartupConfig() + if opt.statedir: + sscfg.set_state_dir(os.path.realpath(opt.statedir)) + if opt.port: + sscfg.set_listen_port(opt.port) + sscfg.set_megacache(True) + sscfg.set_overlay(True) + sscfg.set_torrent_collecting(False) + sscfg.set_dialback(False) + sscfg.set_internal_tracker(False) + + s = Session(sscfg) + + # 22/10/08. 
Boudewijn: connect to a specific peer + # connect to a specific peer using the overlay + if opt.quick_connect: + match = re.match("([0-9]+)[.]([0-9]+)[.]([0-9]+)[.]([0-9]+)(?::([0-9]+))?", opt.quick_connect) + if match: + groups = list(match.groups()) + if not groups[4]: + groups[4] = "7762" + def after_connect(*args): + print args + from Tribler.Core.Overlay.SecureOverlay import SecureOverlay + + ip = ".".join(groups[0:4]) + port = int(groups[4]) + + print "Creating an overlay connection to", ip, port + overlay = SecureOverlay.getInstance() + overlay.connect_dns((ip, port), after_connect) + + else: + print "Could not decipher the --quick-connect address" + raise SystemExit + + # condition variable would be prettier, but that don't listen to + # KeyboardInterrupt + #time.sleep(sys.maxint/2048) + try: + while True: + x = sys.stdin.read() + except: + print_exc() + + s.shutdown() + time.sleep(3) + + diff --git a/tribler-mod/Tribler/Main/crawler.py.bak b/tribler-mod/Tribler/Main/crawler.py.bak new file mode 100644 index 0000000..f69184b --- /dev/null +++ b/tribler-mod/Tribler/Main/crawler.py.bak @@ -0,0 +1,89 @@ +#!/usr/bin/python + +# modify the sys.stderr and sys.stdout for safe output +import Tribler.Debug.console + +from traceback import print_exc +import optparse +import os +import sys +import time +import re + +from Tribler.Core.API import * + +if __name__ == "__main__": + + command_line_parser = optparse.OptionParser() + command_line_parser.add_option("--statedir", action="store", type="string", help="Use an alternate statedir") + command_line_parser.add_option("--port", action="store", type="int", help="Listen at this port") + command_line_parser.add_option("--quick-connect", action="store", type="string", help="Immediately make an overlay connection to the supplied k.l.m.n:o address") + + # parse command-line arguments + opt, args = command_line_parser.parse_args() + + # what types of crawlers do we have? + options = ["database", "seedingstats", "friendship", "natcheck", "videoplayback"] + options.sort() + + # at least on crawler type should be started + if not filter(lambda type_:type_ in args, options): + print "Usage: python Tribler/Main/crawler.py" + print " --statedir STATEDIR (optional)" + print " --port PORT (optional)" + print " --quick-connect IPADDRESS:PORT (optional)" + for option in options: + print " %-38s (optional)" % option + sys.exit() + + print "Press Ctrl-C to stop the crawler" + + sscfg = SessionStartupConfig() + if opt.statedir: + sscfg.set_state_dir(os.path.realpath(opt.statedir)) + if opt.port: + sscfg.set_listen_port(opt.port) + sscfg.set_megacache(True) + sscfg.set_overlay(True) + sscfg.set_torrent_collecting(False) + sscfg.set_dialback(False) + sscfg.set_internal_tracker(False) + + s = Session(sscfg) + + # 22/10/08. 
Boudewijn: connect to a specific peer + # connect to a specific peer using the overlay + if opt.quick_connect: + match = re.match("([0-9]+)[.]([0-9]+)[.]([0-9]+)[.]([0-9]+)(?::([0-9]+))?", opt.quick_connect) + if match: + groups = list(match.groups()) + if not groups[4]: + groups[4] = "7762" + def after_connect(*args): + print args + from Tribler.Core.Overlay.SecureOverlay import SecureOverlay + + ip = ".".join(groups[0:4]) + port = int(groups[4]) + + print "Creating an overlay connection to", ip, port + overlay = SecureOverlay.getInstance() + overlay.connect_dns((ip, port), after_connect) + + else: + print "Could not decipher the --quick-connect address" + raise SystemExit + + # condition variable would be prettier, but that don't listen to + # KeyboardInterrupt + #time.sleep(sys.maxint/2048) + try: + while True: + x = sys.stdin.read() + except: + print_exc() + + s.shutdown() + time.sleep(3) + + diff --git a/tribler-mod/Tribler/Main/globals.py b/tribler-mod/Tribler/Main/globals.py new file mode 100644 index 0000000..33af79d --- /dev/null +++ b/tribler-mod/Tribler/Main/globals.py @@ -0,0 +1,65 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information + +import sys +import os +import pickle + +STATEDIR_DLCONFIG = "dlconfig.pickle" + +# Global variable containing the DownloadStartupConfig to use for crearing +# Downloads +from Tribler.Core.DownloadConfig import DownloadStartupConfig +from Tribler.Core.defaults import DLDEFAULTS_VERSION,dldefaults + +class DefaultDownloadStartupConfig(DownloadStartupConfig): + __single = None + + def __init__(self,dlconfig=None): + + if DefaultDownloadStartupConfig.__single: + raise RuntimeError, "DefaultDownloadStartupConfig is singleton" + DefaultDownloadStartupConfig.__single = self + + DownloadStartupConfig.__init__(self,dlconfig=dlconfig) + + def getInstance(*args, **kw): + if DefaultDownloadStartupConfig.__single is None: + DefaultDownloadStartupConfig(*args, **kw) + return DefaultDownloadStartupConfig.__single + getInstance = staticmethod(getInstance) + + def updateToCurrentVersion(self): + oldver = self.dlconfig['version'] + if oldver != DLDEFAULTS_VERSION: + for key in dldefaults.keys(): + if key not in self.dlconfig: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","DefaultDownloadStartupConfig: Adding field",key + self.dlconfig[key] = dldefaults[key] + self.dlconfig['version'] = DLDEFAULTS_VERSION + + # + # Class method + # + def load(filename): + """ + Load a saved DownloadStartupConfig from disk. 
+ + @param filename An absolute Unicode filename + @return DefaultDownloadStartupConfig object + """ + # Class method, no locking required + f = open(filename,"rb") + dlconfig = pickle.load(f) + dscfg = DefaultDownloadStartupConfig(dlconfig) + f.close() + + dscfg.updateToCurrentVersion() + + return dscfg + load = staticmethod(load) + + +def get_default_dscfg_filename(session): + return os.path.join(session.get_state_dir(),STATEDIR_DLCONFIG) diff --git a/tribler-mod/Tribler/Main/globals.py.bak b/tribler-mod/Tribler/Main/globals.py.bak new file mode 100644 index 0000000..cbe70bb --- /dev/null +++ b/tribler-mod/Tribler/Main/globals.py.bak @@ -0,0 +1,64 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information + +import sys +import os +import pickle + +STATEDIR_DLCONFIG = "dlconfig.pickle" + +# Global variable containing the DownloadStartupConfig to use for crearing +# Downloads +from Tribler.Core.DownloadConfig import DownloadStartupConfig +from Tribler.Core.defaults import DLDEFAULTS_VERSION,dldefaults + +class DefaultDownloadStartupConfig(DownloadStartupConfig): + __single = None + + def __init__(self,dlconfig=None): + + if DefaultDownloadStartupConfig.__single: + raise RuntimeError, "DefaultDownloadStartupConfig is singleton" + DefaultDownloadStartupConfig.__single = self + + DownloadStartupConfig.__init__(self,dlconfig=dlconfig) + + def getInstance(*args, **kw): + if DefaultDownloadStartupConfig.__single is None: + DefaultDownloadStartupConfig(*args, **kw) + return DefaultDownloadStartupConfig.__single + getInstance = staticmethod(getInstance) + + def updateToCurrentVersion(self): + oldver = self.dlconfig['version'] + if oldver != DLDEFAULTS_VERSION: + for key in dldefaults.keys(): + if key not in self.dlconfig: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","DefaultDownloadStartupConfig: Adding field",key + self.dlconfig[key] = dldefaults[key] + self.dlconfig['version'] = DLDEFAULTS_VERSION + + # + # Class method + # + def load(filename): + """ + Load a saved DownloadStartupConfig from disk. + + @param filename An absolute Unicode filename + @return DefaultDownloadStartupConfig object + """ + # Class method, no locking required + f = open(filename,"rb") + dlconfig = pickle.load(f) + dscfg = DefaultDownloadStartupConfig(dlconfig) + f.close() + + dscfg.updateToCurrentVersion() + + return dscfg + load = staticmethod(load) + + +def get_default_dscfg_filename(session): + return os.path.join(session.get_state_dir(),STATEDIR_DLCONFIG) diff --git a/tribler-mod/Tribler/Main/metadata-injector.py b/tribler-mod/Tribler/Main/metadata-injector.py new file mode 100644 index 0000000..a25d0b9 --- /dev/null +++ b/tribler-mod/Tribler/Main/metadata-injector.py @@ -0,0 +1,131 @@ +from time import localtime, strftime +#!/usr/bin/python + +# injector.py is used to 'inject' .torrent files into the overlay +# network. currently we only support a single .torrent source: rss +# feed. 
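+# Illustrative invocation (the options are defined below with optparse; the feed
+# URL and nickname are placeholders, and several feeds can be joined with ';'):
+#   python Tribler/Main/metadata-injector.py --rss "http://example.com/feed.xml" --nickname injector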
+ +# modify the sys.stderr and sys.stdout for safe output +import Tribler.Debug.console + +from traceback import print_exc +import optparse +import os +import random +import shutil +import sys +import tempfile +import time + +from Tribler.Core.API import * +from Tribler.Core.CacheDB.sqlitecachedb import bin2str +from Tribler.Subscriptions.rss_client import TorrentFeedThread +from Tribler.Core.BuddyCast.buddycast import BuddyCastFactory +from Tribler.Core.Overlay.OverlayApps import OverlayApps + +from Tribler.Core.Overlay.permid import permid_for_user + +def main(): + command_line_parser = optparse.OptionParser() + command_line_parser.add_option("--statedir", action="store", type="string", help="Use an alternate statedir") + command_line_parser.add_option("--port", action="store", type="int", help="Listen at this port") + command_line_parser.add_option("--rss", action="store", type="string", help="Url where to fetch rss feed, or several seperated with ';'") + command_line_parser.add_option("--nickname", action="store", type="string", help="The moderator name") + + # parse command-line arguments + opt, args = command_line_parser.parse_args() + + if not (opt.rss): + print "Usage: python Tribler/Main/metadata-injector.py --help" + print "Example: python Tribler/Main/metadata-injector.py --rss http://frayja.com/rss.php --nickname frayja" + sys.exit() + + print "Press Ctrl-C to stop the metadata-injector" + + sscfg = SessionStartupConfig() + if opt.statedir: sscfg.set_state_dir(os.path.realpath(opt.statedir)) + if opt.port: sscfg.set_listen_port(opt.port) + if opt.nickname: sscfg.set_nickname(opt.nickname) + + # override default configuration + sscfg.set_rss_reload_frequency(120) + sscfg.set_rss_check_frequency(1) + sscfg.set_moderationcast_recent_own_moderations_per_have(25) + sscfg.set_moderationcast_random_own_moderations_per_have(25) + sscfg.set_moderationcast_recent_forward_moderations_per_have(5) + sscfg.set_moderationcast_random_forward_moderations_per_have(5) + sscfg.set_moderationcast_upload_bandwidth_limit(256*1024) + sscfg.set_moderationcast_download_bandwidth_limit(1024*1024) + + sscfg.set_megacache(True) + sscfg.set_overlay(True) + # turn torrent collecting on. this will cause torrents to be distributed + sscfg.set_torrent_collecting(True) + sscfg.set_dialback(False) + sscfg.set_internal_tracker(False) + + session = Session(sscfg) + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "permid: ", permid_for_user(session.get_permid()) + + if opt.rss: + buddycast_factory = BuddyCastFactory.getInstance() + moderation_cast_db = session.open_dbhandler(NTFY_MODERATIONCAST) + overlay_apps = OverlayApps.getInstance() + torrent_feed_thread = TorrentFeedThread.getInstance() + + def on_overlay_connection(exc, permid, selversion, locally_initiated): + """ + An overlay connection is established or lost. Send + moderation is appropriate. + """ + if not exc: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Force send" + buddycast_factory.moderationcast_core.createAndSendModerationCastHaveMessage(permid, selversion) + + def on_torrent_callback(rss_url, infohash, torrent_data): + """ + A torrent file is discovered through rss. Create a new + moderation. 
+ """ + if "info" in torrent_data and "name" in torrent_data["info"]: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Creating moderation for %s" % torrent_data["info"]["name"] + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Creating moderation" + + moderation = {} + moderation['infohash'] = bin2str(infohash) + + moderation_cast_db.addOwnModeration(moderation) + + overlay_apps.register_connection_handler(on_overlay_connection) + + torrent_feed_thread.register(session) + for rss in opt.rss.split(";"): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Adding RSS: %s" % rss + torrent_feed_thread.addURL(rss, on_torrent_callback=on_torrent_callback) + torrent_feed_thread.start() + + # 22/10/08. Boudewijn: connect to a specific peer + # connect to a specific peer using the overlay + def after_connect(*args): + print "CONNECTED", args + from Tribler.Core.Overlay.SecureOverlay import SecureOverlay + overlay = SecureOverlay.getInstance() + overlay.connect_dns(("130.161.158.24", 7762), after_connect) + + # condition variable would be prettier, but that don't listen to + # KeyboardInterrupt + #time.sleep(sys.maxint/2048) + try: + while True: + x = sys.stdin.read() + except: + print_exc() + + session.shutdown() + print "Shutting down..." + time.sleep(5) + +if __name__ == "__main__": + main() diff --git a/tribler-mod/Tribler/Main/metadata-injector.py.bak b/tribler-mod/Tribler/Main/metadata-injector.py.bak new file mode 100644 index 0000000..0abf954 --- /dev/null +++ b/tribler-mod/Tribler/Main/metadata-injector.py.bak @@ -0,0 +1,130 @@ +#!/usr/bin/python + +# injector.py is used to 'inject' .torrent files into the overlay +# network. currently we only support a single .torrent source: rss +# feed. 
+ +# modify the sys.stderr and sys.stdout for safe output +import Tribler.Debug.console + +from traceback import print_exc +import optparse +import os +import random +import shutil +import sys +import tempfile +import time + +from Tribler.Core.API import * +from Tribler.Core.CacheDB.sqlitecachedb import bin2str +from Tribler.Subscriptions.rss_client import TorrentFeedThread +from Tribler.Core.BuddyCast.buddycast import BuddyCastFactory +from Tribler.Core.Overlay.OverlayApps import OverlayApps + +from Tribler.Core.Overlay.permid import permid_for_user + +def main(): + command_line_parser = optparse.OptionParser() + command_line_parser.add_option("--statedir", action="store", type="string", help="Use an alternate statedir") + command_line_parser.add_option("--port", action="store", type="int", help="Listen at this port") + command_line_parser.add_option("--rss", action="store", type="string", help="Url where to fetch rss feed, or several seperated with ';'") + command_line_parser.add_option("--nickname", action="store", type="string", help="The moderator name") + + # parse command-line arguments + opt, args = command_line_parser.parse_args() + + if not (opt.rss): + print "Usage: python Tribler/Main/metadata-injector.py --help" + print "Example: python Tribler/Main/metadata-injector.py --rss http://frayja.com/rss.php --nickname frayja" + sys.exit() + + print "Press Ctrl-C to stop the metadata-injector" + + sscfg = SessionStartupConfig() + if opt.statedir: sscfg.set_state_dir(os.path.realpath(opt.statedir)) + if opt.port: sscfg.set_listen_port(opt.port) + if opt.nickname: sscfg.set_nickname(opt.nickname) + + # override default configuration + sscfg.set_rss_reload_frequency(120) + sscfg.set_rss_check_frequency(1) + sscfg.set_moderationcast_recent_own_moderations_per_have(25) + sscfg.set_moderationcast_random_own_moderations_per_have(25) + sscfg.set_moderationcast_recent_forward_moderations_per_have(5) + sscfg.set_moderationcast_random_forward_moderations_per_have(5) + sscfg.set_moderationcast_upload_bandwidth_limit(256*1024) + sscfg.set_moderationcast_download_bandwidth_limit(1024*1024) + + sscfg.set_megacache(True) + sscfg.set_overlay(True) + # turn torrent collecting on. this will cause torrents to be distributed + sscfg.set_torrent_collecting(True) + sscfg.set_dialback(False) + sscfg.set_internal_tracker(False) + + session = Session(sscfg) + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "permid: ", permid_for_user(session.get_permid()) + + if opt.rss: + buddycast_factory = BuddyCastFactory.getInstance() + moderation_cast_db = session.open_dbhandler(NTFY_MODERATIONCAST) + overlay_apps = OverlayApps.getInstance() + torrent_feed_thread = TorrentFeedThread.getInstance() + + def on_overlay_connection(exc, permid, selversion, locally_initiated): + """ + An overlay connection is established or lost. Send + moderation is appropriate. + """ + if not exc: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Force send" + buddycast_factory.moderationcast_core.createAndSendModerationCastHaveMessage(permid, selversion) + + def on_torrent_callback(rss_url, infohash, torrent_data): + """ + A torrent file is discovered through rss. Create a new + moderation. 
+ """ + if "info" in torrent_data and "name" in torrent_data["info"]: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Creating moderation for %s" % torrent_data["info"]["name"] + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Creating moderation" + + moderation = {} + moderation['infohash'] = bin2str(infohash) + + moderation_cast_db.addOwnModeration(moderation) + + overlay_apps.register_connection_handler(on_overlay_connection) + + torrent_feed_thread.register(session) + for rss in opt.rss.split(";"): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Adding RSS: %s" % rss + torrent_feed_thread.addURL(rss, on_torrent_callback=on_torrent_callback) + torrent_feed_thread.start() + + # 22/10/08. Boudewijn: connect to a specific peer + # connect to a specific peer using the overlay + def after_connect(*args): + print "CONNECTED", args + from Tribler.Core.Overlay.SecureOverlay import SecureOverlay + overlay = SecureOverlay.getInstance() + overlay.connect_dns(("130.161.158.24", 7762), after_connect) + + # condition variable would be prettier, but that don't listen to + # KeyboardInterrupt + #time.sleep(sys.maxint/2048) + try: + while True: + x = sys.stdin.read() + except: + print_exc() + + session.shutdown() + print "Shutting down..." + time.sleep(5) + +if __name__ == "__main__": + main() diff --git a/tribler-mod/Tribler/Main/notification.py b/tribler-mod/Tribler/Main/notification.py new file mode 100644 index 0000000..9b82368 --- /dev/null +++ b/tribler-mod/Tribler/Main/notification.py @@ -0,0 +1,78 @@ +from time import localtime, strftime +# Written by Jan David Mol +# see LICENSE.txt for license information + +import os,sys + +(DOWNLOAD_COMPLETE,DONE_SEEDING)=range(0,2) + +types = [ + # (id,icon,langstr) + (DOWNLOAD_COMPLETE, "doc", "notification_download_complete"), + (DONE_SEEDING , "doc", "notification_finished_seeding") + ] + +class Notifier: + def __init__( self ): + pass + + def notify( self, type, title, content ): + pass + +class GrowlNotifier( Notifier ): + icons = { "doc": "TriblerDoc.icns", + "app": "tribler.icns" } + + def __init__( self, utility ): + import Growl + + self.utility = utility + self.icondir = utility.getPath() + try: + # distinguish between app bundle and run from source + # if "mac/" exists, use it as a path to the icons + macdir = os.path.join(self.icondir,"mac") + + os.stat( macdir ) + self.icondir = macdir + except: + pass + + + appname = "Tribler" + nAppIcon = Growl.Image.imageFromPath( os.path.join( self.icondir, self.icons["app"] ) ) + + # register all notification types and the application name & icon + self.growler = Growl.GrowlNotifier( appname, [utility.lang.get(x[2]) for x in types], applicationIcon=nAppIcon ) + self.growler.register() + + def notify( self, type, title, content ): + import Growl + + # lookup the type + x = [x for x in types if x[0]==type] + assert x, "Notification type not found: notify(%s,'%s','%s')" % (type,title,content) + info = x[0] + iconfile = self.icons[info[1]] + mesg = self.utility.lang.get(info[2]) + + # fetch the icon + nIcon = Growl.Image.imageFromPath( os.path.join( self.icondir, iconfile ) ) + + # notify Growl + self.growler.notify( mesg, title, content, icon=nIcon ) + +def notify( type, title, content ): + pass + +# ----- set the right notifier +def init( utility ): + global notify + + if sys.platform == "darwin": + try: + notifier = GrowlNotifier( utility ) + notify = notifier.notify + except: + pass + diff --git 
a/tribler-mod/Tribler/Main/notification.py.bak b/tribler-mod/Tribler/Main/notification.py.bak new file mode 100644 index 0000000..fdcd5b3 --- /dev/null +++ b/tribler-mod/Tribler/Main/notification.py.bak @@ -0,0 +1,77 @@ +# Written by Jan David Mol +# see LICENSE.txt for license information + +import os,sys + +(DOWNLOAD_COMPLETE,DONE_SEEDING)=range(0,2) + +types = [ + # (id,icon,langstr) + (DOWNLOAD_COMPLETE, "doc", "notification_download_complete"), + (DONE_SEEDING , "doc", "notification_finished_seeding") + ] + +class Notifier: + def __init__( self ): + pass + + def notify( self, type, title, content ): + pass + +class GrowlNotifier( Notifier ): + icons = { "doc": "TriblerDoc.icns", + "app": "tribler.icns" } + + def __init__( self, utility ): + import Growl + + self.utility = utility + self.icondir = utility.getPath() + try: + # distinguish between app bundle and run from source + # if "mac/" exists, use it as a path to the icons + macdir = os.path.join(self.icondir,"mac") + + os.stat( macdir ) + self.icondir = macdir + except: + pass + + + appname = "Tribler" + nAppIcon = Growl.Image.imageFromPath( os.path.join( self.icondir, self.icons["app"] ) ) + + # register all notification types and the application name & icon + self.growler = Growl.GrowlNotifier( appname, [utility.lang.get(x[2]) for x in types], applicationIcon=nAppIcon ) + self.growler.register() + + def notify( self, type, title, content ): + import Growl + + # lookup the type + x = [x for x in types if x[0]==type] + assert x, "Notification type not found: notify(%s,'%s','%s')" % (type,title,content) + info = x[0] + iconfile = self.icons[info[1]] + mesg = self.utility.lang.get(info[2]) + + # fetch the icon + nIcon = Growl.Image.imageFromPath( os.path.join( self.icondir, iconfile ) ) + + # notify Growl + self.growler.notify( mesg, title, content, icon=nIcon ) + +def notify( type, title, content ): + pass + +# ----- set the right notifier +def init( utility ): + global notify + + if sys.platform == "darwin": + try: + notifier = GrowlNotifier( utility ) + notify = notifier.notify + except: + pass + diff --git a/tribler-mod/Tribler/Main/tribler.py b/tribler-mod/Tribler/Main/tribler.py new file mode 100644 index 0000000..0c23cd6 --- /dev/null +++ b/tribler-mod/Tribler/Main/tribler.py @@ -0,0 +1,1203 @@ +from time import localtime, strftime +#!/usr/bin/python + +######################################################################### +# +# Author : Choopan RATTANAPOKA, Jie Yang, Arno Bakker +# +# Description : Main ABC [Yet Another Bittorrent Client] python script. +# you can run from source code by using +# >python abc.py +# need Python, WxPython in order to run from source code. +# +# see LICENSE.txt for license information +######################################################################### + +# Arno: M2Crypto overrides the method for https:// in the +# standard Python libraries. This causes msnlib to fail and makes Tribler +# freakout when "http://www.tribler.org/version" is redirected to +# "https://www.tribler.org/version/" (which happened during our website +# changeover) Until M2Crypto 0.16 is patched I'll restore the method to the +# original, as follows. +# +# This must be done in the first python file that is started. +# + +# modify the sys.stderr and sys.stdout for safe output +import Tribler.Debug.console + +import os,sys +import urllib +original_open_https = urllib.URLopener.open_https +import M2Crypto # Not a useless import! See above. 
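+# Put back the handler that importing M2Crypto just replaced (see the note above).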
+urllib.URLopener.open_https = original_open_https + +# Arno, 2008-03-21: see what happens when we disable this locale thing. Gives +# errors on Vista in "Regional and Language Settings Options" different from +# "English[United Kingdom]" +#import locale + +#try: +# import wxversion +# wxversion.select('2.8') +#except: +# pass +import wx +import wx.animate +from wx import xrc +#import hotshot + +from traceback import print_exc +import urllib2 +import tempfile + +import Tribler.Main.vwxGUI.font as font +from Tribler.Main.vwxGUI.TriblerStyles import TriblerStyles +from Tribler.Main.vwxGUI.MainFrame import MainFrame # py2exe needs this import +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility +## from Tribler.Main.vwxGUI.TasteHeart import set_tasteheart_bitmaps +## from Tribler.Main.vwxGUI.perfBar import set_perfBar_bitmaps +from Tribler.Main.vwxGUI.FriendsItemPanel import fs2text +from Tribler.Main.Dialogs.GUITaskQueue import GUITaskQueue +from Tribler.Main.notification import init as notification_init +from Tribler.Main.globals import DefaultDownloadStartupConfig,get_default_dscfg_filename +from Tribler.Main.Utility.utility import Utility +from Tribler.Main.Utility.constants import * + +from Tribler.Category.Category import Category +from Tribler.Policies.RateManager import UserDefinedMaxAlwaysOtherwiseEquallyDividedRateManager +from Tribler.Policies.SeedingManager import GlobalSeedingManager +from Tribler.Utilities.Instance2Instance import * +from Tribler.Utilities.LinuxSingleInstanceChecker import * + +from Tribler.Core.API import * +from Tribler.Core.Utilities.utilities import show_permid_short +#import Tribler.Core.CacheDB.friends as friends + +from Tribler.Video.defs import * +from Tribler.Video.VideoPlayer import VideoPlayer,PLAYBACKMODE_INTERNAL +from Tribler.Video.VideoFrame import VideoDummyFrame, VideoMacFrame + +# Boudewijn: keep this import BELOW the imports from Tribler.xxx.* as +# one of those modules imports time as a module. +from time import time, sleep + +I2I_LISTENPORT = 57891 +VIDEOHTTP_LISTENPORT = 6878 +SESSION_CHECKPOINT_INTERVAL = 1800.0 # seconds + +DEBUG = False +ALLOW_MULTIPLE = False + +############################################################## +# +# Class : ABCApp +# +# Main ABC application class that contains ABCFrame Object +# +############################################################## +class ABCApp(wx.App): + def __init__(self, redirectstderrout, params, single_instance_checker, installdir): + self.params = params + self.single_instance_checker = single_instance_checker + self.installdir = installdir + self.error = None + self.last_update = 0 + self.update_freq = 0 # how often to update #peers/#torrents + + self.guiserver = GUITaskQueue.getInstance() + self.said_start_playback = False + self.decodeprogress = 0 + + self.old_reputation = 0 + + try: + ubuntu = False + if sys.platform == "linux2": + f = open("/etc/issue","rb") + data = f.read(100) + f.close() + if data.find("Ubuntu 8.10") != -1: + ubuntu = True + if data.find("Ubuntu 9.04") != -1: + ubuntu = True + + if not redirectstderrout and ubuntu: + # On Ubuntu 8.10 not redirecting output causes the program to quit + wx.App.__init__(self, redirect=True) + else: + wx.App.__init__(self, redirectstderrout) + except: + print_exc() + + def OnInit(self): + try: + self.utility = Utility(self.installdir,Session.get_default_state_dir()) + self.utility.app = self + + #self.postinitstarted = False + """ + Hanging self.OnIdle to the onidle event doesnot work under linux (ubuntu). 
The images in xrc files + will not load in any but the filespanel. + """ + #self.Bind(wx.EVT_IDLE, self.OnIdle) + + + # Set locale to determine localisation + #locale.setlocale(locale.LC_ALL, '') + + sys.stderr.write('Client Starting Up.\n') + sys.stderr.write('Build: ' + self.utility.lang.get('build') + '\n') + + bm = wx.Bitmap(os.path.join(self.utility.getPath(),'Tribler','Images','splash.jpg'),wx.BITMAP_TYPE_JPEG) + #s = wx.MINIMIZE_BOX | wx.MAXIMIZE_BOX | wx.RESIZE_BORDER | wx.SYSTEM_MENU | wx.CAPTION | wx.CLOSE_BOX | wx.CLIP_CHILDREN + #s = wx.SIMPLE_BORDER|wx.FRAME_NO_TASKBAR|wx.FRAME_FLOAT_ON_PARENT + self.splash = wx.SplashScreen(bm, wx.SPLASH_CENTRE_ON_SCREEN|wx.SPLASH_TIMEOUT, 1000, None, -1) + + # Arno: Do heavy startup on GUI thread after splash screen has been + # painted. + self.splash.Show() + "Replacement for self.Bind(wx.EVT_IDLE, self.OnIdle)" + wx.CallAfter(self.PostInit) + return True + + except Exception,e: + print_exc() + self.error = e + self.onError() + return False + + def OnIdle(self,event=None): + if not self.postinitstarted: + self.postinitstarted = True + wx.CallAfter(self.PostInit) + # Arno: On Linux I sometimes have to move the mouse into the splash + # for the rest of Tribler to start. H4x0r + if event is not None: + event.RequestMore(True) + event.Skip() + + + def PostInit(self): + try: + # On Linux: allow painting of splash screen first. + wx.Yield() + + # Initialise fonts + font.init() + + + self.utility.postAppInit(os.path.join(self.installdir,'Tribler','Images','tribler.ico')) + + # H4x0r a bit + ## set_tasteheart_bitmaps(self.utility.getPath()) + ## set_perfBar_bitmaps(self.utility.getPath()) + + cat = Category.getInstance(self.utility.getPath()) + cat.init_from_main(self.utility) + + # Put it here so an error is shown in the startup-error popup + # Start server for instance2instance communication + self.i2iconnhandler = InstanceConnectionHandler(self.i2ithread_readlinecallback) + self.i2is = Instance2InstanceServer(I2I_LISTENPORT,self.i2iconnhandler) + self.i2is.start() + + self.triblerStyles = TriblerStyles.getInstance() + + # Fire up the VideoPlayer, it abstracts away whether we're using + # an internal or external video player. 
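+ # (The preferred playback mode is read from the 'videoplaybackmode' entry
+ # that Utility.setupConfig() defaults in abc.conf.)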
+ playbackmode = self.utility.config.Read('videoplaybackmode', "int") + self.videoplayer = VideoPlayer.getInstance(httpport=VIDEOHTTP_LISTENPORT) + self.videoplayer.register(self.utility,preferredplaybackmode=playbackmode) + + notification_init( self.utility ) + + # + # Read and create GUI from .xrc files + # + self.guiUtility = GUIUtility.getInstance(self.utility, self.params) + self.res = xrc.XmlResource(os.path.join(self.utility.getPath(),'Tribler', 'Main','vwxGUI','MyFrame.xrc')) + self.guiUtility.xrcResource = self.res + self.frame = self.res.LoadFrame(None, "MyFrame") + self.guiUtility.frame = self.frame + + self.frame.set_wxapp(self) + + + self.guiUtility.scrollWindow = xrc.XRCCTRL(self.frame, "level0") + self.guiUtility.mainSizer = self.guiUtility.scrollWindow.GetSizer() + self.frame.topBackgroundRight = xrc.XRCCTRL(self.frame, "topBG3") + #self.guiUtility.scrollWindow.SetScrollbars(1,1,1100,683) + #self.guiUtility.scrollWindow.SetScrollRate(15,15) + self.frame.mainButtonPersons = xrc.XRCCTRL(self.frame, "mainButtonPersons") + self.frame.messageField = xrc.XRCCTRL(self.frame, "messageField") + self.frame.pageTitle = xrc.XRCCTRL(self.frame, "pageTitle") + self.frame.pageTitlePanel = xrc.XRCCTRL(self.frame, "pageTitlePanel") + self.frame.standardDetails = xrc.XRCCTRL(self.frame, "standardDetails") + self.frame.standardOverview = xrc.XRCCTRL(self.frame, "standardOverview") + self.frame.firewallStatus = xrc.XRCCTRL(self.frame, "firewallStatus") + + # Make sure self.utility.frame is set + self.startAPI() + self.guiUtility.open_dbs() + ##self.guiUtility.initStandardOverview(self.frame.standardOverview) + + # TEST: add mod for Gopher + """ + moderation_cast_db = self.utility.session.open_dbhandler(NTFY_MODERATIONCAST) + moderation = {} + from Tribler.Core.CacheDB.sqlitecachedb import bin2str + moderation['infohash'] = bin2str('\xbd\x0c\x86\xf9\xe4JE\x0e\xff\xff\x16\xedF01*<| \xe9') + moderation_cast_db.addOwnModeration(moderation) + """ + + self.frame.searchtxtctrl = xrc.XRCCTRL(self.frame, "tx220cCCC") + self.frame.search_icon = xrc.XRCCTRL(self.frame, "search_icon") + self.frame.files_friends = xrc.XRCCTRL(self.frame, "files_friends") + self.frame.top_image = xrc.XRCCTRL(self.frame, "top_image") + + self.frame.top_bg = xrc.XRCCTRL(self.frame,"top_search") + self.frame.top_bg.set_frame(self.frame) + self.frame.pagerPanel = xrc.XRCCTRL(self.frame,"pagerPanel") + self.frame.standardPager = xrc.XRCCTRL(self.frame,"standardPager") + self.frame.horizontal = xrc.XRCCTRL(self.frame, "horizontal") + self.frame.changePlay = xrc.XRCCTRL(self.frame, "changePlay") + + + # on linux pagerpanel needs a SetMinSize call + if sys.platform == "linux2": + self.frame.pagerPanel.SetMinSize((666,20)) + elif sys.platform == 'darwin': + self.frame.pagerPanel.SetMinSize((674,21)) + else: + self.frame.pagerPanel.SetMinSize((666,21)) + + + + # videopanel + self.frame.videoparentpanel = xrc.XRCCTRL(self.frame,"videopanel") + if sys.platform == 'darwin': + self.frame.videoparentpanel.SetBackgroundColour((216,233,240)) + self.frame.videoparentpanel.Hide() + if sys.platform == "linux2": + self.frame.videoparentpanel.SetMinSize((363,400)) + elif sys.platform == 'win32': + self.frame.videoparentpanel.SetMinSize((363,400)) + else: + self.frame.videoparentpanel.SetMinSize((355,240)) + + + logopath = os.path.join(self.utility.getPath(),'Tribler','Main','vwxGUI','images','5.0','video.gif') + if sys.platform == 'darwin': + self.frame.videoframe = 
VideoMacFrame(self.frame.videoparentpanel,self.utility,"Videoplayer",os.path.join(self.installdir,'Tribler','Images','tribler.ico'),self.videoplayer.get_vlcwrap(),logopath) + self.videoplayer.set_videoframe(self.frame.videoframe) + else: + self.frame.videoframe = VideoDummyFrame(self.frame.videoparentpanel,self.utility,self.videoplayer.get_vlcwrap(),logopath) + self.videoplayer.set_videoframe(self.frame.videoframe) + + if sys.platform == "linux2": + # On Linux the _PostInit does not get called if the thing + # is not shown. We need the _PostInit to be called to set + # the GUIUtility.standardOverview, etc. member variables. + # + wx.CallAfter(self.frame.standardOverview.Hide) + wx.CallAfter(self.frame.standardDetails.Hide) + hide_names = [self.frame.pagerPanel] + else: + hide_names = [self.frame.standardOverview,self.frame.standardDetails,self.frame.pagerPanel] + + + + for name in hide_names: + name.Hide() + self.frame.videoframe.hide_videoframe() + + self.frame.top_bg.createBackgroundImage() + ## self.frame.top_bg.setBackground((230,230,230)) + + + self.frame.top_bg.Layout() + if sys.platform == 'win32': + wx.CallAfter(self.frame.top_bg.Refresh) + wx.CallAfter(self.frame.top_bg.Layout) + + + + # reputation + self.guiserver.add_task(self.guiservthread_update_reputation, .2) + + self.setDBStats() + + self.Bind(wx.EVT_QUERY_END_SESSION, self.frame.OnCloseWindow) + self.Bind(wx.EVT_END_SESSION, self.frame.OnCloseWindow) + + + # Arno, 2007-05-03: wxWidgets 2.8.3.0 and earlier have the MIME-type for .bmp + # files set to 'image/x-bmp' whereas 'image/bmp' is the official one. + try: + bmphand = None + hands = wx.Image.GetHandlers() + for hand in hands: + #print "Handler",hand.GetExtension(),hand.GetType(),hand.GetMimeType() + if hand.GetMimeType() == 'image/x-bmp': + bmphand = hand + break + #wx.Image.AddHandler() + if bmphand is not None: + bmphand.SetMimeType('image/bmp') + except: + # wx < 2.7 don't like wx.Image.GetHandlers() + print_exc() + + # Must be after ABCLaunchMany is created + #self.torrentfeed = TorrentFeedThread.getInstance() + #self.torrentfeed.register(self.utility) + #self.torrentfeed.start() + + #print "DIM",wx.GetDisplaySize() + #print "MM",wx.GetDisplaySizeMM() + + #self.frame.Refresh() + #self.frame.Layout() + self.frame.Show(True) + + wx.CallAfter(self.startWithRightView) + # Delay this so GUI has time to paint + wx.CallAfter(self.loadSessionCheckpoint) + + + #self.sr_indicator_left_image = wx.Image(os.path.join(self.utility.getPath(),"Tribler","Main","vwxGUI","images","5.0", "SRindicator_left.png", wx.BITMAP_TYPE_ANY)) + #self.sr_indicator_left = wx.StaticBitmap(self, -1, wx.BitmapFromImage(self.sr_indicator_left_image)) + + #self.sr_indicator_right_image = wx.Image(os.path.join(self.utility.getPath(),"Tribler","Main","vwxGUI","images","5.0", "SRindicator_right.png", wx.BITMAP_TYPE_ANY)) + #self.sr_indicator_right = wx.StaticBitmap(self, -1, wx.BitmapFromImage(self.sr_indicator_right_image)) + + + except Exception,e: + print_exc() + self.error = e + self.onError() + return False + + return True + + + + def OnSearchResultsPressed(self, event): + self.guiUtility.OnResultsClicked() + + + def helpClick(self,event=None): + title = self.utility.lang.get('sharing_reputation_information_title') + msg = self.utility.lang.get('sharing_reputation_information_message') + + dlg = wx.MessageDialog(None, msg, title, wx.OK|wx.ICON_INFORMATION) + result = dlg.ShowModal() + dlg.Destroy() + + def viewSettings(self,event): + self.guiUtility.settingsOverview() + + def viewLibrary(self,event): 
+ self.guiUtility.standardLibraryOverview() + + def toggleFamilyFilter(self,event): + self.guiUtility.toggleFamilyFilter() + + + def startAPI(self): + + # Start Tribler Session + state_dir = Session.get_default_state_dir() + + cfgfilename = Session.get_default_config_filename(state_dir) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Session config",cfgfilename + try: + self.sconfig = SessionStartupConfig.load(cfgfilename) + except: + print_exc() + self.sconfig = SessionStartupConfig() + self.sconfig.set_state_dir(state_dir) + # Set default Session params here + destdir = get_default_dest_dir() + torrcolldir = os.path.join(destdir,STATEDIR_TORRENTCOLL_DIR) + self.sconfig.set_torrent_collecting_dir(torrcolldir) + self.sconfig.set_nat_detect(True) + + # rename old collected torrent directory + try: + if not os.path.exists(destdir): + os.makedirs(destdir) + old_collected_torrent_dir = os.path.join(state_dir, 'torrent2') + if not os.path.exists(torrcolldir) and os.path.isdir(old_collected_torrent_dir): + os.rename(old_collected_torrent_dir, torrcolldir) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Moved dir with old collected torrents to", torrcolldir + + # Arno, 2008-10-23: Also copy torrents the user got himself + old_own_torrent_dir = os.path.join(state_dir, 'torrent') + for name in os.listdir(old_own_torrent_dir): + oldpath = os.path.join(old_own_torrent_dir,name) + newpath = os.path.join(torrcolldir,name) + if not os.path.exists(newpath): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Copying own torrent",oldpath,newpath + os.rename(oldpath,newpath) + + # Internal tracker + except: + print_exc() + + # 22/08/08 boudewijn: convert abc.conf to SessionConfig + self.utility.convert__presession_4_1__4_2(self.sconfig) + + s = Session(self.sconfig) + self.utility.session = s + + s.add_observer(self.sesscb_ntfy_reachable,NTFY_REACHABLE,[NTFY_INSERT]) + s.add_observer(self.sesscb_ntfy_activities,NTFY_ACTIVITIES,[NTFY_INSERT]) + s.add_observer(self.sesscb_ntfy_dbstats,NTFY_TORRENTS,[NTFY_INSERT]) + s.add_observer(self.sesscb_ntfy_dbstats,NTFY_PEERS,[NTFY_INSERT]) + s.add_observer(self.sesscb_ntfy_friends,NTFY_PEERS,[NTFY_UPDATE]) + + + # set port number in GuiUtility + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'LISTEN PORT :' , s.get_listen_port() + port = s.get_listen_port() + self.guiUtility.set_port_number(port) + + + # Load the default DownloadStartupConfig + dlcfgfilename = get_default_dscfg_filename(s) + try: + defaultDLConfig = DefaultDownloadStartupConfig.load(dlcfgfilename) + except: + defaultDLConfig = DefaultDownloadStartupConfig.getInstance() + #print_exc() + defaultdestdir = os.path.join(get_default_dest_dir()) + defaultDLConfig.set_dest_dir(defaultdestdir) + + # 29/08/08 boudewijn: convert abc.conf to DefaultDownloadStartupConfig + self.utility.convert__postsession_4_1__4_2(s, defaultDLConfig) + + s.set_coopdlconfig(defaultDLConfig) + + # Loading of checkpointed Downloads delayed to allow GUI to paint, + # see loadSessionCheckpoint + + # Create global rate limiter + self.ratelimiter = UserDefinedMaxAlwaysOtherwiseEquallyDividedRateManager() + self.rateadjustcount = 0 + + maxup = self.utility.config.Read('maxuploadrate', "int") + if maxup == -1: # no upload + self.ratelimiter.set_global_max_speed(UPLOAD, 0.00001) + else: + self.ratelimiter.set_global_max_speed(UPLOAD, maxup) + + + maxdown = self.utility.config.Read('maxdownloadrate', "int") + 
self.ratelimiter.set_global_max_speed(DOWNLOAD, maxdown) + + + maxupseed = self.utility.config.Read('maxseeduploadrate', "int") + self.ratelimiter.set_global_max_seedupload_speed(maxupseed) + self.utility.ratelimiter = self.ratelimiter + +# SelectiveSeeding _ + self.seedingmanager = GlobalSeedingManager(self.utility.config.Read)#, self.utility.session) + self.seedingcount = 0 +# _SelectiveSeeding + + # seeding stats crawling + self.seeding_snapshot_count = 0 + self.seedingstats_settings = s.open_dbhandler(NTFY_SEEDINGSTATSSETTINGS).loadCrawlingSettings() + self.seedingstats_enabled = self.seedingstats_settings[0][2] + self.seedingstats_interval = self.seedingstats_settings[0][1] + + # Only allow updates to come in after we defined ratelimiter + s.set_download_states_callback(self.sesscb_states_callback) + + # Load friends from friends.txt + #friends.init(s) + + # Schedule task for checkpointing Session, to avoid hash checks after + # crashes. + # + self.guiserver.add_task(self.guiservthread_checkpoint_timer,SESSION_CHECKPOINT_INTERVAL) + + + def sesscb_states_callback(self,dslist): + """ Called by SessionThread """ + wx.CallAfter(self.gui_states_callback,dslist) + return(1.0, True) + + def get_reputation(self): + """ get the current reputation score""" + bc_db = self.utility.session.open_dbhandler(NTFY_BARTERCAST) + reputation = bc_db.getMyReputation() + self.utility.session.close_dbhandler(bc_db) + return reputation + + def get_total_down(self): + bc_db = self.utility.session.open_dbhandler(NTFY_BARTERCAST) + return bc_db.total_down + + def get_total_up(self): + bc_db = self.utility.session.open_dbhandler(NTFY_BARTERCAST) + return bc_db.total_up + + + def set_reputation(self): + """ set the reputation in the GUI""" + reputation = self.get_reputation() + if sys.platform == 'win32': + self.frame.top_bg.updateReputation(reputation) + elif self.frame.top_bg.sr_msg: + if reputation < -0.33: + self.frame.top_bg.sr_msg.SetLabel('Poor') + self.frame.top_bg.sr_msg.SetForegroundColour((255,51,0)) + elif reputation < 0.33: + self.frame.top_bg.sr_msg.SetLabel('Average') + self.frame.top_bg.sr_msg.SetForegroundColour(wx.BLACK) + else: + self.frame.top_bg.sr_msg.SetLabel('Good') + self.frame.top_bg.sr_msg.SetForegroundColour((0,80,120)) + + + + + if DEBUG: + print >> sys.stderr , "main: My Reputation",reputation + + self.frame.top_bg.help.SetToolTipString(self.utility.lang.get('help') % (reputation)) + + d = int(self.get_total_down()) * 1024.0 + + if d < 10: + s = '%dB Down ' % d + elif d < 100: + s = '%dB Down ' % d + elif d < 1000: + s = '%dB Down ' % d + elif d < 1024: + s = '%1.1fKB Down' % (d/1024.0) + elif d < 10240: + s = '%dKB Down ' % (d//1024) + elif d < 102400: + s = '%dKB Down ' % (d//1024) + elif d < 1022796: + s = '%dKB Down' % (d//1024) + elif d < 1048576: + s = '%1.1fMB Down' % (d/1048576.0) + elif d < 10485760: + s = '%dMB Down ' % (d//1048576) + elif d < 104857600: + s = '%dMB Down ' % (d//1048576) + elif d < 1047527425: + s = '%dMB Down' % (d//1048576) + elif d < 1073741824: + s = '%1.1fGB Down' % (d/1073741824.0) + elif d < 10737418240: + s = '%dGB Down ' % (d//1073741824) + elif d < 107374182400: + s = '%dGB Down ' % (d//1073741824) + else: + s = '%dGB Down' % (d//1073741824) + + + #if d < 10: + # s = '%dB Down ' % d + #elif d < 100: + # s = '%dB Down ' % d + #elif d < 1000: + # s = '%dB Down ' % d + #elif d < 10000: + # s = '%dKB Down ' % (d//1000L) + #elif d < 100000: + # s = '%dKB Down ' % (d//1000L) + #elif d < 1000000: + # s = '%dKB Down' % (d//1000L) + #elif d < 10000000: + 
# s = '%dMB Down ' % (d//1000000L) + #elif d < 100000000: + # s = '%dMB Down ' % (d//1000000L) + #elif d < 1000000000: + # s = '%dMB Down' % (d//1000000L) + #elif d < 10000000000: + # s = '%dGB Down ' % (d//1000000000L) + #elif d < 100000000000: + # s = '%dGB Down ' % (d//1000000000L) + #else: + # s = '%dGB Down' % (d//1000000000L) + + self.frame.top_bg.total_down.SetLabel(s) + + + u = self.get_total_up() * 1024.0 + + + if u < 1000: + s = '%4dB Up' % u + elif u < 1024: + s = '%1.1fKB Up' % (u/1024.0) + elif u < 1022796: + s = '%3dKB Up' % (u//1024) + elif u < 1048576: + s = '%1.1fMB Up' % (u/1048576.0) + elif u < 1047527425: + s = '%3dMB Up' % (u//1048576) + elif u < 1073741824: + s = '%1.1fGB Up' % (u/1073741824.0) + else: + s = '%3dGB Up' % (u//1073741824) + + + #if u < 1000: + # s = '%4dB Up' % u + #elif u < 1000000: + # s = '%3dKB Up' % (u//1000L) + #elif u < 1000000000: + # s = '%3dMB Up' % (u//1000000L) + #else: + # s = '%3dGB Up' % (u//1000000000L) + + self.frame.top_bg.total_up.SetLabel(s) + + + self.frame.hsizer = self.frame.top_bg.sr_indicator.GetContainingSizer() + self.frame.hsizer.Remove(0) + self.frame.hsizer.Prepend(wx.Size(reputation*40+50,0),0,wx.LEFT,0) + + self.frame.hsizer.Layout() + + ##self.old_reputation = reputation + + + def guiservthread_update_reputation(self): + """ update the reputation""" + wx.CallAfter(self.set_reputation) + self.guiserver.add_task(self.guiservthread_update_reputation,10.0) + + + + + def gui_states_callback(self,dslist): + """ Called by MainThread """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Stats:" + + torrentdb = self.utility.session.open_dbhandler(NTFY_TORRENTS) + peerdb = self.utility.session.open_dbhandler(NTFY_PEERS) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Stats: Total torrents found",torrentdb.size(),"peers",peerdb.size() + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Stats: NAT",self.utility.session.get_nat_type() + try: + # Print stats on Console + for ds in dslist: + # safename = `ds.get_download().get_def().get_name()` + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Stats: %s %.1f%% %s dl %.1f ul %.1f n %d\n" % (dlstatus_strings[ds.get_status()],100.0*ds.get_progress(),safename,ds.get_current_speed(DOWNLOAD),ds.get_current_speed(UPLOAD),ds.get_num_peers()) + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Infohash:",`ds.get_download().get_def().get_infohash()` + if ds.get_status() == DLSTATUS_STOPPED_ON_ERROR: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Error:",`ds.get_error()` + + # Find State of currently playing video + playds = None + d = self.videoplayer.get_vod_download() + for ds in dslist: + if ds.get_download() == d: + playds = ds + break + + # Apply status displaying from SwarmPlayer + if playds: + videoplayer_mediastate = self.videoplayer.get_state() + + totalhelping = 0 + totalspeed = {UPLOAD:0.0,DOWNLOAD:0.0} + for ds in dslist: + totalspeed[UPLOAD] += ds.get_current_speed(UPLOAD) + totalspeed[DOWNLOAD] += ds.get_current_speed(DOWNLOAD) + totalhelping += ds.get_num_peers() + + [topmsg,msg,self.said_start_playback,self.decodeprogress] = get_status_msgs(playds,videoplayer_mediastate,"Tribler",self.said_start_playback,self.decodeprogress,totalhelping,totalspeed) + # Update status msg and progress bar + if topmsg != '': + + if videoplayer_mediastate == MEDIASTATE_PLAYING or (videoplayer_mediastate == 
MEDIASTATE_STOPPED and self.said_start_playback): + # In SwarmPlayer we would display "Decoding: N secs" + # when VLC was playing but the video was not yet + # being displayed (because VLC was looking for an + # I-frame). We would display it in the area where + # VLC would paint if it was ready to display. + # Hence, our text would be overwritten when the + # video was ready. We write the status text to + # its own area here, so this trick doesn't work. + # For now: just hide. + text = msg + else: + text = topmsg + else: + text = msg + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Messages",topmsg,msg,`playds.get_download().get_def().get_name()` + + self.videoplayer.set_player_status_and_progress(text,playds.get_pieces_complete()) + + # Pass DownloadStates to libraryView + try: + if self.guiUtility.standardOverview is not None: + mode = self.guiUtility.standardOverview.mode + #if mode == 'libraryMode' or mode == 'friendsMode': + # Also pass dslist to friendsView, for coopdl boosting info + # Arno, 2009-02-11: We also need it in filesMode now. + modedata = self.guiUtility.standardOverview.data[mode] + grid = modedata.get('grid') + if grid is not None: + gm = grid.gridManager + gm.download_state_gui_callback(dslist) + except KeyError: + # Apparently libraryMode only has a 'grid' key when visible + print_exc() + except AttributeError: + print_exc() + except: + print_exc() + + # Restart other torrents when the single torrent that was + # running in VOD mode is done + currdlist = [] + for ds in dslist: + currdlist.append(ds.get_download()) + vodd = self.videoplayer.get_vod_download() + for ds in dslist: + d = ds.get_download() + if d == vodd and ds.get_status() == DLSTATUS_SEEDING: + self.restart_other_downloads(currdlist) + break + + # Adjust speeds once every 4 seconds + adjustspeeds = False + if self.rateadjustcount % 4 == 0: + adjustspeeds = True + self.rateadjustcount += 1 + + if adjustspeeds: + self.ratelimiter.add_downloadstatelist(dslist) + self.ratelimiter.adjust_speeds() + + # Update stats in lower right overview box + self.guiUtility.refreshTorrentStats(dslist) + + # Update overall upload stats + self.guiUtility.refreshUploadStats(dslist) + +# SelectiveSeeding_ + # Apply seeding policy every 60 seconds, for performance + applyseedingpolicy = False + if self.seedingcount % 60 == 0: + applyseedingpolicy = True + self.seedingcount += 1 + + if applyseedingpolicy: + self.seedingmanager.apply_seeding_policy(dslist) +# _SelectiveSeeding + +# Crawling Seeding Stats_ + if self.seedingstats_enabled == 1: + snapshot_seeding_stats = False + if self.seeding_snapshot_count % self.seedingstats_interval == 0: + snapshot_seeding_stats = True + self.seeding_snapshot_count += 1 + + if snapshot_seeding_stats: + bc_db = self.utility.session.open_dbhandler(NTFY_BARTERCAST) + reputation = bc_db.getMyReputation() + self.utility.session.close_dbhandler(bc_db) + + seedingstats_db = self.utility.session.open_dbhandler(NTFY_SEEDINGSTATS) + seedingstats_db.updateSeedingStats(self.utility.session.get_permid(), reputation, dslist, self.seedingstats_interval) + self.utility.session.close_dbhandler(seedingstats_db) +# _Crawling Seeding Stats + + except: + print_exc() + + def restart_other_downloads(self,currdlist): + restartdlist = self.videoplayer.get_vod_postponed_downloads() + self.videoplayer.set_vod_postponed_downloads([]) # restart only once + for d in restartdlist: + if d in currdlist: + d.set_mode(DLMODE_NORMAL) + d.restart() + + + def OnClosingVideoFrameOrExtPlayer(self): + 
vodd = self.videoplayer.get_vod_download() + if vodd is not None: + if vodd.get_def().get_live(): + # Arno, 2009-03-27: Works poorly with VLC 0.9 without MPEGTS + # patch. There VLC may close the HTTP connection and we interpret + # it as a window close (no window in 5.0) and stop live, thereby + # killing any future attempts. Should see how this works with + # MPEGTS patch put in. + # + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: OnClosingVideoFrameOrExtPlayer: vodd is live, stopping",vodd.get_def().get_name_as_unicode() + vodd.stop() + self.restart_other_downloads(self.utility.session.get_downloads()) + #else: playing Web2 video + + def loadSessionCheckpoint(self): + # Load all other downloads + # TODO: reset all saved DownloadConfig to new default? + if self.params[0] != "": + # There is something on the cmdline, start all stopped + self.utility.session.load_checkpoint(initialdlstatus=DLSTATUS_STOPPED) + else: + self.utility.session.load_checkpoint() + + def guiservthread_checkpoint_timer(self): + """ Periodically checkpoint Session """ + try: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Checkpointing Session" + self.utility.session.checkpoint() + self.guiserver.add_task(self.guiservthread_checkpoint_timer,SESSION_CHECKPOINT_INTERVAL) + except: + print_exc() + + + def sesscb_ntfy_dbstats(self,subject,changeType,objectID,*args): + """ Called by SessionCallback thread """ + wx.CallAfter(self.setDBStats) + # Test + #if subject == NTFY_PEERS: + # self.frame.friendsmgr.sesscb_friendship_callback(objectID,{}) + + def setDBStats(self): + """ Set total # peers and torrents discovered """ + + # Arno: GUI thread accessing database + now = time() + if now - self.last_update < self.update_freq: + return + self.last_update = now + peer_db = self.utility.session.open_dbhandler(NTFY_PEERS) + npeers = peer_db.getNumberPeers() + torrent_db = self.utility.session.open_dbhandler(NTFY_TORRENTS) + nfiles = torrent_db.getNumberTorrents() + if nfiles > 30 and npeers > 30: + self.update_freq = 2 + # Arno: not closing db connections, assuming main thread's will be + # closed at end. 
+ + #self.frame.numberPersons.SetLabel('%d' % npeers) + #self.frame.numberFiles.SetLabel('%d' % nfiles) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "************>>>>>>>> setDBStats", npeers, nfiles + + def sesscb_ntfy_activities(self,subject,changeType,objectID,*args): + # Called by SessionCallback thread + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: sesscb_ntfy_activities called:",subject,"ct",changeType,"oid",objectID,"a",args + wx.CallAfter(self.frame.setActivity,objectID,*args) + + def sesscb_ntfy_reachable(self,subject,changeType,objectID,msg): + wx.CallAfter(self.frame.standardOverview.onReachable) + + + def sesscb_ntfy_friends(self,subject,changeType,objectID,*args): + """ Called by SessionCallback thread """ + if subject == NTFY_PEERS: + peerdb = self.utility.session.open_dbhandler(NTFY_PEERS) + peer = peerdb.getPeer(objectID) + #self.utility.session.close_dbhandler(peerdb) + else: + peer = None + wx.CallAfter(self.gui_ntfy_friends,subject,changeType,objectID,args,peer) + + def gui_ntfy_friends(self,subject,changeType,objectID,args,peer): + """ A change in friendship status, report via message window """ + if len(args) == 2: + if args[0] == 'friend': + fs = args[1] + if fs != FS_I_INVITED and fs != FS_I_DENIED and fs != FS_NOFRIEND: + fstext = fs2text(fs) + if peer['name'] is None or peer['name'] == '': + name = show_permid_short(objectID) + else: + name = peer['name'] + msg = name + u" " + fstext + wx.CallAfter(self.frame.setActivity,NTFY_ACT_NONE,msg) + + def onError(self,source=None): + # Don't use language independence stuff, self.utility may not be + # valid. + msg = "Unfortunately, Tribler ran into an internal error:\n\n" + if source is not None: + msg += source + msg += str(self.error.__class__)+':'+str(self.error) + msg += '\n' + msg += 'Please see the FAQ on www.tribler.org on how to act.' + dlg = wx.MessageDialog(None, msg, "Tribler Fatal Error", wx.OK|wx.ICON_ERROR) + result = dlg.ShowModal() + print_exc() + dlg.Destroy() + + + def OnExit(self): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: ONEXIT" + + #friends.done(self.utility.session) + + #self.torrentfeed.shutdown() + + # Don't checkpoint, interferes with current way of saving Preferences, + # see Tribler/Main/Dialogs/abcoption.py + self.utility.session.shutdown(hacksessconfcheckpoint=False) + + while not self.utility.session.has_shutdown(): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main ONEXIT: Waiting for Session to shutdown" + sleep(1) + + + if not ALLOW_MULTIPLE: + del self.single_instance_checker + return 0 + + def db_exception_handler(self,e): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Database Exception handler called",e,"value",e.args,"#" + try: + if e.args[1] == "DB object has been closed": + return # We caused this non-fatal error, don't show. 
+ if self.error is not None and self.error.args[1] == e.args[1]: + return # don't repeat same error + except: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "main: db_exception_handler error", e, type(e) + print_exc() + #print_stack() + self.error = e + onerror_lambda = lambda:self.onError(source="The database layer reported: ") + wx.CallAfter(onerror_lambda) + + def getConfigPath(self): + return self.utility.getConfigPath() + + def startWithRightView(self): + if self.params[0] != "": + self.guiUtility.standardLibraryOverview() + + + def i2ithread_readlinecallback(self,ic,cmd): + """ Called by Instance2Instance thread """ + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Another instance called us with cmd",cmd + ic.close() + + if cmd.startswith('START '): + param = cmd[len('START '):] + torrentfilename = None + if param.startswith('http:'): + # Retrieve from web + f = tempfile.NamedTemporaryFile() + n = urllib2.urlopen(param) + data = n.read() + f.write(data) + f.close() + n.close() + torrentfilename = f.name + else: + torrentfilename = param + + # Switch to GUI thread + # New for 5.0: Start in VOD mode + def start_asked_download(): + self.frame.startDownload(torrentfilename,vodmode=True) + self.guiUtility.standardLibraryOverview(refresh=True) + + wx.CallAfter(start_asked_download) + + + +def get_status_msgs(ds,videoplayer_mediastate,appname,said_start_playback,decodeprogress,totalhelping,totalspeed): + + intime = "Not playing for quite some time." + ETA = ((60 * 15, "Playing in less than 15 minutes."), + (60 * 10, "Playing in less than 10 minutes."), + (60 * 5, "Playing in less than 5 minutes."), + (60, "Playing in less than a minute.")) + + topmsg = '' + msg = '' + + logmsgs = ds.get_log_messages() + logmsg = None + if len(logmsgs) > 0: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Log",logmsgs[0] + logmsg = logmsgs[-1][1] + + preprogress = ds.get_vod_prebuffering_progress() + playable = ds.get_vod_playable() + t = ds.get_vod_playable_after() + + intime = ETA[0][1] + for eta_time, eta_msg in ETA: + if t > eta_time: + break + intime = eta_msg + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: playble",playable,"preprog",preprogress + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: ETA is",t,"secs" + # if t > float(2 ** 30): + # intime = "inf" + # elif t == 0.0: + # intime = "now" + # else: + # h, t = divmod(t, 60.0*60.0) + # m, s = divmod(t, 60.0) + # if h == 0.0: + # if m == 0.0: + # intime = "%ds" % (s) + # else: + # intime = "%dm:%02ds" % (m,s) + # else: + # intime = "%dh:%02dm:%02ds" % (h,m,s) + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: VODStats",preprogress,playable,"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%" + + if ds.get_status() == DLSTATUS_HASHCHECKING: + genprogress = ds.get_progress() + pstr = str(int(genprogress*100)) + msg = "Checking already downloaded parts "+pstr+"% done" + elif ds.get_status() == DLSTATUS_STOPPED_ON_ERROR: + msg = 'Error playing: '+str(ds.get_error()) + elif ds.get_progress() == 1.0: + msg = '' + elif playable: + if not said_start_playback: + msg = "Starting playback..." + + if videoplayer_mediastate == MEDIASTATE_STOPPED and said_start_playback: + if totalhelping == 0: + topmsg = u"Please leave the "+appname+" running, this will help other "+appname+" users to download faster." + else: + topmsg = u"Helping "+str(totalhelping)+" "+appname+" users to download. 
Please leave it running in the background." + + # Display this on status line + # TODO: Show balloon in systray when closing window to indicate things continue there + msg = '' + + elif videoplayer_mediastate == MEDIASTATE_PLAYING: + said_start_playback = True + # It may take a while for VLC to actually start displaying + # video, as it is trying to tune in to the stream (finding + # I-Frame). Display some info to show that: + # + cname = ds.get_download().get_def().get_name_as_unicode() + topmsg = u'Decoding: '+cname+' '+str(decodeprogress)+' s' + decodeprogress += 1 + msg = '' + elif videoplayer_mediastate == MEDIASTATE_PAUSED: + # msg = "Buffering... " + str(int(100.0*preprogress))+"%" + msg = "Buffering... " + str(int(100.0*preprogress))+"%. " + intime + else: + msg = '' + + elif preprogress != 1.0: + pstr = str(int(preprogress*100)) + npeers = ds.get_num_peers() + npeerstr = str(npeers) + if npeers == 0 and logmsg is not None: + msg = logmsg + elif npeers == 1: + msg = "Prebuffering "+pstr+"% done (connected to 1 person). " + intime + else: + msg = "Prebuffering "+pstr+"% done (connected to "+npeerstr+" people). " + intime + + try: + d = ds.get_download() + tdef = d.get_def() + videofiles = d.get_selected_files() + if len(videofiles) >= 1: + videofile = videofiles[0] + else: + videofile = None + if tdef.get_bitrate(videofile) is None: + msg += ' This video may not play properly because its bitrate is unknown' + except: + print_exc() + else: + # msg = "Waiting for sufficient download speed... "+intime + msg = 'Waiting for sufficient download speed... ' + intime + + npeers = ds.get_num_peers() + if npeers == 1: + msg = "One person found, receiving %.1f KB/s" % totalspeed[DOWNLOAD] + else: + msg = "%d people found, receiving %.1f KB/s" % (npeers, totalspeed[DOWNLOAD]) + + if playable: + if videoplayer_mediastate == MEDIASTATE_PAUSED and not ds.get_status() == DLSTATUS_SEEDING: + msg = "Buffering... " + msg + else: + msg = "" + + return [topmsg,msg,said_start_playback,decodeprogress] + + +############################################################## +# +# Main Program Start Here +# +############################################################## +def run(params = None): + if params is None: + params = [""] + + if len(sys.argv) > 1: + params = sys.argv[1:] + try: + # Create single instance semaphore + # Arno: On Linux and wxPython-2.8.1.1 the SingleInstanceChecker appears + # to mess up stderr, i.e., I get IOErrors when writing to it via print_exc() + # + if sys.platform != 'linux2': + single_instance_checker = wx.SingleInstanceChecker("tribler-" + wx.GetUserId()) + else: + single_instance_checker = LinuxSingleInstanceChecker("tribler") + + if not ALLOW_MULTIPLE and single_instance_checker.IsAnotherRunning(): + #Send torrent info to abc single instance + if params[0] != "": + torrentfilename = params[0] + i2ic = Instance2InstanceClient(I2I_LISTENPORT,'START',torrentfilename) + else: + arg0 = sys.argv[0].lower() + if arg0.endswith('.exe'): + # supply a unicode string to ensure that the unicode filesystem API is used (applies to windows) + installdir = os.path.abspath(os.path.dirname(unicode(sys.argv[0]))) + else: + # call the unicode specific getcwdu() otherwise homedirectories may crash + installdir = os.getcwdu() + # Arno: don't chdir to allow testing as other user from other dir. 
+ #os.chdir(installdir) + + # Launch first abc single instance + app = ABCApp(False, params, single_instance_checker, installdir) + configpath = app.getConfigPath() + app.MainLoop() + + print "Client shutting down. Sleeping for a few seconds to allow other threads to finish" + sleep(1) + except: + print_exc() + + # This is the right place to close the database, unfortunately Linux has + # a problem, see ABCFrame.OnCloseWindow + # + #if sys.platform != 'linux2': + # tribler_done(configpath) + #os._exit(0) + +if __name__ == '__main__': + run() + diff --git a/tribler-mod/Tribler/Main/tribler.py.bak b/tribler-mod/Tribler/Main/tribler.py.bak new file mode 100644 index 0000000..9fd82b8 --- /dev/null +++ b/tribler-mod/Tribler/Main/tribler.py.bak @@ -0,0 +1,1202 @@ +#!/usr/bin/python + +######################################################################### +# +# Author : Choopan RATTANAPOKA, Jie Yang, Arno Bakker +# +# Description : Main ABC [Yet Another Bittorrent Client] python script. +# you can run from source code by using +# >python abc.py +# need Python, WxPython in order to run from source code. +# +# see LICENSE.txt for license information +######################################################################### + +# Arno: M2Crypto overrides the method for https:// in the +# standard Python libraries. This causes msnlib to fail and makes Tribler +# freakout when "http://www.tribler.org/version" is redirected to +# "https://www.tribler.org/version/" (which happened during our website +# changeover) Until M2Crypto 0.16 is patched I'll restore the method to the +# original, as follows. +# +# This must be done in the first python file that is started. +# + +# modify the sys.stderr and sys.stdout for safe output +import Tribler.Debug.console + +import os,sys +import urllib +original_open_https = urllib.URLopener.open_https +import M2Crypto # Not a useless import! See above. +urllib.URLopener.open_https = original_open_https + +# Arno, 2008-03-21: see what happens when we disable this locale thing. 
Gives +# errors on Vista in "Regional and Language Settings Options" different from +# "English[United Kingdom]" +#import locale + +#try: +# import wxversion +# wxversion.select('2.8') +#except: +# pass +import wx +import wx.animate +from wx import xrc +#import hotshot + +from traceback import print_exc +import urllib2 +import tempfile + +import Tribler.Main.vwxGUI.font as font +from Tribler.Main.vwxGUI.TriblerStyles import TriblerStyles +from Tribler.Main.vwxGUI.MainFrame import MainFrame # py2exe needs this import +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility +## from Tribler.Main.vwxGUI.TasteHeart import set_tasteheart_bitmaps +## from Tribler.Main.vwxGUI.perfBar import set_perfBar_bitmaps +from Tribler.Main.vwxGUI.FriendsItemPanel import fs2text +from Tribler.Main.Dialogs.GUITaskQueue import GUITaskQueue +from Tribler.Main.notification import init as notification_init +from Tribler.Main.globals import DefaultDownloadStartupConfig,get_default_dscfg_filename +from Tribler.Main.Utility.utility import Utility +from Tribler.Main.Utility.constants import * + +from Tribler.Category.Category import Category +from Tribler.Policies.RateManager import UserDefinedMaxAlwaysOtherwiseEquallyDividedRateManager +from Tribler.Policies.SeedingManager import GlobalSeedingManager +from Tribler.Utilities.Instance2Instance import * +from Tribler.Utilities.LinuxSingleInstanceChecker import * + +from Tribler.Core.API import * +from Tribler.Core.Utilities.utilities import show_permid_short +#import Tribler.Core.CacheDB.friends as friends + +from Tribler.Video.defs import * +from Tribler.Video.VideoPlayer import VideoPlayer,PLAYBACKMODE_INTERNAL +from Tribler.Video.VideoFrame import VideoDummyFrame, VideoMacFrame + +# Boudewijn: keep this import BELOW the imports from Tribler.xxx.* as +# one of those modules imports time as a module. +from time import time, sleep + +I2I_LISTENPORT = 57891 +VIDEOHTTP_LISTENPORT = 6878 +SESSION_CHECKPOINT_INTERVAL = 1800.0 # seconds + +DEBUG = False +ALLOW_MULTIPLE = False + +############################################################## +# +# Class : ABCApp +# +# Main ABC application class that contains ABCFrame Object +# +############################################################## +class ABCApp(wx.App): + def __init__(self, redirectstderrout, params, single_instance_checker, installdir): + self.params = params + self.single_instance_checker = single_instance_checker + self.installdir = installdir + self.error = None + self.last_update = 0 + self.update_freq = 0 # how often to update #peers/#torrents + + self.guiserver = GUITaskQueue.getInstance() + self.said_start_playback = False + self.decodeprogress = 0 + + self.old_reputation = 0 + + try: + ubuntu = False + if sys.platform == "linux2": + f = open("/etc/issue","rb") + data = f.read(100) + f.close() + if data.find("Ubuntu 8.10") != -1: + ubuntu = True + if data.find("Ubuntu 9.04") != -1: + ubuntu = True + + if not redirectstderrout and ubuntu: + # On Ubuntu 8.10 not redirecting output causes the program to quit + wx.App.__init__(self, redirect=True) + else: + wx.App.__init__(self, redirectstderrout) + except: + print_exc() + + def OnInit(self): + try: + self.utility = Utility(self.installdir,Session.get_default_state_dir()) + self.utility.app = self + + #self.postinitstarted = False + """ + Hanging self.OnIdle to the onidle event doesnot work under linux (ubuntu). The images in xrc files + will not load in any but the filespanel. 
+ """ + #self.Bind(wx.EVT_IDLE, self.OnIdle) + + + # Set locale to determine localisation + #locale.setlocale(locale.LC_ALL, '') + + sys.stderr.write('Client Starting Up.\n') + sys.stderr.write('Build: ' + self.utility.lang.get('build') + '\n') + + bm = wx.Bitmap(os.path.join(self.utility.getPath(),'Tribler','Images','splash.jpg'),wx.BITMAP_TYPE_JPEG) + #s = wx.MINIMIZE_BOX | wx.MAXIMIZE_BOX | wx.RESIZE_BORDER | wx.SYSTEM_MENU | wx.CAPTION | wx.CLOSE_BOX | wx.CLIP_CHILDREN + #s = wx.SIMPLE_BORDER|wx.FRAME_NO_TASKBAR|wx.FRAME_FLOAT_ON_PARENT + self.splash = wx.SplashScreen(bm, wx.SPLASH_CENTRE_ON_SCREEN|wx.SPLASH_TIMEOUT, 1000, None, -1) + + # Arno: Do heavy startup on GUI thread after splash screen has been + # painted. + self.splash.Show() + "Replacement for self.Bind(wx.EVT_IDLE, self.OnIdle)" + wx.CallAfter(self.PostInit) + return True + + except Exception,e: + print_exc() + self.error = e + self.onError() + return False + + def OnIdle(self,event=None): + if not self.postinitstarted: + self.postinitstarted = True + wx.CallAfter(self.PostInit) + # Arno: On Linux I sometimes have to move the mouse into the splash + # for the rest of Tribler to start. H4x0r + if event is not None: + event.RequestMore(True) + event.Skip() + + + def PostInit(self): + try: + # On Linux: allow painting of splash screen first. + wx.Yield() + + # Initialise fonts + font.init() + + + self.utility.postAppInit(os.path.join(self.installdir,'Tribler','Images','tribler.ico')) + + # H4x0r a bit + ## set_tasteheart_bitmaps(self.utility.getPath()) + ## set_perfBar_bitmaps(self.utility.getPath()) + + cat = Category.getInstance(self.utility.getPath()) + cat.init_from_main(self.utility) + + # Put it here so an error is shown in the startup-error popup + # Start server for instance2instance communication + self.i2iconnhandler = InstanceConnectionHandler(self.i2ithread_readlinecallback) + self.i2is = Instance2InstanceServer(I2I_LISTENPORT,self.i2iconnhandler) + self.i2is.start() + + self.triblerStyles = TriblerStyles.getInstance() + + # Fire up the VideoPlayer, it abstracts away whether we're using + # an internal or external video player. 
+ playbackmode = self.utility.config.Read('videoplaybackmode', "int") + self.videoplayer = VideoPlayer.getInstance(httpport=VIDEOHTTP_LISTENPORT) + self.videoplayer.register(self.utility,preferredplaybackmode=playbackmode) + + notification_init( self.utility ) + + # + # Read and create GUI from .xrc files + # + self.guiUtility = GUIUtility.getInstance(self.utility, self.params) + self.res = xrc.XmlResource(os.path.join(self.utility.getPath(),'Tribler', 'Main','vwxGUI','MyFrame.xrc')) + self.guiUtility.xrcResource = self.res + self.frame = self.res.LoadFrame(None, "MyFrame") + self.guiUtility.frame = self.frame + + self.frame.set_wxapp(self) + + + self.guiUtility.scrollWindow = xrc.XRCCTRL(self.frame, "level0") + self.guiUtility.mainSizer = self.guiUtility.scrollWindow.GetSizer() + self.frame.topBackgroundRight = xrc.XRCCTRL(self.frame, "topBG3") + #self.guiUtility.scrollWindow.SetScrollbars(1,1,1100,683) + #self.guiUtility.scrollWindow.SetScrollRate(15,15) + self.frame.mainButtonPersons = xrc.XRCCTRL(self.frame, "mainButtonPersons") + self.frame.messageField = xrc.XRCCTRL(self.frame, "messageField") + self.frame.pageTitle = xrc.XRCCTRL(self.frame, "pageTitle") + self.frame.pageTitlePanel = xrc.XRCCTRL(self.frame, "pageTitlePanel") + self.frame.standardDetails = xrc.XRCCTRL(self.frame, "standardDetails") + self.frame.standardOverview = xrc.XRCCTRL(self.frame, "standardOverview") + self.frame.firewallStatus = xrc.XRCCTRL(self.frame, "firewallStatus") + + # Make sure self.utility.frame is set + self.startAPI() + self.guiUtility.open_dbs() + ##self.guiUtility.initStandardOverview(self.frame.standardOverview) + + # TEST: add mod for Gopher + """ + moderation_cast_db = self.utility.session.open_dbhandler(NTFY_MODERATIONCAST) + moderation = {} + from Tribler.Core.CacheDB.sqlitecachedb import bin2str + moderation['infohash'] = bin2str('\xbd\x0c\x86\xf9\xe4JE\x0e\xff\xff\x16\xedF01*<| \xe9') + moderation_cast_db.addOwnModeration(moderation) + """ + + self.frame.searchtxtctrl = xrc.XRCCTRL(self.frame, "tx220cCCC") + self.frame.search_icon = xrc.XRCCTRL(self.frame, "search_icon") + self.frame.files_friends = xrc.XRCCTRL(self.frame, "files_friends") + self.frame.top_image = xrc.XRCCTRL(self.frame, "top_image") + + self.frame.top_bg = xrc.XRCCTRL(self.frame,"top_search") + self.frame.top_bg.set_frame(self.frame) + self.frame.pagerPanel = xrc.XRCCTRL(self.frame,"pagerPanel") + self.frame.standardPager = xrc.XRCCTRL(self.frame,"standardPager") + self.frame.horizontal = xrc.XRCCTRL(self.frame, "horizontal") + self.frame.changePlay = xrc.XRCCTRL(self.frame, "changePlay") + + + # on linux pagerpanel needs a SetMinSize call + if sys.platform == "linux2": + self.frame.pagerPanel.SetMinSize((666,20)) + elif sys.platform == 'darwin': + self.frame.pagerPanel.SetMinSize((674,21)) + else: + self.frame.pagerPanel.SetMinSize((666,21)) + + + + # videopanel + self.frame.videoparentpanel = xrc.XRCCTRL(self.frame,"videopanel") + if sys.platform == 'darwin': + self.frame.videoparentpanel.SetBackgroundColour((216,233,240)) + self.frame.videoparentpanel.Hide() + if sys.platform == "linux2": + self.frame.videoparentpanel.SetMinSize((363,400)) + elif sys.platform == 'win32': + self.frame.videoparentpanel.SetMinSize((363,400)) + else: + self.frame.videoparentpanel.SetMinSize((355,240)) + + + logopath = os.path.join(self.utility.getPath(),'Tribler','Main','vwxGUI','images','5.0','video.gif') + if sys.platform == 'darwin': + self.frame.videoframe = 
VideoMacFrame(self.frame.videoparentpanel,self.utility,"Videoplayer",os.path.join(self.installdir,'Tribler','Images','tribler.ico'),self.videoplayer.get_vlcwrap(),logopath) + self.videoplayer.set_videoframe(self.frame.videoframe) + else: + self.frame.videoframe = VideoDummyFrame(self.frame.videoparentpanel,self.utility,self.videoplayer.get_vlcwrap(),logopath) + self.videoplayer.set_videoframe(self.frame.videoframe) + + if sys.platform == "linux2": + # On Linux the _PostInit does not get called if the thing + # is not shown. We need the _PostInit to be called to set + # the GUIUtility.standardOverview, etc. member variables. + # + wx.CallAfter(self.frame.standardOverview.Hide) + wx.CallAfter(self.frame.standardDetails.Hide) + hide_names = [self.frame.pagerPanel] + else: + hide_names = [self.frame.standardOverview,self.frame.standardDetails,self.frame.pagerPanel] + + + + for name in hide_names: + name.Hide() + self.frame.videoframe.hide_videoframe() + + self.frame.top_bg.createBackgroundImage() + ## self.frame.top_bg.setBackground((230,230,230)) + + + self.frame.top_bg.Layout() + if sys.platform == 'win32': + wx.CallAfter(self.frame.top_bg.Refresh) + wx.CallAfter(self.frame.top_bg.Layout) + + + + # reputation + self.guiserver.add_task(self.guiservthread_update_reputation, .2) + + self.setDBStats() + + self.Bind(wx.EVT_QUERY_END_SESSION, self.frame.OnCloseWindow) + self.Bind(wx.EVT_END_SESSION, self.frame.OnCloseWindow) + + + # Arno, 2007-05-03: wxWidgets 2.8.3.0 and earlier have the MIME-type for .bmp + # files set to 'image/x-bmp' whereas 'image/bmp' is the official one. + try: + bmphand = None + hands = wx.Image.GetHandlers() + for hand in hands: + #print "Handler",hand.GetExtension(),hand.GetType(),hand.GetMimeType() + if hand.GetMimeType() == 'image/x-bmp': + bmphand = hand + break + #wx.Image.AddHandler() + if bmphand is not None: + bmphand.SetMimeType('image/bmp') + except: + # wx < 2.7 don't like wx.Image.GetHandlers() + print_exc() + + # Must be after ABCLaunchMany is created + #self.torrentfeed = TorrentFeedThread.getInstance() + #self.torrentfeed.register(self.utility) + #self.torrentfeed.start() + + #print "DIM",wx.GetDisplaySize() + #print "MM",wx.GetDisplaySizeMM() + + #self.frame.Refresh() + #self.frame.Layout() + self.frame.Show(True) + + wx.CallAfter(self.startWithRightView) + # Delay this so GUI has time to paint + wx.CallAfter(self.loadSessionCheckpoint) + + + #self.sr_indicator_left_image = wx.Image(os.path.join(self.utility.getPath(),"Tribler","Main","vwxGUI","images","5.0", "SRindicator_left.png", wx.BITMAP_TYPE_ANY)) + #self.sr_indicator_left = wx.StaticBitmap(self, -1, wx.BitmapFromImage(self.sr_indicator_left_image)) + + #self.sr_indicator_right_image = wx.Image(os.path.join(self.utility.getPath(),"Tribler","Main","vwxGUI","images","5.0", "SRindicator_right.png", wx.BITMAP_TYPE_ANY)) + #self.sr_indicator_right = wx.StaticBitmap(self, -1, wx.BitmapFromImage(self.sr_indicator_right_image)) + + + except Exception,e: + print_exc() + self.error = e + self.onError() + return False + + return True + + + + def OnSearchResultsPressed(self, event): + self.guiUtility.OnResultsClicked() + + + def helpClick(self,event=None): + title = self.utility.lang.get('sharing_reputation_information_title') + msg = self.utility.lang.get('sharing_reputation_information_message') + + dlg = wx.MessageDialog(None, msg, title, wx.OK|wx.ICON_INFORMATION) + result = dlg.ShowModal() + dlg.Destroy() + + def viewSettings(self,event): + self.guiUtility.settingsOverview() + + def viewLibrary(self,event): 
+ self.guiUtility.standardLibraryOverview() + + def toggleFamilyFilter(self,event): + self.guiUtility.toggleFamilyFilter() + + + def startAPI(self): + + # Start Tribler Session + state_dir = Session.get_default_state_dir() + + cfgfilename = Session.get_default_config_filename(state_dir) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Session config",cfgfilename + try: + self.sconfig = SessionStartupConfig.load(cfgfilename) + except: + print_exc() + self.sconfig = SessionStartupConfig() + self.sconfig.set_state_dir(state_dir) + # Set default Session params here + destdir = get_default_dest_dir() + torrcolldir = os.path.join(destdir,STATEDIR_TORRENTCOLL_DIR) + self.sconfig.set_torrent_collecting_dir(torrcolldir) + self.sconfig.set_nat_detect(True) + + # rename old collected torrent directory + try: + if not os.path.exists(destdir): + os.makedirs(destdir) + old_collected_torrent_dir = os.path.join(state_dir, 'torrent2') + if not os.path.exists(torrcolldir) and os.path.isdir(old_collected_torrent_dir): + os.rename(old_collected_torrent_dir, torrcolldir) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Moved dir with old collected torrents to", torrcolldir + + # Arno, 2008-10-23: Also copy torrents the user got himself + old_own_torrent_dir = os.path.join(state_dir, 'torrent') + for name in os.listdir(old_own_torrent_dir): + oldpath = os.path.join(old_own_torrent_dir,name) + newpath = os.path.join(torrcolldir,name) + if not os.path.exists(newpath): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Copying own torrent",oldpath,newpath + os.rename(oldpath,newpath) + + # Internal tracker + except: + print_exc() + + # 22/08/08 boudewijn: convert abc.conf to SessionConfig + self.utility.convert__presession_4_1__4_2(self.sconfig) + + s = Session(self.sconfig) + self.utility.session = s + + s.add_observer(self.sesscb_ntfy_reachable,NTFY_REACHABLE,[NTFY_INSERT]) + s.add_observer(self.sesscb_ntfy_activities,NTFY_ACTIVITIES,[NTFY_INSERT]) + s.add_observer(self.sesscb_ntfy_dbstats,NTFY_TORRENTS,[NTFY_INSERT]) + s.add_observer(self.sesscb_ntfy_dbstats,NTFY_PEERS,[NTFY_INSERT]) + s.add_observer(self.sesscb_ntfy_friends,NTFY_PEERS,[NTFY_UPDATE]) + + + # set port number in GuiUtility + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'LISTEN PORT :' , s.get_listen_port() + port = s.get_listen_port() + self.guiUtility.set_port_number(port) + + + # Load the default DownloadStartupConfig + dlcfgfilename = get_default_dscfg_filename(s) + try: + defaultDLConfig = DefaultDownloadStartupConfig.load(dlcfgfilename) + except: + defaultDLConfig = DefaultDownloadStartupConfig.getInstance() + #print_exc() + defaultdestdir = os.path.join(get_default_dest_dir()) + defaultDLConfig.set_dest_dir(defaultdestdir) + + # 29/08/08 boudewijn: convert abc.conf to DefaultDownloadStartupConfig + self.utility.convert__postsession_4_1__4_2(s, defaultDLConfig) + + s.set_coopdlconfig(defaultDLConfig) + + # Loading of checkpointed Downloads delayed to allow GUI to paint, + # see loadSessionCheckpoint + + # Create global rate limiter + self.ratelimiter = UserDefinedMaxAlwaysOtherwiseEquallyDividedRateManager() + self.rateadjustcount = 0 + + maxup = self.utility.config.Read('maxuploadrate', "int") + if maxup == -1: # no upload + self.ratelimiter.set_global_max_speed(UPLOAD, 0.00001) + else: + self.ratelimiter.set_global_max_speed(UPLOAD, maxup) + + + maxdown = self.utility.config.Read('maxdownloadrate', "int") + 
self.ratelimiter.set_global_max_speed(DOWNLOAD, maxdown) + + + maxupseed = self.utility.config.Read('maxseeduploadrate', "int") + self.ratelimiter.set_global_max_seedupload_speed(maxupseed) + self.utility.ratelimiter = self.ratelimiter + +# SelectiveSeeding _ + self.seedingmanager = GlobalSeedingManager(self.utility.config.Read)#, self.utility.session) + self.seedingcount = 0 +# _SelectiveSeeding + + # seeding stats crawling + self.seeding_snapshot_count = 0 + self.seedingstats_settings = s.open_dbhandler(NTFY_SEEDINGSTATSSETTINGS).loadCrawlingSettings() + self.seedingstats_enabled = self.seedingstats_settings[0][2] + self.seedingstats_interval = self.seedingstats_settings[0][1] + + # Only allow updates to come in after we defined ratelimiter + s.set_download_states_callback(self.sesscb_states_callback) + + # Load friends from friends.txt + #friends.init(s) + + # Schedule task for checkpointing Session, to avoid hash checks after + # crashes. + # + self.guiserver.add_task(self.guiservthread_checkpoint_timer,SESSION_CHECKPOINT_INTERVAL) + + + def sesscb_states_callback(self,dslist): + """ Called by SessionThread """ + wx.CallAfter(self.gui_states_callback,dslist) + return(1.0, True) + + def get_reputation(self): + """ get the current reputation score""" + bc_db = self.utility.session.open_dbhandler(NTFY_BARTERCAST) + reputation = bc_db.getMyReputation() + self.utility.session.close_dbhandler(bc_db) + return reputation + + def get_total_down(self): + bc_db = self.utility.session.open_dbhandler(NTFY_BARTERCAST) + return bc_db.total_down + + def get_total_up(self): + bc_db = self.utility.session.open_dbhandler(NTFY_BARTERCAST) + return bc_db.total_up + + + def set_reputation(self): + """ set the reputation in the GUI""" + reputation = self.get_reputation() + if sys.platform == 'win32': + self.frame.top_bg.updateReputation(reputation) + elif self.frame.top_bg.sr_msg: + if reputation < -0.33: + self.frame.top_bg.sr_msg.SetLabel('Poor') + self.frame.top_bg.sr_msg.SetForegroundColour((255,51,0)) + elif reputation < 0.33: + self.frame.top_bg.sr_msg.SetLabel('Average') + self.frame.top_bg.sr_msg.SetForegroundColour(wx.BLACK) + else: + self.frame.top_bg.sr_msg.SetLabel('Good') + self.frame.top_bg.sr_msg.SetForegroundColour((0,80,120)) + + + + + if DEBUG: + print >> sys.stderr , "main: My Reputation",reputation + + self.frame.top_bg.help.SetToolTipString(self.utility.lang.get('help') % (reputation)) + + d = int(self.get_total_down()) * 1024.0 + + if d < 10: + s = '%dB Down ' % d + elif d < 100: + s = '%dB Down ' % d + elif d < 1000: + s = '%dB Down ' % d + elif d < 1024: + s = '%1.1fKB Down' % (d/1024.0) + elif d < 10240: + s = '%dKB Down ' % (d//1024) + elif d < 102400: + s = '%dKB Down ' % (d//1024) + elif d < 1022796: + s = '%dKB Down' % (d//1024) + elif d < 1048576: + s = '%1.1fMB Down' % (d//1048576.0) + elif d < 10485760: + s = '%dMB Down ' % (d//1048576) + elif d < 104857600: + s = '%dMB Down ' % (d//1048576) + elif d < 1047527425: + s = '%dMB Down' % (d//1048576) + elif d < 1073741824: + s = '%1.1fGB Down' % (d//1073741824.0) + elif d < 10737418240: + s = '%dGB Down ' % (d//1073741824) + elif d < 107374182400: + s = '%dGB Down ' % (d//1073741824) + else: + s = '%dGB Down' % (d//1073741824) + + + #if d < 10: + # s = '%dB Down ' % d + #elif d < 100: + # s = '%dB Down ' % d + #elif d < 1000: + # s = '%dB Down ' % d + #elif d < 10000: + # s = '%dKB Down ' % (d//1000L) + #elif d < 100000: + # s = '%dKB Down ' % (d//1000L) + #elif d < 1000000: + # s = '%dKB Down' % (d//1000L) + #elif d < 10000000: + 
# s = '%dMB Down ' % (d//1000000L) + #elif d < 100000000: + # s = '%dMB Down ' % (d//1000000L) + #elif d < 1000000000: + # s = '%dMB Down' % (d//1000000L) + #elif d < 10000000000: + # s = '%dGB Down ' % (d//1000000000L) + #elif d < 100000000000: + # s = '%dGB Down ' % (d//1000000000L) + #else: + # s = '%dGB Down' % (d//1000000000L) + + self.frame.top_bg.total_down.SetLabel(s) + + + u = self.get_total_up() * 1024.0 + + + if u < 1000: + s = '%4dB Up' % u + elif u < 1024: + s = '%1.1fKB Up' % (u/1024.0) + elif u < 1022796: + s = '%3dKB Up' % (u//1024) + elif u < 1048576: + s = '%1.1fMB Up' % (u//1048576.0) + elif u < 1047527425: + s = '%3dMB Up' % (u//1048576) + elif u < 1073741824: + s = '%1.1fGB Up' % (u//1073741824.0) + else: + s = '%3dGB Up' % (u//1073741824) + + + #if u < 1000: + # s = '%4dB Up' % u + #elif u < 1000000: + # s = '%3dKB Up' % (u//1000L) + #elif u < 1000000000: + # s = '%3dMB Up' % (u//1000000L) + #else: + # s = '%3dGB Up' % (u//1000000000L) + + self.frame.top_bg.total_up.SetLabel(s) + + + self.frame.hsizer = self.frame.top_bg.sr_indicator.GetContainingSizer() + self.frame.hsizer.Remove(0) + self.frame.hsizer.Prepend(wx.Size(reputation*40+50,0),0,wx.LEFT,0) + + self.frame.hsizer.Layout() + + ##self.old_reputation = reputation + + + def guiservthread_update_reputation(self): + """ update the reputation""" + wx.CallAfter(self.set_reputation) + self.guiserver.add_task(self.guiservthread_update_reputation,10.0) + + + + + def gui_states_callback(self,dslist): + """ Called by MainThread """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Stats:" + + torrentdb = self.utility.session.open_dbhandler(NTFY_TORRENTS) + peerdb = self.utility.session.open_dbhandler(NTFY_PEERS) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Stats: Total torrents found",torrentdb.size(),"peers",peerdb.size() + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Stats: NAT",self.utility.session.get_nat_type() + try: + # Print stats on Console + for ds in dslist: + # safename = `ds.get_download().get_def().get_name()` + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Stats: %s %.1f%% %s dl %.1f ul %.1f n %d\n" % (dlstatus_strings[ds.get_status()],100.0*ds.get_progress(),safename,ds.get_current_speed(DOWNLOAD),ds.get_current_speed(UPLOAD),ds.get_num_peers()) + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Infohash:",`ds.get_download().get_def().get_infohash()` + if ds.get_status() == DLSTATUS_STOPPED_ON_ERROR: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Error:",`ds.get_error()` + + # Find State of currently playing video + playds = None + d = self.videoplayer.get_vod_download() + for ds in dslist: + if ds.get_download() == d: + playds = ds + break + + # Apply status displaying from SwarmPlayer + if playds: + videoplayer_mediastate = self.videoplayer.get_state() + + totalhelping = 0 + totalspeed = {UPLOAD:0.0,DOWNLOAD:0.0} + for ds in dslist: + totalspeed[UPLOAD] += ds.get_current_speed(UPLOAD) + totalspeed[DOWNLOAD] += ds.get_current_speed(DOWNLOAD) + totalhelping += ds.get_num_peers() + + [topmsg,msg,self.said_start_playback,self.decodeprogress] = get_status_msgs(playds,videoplayer_mediastate,"Tribler",self.said_start_playback,self.decodeprogress,totalhelping,totalspeed) + # Update status msg and progress bar + if topmsg != '': + + if videoplayer_mediastate == MEDIASTATE_PLAYING or (videoplayer_mediastate == 
MEDIASTATE_STOPPED and self.said_start_playback): + # In SwarmPlayer we would display "Decoding: N secs" + # when VLC was playing but the video was not yet + # being displayed (because VLC was looking for an + # I-frame). We would display it in the area where + # VLC would paint if it was ready to display. + # Hence, our text would be overwritten when the + # video was ready. We write the status text to + # its own area here, so trick doesn't work. + # For now: just hide. + text = msg + else: + text = topmsg + else: + text = msg + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Messages",topmsg,msg,`playds.get_download().get_def().get_name()` + + self.videoplayer.set_player_status_and_progress(text,playds.get_pieces_complete()) + + # Pass DownloadStates to libaryView + try: + if self.guiUtility.standardOverview is not None: + mode = self.guiUtility.standardOverview.mode + #if mode == 'libraryMode' or mode == 'friendsMode': + # Also pass dslist to friendsView, for coopdl boosting info + # Arno, 2009-02-11: We also need it in filesMode now. + modedata = self.guiUtility.standardOverview.data[mode] + grid = modedata.get('grid') + if grid is not None: + gm = grid.gridManager + gm.download_state_gui_callback(dslist) + except KeyError: + # Apparently libraryMode only has has a 'grid' key when visible + print_exc() + except AttributeError: + print_exc() + except: + print_exc() + + # Restart other torrents when the single torrent that was + # running in VOD mode is done + currdlist = [] + for ds in dslist: + currdlist.append(ds.get_download()) + vodd = self.videoplayer.get_vod_download() + for ds in dslist: + d = ds.get_download() + if d == vodd and ds.get_status() == DLSTATUS_SEEDING: + self.restart_other_downloads(currdlist) + break + + # Adjust speeds once every 4 seconds + adjustspeeds = False + if self.rateadjustcount % 4 == 0: + adjustspeeds = True + self.rateadjustcount += 1 + + if adjustspeeds: + self.ratelimiter.add_downloadstatelist(dslist) + self.ratelimiter.adjust_speeds() + + # Update stats in lower right overview box + self.guiUtility.refreshTorrentStats(dslist) + + # Upload overall upload states + self.guiUtility.refreshUploadStats(dslist) + +# SelectiveSeeding_ + # Apply seeding policy every 60 seconds, for performance + applyseedingpolicy = False + if self.seedingcount % 60 == 0: + applyseedingpolicy = True + self.seedingcount += 1 + + if applyseedingpolicy: + self.seedingmanager.apply_seeding_policy(dslist) +# _SelectiveSeeding + +# Crawling Seeding Stats_ + if self.seedingstats_enabled == 1: + snapshot_seeding_stats = False + if self.seeding_snapshot_count % self.seedingstats_interval == 0: + snapshot_seeding_stats = True + self.seeding_snapshot_count += 1 + + if snapshot_seeding_stats: + bc_db = self.utility.session.open_dbhandler(NTFY_BARTERCAST) + reputation = bc_db.getMyReputation() + self.utility.session.close_dbhandler(bc_db) + + seedingstats_db = self.utility.session.open_dbhandler(NTFY_SEEDINGSTATS) + seedingstats_db.updateSeedingStats(self.utility.session.get_permid(), reputation, dslist, self.seedingstats_interval) + self.utility.session.close_dbhandler(seedingstats_db) +# _Crawling Seeding Stats + + except: + print_exc() + + def restart_other_downloads(self,currdlist): + restartdlist = self.videoplayer.get_vod_postponed_downloads() + self.videoplayer.set_vod_postponed_downloads([]) # restart only once + for d in restartdlist: + if d in currdlist: + d.set_mode(DLMODE_NORMAL) + d.restart() + + + def OnClosingVideoFrameOrExtPlayer(self): + 
vodd = self.videoplayer.get_vod_download() + if vodd is not None: + if vodd.get_def().get_live(): + # Arno, 2009-03-27: Works poorly with VLC 0.9 without MPEGTS + # patch. There VLC may close the HTTP connection and we interpret + # it as a window close (no window in 5.0) and stop live, thereby + # killing any future attempts. Should see how this works with + # MPEGTS patch put in. + # + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: OnClosingVideoFrameOrExtPlayer: vodd is live, stopping",vodd.get_def().get_name_as_unicode() + vodd.stop() + self.restart_other_downloads(self.utility.session.get_downloads()) + #else: playing Web2 video + + def loadSessionCheckpoint(self): + # Load all other downloads + # TODO: reset all saved DownloadConfig to new default? + if self.params[0] != "": + # There is something on the cmdline, start all stopped + self.utility.session.load_checkpoint(initialdlstatus=DLSTATUS_STOPPED) + else: + self.utility.session.load_checkpoint() + + def guiservthread_checkpoint_timer(self): + """ Periodically checkpoint Session """ + try: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Checkpointing Session" + self.utility.session.checkpoint() + self.guiserver.add_task(self.guiservthread_checkpoint_timer,SESSION_CHECKPOINT_INTERVAL) + except: + print_exc() + + + def sesscb_ntfy_dbstats(self,subject,changeType,objectID,*args): + """ Called by SessionCallback thread """ + wx.CallAfter(self.setDBStats) + # Test + #if subject == NTFY_PEERS: + # self.frame.friendsmgr.sesscb_friendship_callback(objectID,{}) + + def setDBStats(self): + """ Set total # peers and torrents discovered """ + + # Arno: GUI thread accessing database + now = time() + if now - self.last_update < self.update_freq: + return + self.last_update = now + peer_db = self.utility.session.open_dbhandler(NTFY_PEERS) + npeers = peer_db.getNumberPeers() + torrent_db = self.utility.session.open_dbhandler(NTFY_TORRENTS) + nfiles = torrent_db.getNumberTorrents() + if nfiles > 30 and npeers > 30: + self.update_freq = 2 + # Arno: not closing db connections, assuming main thread's will be + # closed at end. 
+ + #self.frame.numberPersons.SetLabel('%d' % npeers) + #self.frame.numberFiles.SetLabel('%d' % nfiles) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "************>>>>>>>> setDBStats", npeers, nfiles + + def sesscb_ntfy_activities(self,subject,changeType,objectID,*args): + # Called by SessionCallback thread + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: sesscb_ntfy_activities called:",subject,"ct",changeType,"oid",objectID,"a",args + wx.CallAfter(self.frame.setActivity,objectID,*args) + + def sesscb_ntfy_reachable(self,subject,changeType,objectID,msg): + wx.CallAfter(self.frame.standardOverview.onReachable) + + + def sesscb_ntfy_friends(self,subject,changeType,objectID,*args): + """ Called by SessionCallback thread """ + if subject == NTFY_PEERS: + peerdb = self.utility.session.open_dbhandler(NTFY_PEERS) + peer = peerdb.getPeer(objectID) + #self.utility.session.close_dbhandler(peerdb) + else: + peer = None + wx.CallAfter(self.gui_ntfy_friends,subject,changeType,objectID,args,peer) + + def gui_ntfy_friends(self,subject,changeType,objectID,args,peer): + """ A change in friendship status, report via message window """ + if len(args) == 2: + if args[0] == 'friend': + fs = args[1] + if fs != FS_I_INVITED and fs != FS_I_DENIED and fs != FS_NOFRIEND: + fstext = fs2text(fs) + if peer['name'] is None or peer['name'] == '': + name = show_permid_short(objectID) + else: + name = peer['name'] + msg = name + u" " + fstext + wx.CallAfter(self.frame.setActivity,NTFY_ACT_NONE,msg) + + def onError(self,source=None): + # Don't use language independence stuff, self.utility may not be + # valid. + msg = "Unfortunately, Tribler ran into an internal error:\n\n" + if source is not None: + msg += source + msg += str(self.error.__class__)+':'+str(self.error) + msg += '\n' + msg += 'Please see the FAQ on www.tribler.org on how to act.' + dlg = wx.MessageDialog(None, msg, "Tribler Fatal Error", wx.OK|wx.ICON_ERROR) + result = dlg.ShowModal() + print_exc() + dlg.Destroy() + + + def OnExit(self): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: ONEXIT" + + #friends.done(self.utility.session) + + #self.torrentfeed.shutdown() + + # Don't checkpoint, interferes with current way of saving Preferences, + # see Tribler/Main/Dialogs/abcoption.py + self.utility.session.shutdown(hacksessconfcheckpoint=False) + + while not self.utility.session.has_shutdown(): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main ONEXIT: Waiting for Session to shutdown" + sleep(1) + + + if not ALLOW_MULTIPLE: + del self.single_instance_checker + return 0 + + def db_exception_handler(self,e): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Database Exception handler called",e,"value",e.args,"#" + try: + if e.args[1] == "DB object has been closed": + return # We caused this non-fatal error, don't show. 
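+            # The two filters in this try block are best-effort: they index
+            # e.args[1], and exceptions without a second argument simply fall
+            # through to the except clause below, which only logs them.  Whatever
+            # survives filtering is remembered in self.error (so the same message is
+            # not shown twice) and reported via wx.CallAfter, since this handler may
+            # be invoked from a non-GUI thread and wx dialogs may only be created on
+            # the GUI thread.  Generic shape of that hand-off (display_error is a
+            # placeholder name, not a real function here):
+            #
+            #     wx.CallAfter(display_error, str(e))   # marshal onto the GUI thread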
+ if self.error is not None and self.error.args[1] == e.args[1]: + return # don't repeat same error + except: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "main: db_exception_handler error", e, type(e) + print_exc() + #print_stack() + self.error = e + onerror_lambda = lambda:self.onError(source="The database layer reported: ") + wx.CallAfter(onerror_lambda) + + def getConfigPath(self): + return self.utility.getConfigPath() + + def startWithRightView(self): + if self.params[0] != "": + self.guiUtility.standardLibraryOverview() + + + def i2ithread_readlinecallback(self,ic,cmd): + """ Called by Instance2Instance thread """ + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Another instance called us with cmd",cmd + ic.close() + + if cmd.startswith('START '): + param = cmd[len('START '):] + torrentfilename = None + if param.startswith('http:'): + # Retrieve from web + f = tempfile.NamedTemporaryFile() + n = urllib2.urlopen(param) + data = n.read() + f.write(data) + f.close() + n.close() + torrentfilename = f.name + else: + torrentfilename = param + + # Switch to GUI thread + # New for 5.0: Start in VOD mode + def start_asked_download(): + self.frame.startDownload(torrentfilename,vodmode=True) + self.guiUtility.standardLibraryOverview(refresh=True) + + wx.CallAfter(start_asked_download) + + + +def get_status_msgs(ds,videoplayer_mediastate,appname,said_start_playback,decodeprogress,totalhelping,totalspeed): + + intime = "Not playing for quite some time." + ETA = ((60 * 15, "Playing in less than 15 minutes."), + (60 * 10, "Playing in less than 10 minutes."), + (60 * 5, "Playing in less than 5 minutes."), + (60, "Playing in less than a minute.")) + + topmsg = '' + msg = '' + + logmsgs = ds.get_log_messages() + logmsg = None + if len(logmsgs) > 0: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Log",logmsgs[0] + logmsg = logmsgs[-1][1] + + preprogress = ds.get_vod_prebuffering_progress() + playable = ds.get_vod_playable() + t = ds.get_vod_playable_after() + + intime = ETA[0][1] + for eta_time, eta_msg in ETA: + if t > eta_time: + break + intime = eta_msg + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: playble",playable,"preprog",preprogress + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: ETA is",t,"secs" + # if t > float(2 ** 30): + # intime = "inf" + # elif t == 0.0: + # intime = "now" + # else: + # h, t = divmod(t, 60.0*60.0) + # m, s = divmod(t, 60.0) + # if h == 0.0: + # if m == 0.0: + # intime = "%ds" % (s) + # else: + # intime = "%dm:%02ds" % (m,s) + # else: + # intime = "%dh:%02dm:%02ds" % (h,m,s) + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: VODStats",preprogress,playable,"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%" + + if ds.get_status() == DLSTATUS_HASHCHECKING: + genprogress = ds.get_progress() + pstr = str(int(genprogress*100)) + msg = "Checking already downloaded parts "+pstr+"% done" + elif ds.get_status() == DLSTATUS_STOPPED_ON_ERROR: + msg = 'Error playing: '+str(ds.get_error()) + elif ds.get_progress() == 1.0: + msg = '' + elif playable: + if not said_start_playback: + msg = "Starting playback..." + + if videoplayer_mediastate == MEDIASTATE_STOPPED and said_start_playback: + if totalhelping == 0: + topmsg = u"Please leave the "+appname+" running, this will help other "+appname+" users to download faster." + else: + topmsg = u"Helping "+str(totalhelping)+" "+appname+" users to download. 
Please leave it running in the background." + + # Display this on status line + # TODO: Show balloon in systray when closing window to indicate things continue there + msg = '' + + elif videoplayer_mediastate == MEDIASTATE_PLAYING: + said_start_playback = True + # It may take a while for VLC to actually start displaying + # video, as it is trying to tune in to the stream (finding + # I-Frame). Display some info to show that: + # + cname = ds.get_download().get_def().get_name_as_unicode() + topmsg = u'Decoding: '+cname+' '+str(decodeprogress)+' s' + decodeprogress += 1 + msg = '' + elif videoplayer_mediastate == MEDIASTATE_PAUSED: + # msg = "Buffering... " + str(int(100.0*preprogress))+"%" + msg = "Buffering... " + str(int(100.0*preprogress))+"%. " + intime + else: + msg = '' + + elif preprogress != 1.0: + pstr = str(int(preprogress*100)) + npeers = ds.get_num_peers() + npeerstr = str(npeers) + if npeers == 0 and logmsg is not None: + msg = logmsg + elif npeers == 1: + msg = "Prebuffering "+pstr+"% done (connected to 1 person). " + intime + else: + msg = "Prebuffering "+pstr+"% done (connected to "+npeerstr+" people). " + intime + + try: + d = ds.get_download() + tdef = d.get_def() + videofiles = d.get_selected_files() + if len(videofiles) >= 1: + videofile = videofiles[0] + else: + videofile = None + if tdef.get_bitrate(videofile) is None: + msg += ' This video may not play properly because its bitrate is unknown' + except: + print_exc() + else: + # msg = "Waiting for sufficient download speed... "+intime + msg = 'Waiting for sufficient download speed... ' + intime + + npeers = ds.get_num_peers() + if npeers == 1: + msg = "One person found, receiving %.1f KB/s" % totalspeed[DOWNLOAD] + else: + msg = "%d people found, receiving %.1f KB/s" % (npeers, totalspeed[DOWNLOAD]) + + if playable: + if videoplayer_mediastate == MEDIASTATE_PAUSED and not ds.get_status() == DLSTATUS_SEEDING: + msg = "Buffering... " + msg + else: + msg = "" + + return [topmsg,msg,said_start_playback,decodeprogress] + + +############################################################## +# +# Main Program Start Here +# +############################################################## +def run(params = None): + if params is None: + params = [""] + + if len(sys.argv) > 1: + params = sys.argv[1:] + try: + # Create single instance semaphore + # Arno: On Linux and wxPython-2.8.1.1 the SingleInstanceChecker appears + # to mess up stderr, i.e., I get IOErrors when writing to it via print_exc() + # + if sys.platform != 'linux2': + single_instance_checker = wx.SingleInstanceChecker("tribler-" + wx.GetUserId()) + else: + single_instance_checker = LinuxSingleInstanceChecker("tribler") + + if not ALLOW_MULTIPLE and single_instance_checker.IsAnotherRunning(): + #Send torrent info to abc single instance + if params[0] != "": + torrentfilename = params[0] + i2ic = Instance2InstanceClient(I2I_LISTENPORT,'START',torrentfilename) + else: + arg0 = sys.argv[0].lower() + if arg0.endswith('.exe'): + # supply a unicode string to ensure that the unicode filesystem API is used (applies to windows) + installdir = os.path.abspath(os.path.dirname(unicode(sys.argv[0]))) + else: + # call the unicode specific getcwdu() otherwise homedirectories may crash + installdir = os.getcwdu() + # Arno: don't chdir to allow testing as other user from other dir. 
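+            # Leaving the working directory untouched means installdir is passed
+            # explicitly to ABCApp below instead of being implied by os.getcwd();
+            # the disabled chdir is kept for reference.  This branch is only reached
+            # when no other instance was detected: otherwise the torrent argument is
+            # forwarded to the already-running instance over the local
+            # instance-to-instance socket and handled by i2ithread_readlinecallback
+            # as a "START <torrentfile-or-url>" line, e.g. (path illustrative only):
+            #
+            #     Instance2InstanceClient(I2I_LISTENPORT, 'START', '/tmp/movie.torrent')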
+ #os.chdir(installdir) + + # Launch first abc single instance + app = ABCApp(False, params, single_instance_checker, installdir) + configpath = app.getConfigPath() + app.MainLoop() + + print "Client shutting down. Sleeping for a few seconds to allow other threads to finish" + sleep(1) + except: + print_exc() + + # This is the right place to close the database, unfortunately Linux has + # a problem, see ABCFrame.OnCloseWindow + # + #if sys.platform != 'linux2': + # tribler_done(configpath) + #os._exit(0) + +if __name__ == '__main__': + run() + diff --git a/tribler-mod/Tribler/Main/vwxGUI/ColumnHeader.py b/tribler-mod/Tribler/Main/vwxGUI/ColumnHeader.py new file mode 100644 index 0000000..c0c2212 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/ColumnHeader.py @@ -0,0 +1,290 @@ +from time import localtime, strftime +import wx, os, sys +from Tribler.Main.vwxGUI.bgPanel import ImagePanel +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility + +from Tribler.Main.vwxGUI.TriblerStyles import TriblerStyles +from Tribler.Main.vwxGUI.standardFilter import filesFilter + +from font import * + +class ColumnHeader(wx.Panel): + + bitmapOrderUp = 'upSort' + bitmapOrderDown = 'downSort' + + def __init__(self, parent, title, picture, order, tip, sorting, reverse, component, dummy): + wx.Panel.__init__(self, parent, -1) + self.type = None + self.triblerStyles = TriblerStyles.getInstance() + + self.guiUtility = GUIUtility.getInstance() + self.utility = self.guiUtility.utility + + self.unselectedColour = self.triblerStyles.sortingColumns(1) + self.selectedColour = self.triblerStyles.sortingColumns(2) + self.dummy = dummy + self.component = component + + self.addComponents(title, picture, tip, component) + if component == None: +# print '1. component = None' + self.setOrdering(order) + + self.sorting = sorting + if reverse: + self.reverse = True + else: + self.reverse = False + + + def addComponents(self, title, picture, tip, component): + self.SetBackgroundColour(self.unselectedColour) + self.hSizer = wx.BoxSizer(wx.HORIZONTAL) + self.text = None + self.icon = None + self.title = title + + if self.component == None or self.component == 'comboboxSort': + if picture: + self.icon = ImagePanel(self) + self.icon.setBitmapFromFile(picture) + self.icon.setBackground(self.unselectedColour) + self.hSizer.Add(self.icon, 0, wx.TOP,1 ) + if title: + if not picture: + self.hSizer.Add([10,5],0,wx.EXPAND|wx.FIXED_MINSIZE,3) + self.text = wx.StaticText(self, -1, title) + self.triblerStyles.setDarkText(self.text) + self.hSizer.Add(self.text, 1, wx.TOP, 3) + + self.dummy = self.dummy or (not picture and not title) + if picture == None and title == None: + raise Exception('No text nor an icon in columnheader') + + if False: + self.sortIcon = ImagePanel(self) + self.sortIcon.setBackground(self.unselectedColour) + self.sortIcon.Hide() + self.hSizer.Add(self.sortIcon, 0, wx.TOP, 1) + self.Bind(wx.EVT_MOUSE_EVENTS, self.mouseAction) + else: + self.sortIcon = None + + # 2.8.4.2 return value of GetChildren changed + wl = [self] + for c in self.GetChildren(): + wl.append(c) + for element in wl: + ##element.Bind(wx.EVT_LEFT_UP, self.clicked) + ##element.Bind(wx.EVT_MOUSE_EVENTS, self.mouseAction) + element.SetToolTipString(tip) + + elif self.component == 'comboboxFilter': + self.sortIcon = None + #self.filesFilter = filesFilter(self) + #self.hSizer.Add(self.filesFilter, 1, wx.BOTTOM, 0) + + self.SetSizer(self.hSizer) + self.SetAutoLayout(True) + self.hSizer.Layout() + + def setText(self, t): + self.text.SetLabel(t) + + def 
setOrdering(self, type): + # up, down or none + #print 'Set ordering to %s' % type + self.type = type + if self.component == None: + if type == 'up': + self.setColour(self.selectedColour) + if self.sortIcon: + self.sortIcon.setBitmapFromFile(self.bitmapOrderUp) + if not self.sortIcon.IsShown(): + self.sortIcon.Show() + elif type == 'down': + self.setColour(self.selectedColour) + if self.sortIcon: + self.sortIcon.setBitmapFromFile(self.bitmapOrderDown) + if not self.sortIcon.IsShown(): + self.sortIcon.Show() + else: + if self.sortIcon: + self.sortIcon.setBitmapFromFile(self.bitmapOrderDown) + self.sortIcon.Hide() + self.setColour(self.unselectedColour) + + self.GetSizer().Layout() + + def clicked(self, event): + if self.dummy: + return + if not self.type or self.type == 'up': + newType = 'down' + elif self.type == 'down': + newType = 'up' + self.setOrdering(newType) + self.GetParent().setOrdering(self, newType) + + + + def mouseAction(self, event): + event.Skip() + if self.type: + return + colour = None + if event.Entering(): + colour = self.selectedColour + + elif event.Leaving(): + if sys.platform == 'win32': + position = event.GetPosition() + for i in xrange(2): + position[i]+=event.GetEventObject().GetPosition()[i] + position[i]-=self.GetPosition()[i] + size = self.GetSize() + + if position[0]<0 or position[0]>=size[0] or position[1]<0 or position[1]>=size[1]: + colour = self.unselectedColour + else: + colour = self.unselectedColour + if colour: + self.setColour(colour) + + + def setColour(self, colour): + for element in [self, self.icon, self.sortIcon, self.text]: + if element: + if element.__class__ == ImagePanel: + element.setBackground(colour) + element.SetBackgroundColour(colour) + self.Refresh() + +class ColumnHeaderBar(wx.Panel): + + def __init__(self, parent, itemPanel): +# print 'itemPanel = %s' % itemPanel + self.itemPanel = itemPanel + wx.Panel.__init__(self, parent, -1) + self.columns = [] + self.dynamicColumnName = None + self.guiUtility = GUIUtility.getInstance() + self.utility = self.guiUtility.utility + + self.addComponents() + #self.SetMinSize((-1,30)) + self.Show(True) + + def addComponents(self): + self.hSizer = wx.BoxSizer(wx.HORIZONTAL) + #self.hSizer.Add([0,20],0,wx.FIXED_MINSIZE,0) + self.triblerStyles = TriblerStyles.getInstance() +# self.filesFilter = filesFilter() +# self.filesFilter = testFilter(self) +# print 'filesFilter = %s' % self.filesFilter +# self.filesFilter.SetSize((30,20)) + + + + ##cornerTL_file = os.path.join(self.utility.getPath(),"Tribler","Main","vwxGUI","images","5.0","wrapCorTL.png") + ##self.cornerTL_image = wx.Image(cornerTL_file, wx.BITMAP_TYPE_ANY) + ##self.cornerTL = wx.StaticBitmap(self, -1, wx.BitmapFromImage(self.cornerTL_image)) + + ##self.hSizer.Add(self.cornerTL,0,0,0) + + + + columns = self.itemPanel.getColumns() + currentSorting = self.guiUtility.standardOverview.getSorting() + comboboxSortChoices = [] + #print 'currentSorting: %s' % str(currentSorting) + for dict in columns: +# colours = (wx.Colour(203,203,203), wx.Colour(223,223,223)) + if (type(currentSorting) == str and currentSorting == dict['sort'] or + type(currentSorting) == tuple and currentSorting[0] == dict['sort']): + if (len(currentSorting) == 2 and currentSorting[1] == 'increase') ^ dict.get('reverse', False): + beginorder = 'up' + else: + beginorder = 'down' + else: + beginorder = None + header = ColumnHeader(self, dict.get('title'), dict.get('pic'), beginorder, dict['tip'], dict['sort'], dict.get('reverse'), dict.get('component'), dict.get('dummy', False)) + 
self.columns.append(header) + + if dict.get('component') != 'comboboxSort' : + self.hSizer.Add(header, dict.get('weight',0), wx.EXPAND|wx.BOTTOM, 0) + + if columns.index(dict) != len(columns)-1: + line = wx.StaticLine(self,-1,wx.DefaultPosition, ((0,0)), wx.LI_VERTICAL) + self.SetBackgroundColour(self.triblerStyles.sortingColumns(2)) + self.hSizer.Add(line, 0, wx.LEFT|wx.RIGHT|wx.EXPAND, 0) + if dict.get('width'): + header.SetSize((dict['width']+6, -1)) + header.SetMinSize((dict['width']+6, -1)) + else: + if dict.get('width'): + header.SetSize((dict['width']+3, -1)) + header.SetMinSize((dict['width']+3, -1)) + + else: + header.Hide() + comboboxSortChoices.append(header.title) + + +# print comboboxSortChoices[0] + if len(comboboxSortChoices) != 0: + self.extraSorting = wx.ComboBox(self,-1,comboboxSortChoices[0], wx.DefaultPosition,wx.Size(70,10),comboboxSortChoices, wx.FIXED_MINSIZE|wx.CB_DROPDOWN|wx.CB_READONLY) + self.extraSorting.Bind(wx.EVT_COMBOBOX, self.extraSortingMouseaction) + self.hSizer.Add(self.extraSorting, 0, wx.EXPAND|wx.BOTTOM, 0) + self.dynamicColumnName = comboboxSortChoices[0] +# print 'tb > comboboxSortChoices[0] = %s' % comboboxSortChoices[0] + +# self.dynamicColumnName = comboboxSortChoices[0].sorting +# self.extraSortingMouseaction(event='') + + #self.SetBackgroundColour(wx.Colour(100,100,100)) +# self.hSizer.Add(self.filesFilter, 0, wx.BOTTOM|wx.FIXED_MINSIZE, 0) + self.hSizer.Layout() + self.SetSizer(self.hSizer) + self.SetAutoLayout(True) + + def setOrdering(self, column, ordering): + for header in self.columns: + if header != column: + header.setOrdering(None) + if ordering == 'up' and not column.reverse or ordering == 'down' and column.reverse: + reverse = True + else: + reverse = False + oldfilter = self.guiUtility.standardOverview.getFilter() + if oldfilter: + self.sorting = oldfilter.getState().copy() + else: + from Tribler.Main.vwxGUI.standardGrid import GridState + self.sorting = GridState(self.guiUtility.standardOverview.mode, 'all', None) # peerview has no filter + + self.sorting.sort = column.sorting + self.sorting.reverse = reverse + self.guiUtility.standardOverview.filterChanged(self.sorting) + + def getSorting(self): + return self.sorting + + def extraSortingMouseaction(self, event): + selected = self.extraSorting.GetValue() + selectedColumn = [c for c in self.columns if c.title == selected] + self.dynamicColumnName = selectedColumn[0].sorting + selectedColumn[0].clicked(event) + + def getCategoryCombo(self): + for header in self.columns: + try: + return header.filesFilter + except: + pass + return None + + + + diff --git a/tribler-mod/Tribler/Main/vwxGUI/ColumnHeader.py.bak b/tribler-mod/Tribler/Main/vwxGUI/ColumnHeader.py.bak new file mode 100644 index 0000000..b835b37 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/ColumnHeader.py.bak @@ -0,0 +1,289 @@ +import wx, os, sys +from Tribler.Main.vwxGUI.bgPanel import ImagePanel +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility + +from Tribler.Main.vwxGUI.TriblerStyles import TriblerStyles +from Tribler.Main.vwxGUI.standardFilter import filesFilter + +from font import * + +class ColumnHeader(wx.Panel): + + bitmapOrderUp = 'upSort' + bitmapOrderDown = 'downSort' + + def __init__(self, parent, title, picture, order, tip, sorting, reverse, component, dummy): + wx.Panel.__init__(self, parent, -1) + self.type = None + self.triblerStyles = TriblerStyles.getInstance() + + self.guiUtility = GUIUtility.getInstance() + self.utility = self.guiUtility.utility + + self.unselectedColour = 
self.triblerStyles.sortingColumns(1) + self.selectedColour = self.triblerStyles.sortingColumns(2) + self.dummy = dummy + self.component = component + + self.addComponents(title, picture, tip, component) + if component == None: +# print '1. component = None' + self.setOrdering(order) + + self.sorting = sorting + if reverse: + self.reverse = True + else: + self.reverse = False + + + def addComponents(self, title, picture, tip, component): + self.SetBackgroundColour(self.unselectedColour) + self.hSizer = wx.BoxSizer(wx.HORIZONTAL) + self.text = None + self.icon = None + self.title = title + + if self.component == None or self.component == 'comboboxSort': + if picture: + self.icon = ImagePanel(self) + self.icon.setBitmapFromFile(picture) + self.icon.setBackground(self.unselectedColour) + self.hSizer.Add(self.icon, 0, wx.TOP,1 ) + if title: + if not picture: + self.hSizer.Add([10,5],0,wx.EXPAND|wx.FIXED_MINSIZE,3) + self.text = wx.StaticText(self, -1, title) + self.triblerStyles.setDarkText(self.text) + self.hSizer.Add(self.text, 1, wx.TOP, 3) + + self.dummy = self.dummy or (not picture and not title) + if picture == None and title == None: + raise Exception('No text nor an icon in columnheader') + + if False: + self.sortIcon = ImagePanel(self) + self.sortIcon.setBackground(self.unselectedColour) + self.sortIcon.Hide() + self.hSizer.Add(self.sortIcon, 0, wx.TOP, 1) + self.Bind(wx.EVT_MOUSE_EVENTS, self.mouseAction) + else: + self.sortIcon = None + + # 2.8.4.2 return value of GetChildren changed + wl = [self] + for c in self.GetChildren(): + wl.append(c) + for element in wl: + ##element.Bind(wx.EVT_LEFT_UP, self.clicked) + ##element.Bind(wx.EVT_MOUSE_EVENTS, self.mouseAction) + element.SetToolTipString(tip) + + elif self.component == 'comboboxFilter': + self.sortIcon = None + #self.filesFilter = filesFilter(self) + #self.hSizer.Add(self.filesFilter, 1, wx.BOTTOM, 0) + + self.SetSizer(self.hSizer) + self.SetAutoLayout(True) + self.hSizer.Layout() + + def setText(self, t): + self.text.SetLabel(t) + + def setOrdering(self, type): + # up, down or none + #print 'Set ordering to %s' % type + self.type = type + if self.component == None: + if type == 'up': + self.setColour(self.selectedColour) + if self.sortIcon: + self.sortIcon.setBitmapFromFile(self.bitmapOrderUp) + if not self.sortIcon.IsShown(): + self.sortIcon.Show() + elif type == 'down': + self.setColour(self.selectedColour) + if self.sortIcon: + self.sortIcon.setBitmapFromFile(self.bitmapOrderDown) + if not self.sortIcon.IsShown(): + self.sortIcon.Show() + else: + if self.sortIcon: + self.sortIcon.setBitmapFromFile(self.bitmapOrderDown) + self.sortIcon.Hide() + self.setColour(self.unselectedColour) + + self.GetSizer().Layout() + + def clicked(self, event): + if self.dummy: + return + if not self.type or self.type == 'up': + newType = 'down' + elif self.type == 'down': + newType = 'up' + self.setOrdering(newType) + self.GetParent().setOrdering(self, newType) + + + + def mouseAction(self, event): + event.Skip() + if self.type: + return + colour = None + if event.Entering(): + colour = self.selectedColour + + elif event.Leaving(): + if sys.platform == 'win32': + position = event.GetPosition() + for i in xrange(2): + position[i]+=event.GetEventObject().GetPosition()[i] + position[i]-=self.GetPosition()[i] + size = self.GetSize() + + if position[0]<0 or position[0]>=size[0] or position[1]<0 or position[1]>=size[1]: + colour = self.unselectedColour + else: + colour = self.unselectedColour + if colour: + self.setColour(colour) + + + def 
setColour(self, colour): + for element in [self, self.icon, self.sortIcon, self.text]: + if element: + if element.__class__ == ImagePanel: + element.setBackground(colour) + element.SetBackgroundColour(colour) + self.Refresh() + +class ColumnHeaderBar(wx.Panel): + + def __init__(self, parent, itemPanel): +# print 'itemPanel = %s' % itemPanel + self.itemPanel = itemPanel + wx.Panel.__init__(self, parent, -1) + self.columns = [] + self.dynamicColumnName = None + self.guiUtility = GUIUtility.getInstance() + self.utility = self.guiUtility.utility + + self.addComponents() + #self.SetMinSize((-1,30)) + self.Show(True) + + def addComponents(self): + self.hSizer = wx.BoxSizer(wx.HORIZONTAL) + #self.hSizer.Add([0,20],0,wx.FIXED_MINSIZE,0) + self.triblerStyles = TriblerStyles.getInstance() +# self.filesFilter = filesFilter() +# self.filesFilter = testFilter(self) +# print 'filesFilter = %s' % self.filesFilter +# self.filesFilter.SetSize((30,20)) + + + + ##cornerTL_file = os.path.join(self.utility.getPath(),"Tribler","Main","vwxGUI","images","5.0","wrapCorTL.png") + ##self.cornerTL_image = wx.Image(cornerTL_file, wx.BITMAP_TYPE_ANY) + ##self.cornerTL = wx.StaticBitmap(self, -1, wx.BitmapFromImage(self.cornerTL_image)) + + ##self.hSizer.Add(self.cornerTL,0,0,0) + + + + columns = self.itemPanel.getColumns() + currentSorting = self.guiUtility.standardOverview.getSorting() + comboboxSortChoices = [] + #print 'currentSorting: %s' % str(currentSorting) + for dict in columns: +# colours = (wx.Colour(203,203,203), wx.Colour(223,223,223)) + if (type(currentSorting) == str and currentSorting == dict['sort'] or + type(currentSorting) == tuple and currentSorting[0] == dict['sort']): + if (len(currentSorting) == 2 and currentSorting[1] == 'increase') ^ dict.get('reverse', False): + beginorder = 'up' + else: + beginorder = 'down' + else: + beginorder = None + header = ColumnHeader(self, dict.get('title'), dict.get('pic'), beginorder, dict['tip'], dict['sort'], dict.get('reverse'), dict.get('component'), dict.get('dummy', False)) + self.columns.append(header) + + if dict.get('component') != 'comboboxSort' : + self.hSizer.Add(header, dict.get('weight',0), wx.EXPAND|wx.BOTTOM, 0) + + if columns.index(dict) != len(columns)-1: + line = wx.StaticLine(self,-1,wx.DefaultPosition, ((0,0)), wx.LI_VERTICAL) + self.SetBackgroundColour(self.triblerStyles.sortingColumns(2)) + self.hSizer.Add(line, 0, wx.LEFT|wx.RIGHT|wx.EXPAND, 0) + if dict.get('width'): + header.SetSize((dict['width']+6, -1)) + header.SetMinSize((dict['width']+6, -1)) + else: + if dict.get('width'): + header.SetSize((dict['width']+3, -1)) + header.SetMinSize((dict['width']+3, -1)) + + else: + header.Hide() + comboboxSortChoices.append(header.title) + + +# print comboboxSortChoices[0] + if len(comboboxSortChoices) != 0: + self.extraSorting = wx.ComboBox(self,-1,comboboxSortChoices[0], wx.DefaultPosition,wx.Size(70,10),comboboxSortChoices, wx.FIXED_MINSIZE|wx.CB_DROPDOWN|wx.CB_READONLY) + self.extraSorting.Bind(wx.EVT_COMBOBOX, self.extraSortingMouseaction) + self.hSizer.Add(self.extraSorting, 0, wx.EXPAND|wx.BOTTOM, 0) + self.dynamicColumnName = comboboxSortChoices[0] +# print 'tb > comboboxSortChoices[0] = %s' % comboboxSortChoices[0] + +# self.dynamicColumnName = comboboxSortChoices[0].sorting +# self.extraSortingMouseaction(event='') + + #self.SetBackgroundColour(wx.Colour(100,100,100)) +# self.hSizer.Add(self.filesFilter, 0, wx.BOTTOM|wx.FIXED_MINSIZE, 0) + self.hSizer.Layout() + self.SetSizer(self.hSizer) + self.SetAutoLayout(True) + + def setOrdering(self, 
column, ordering): + for header in self.columns: + if header != column: + header.setOrdering(None) + if ordering == 'up' and not column.reverse or ordering == 'down' and column.reverse: + reverse = True + else: + reverse = False + oldfilter = self.guiUtility.standardOverview.getFilter() + if oldfilter: + self.sorting = oldfilter.getState().copy() + else: + from Tribler.Main.vwxGUI.standardGrid import GridState + self.sorting = GridState(self.guiUtility.standardOverview.mode, 'all', None) # peerview has no filter + + self.sorting.sort = column.sorting + self.sorting.reverse = reverse + self.guiUtility.standardOverview.filterChanged(self.sorting) + + def getSorting(self): + return self.sorting + + def extraSortingMouseaction(self, event): + selected = self.extraSorting.GetValue() + selectedColumn = [c for c in self.columns if c.title == selected] + self.dynamicColumnName = selectedColumn[0].sorting + selectedColumn[0].clicked(event) + + def getCategoryCombo(self): + for header in self.columns: + try: + return header.filesFilter + except: + pass + return None + + + + diff --git a/tribler-mod/Tribler/Main/vwxGUI/FilesItemDetailsSummary.py b/tribler-mod/Tribler/Main/vwxGUI/FilesItemDetailsSummary.py new file mode 100644 index 0000000..b7e4238 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/FilesItemDetailsSummary.py @@ -0,0 +1,310 @@ +from time import localtime, strftime +import wx +from Tribler.Main.vwxGUI.tribler_topButton import tribler_topButton, SwitchButton, TestButton +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility +from Tribler.Main.vwxGUI.TriblerStyles import TriblerStyles +from Tribler.Main.vwxGUI.TextButton import * +from Tribler.Main.vwxGUI.bgPanel import * + +from Tribler.Video.VideoPlayer import VideoPlayer + +from Tribler.Core.CacheDB.sqlitecachedb import bin2str +from Tribler.Core.simpledefs import * +from Tribler.Core.Utilities.utilities import * + + +class FilesItemDetailsSummary(bgPanel): + + def __init__(self, parent, torrentHash, torrent, web2data = None): + wx.Panel.__init__(self, parent, -1) + + self.guiUtility = GUIUtility.getInstance() + self.utility = self.guiUtility.utility + self.mcdb = self.utility.session.open_dbhandler(NTFY_MODERATIONCAST) + self.vcdb = self.utility.session.open_dbhandler(NTFY_VOTECAST) + + self.session = self.utility.session + self.torrent_db = self.session.open_dbhandler(NTFY_TORRENTS) + + + self.infohash = torrentHash + self.torrent = torrent + + self.addComponents() + + + self.tile = True + self.backgroundColour = wx.Colour(102,102,102) + self.searchBitmap('blue_long.png') + self.createBackgroundImage() + + + self.gridmgr = parent.parent.getGridManager() + + self.Refresh(True) + self.Update() + + + + def addComponents(self): + self.triblerStyles = TriblerStyles.getInstance() + ##self.SetMinSize((300,40)) + + self.hSizer0 = wx.BoxSizer(wx.HORIZONTAL) + + self.vSizer = wx.BoxSizer(wx.VERTICAL) + + ## text information + + ##self.Seeders = wx.StaticText(self,-1,"Seeders:",wx.Point(0,0),wx.Size(125,22)) + ##self.Seeders.SetMinSize((125,14)) + ##self.triblerStyles.setDarkText(self.Seeders) + + ##self.NumSeeders = wx.StaticText(self,-1,"",wx.Point(0,0),wx.Size(125,22)) + ##self.NumSeeders.SetMinSize((125,14)) + ##self.triblerStyles.setDarkText(self.NumSeeders) + ##self.NumSeeders.SetLabel('%s' % self.torrent['num_seeders']) + + + ##self.Leechers = wx.StaticText(self,-1,"Leechers:",wx.Point(0,0),wx.Size(125,22)) + ##self.Leechers.SetMinSize((125,14)) + ##self.triblerStyles.setDarkText(self.Leechers) + + ##self.NumLeechers = 
wx.StaticText(self,-1,"",wx.Point(0,0),wx.Size(125,22)) + ##self.NumLeechers.SetMinSize((125,14)) + ##self.triblerStyles.setDarkText(self.NumLeechers) + ##self.NumLeechers.SetLabel('%s' % self.torrent['num_leechers']) + + self.Popularity = wx.StaticText(self,-1,"Popularity:",wx.Point(0,0),wx.Size(125,22)) + self.Popularity.SetMinSize((125,14)) + self.triblerStyles.setDarkText(self.Popularity) + + self.Popularity_info = wx.StaticText(self,-1,"",wx.Point(0,0),wx.Size(125,22)) + self.Popularity_info.SetMinSize((125,14)) + self.triblerStyles.setDarkText(self.Popularity_info) + pop = self.torrent['num_seeders'] + self.torrent['num_leechers'] + if pop > 0: + if pop == 1: + self.Popularity_info.SetLabel('%s person' %(pop)) + else: + self.Popularity_info.SetLabel('%s people' %(pop)) + else: + self.Popularity_info.SetLabel('unknown') + + + ##self.TriblerSources = wx.StaticText(self,-1,"Tribler sources:",wx.Point(0,0),wx.Size(125,22)) + ##self.TriblerSources.SetMinSize((125,14)) + ##self.triblerStyles.setDarkText(self.TriblerSources) + + self.CreationDate = wx.StaticText(self,-1,"Creation date:",wx.Point(0,0),wx.Size(125,22)) + self.CreationDate.SetMinSize((125,14)) + self.triblerStyles.setDarkText(self.CreationDate) + + self.CreationDate_info = wx.StaticText(self,-1,"",wx.Point(0,0),wx.Size(125,22)) + self.CreationDate_info.SetMinSize((125,14)) + self.triblerStyles.setDarkText(self.CreationDate_info) + self.CreationDate_info.SetLabel(friendly_time(self.torrent['creation_date'])) + + self.ModeratorName = wx.StaticText(self,-1,"Moderated by: ",wx.Point(0,0),wx.Size(125,22)) + self.ModeratorName.SetMinSize((125,14)) + self.triblerStyles.setLightText(self.ModeratorName) + + self.ModeratorName_info = wx.StaticText(self,-1,"",wx.Point(0,0),wx.Size(125,22)) + self.ModeratorName_info.SetMinSize((125,14)) + self.triblerStyles.setLightText(self.ModeratorName_info) + + + ## hSizer2 + self.hSizer2 = wx.BoxSizer(wx.HORIZONTAL) + + + + # check for moderation + if self.infohash is not None and self.mcdb.hasModeration(bin2str(self.infohash)): + moderation = self.mcdb.getModeration(bin2str(self.infohash)) + mod_name = moderation[1] + + # If the moderator is himself, he should not be able to rate the file properties + if moderation[0] != bin2str(self.session.get_permid()): + self.Rate = wx.StaticText(self,-1,"Rate these file properties as ",wx.Point(0,0),wx.Size(160,22)) + self.triblerStyles.setLightText(self.Rate) + self.Or = wx.StaticText(self,-1," or",wx.Point(0,0),wx.Size(25,22)) + self.triblerStyles.setLightText(self.Or) + + self.fake = TestButton(self, -1, name='fake') + self.fake.SetMinSize((35,16)) + self.fake.SetSize((35,16)) + self.guiUtility.fakeButton = self.fake + + self.real = TestButton(self, -1, name='real') + self.real.SetMinSize((35,16)) + self.real.SetSize((35,16)) + self.guiUtility.realButton = self.real + + self.hSizer2.Add(self.Rate,0,wx.LEFT|wx.FIXED_MINSIZE,0) + self.hSizer2.Add(self.fake,0,wx.LEFT|wx.FIXED_MINSIZE,5) + self.hSizer2.Add(self.Or,0,wx.LEFT|wx.FIXED_MINSIZE,5) + self.hSizer2.Add(self.real,0,wx.LEFT|wx.FIXED_MINSIZE,5) + + else: + mod_name = "None" + # disable fake and real buttons + ##self.fake.setState(False) + ##self.real.setState(False) + + self.ModeratorName_info.SetLabel(mod_name) + + self.vSizer.Add([0,10], 0, wx.BOTTOM|wx.FIXED_MINSIZE, 0) + + ##self.hSizer3 = wx.BoxSizer(wx.HORIZONTAL) + ##self.hSizer3.Add(self.Seeders,0,wx.FIXED_MINSIZE,5) + ##self.hSizer3.Add(self.NumSeeders,0,wx.FIXED_MINSIZE,5) + ##self.vSizer.Add(self.hSizer3,0,wx.LEFT|wx.FIXED_MINSIZE,5) + + 
##self.vSizer.Add([0,2], 0, wx.BOTTOM|wx.FIXED_MINSIZE, 0) + + self.hSizer4 = wx.BoxSizer(wx.HORIZONTAL) + ##self.hSizer4.Add(self.Leechers,0,wx.FIXED_MINSIZE,5) + ##self.hSizer4.Add(self.NumLeechers,0,wx.FIXED_MINSIZE,5) + self.hSizer4.Add(self.Popularity,0,wx.FIXED_MINSIZE,5) + self.hSizer4.Add(self.Popularity_info,0,wx.FIXED_MINSIZE,5) + self.vSizer.Add(self.hSizer4,0,wx.LEFT|wx.FIXED_MINSIZE,5) + + ##self.vSizer.Add([0,2], 0, wx.BOTTOM|wx.FIXED_MINSIZE, 0) + ##self.vSizer.Add(self.TriblerSources,0,wx.LEFT|wx.FIXED_MINSIZE,5) + ##self.vSizer.Add([0,2], 0, wx.BOTTOM|wx.FIXED_MINSIZE, 0) + + self.hSizer5 = wx.BoxSizer(wx.HORIZONTAL) + self.hSizer5.Add(self.CreationDate,0,wx.FIXED_MINSIZE,5) + self.hSizer5.Add(self.CreationDate_info,0,wx.FIXED_MINSIZE,5) + self.vSizer.Add(self.hSizer5,0,wx.LEFT|wx.FIXED_MINSIZE,5) + + + ##self.vSizer.Add([0,20], 0, wx.BOTTOM|wx.FIXED_MINSIZE, 0) + self.vSizer.Add(self.hSizer2,0,wx.LEFT|wx.FIXED_MINSIZE,5) + self.vSizer.Add([0,2], 0, wx.BOTTOM|wx.FIXED_MINSIZE, 0) + + self.hSizer7 = wx.BoxSizer(wx.HORIZONTAL) + self.hSizer7.Add(self.ModeratorName,0,wx.FIXED_MINSIZE,5) + self.hSizer7.Add(self.ModeratorName_info,0,wx.FIXED_MINSIZE,5) + self.vSizer.Add(self.hSizer7,0,wx.LEFT|wx.FIXED_MINSIZE,5) + + self.hSizer = wx.BoxSizer(wx.HORIZONTAL) + + self.hSizer.Add([20,10], 0, wx.BOTTOM|wx.FIXED_MINSIZE, 1) + + ##self.save = wx.StaticText(self,-1,"save",wx.Point(0,0),wx.Size(50,22)) + ##self.save.SetMinSize((50,14)) + ##self.save.SetForegroundColour(wx.RED) + + + + self.download = tribler_topButton(self, -1, name='download') + self.download.SetMinSize((20,20)) + self.download.SetSize((20,20)) + + ##self.select_files = tribler_topButton(self, -1, name='select_files') + ##self.select_files.SetMinSize((148,16)) + ##self.select_files.SetSize((148,16)) + + ##self.view_related_files = tribler_topButton(self, -1, name='view_related_files') + ##self.view_related_files.SetMinSize((116,16)) + ##self.view_related_files.SetSize((116,16)) + + ##self.edit = tribler_topButton(self, -1, name='edit') + ##self.edit.SetMinSize((40,16)) + ##self.edit.SetSize((40,16)) + + + self.hSizer.Add(self.vSizer, 0, wx.FIXED_MINSIZE, 10) + + + self.play_big = SwitchButton(self, -1, name='playbig') + self.play_big.setToggled(True) + self.play_big.Bind(wx.EVT_LEFT_UP, self.playbig_clicked) + + def is_playable_callback(torrent, playable): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "PLAYABLE : " , playable + self.play_big.setToggled(playable) + + if not self.guiUtility.standardDetails.torrent_is_playable(callback=is_playable_callback): + + self.play_big.setToggled(False) + + + self.vSizer2 = wx.BoxSizer(wx.VERTICAL) + self.vSizer2.Add([0,20], 0, wx.FIXED_MINSIZE, 0) + self.vSizer2.Add(self.play_big, 0, wx.FIXED_MINSIZE, 4) + + + self.vSizer3 = wx.BoxSizer(wx.VERTICAL) + self.vSizer3.Add([0,37], 0, wx.FIXED_MINSIZE, 0) + self.vSizer3.Add(self.download, 0, wx.FIXED_MINSIZE, 4) + + + + + + ##self.vSizer = wx.BoxSizer(wx.VERTICAL) + ##self.hSizer.Add(self.vSizer, 0, wx.TOP, 25) + + ##self.downloading = self.data and self.data.get('myDownloadHistory') + + self.hSizer0.Add(self.hSizer, 1, wx.EXPAND , 10) + self.hSizer0.Add(self.vSizer2, 0 , wx.FIXED_MINSIZE , 4) + self.hSizer0.Add([10,0], 0, wx.FIXED_MINSIZE, 0) + self.hSizer0.Add(self.vSizer3, 0 , wx.FIXED_MINSIZE , 4) + self.hSizer0.Add([50,10], 0, wx.FIXED_MINSIZE, 0) + + self.SetSizer(self.hSizer0) + self.SetAutoLayout(1); + self.Layout() + + + + + + def playbig_clicked(self,event): + if self.play_big.isToggled(): + + ds = 
self.torrent.get('ds') + + videoplayer = self._get_videoplayer(exclude=ds) + videoplayer.stop_playback() # stop current playback + videoplayer.show_loading() + + ##self.play_big.setToggled() + ##self.guiUtility.buttonClicked(event) + if ds is None: + self.guiUtility.standardDetails.download(vodmode=True) + else: + self.play(ds) + + self.guiUtility.standardDetails.setVideodata(self.guiUtility.standardDetails.getData()) + self._get_videoplayer(exclude=ds).videoframe.get_videopanel().SetLoadingText(self.guiUtility.standardDetails.getVideodata()['name']) + if sys.platform == 'darwin': + self._get_videoplayer(exclude=ds).videoframe.show_videoframe() + self._get_videoplayer(exclude=ds).videoframe.get_videopanel().Refresh() + self._get_videoplayer(exclude=ds).videoframe.get_videopanel().Layout() + + def play(self,ds): + + + self._get_videoplayer(exclude=ds).play(ds) + + + def _get_videoplayer(self, exclude=None): + """ + Returns the VideoPlayer instance and ensures that it knows if + there are other downloads running. + """ + other_downloads = False + for ds in self.gridmgr.get_dslist(): + if ds is not exclude and ds.get_status() not in (DLSTATUS_STOPPED, DLSTATUS_STOPPED_ON_ERROR): + other_downloads = True + break + + videoplayer = VideoPlayer.getInstance() + videoplayer.set_other_downloads(other_downloads) + return videoplayer + diff --git a/tribler-mod/Tribler/Main/vwxGUI/FilesItemDetailsSummary.py.bak b/tribler-mod/Tribler/Main/vwxGUI/FilesItemDetailsSummary.py.bak new file mode 100644 index 0000000..82e18e1 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/FilesItemDetailsSummary.py.bak @@ -0,0 +1,309 @@ +import wx +from Tribler.Main.vwxGUI.tribler_topButton import tribler_topButton, SwitchButton, TestButton +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility +from Tribler.Main.vwxGUI.TriblerStyles import TriblerStyles +from Tribler.Main.vwxGUI.TextButton import * +from Tribler.Main.vwxGUI.bgPanel import * + +from Tribler.Video.VideoPlayer import VideoPlayer + +from Tribler.Core.CacheDB.sqlitecachedb import bin2str +from Tribler.Core.simpledefs import * +from Tribler.Core.Utilities.utilities import * + + +class FilesItemDetailsSummary(bgPanel): + + def __init__(self, parent, torrentHash, torrent, web2data = None): + wx.Panel.__init__(self, parent, -1) + + self.guiUtility = GUIUtility.getInstance() + self.utility = self.guiUtility.utility + self.mcdb = self.utility.session.open_dbhandler(NTFY_MODERATIONCAST) + self.vcdb = self.utility.session.open_dbhandler(NTFY_VOTECAST) + + self.session = self.utility.session + self.torrent_db = self.session.open_dbhandler(NTFY_TORRENTS) + + + self.infohash = torrentHash + self.torrent = torrent + + self.addComponents() + + + self.tile = True + self.backgroundColour = wx.Colour(102,102,102) + self.searchBitmap('blue_long.png') + self.createBackgroundImage() + + + self.gridmgr = parent.parent.getGridManager() + + self.Refresh(True) + self.Update() + + + + def addComponents(self): + self.triblerStyles = TriblerStyles.getInstance() + ##self.SetMinSize((300,40)) + + self.hSizer0 = wx.BoxSizer(wx.HORIZONTAL) + + self.vSizer = wx.BoxSizer(wx.VERTICAL) + + ## text information + + ##self.Seeders = wx.StaticText(self,-1,"Seeders:",wx.Point(0,0),wx.Size(125,22)) + ##self.Seeders.SetMinSize((125,14)) + ##self.triblerStyles.setDarkText(self.Seeders) + + ##self.NumSeeders = wx.StaticText(self,-1,"",wx.Point(0,0),wx.Size(125,22)) + ##self.NumSeeders.SetMinSize((125,14)) + ##self.triblerStyles.setDarkText(self.NumSeeders) + ##self.NumSeeders.SetLabel('%s' % 
self.torrent['num_seeders']) + + + ##self.Leechers = wx.StaticText(self,-1,"Leechers:",wx.Point(0,0),wx.Size(125,22)) + ##self.Leechers.SetMinSize((125,14)) + ##self.triblerStyles.setDarkText(self.Leechers) + + ##self.NumLeechers = wx.StaticText(self,-1,"",wx.Point(0,0),wx.Size(125,22)) + ##self.NumLeechers.SetMinSize((125,14)) + ##self.triblerStyles.setDarkText(self.NumLeechers) + ##self.NumLeechers.SetLabel('%s' % self.torrent['num_leechers']) + + self.Popularity = wx.StaticText(self,-1,"Popularity:",wx.Point(0,0),wx.Size(125,22)) + self.Popularity.SetMinSize((125,14)) + self.triblerStyles.setDarkText(self.Popularity) + + self.Popularity_info = wx.StaticText(self,-1,"",wx.Point(0,0),wx.Size(125,22)) + self.Popularity_info.SetMinSize((125,14)) + self.triblerStyles.setDarkText(self.Popularity_info) + pop = self.torrent['num_seeders'] + self.torrent['num_leechers'] + if pop > 0: + if pop == 1: + self.Popularity_info.SetLabel('%s person' %(pop)) + else: + self.Popularity_info.SetLabel('%s people' %(pop)) + else: + self.Popularity_info.SetLabel('unknown') + + + ##self.TriblerSources = wx.StaticText(self,-1,"Tribler sources:",wx.Point(0,0),wx.Size(125,22)) + ##self.TriblerSources.SetMinSize((125,14)) + ##self.triblerStyles.setDarkText(self.TriblerSources) + + self.CreationDate = wx.StaticText(self,-1,"Creation date:",wx.Point(0,0),wx.Size(125,22)) + self.CreationDate.SetMinSize((125,14)) + self.triblerStyles.setDarkText(self.CreationDate) + + self.CreationDate_info = wx.StaticText(self,-1,"",wx.Point(0,0),wx.Size(125,22)) + self.CreationDate_info.SetMinSize((125,14)) + self.triblerStyles.setDarkText(self.CreationDate_info) + self.CreationDate_info.SetLabel(friendly_time(self.torrent['creation_date'])) + + self.ModeratorName = wx.StaticText(self,-1,"Moderated by: ",wx.Point(0,0),wx.Size(125,22)) + self.ModeratorName.SetMinSize((125,14)) + self.triblerStyles.setLightText(self.ModeratorName) + + self.ModeratorName_info = wx.StaticText(self,-1,"",wx.Point(0,0),wx.Size(125,22)) + self.ModeratorName_info.SetMinSize((125,14)) + self.triblerStyles.setLightText(self.ModeratorName_info) + + + ## hSizer2 + self.hSizer2 = wx.BoxSizer(wx.HORIZONTAL) + + + + # check for moderation + if self.infohash is not None and self.mcdb.hasModeration(bin2str(self.infohash)): + moderation = self.mcdb.getModeration(bin2str(self.infohash)) + mod_name = moderation[1] + + # If the moderator is himself, he should not be able to rate the file properties + if moderation[0] != bin2str(self.session.get_permid()): + self.Rate = wx.StaticText(self,-1,"Rate these file properties as ",wx.Point(0,0),wx.Size(160,22)) + self.triblerStyles.setLightText(self.Rate) + self.Or = wx.StaticText(self,-1," or",wx.Point(0,0),wx.Size(25,22)) + self.triblerStyles.setLightText(self.Or) + + self.fake = TestButton(self, -1, name='fake') + self.fake.SetMinSize((35,16)) + self.fake.SetSize((35,16)) + self.guiUtility.fakeButton = self.fake + + self.real = TestButton(self, -1, name='real') + self.real.SetMinSize((35,16)) + self.real.SetSize((35,16)) + self.guiUtility.realButton = self.real + + self.hSizer2.Add(self.Rate,0,wx.LEFT|wx.FIXED_MINSIZE,0) + self.hSizer2.Add(self.fake,0,wx.LEFT|wx.FIXED_MINSIZE,5) + self.hSizer2.Add(self.Or,0,wx.LEFT|wx.FIXED_MINSIZE,5) + self.hSizer2.Add(self.real,0,wx.LEFT|wx.FIXED_MINSIZE,5) + + else: + mod_name = "None" + # disable fake and real buttons + ##self.fake.setState(False) + ##self.real.setState(False) + + self.ModeratorName_info.SetLabel(mod_name) + + self.vSizer.Add([0,10], 0, wx.BOTTOM|wx.FIXED_MINSIZE, 0) + + 
##self.hSizer3 = wx.BoxSizer(wx.HORIZONTAL) + ##self.hSizer3.Add(self.Seeders,0,wx.FIXED_MINSIZE,5) + ##self.hSizer3.Add(self.NumSeeders,0,wx.FIXED_MINSIZE,5) + ##self.vSizer.Add(self.hSizer3,0,wx.LEFT|wx.FIXED_MINSIZE,5) + + ##self.vSizer.Add([0,2], 0, wx.BOTTOM|wx.FIXED_MINSIZE, 0) + + self.hSizer4 = wx.BoxSizer(wx.HORIZONTAL) + ##self.hSizer4.Add(self.Leechers,0,wx.FIXED_MINSIZE,5) + ##self.hSizer4.Add(self.NumLeechers,0,wx.FIXED_MINSIZE,5) + self.hSizer4.Add(self.Popularity,0,wx.FIXED_MINSIZE,5) + self.hSizer4.Add(self.Popularity_info,0,wx.FIXED_MINSIZE,5) + self.vSizer.Add(self.hSizer4,0,wx.LEFT|wx.FIXED_MINSIZE,5) + + ##self.vSizer.Add([0,2], 0, wx.BOTTOM|wx.FIXED_MINSIZE, 0) + ##self.vSizer.Add(self.TriblerSources,0,wx.LEFT|wx.FIXED_MINSIZE,5) + ##self.vSizer.Add([0,2], 0, wx.BOTTOM|wx.FIXED_MINSIZE, 0) + + self.hSizer5 = wx.BoxSizer(wx.HORIZONTAL) + self.hSizer5.Add(self.CreationDate,0,wx.FIXED_MINSIZE,5) + self.hSizer5.Add(self.CreationDate_info,0,wx.FIXED_MINSIZE,5) + self.vSizer.Add(self.hSizer5,0,wx.LEFT|wx.FIXED_MINSIZE,5) + + + ##self.vSizer.Add([0,20], 0, wx.BOTTOM|wx.FIXED_MINSIZE, 0) + self.vSizer.Add(self.hSizer2,0,wx.LEFT|wx.FIXED_MINSIZE,5) + self.vSizer.Add([0,2], 0, wx.BOTTOM|wx.FIXED_MINSIZE, 0) + + self.hSizer7 = wx.BoxSizer(wx.HORIZONTAL) + self.hSizer7.Add(self.ModeratorName,0,wx.FIXED_MINSIZE,5) + self.hSizer7.Add(self.ModeratorName_info,0,wx.FIXED_MINSIZE,5) + self.vSizer.Add(self.hSizer7,0,wx.LEFT|wx.FIXED_MINSIZE,5) + + self.hSizer = wx.BoxSizer(wx.HORIZONTAL) + + self.hSizer.Add([20,10], 0, wx.BOTTOM|wx.FIXED_MINSIZE, 1) + + ##self.save = wx.StaticText(self,-1,"save",wx.Point(0,0),wx.Size(50,22)) + ##self.save.SetMinSize((50,14)) + ##self.save.SetForegroundColour(wx.RED) + + + + self.download = tribler_topButton(self, -1, name='download') + self.download.SetMinSize((20,20)) + self.download.SetSize((20,20)) + + ##self.select_files = tribler_topButton(self, -1, name='select_files') + ##self.select_files.SetMinSize((148,16)) + ##self.select_files.SetSize((148,16)) + + ##self.view_related_files = tribler_topButton(self, -1, name='view_related_files') + ##self.view_related_files.SetMinSize((116,16)) + ##self.view_related_files.SetSize((116,16)) + + ##self.edit = tribler_topButton(self, -1, name='edit') + ##self.edit.SetMinSize((40,16)) + ##self.edit.SetSize((40,16)) + + + self.hSizer.Add(self.vSizer, 0, wx.FIXED_MINSIZE, 10) + + + self.play_big = SwitchButton(self, -1, name='playbig') + self.play_big.setToggled(True) + self.play_big.Bind(wx.EVT_LEFT_UP, self.playbig_clicked) + + def is_playable_callback(torrent, playable): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "PLAYABLE : " , playable + self.play_big.setToggled(playable) + + if not self.guiUtility.standardDetails.torrent_is_playable(callback=is_playable_callback): + + self.play_big.setToggled(False) + + + self.vSizer2 = wx.BoxSizer(wx.VERTICAL) + self.vSizer2.Add([0,20], 0, wx.FIXED_MINSIZE, 0) + self.vSizer2.Add(self.play_big, 0, wx.FIXED_MINSIZE, 4) + + + self.vSizer3 = wx.BoxSizer(wx.VERTICAL) + self.vSizer3.Add([0,37], 0, wx.FIXED_MINSIZE, 0) + self.vSizer3.Add(self.download, 0, wx.FIXED_MINSIZE, 4) + + + + + + ##self.vSizer = wx.BoxSizer(wx.VERTICAL) + ##self.hSizer.Add(self.vSizer, 0, wx.TOP, 25) + + ##self.downloading = self.data and self.data.get('myDownloadHistory') + + self.hSizer0.Add(self.hSizer, 1, wx.EXPAND , 10) + self.hSizer0.Add(self.vSizer2, 0 , wx.FIXED_MINSIZE , 4) + self.hSizer0.Add([10,0], 0, wx.FIXED_MINSIZE, 0) + self.hSizer0.Add(self.vSizer3, 0 , 
wx.FIXED_MINSIZE , 4) + self.hSizer0.Add([50,10], 0, wx.FIXED_MINSIZE, 0) + + self.SetSizer(self.hSizer0) + self.SetAutoLayout(1); + self.Layout() + + + + + + def playbig_clicked(self,event): + if self.play_big.isToggled(): + + ds = self.torrent.get('ds') + + videoplayer = self._get_videoplayer(exclude=ds) + videoplayer.stop_playback() # stop current playback + videoplayer.show_loading() + + ##self.play_big.setToggled() + ##self.guiUtility.buttonClicked(event) + if ds is None: + self.guiUtility.standardDetails.download(vodmode=True) + else: + self.play(ds) + + self.guiUtility.standardDetails.setVideodata(self.guiUtility.standardDetails.getData()) + self._get_videoplayer(exclude=ds).videoframe.get_videopanel().SetLoadingText(self.guiUtility.standardDetails.getVideodata()['name']) + if sys.platform == 'darwin': + self._get_videoplayer(exclude=ds).videoframe.show_videoframe() + self._get_videoplayer(exclude=ds).videoframe.get_videopanel().Refresh() + self._get_videoplayer(exclude=ds).videoframe.get_videopanel().Layout() + + def play(self,ds): + + + self._get_videoplayer(exclude=ds).play(ds) + + + def _get_videoplayer(self, exclude=None): + """ + Returns the VideoPlayer instance and ensures that it knows if + there are other downloads running. + """ + other_downloads = False + for ds in self.gridmgr.get_dslist(): + if ds is not exclude and ds.get_status() not in (DLSTATUS_STOPPED, DLSTATUS_STOPPED_ON_ERROR): + other_downloads = True + break + + videoplayer = VideoPlayer.getInstance() + videoplayer.set_other_downloads(other_downloads) + return videoplayer + diff --git a/tribler-mod/Tribler/Main/vwxGUI/FriendsItemPanel.py b/tribler-mod/Tribler/Main/vwxGUI/FriendsItemPanel.py new file mode 100644 index 0000000..8d4cd95 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/FriendsItemPanel.py @@ -0,0 +1,482 @@ +from time import localtime, strftime +# Written by Jelle Roozenburg, Maarten ten Brinke, Arno Bakker +# see LICENSE.txt for license information + +import wx, math, time, os, sys, threading +from traceback import print_exc +from copy import deepcopy +import cStringIO +from wx.lib.stattext import GenStaticText as StaticText + +from Tribler.Core.simpledefs import * +from Tribler.Core.Utilities.utilities import * +from Tribler.Core.Utilities.unicode import * +from Tribler.Main.Dialogs.makefriends import MakeFriendsDialog +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility +from Tribler.Main.vwxGUI.PersonsItemPanel import ThumbnailViewer +from Tribler.Main.Utility.utility import similarPeer, copyPeer +from Tribler.Main.vwxGUI.IconsManager import IconsManager, data2wxBitmap +from Tribler.Main.vwxGUI.TriblerStyles import TriblerStyles +from Tribler.Main.vwxGUI.PersonsItemDetailsSummary import PersonsItemDetailsSummary +from font import * +from tribler_topButton import * +## import TasteHeart + +DEBUG=False + +# font sizes +if sys.platform == 'darwin': + FS_FRIENDTITLE = 10 + FS_STATUS = 10 + FS_SIMILARITY = 10 + FS_HEARTRANK = 8 + FS_ONLINE = 10 +else: + FS_FRIENDTITLE = 8 + FS_STATUS = 8 + FS_SIMILARITY = 8 + FS_HEARTRANK = 7 + FS_ONLINE = 8 + +class FriendsItemPanel(wx.Panel): + """ + PersonsItemPanel shows one persons item inside the PersonsGridPanel + """ + def __init__(self, parent, keyTypedFun= None): + self.triblerStyles = TriblerStyles.getInstance() + global TORRENTPANEL_BACKGROUND + + wx.Panel.__init__(self, parent, -1) + self.guiUtility = GUIUtility.getInstance() + self.utility = self.guiUtility.utility + self.parent = parent + self.data = None + self.summary = None + self.datacopy = {} 
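+        # datacopy caches the peer dict currently rendered in this row; setData()
+        # refreshes it through copyPeer() and was written to use it as a change
+        # detector so that an identical update could skip the relayout, roughly:
+        #
+        #     if similarPeer(peer_data, self.datacopy):
+        #         return   # nothing visible changed
+        #
+        # That shortcut is commented out in setData() below so the connected time
+        # keeps being updated, per the 2008-10-21 remark there.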
+ self.FriendThumbnailViewer = ThumbnailViewer + self.titleLength = 77 # num characters + self.listItem = True + self.triblerGrey = wx.Colour(128,128,128) + self.selected = False + self.warningMode = False + self.guiserver = parent.guiserver + self.oldCategoryLabel = None + self.addComponents() + self.Show() + self.Refresh() + self.vSizerOverall = wx.BoxSizer(wx.VERTICAL) + + self.Layout() + + def addComponents(self): + self.Show(False) + self.SetMinSize((137,22+0)) + self.selectedColour = wx.Colour(255,200,187) + self.unselectedColour = wx.WHITE + + self.SetBackgroundColour(self.unselectedColour) + +# self.vSizerAll = wx.BoxSizer(wx.VERTICAL) + + self.hSizer = wx.BoxSizer(wx.HORIZONTAL) + self.vSizerOverall.Add(self.hSizer, 0, wx.FIXED|wx.EXPAND, 0) + self.Bind(wx.EVT_LEFT_UP, self.mouseAction) + self.Bind(wx.EVT_KEY_UP, self.keyTyped) + + # Add Spacer + self.hSizer.Add([8,22],0,wx.EXPAND|wx.FIXED_MINSIZE,0) + + # Add thumb + self.thumb = FriendThumbnailViewer(self, 'friendsMode') + self.thumb.setBackground(wx.BLACK) + self.thumb.SetSize((18,18)) + self.hSizer.Add(self.thumb, 0, wx.ALL, 2) + + # Add title + self.title =wx.StaticText(self,-1,"",wx.Point(0,0),wx.Size(100,15)) + self.title.SetMinSize((100,14)) + self.triblerStyles.setLightText(self.title) + self.hSizer.Add(self.title,1,wx.TOP,4) + + # Add left vertical line + self.vLine1 = self.addLine() + + # Add status + self.status =wx.StaticText(self,-1,"",wx.Point(0,0),wx.Size(130,12), wx.ALIGN_RIGHT | wx.ST_NO_AUTORESIZE ) + self.status.SetMinSize((165,12)) + self.triblerStyles.setLightText(self.status) + self.hSizer.Add(self.status,0,wx.TOP|wx.EXPAND,4) + + # Add left vertical line + self.vLine2 = self.addLine() + + # Add message > if today new content is discovered from him/her + self.helping =wx.StaticText(self,-1,"",wx.Point(0,0),wx.Size(130,12), wx.ALIGN_RIGHT | wx.ST_NO_AUTORESIZE) + self.helping.SetMinSize((30,14)) + self.triblerStyles.setLightText(self.helping) + self.hSizer.Add(self.helping,1,wx.TOP,4) + + # Add left vertical line + self.vLine3 = self.addLine() + + # Add Taste Heart - Add Spacer to keep space occupied when no heart available + self.vSizer = wx.BoxSizer(wx.VERTICAL) + self.vSizer.Add([60,2],0,wx.FIXED_MINSIZE,0) + self.hSizer2 = wx.BoxSizer(wx.HORIZONTAL) + self.tasteHeart = TasteHeart.TasteHeart(self, -1, wx.DefaultPosition, wx.Size(14,14),name='TasteHeart') + self.hSizer2.Add(self.tasteHeart, 0, wx.TOP, 0) + # Add Taste similarity + self.taste =wx.StaticText(self,-1,"",wx.Point(0,0),wx.Size(40,15)) + self.taste.SetMinSize((40,15)) + self.triblerStyles.setLightText(self.taste) + self.hSizer2.Add(self.taste, 0, wx.LEFT, 2) + self.vSizer.Add(self.hSizer2, 0, wx.TOP, 2) + self.hSizer.Add(self.vSizer, 0, wx.LEFT|wx.RIGHT, 2) + + self.hSizerSummary = wx.BoxSizer(wx.HORIZONTAL) + self.vSizerOverall.Add(self.hSizerSummary, 1, wx.FIXED_MINSIZE|wx.EXPAND, 0) + + self.SetSizer(self.vSizerOverall) + # Add delete button +## self.delete = tribler_topButton(self, -1, wx.Point(0,0), wx.Size(16,16),name='deleteFriend') +## self.hSizer.Add(self.delete, 0, wx.TOP|wx.RIGHT, 4) + +# self.vSizerAll.Add(self.hSizer, 0, wx.EXPAND, 0) + #Add bottom horizontal line +# self.addLine(False) + + self.SetAutoLayout(1); + self.Layout(); + self.Refresh() + + # 2.8.4.2 return value of GetChildren changed + wl = [] + for c in self.GetChildren(): + wl.append(c) + for window in wl: + window.Bind(wx.EVT_LEFT_UP, self.mouseAction) + window.Bind(wx.EVT_KEY_UP, self.keyTyped) + window.Bind(wx.EVT_RIGHT_DOWN, self.mouseAction) + + def 
getColumns(self): + return [{'sort':'', 'title':'', 'width':20, 'tip':''}, + {'sort':'name', 'reverse':True,'title':'name', 'weight':1,'tip':self.utility.lang.get('C_friendname') }, + {'sort':'last_connected', 'title':'status', 'width':165, 'tip':self.utility.lang.get('C_friendstatus'), 'order':'down'}, + {'sort':'??', 'dummy':True, 'title':'boosting','weight':1, 'tip':self.utility.lang.get('C_helping')}, + {'sort':'similarity','pic':'heartSmall', 'width':65, 'tip':self.utility.lang.get('C_recommpersons')} + ] + + def addLine(self, vertical=True): + if vertical: + vLine = wx.StaticLine(self,-1,wx.DefaultPosition, wx.Size(2,0),wx.LI_VERTICAL) + self.hSizer.Add(vLine, 0, wx.RIGHT|wx.LEFT, 3) + return vLine + else: + hLine = wx.StaticLine(self,-1,wx.DefaultPosition, wx.Size(-1,1),wx.LI_HORIZONTAL) + self.vSizer.Add(hLine, 0, wx.EXPAND, 0) + return hLine + + def setData(self, peer_data): + # set bitmap, rating, title + + + #if self.data is None: + # oldpermid = None + #else: + # oldpermid = self.data['permid'] + + self.data = peer_data + # do not reload similar peers + if peer_data is not None and 'coopdlstatus' in peer_data: + pass + # Arno, 2008-10-21: Make sure connected time is updated + #elif similarPeer(peer_data, self.datacopy): + # return + self.datacopy = copyPeer(peer_data) + + if peer_data is None: + peer_data = {} + + if peer_data.get('name'): + title = peer_data['name'][:self.titleLength] + self.title.Enable(True) + self.title.SetLabel(title) + self.title.Wrap(self.title.GetSize()[0]) + self.title.SetToolTipString(peer_data['ip']+':'+str(peer_data['port'])) + + # status issues + self.status.Enable(True) + label = peer2status(peer_data) + self.status.SetLabel(label) + + if 'coopdlstatus' in peer_data: + self.helping.SetLabel(peer_data['coopdlstatus']) + self.helping.SetToolTipString(peer_data['coopdlstatus']) + +# self.delete.Show() + self.tasteHeart.Show() + self.vLine1.Show() + self.vLine2.Show() + self.vLine3.Show() + else: + self.title.SetLabel('') + self.title.SetToolTipString('') + self.title.Enable(False) + self.status.SetLabel('') + self.helping.SetLabel('') +# self.delete.Hide() + self.tasteHeart.Hide() + self.vLine1.Hide() + self.vLine2.Hide() + self.vLine3.Hide() + + rank = peer_data.get('simRank',-1) + recommField = self.taste + if rank!=-1: + if rank == 1: + self.tasteHeart.SetToolTipString("%d" % rank + "st of top 20 of all discovered persons") + recommField.SetLabel("%d" % rank + "st") + elif rank == 2: + self.tasteHeart.SetToolTipString("%d" % rank + "nd of top 20 of all discovered persons") + recommField.SetLabel("%d" % rank + "nd") + elif rank == 3: + self.tasteHeart.SetToolTipString("%d" % rank + "rd of top 20 of all discovered persons") + recommField.SetLabel("%d" % rank + "rd") + else: + self.tasteHeart.SetToolTipString("%d" % rank + "th of top 20 of all discovered persons") + recommField.SetLabel("%d" % rank + "th") + self.tasteHeart.Show() + self.tasteHeart.setRank(rank) + else: + self.taste.SetLabel('') + self.tasteHeart.Hide() + + #if oldpermid is None or oldpermid != peer_data['permid']: + self.thumb.setData(peer_data, summary='') + + self.Layout() + self.Refresh() + #self.parent.Refresh() + + + + def select(self, rowIndex, colIndex, ignore1, ignore2, ignore3): + self.selected = True + colour = self.triblerStyles.selected(3) +# if colIndex == 0: +# self.vLine.Hide() +# else: +# self.vLine.Show() + self.thumb.setSelected(True) + self.title.SetBackgroundColour(colour) + self.status.SetBackgroundColour(colour) + self.helping.SetBackgroundColour(colour) + 
self.taste.SetBackgroundColour(colour) + self.tasteHeart.setBackground(colour) + self.SetBackgroundColour(colour) + self.toggleFriendsItemDetailsSummary(True) + self.guiUtility.standardOverview.selectedPeer = self.data['permid'] + self.Refresh() + self.SetFocus() + + def deselect(self, rowIndex, colIndex): + self.selected = False +# if colIndex == 0: +# self.vLine.Hide() +# else: +# self.vLine.Show() + if rowIndex % 2 == 0: + colour = self.triblerStyles.selected(1) + else: + colour = self.triblerStyles.selected(2) + + self.thumb.setSelected(False) + self.title.SetBackgroundColour(colour) + self.status.SetBackgroundColour(colour) + self.helping.SetBackgroundColour(colour) + self.taste.SetBackgroundColour(colour) + self.tasteHeart.setBackground(colour) + self.SetBackgroundColour(colour) + self.toggleFriendsItemDetailsSummary(False) + self.Refresh() + + def keyTyped(self, event): + if self.selected: + key = event.GetKeyCode() + if (key == wx.WXK_DELETE): + if self.data: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'contentpanel: deleting' + # self.guiUtility.deleteTorrent(self.data) + pass + event.Skip() + + def mouseAction(self, event): + event.Skip() + self.SetFocus() + if self.data: + self.guiUtility.selectPeer(self.data) + + if event.RightDown(): + self.rightMouseButton(event) + + + def rightMouseButton(self, event): + menu = self.guiUtility.OnRightMouseAction(event) + if menu is not None: + self.PopupMenu(menu, (-1,-1)) + + def getIdentifier(self): + if self.data: + return self.data['permid'] + + def toggleFriendsItemDetailsSummary(self, visible): + + if visible and not self.summary: + self.summary = PersonsItemDetailsSummary(self, mode='friends') + self.triblerStyles.setLightText(self.summary) + self.hSizerSummary.Add(self.summary, 1, wx.ALL|wx.EXPAND, 0) + self.SetMinSize((-1,140)) + + elif self.summary and not visible: + self.summary.Hide() + # the Thumb should be destoryed seperately because it has a different parent. 
+ self.summary.thumbSummary.Destroy() + self.summary.DestroyChildren() + self.summary.Destroy() + self.summary = None + self.SetMinSize((-1,22)) + + + + + +class FriendThumbnailViewer(ThumbnailViewer): + def __init__(self, parent, mode, **kw): + self.parent = parent + ThumbnailViewer.__init__(self, parent, mode, **kw) + + def setThumbnail(self, data, summary=''): + # Get the file(s)data for this torrent + try: + width, height = self.GetSize() + bmp = None + # Check if we have already read the thumbnail and metadata information from this torrent file + if data.get('metadata') and data['metadata'].get('ThumbnailBitmapAsFriend'): + bmp = data['metadata'].get('ThumbnailBitmapAsFriend') + else: + self.GetParent().guiserver.add_task(lambda:self.loadMetadata(data,type="AsFriend"),0) + if not bmp: + bmp = self.iconsManager.get_default('friendsMode','DEFAULT_THUMB') + + self.setBitmap(bmp) + d = 1 + self.border = [wx.Point(0,d), wx.Point(width-d, d), wx.Point(width-d, height-d), wx.Point(d,height-d), wx.Point(d,0)] + self.Refresh() + + except: + print_exc(file=sys.stderr) + return {} + +#=============================================================================== +# def setBitmap(self, bmp, default=False): +# # Recalculate image placement +# w, h = self.GetSize() +# iw, ih = bmp.GetSize() +# +# self.dataBitmap = bmp +# self.xpos, self.ypos = (w-iw)/2, (h-ih)/2 +#=============================================================================== + + def OnPaint(self, evt): + dc = wx.BufferedPaintDC(self) + dc.SetBackground(wx.Brush(self.backgroundColor)) + dc.Clear() + + if self.dataBitmap: + dc.DrawBitmap(self.dataBitmap, self.xpos,self.ypos, True) +# if self.mouseOver: + if self.data is not None and type(self.data)==type({}) and self.data.get('permid'): + + rank = self.data.get('simRank',-1) + #because of the fact that hearts are coded so that lower index means higher ranking, then: + if rank > 0 and rank <= 5: + recomm = 0 + elif rank > 5 and rank <= 10: + recomm = 1 + elif rank > 10 and rank <= 15: + recomm = 2 + elif rank > 15 and rank <= 20: + recomm = 3 + else: + recomm = -1 + if self.mouseOver: + mask = self.iconsManager.get_default('friendsMode','MASK_BITMAP_OVERLAY') + y_pos = 0 + m_height = mask.GetSize()[1] + y_height = self.GetSize()[1] + while y_pos<y_height: + dc.DrawBitmap(mask,0 ,y_pos, True) + y_pos = y_pos + m_height + if rank>=0 or self.data.get('friend') or self.data.get('online'): + mask = self.iconsManager.get_default('personsMode','MASK_BITMAP') + dc.DrawBitmap(mask,0 ,62, True) + if recomm >=0: + dc.DrawBitmap(TasteHeart.BITMAPS[recomm],5 ,64, True) + dc.SetFont(wx.Font(FS_HEARTRANK, FONTFAMILY, FONTWEIGHT, wx.BOLD, False, FONTFACE)) + text = repr(rank) + dc.DrawText(text, 
peer.get('last_connected') is not None: + if peer['last_connected'] < 0: + label = 'never seen' + else: + label = 'met %s' % friendly_time(peer['last_connected']) + else: + label = 'unknown' + return label + +def fs2text(fs): + if fs == FS_NOFRIEND: + return "no friend" + elif fs == FS_MUTUAL: + return "is friend" + elif fs == FS_I_INVITED: + return "pending" + elif fs == FS_HE_INVITED: + return "invited you" + elif fs == FS_I_DENIED: + return "you refused" + elif fs == FS_HE_DENIED: + return "refused" diff --git a/tribler-mod/Tribler/Main/vwxGUI/FriendsItemPanel.py.bak b/tribler-mod/Tribler/Main/vwxGUI/FriendsItemPanel.py.bak new file mode 100644 index 0000000..ae546df --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/FriendsItemPanel.py.bak @@ -0,0 +1,481 @@ +# Written by Jelle Roozenburg, Maarten ten Brinke, Arno Bakker +# see LICENSE.txt for license information + +import wx, math, time, os, sys, threading +from traceback import print_exc +from copy import deepcopy +import cStringIO +from wx.lib.stattext import GenStaticText as StaticText + +from Tribler.Core.simpledefs import * +from Tribler.Core.Utilities.utilities import * +from Tribler.Core.Utilities.unicode import * +from Tribler.Main.Dialogs.makefriends import MakeFriendsDialog +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility +from Tribler.Main.vwxGUI.PersonsItemPanel import ThumbnailViewer +from Tribler.Main.Utility.utility import similarPeer, copyPeer +from Tribler.Main.vwxGUI.IconsManager import IconsManager, data2wxBitmap +from Tribler.Main.vwxGUI.TriblerStyles import TriblerStyles +from Tribler.Main.vwxGUI.PersonsItemDetailsSummary import PersonsItemDetailsSummary +from font import * +from tribler_topButton import * +## import TasteHeart + +DEBUG=False + +# font sizes +if sys.platform == 'darwin': + FS_FRIENDTITLE = 10 + FS_STATUS = 10 + FS_SIMILARITY = 10 + FS_HEARTRANK = 8 + FS_ONLINE = 10 +else: + FS_FRIENDTITLE = 8 + FS_STATUS = 8 + FS_SIMILARITY = 8 + FS_HEARTRANK = 7 + FS_ONLINE = 8 + +class FriendsItemPanel(wx.Panel): + """ + PersonsItemPanel shows one persons item inside the PersonsGridPanel + """ + def __init__(self, parent, keyTypedFun= None): + self.triblerStyles = TriblerStyles.getInstance() + global TORRENTPANEL_BACKGROUND + + wx.Panel.__init__(self, parent, -1) + self.guiUtility = GUIUtility.getInstance() + self.utility = self.guiUtility.utility + self.parent = parent + self.data = None + self.summary = None + self.datacopy = {} + self.FriendThumbnailViewer = ThumbnailViewer + self.titleLength = 77 # num characters + self.listItem = True + self.triblerGrey = wx.Colour(128,128,128) + self.selected = False + self.warningMode = False + self.guiserver = parent.guiserver + self.oldCategoryLabel = None + self.addComponents() + self.Show() + self.Refresh() + self.vSizerOverall = wx.BoxSizer(wx.VERTICAL) + + self.Layout() + + def addComponents(self): + self.Show(False) + self.SetMinSize((137,22+0)) + self.selectedColour = wx.Colour(255,200,187) + self.unselectedColour = wx.WHITE + + self.SetBackgroundColour(self.unselectedColour) + +# self.vSizerAll = wx.BoxSizer(wx.VERTICAL) + + self.hSizer = wx.BoxSizer(wx.HORIZONTAL) + self.vSizerOverall.Add(self.hSizer, 0, wx.FIXED|wx.EXPAND, 0) + self.Bind(wx.EVT_LEFT_UP, self.mouseAction) + self.Bind(wx.EVT_KEY_UP, self.keyTyped) + + # Add Spacer + self.hSizer.Add([8,22],0,wx.EXPAND|wx.FIXED_MINSIZE,0) + + # Add thumb + self.thumb = FriendThumbnailViewer(self, 'friendsMode') + self.thumb.setBackground(wx.BLACK) + self.thumb.SetSize((18,18)) + self.hSizer.Add(self.thumb, 0, 
wx.ALL, 2) + + # Add title + self.title =wx.StaticText(self,-1,"",wx.Point(0,0),wx.Size(100,15)) + self.title.SetMinSize((100,14)) + self.triblerStyles.setLightText(self.title) + self.hSizer.Add(self.title,1,wx.TOP,4) + + # Add left vertical line + self.vLine1 = self.addLine() + + # Add status + self.status =wx.StaticText(self,-1,"",wx.Point(0,0),wx.Size(130,12), wx.ALIGN_RIGHT | wx.ST_NO_AUTORESIZE ) + self.status.SetMinSize((165,12)) + self.triblerStyles.setLightText(self.status) + self.hSizer.Add(self.status,0,wx.TOP|wx.EXPAND,4) + + # Add left vertical line + self.vLine2 = self.addLine() + + # Add message > if today new content is discovered from him/her + self.helping =wx.StaticText(self,-1,"",wx.Point(0,0),wx.Size(130,12), wx.ALIGN_RIGHT | wx.ST_NO_AUTORESIZE) + self.helping.SetMinSize((30,14)) + self.triblerStyles.setLightText(self.helping) + self.hSizer.Add(self.helping,1,wx.TOP,4) + + # Add left vertical line + self.vLine3 = self.addLine() + + # Add Taste Heart - Add Spacer to keep space occupied when no heart available + self.vSizer = wx.BoxSizer(wx.VERTICAL) + self.vSizer.Add([60,2],0,wx.FIXED_MINSIZE,0) + self.hSizer2 = wx.BoxSizer(wx.HORIZONTAL) + self.tasteHeart = TasteHeart.TasteHeart(self, -1, wx.DefaultPosition, wx.Size(14,14),name='TasteHeart') + self.hSizer2.Add(self.tasteHeart, 0, wx.TOP, 0) + # Add Taste similarity + self.taste =wx.StaticText(self,-1,"",wx.Point(0,0),wx.Size(40,15)) + self.taste.SetMinSize((40,15)) + self.triblerStyles.setLightText(self.taste) + self.hSizer2.Add(self.taste, 0, wx.LEFT, 2) + self.vSizer.Add(self.hSizer2, 0, wx.TOP, 2) + self.hSizer.Add(self.vSizer, 0, wx.LEFT|wx.RIGHT, 2) + + self.hSizerSummary = wx.BoxSizer(wx.HORIZONTAL) + self.vSizerOverall.Add(self.hSizerSummary, 1, wx.FIXED_MINSIZE|wx.EXPAND, 0) + + self.SetSizer(self.vSizerOverall) + # Add delete button +## self.delete = tribler_topButton(self, -1, wx.Point(0,0), wx.Size(16,16),name='deleteFriend') +## self.hSizer.Add(self.delete, 0, wx.TOP|wx.RIGHT, 4) + +# self.vSizerAll.Add(self.hSizer, 0, wx.EXPAND, 0) + #Add bottom horizontal line +# self.addLine(False) + + self.SetAutoLayout(1); + self.Layout(); + self.Refresh() + + # 2.8.4.2 return value of GetChildren changed + wl = [] + for c in self.GetChildren(): + wl.append(c) + for window in wl: + window.Bind(wx.EVT_LEFT_UP, self.mouseAction) + window.Bind(wx.EVT_KEY_UP, self.keyTyped) + window.Bind(wx.EVT_RIGHT_DOWN, self.mouseAction) + + def getColumns(self): + return [{'sort':'', 'title':'', 'width':20, 'tip':''}, + {'sort':'name', 'reverse':True,'title':'name', 'weight':1,'tip':self.utility.lang.get('C_friendname') }, + {'sort':'last_connected', 'title':'status', 'width':165, 'tip':self.utility.lang.get('C_friendstatus'), 'order':'down'}, + {'sort':'??', 'dummy':True, 'title':'boosting','weight':1, 'tip':self.utility.lang.get('C_helping')}, + {'sort':'similarity','pic':'heartSmall', 'width':65, 'tip':self.utility.lang.get('C_recommpersons')} + ] + + def addLine(self, vertical=True): + if vertical: + vLine = wx.StaticLine(self,-1,wx.DefaultPosition, wx.Size(2,0),wx.LI_VERTICAL) + self.hSizer.Add(vLine, 0, wx.RIGHT|wx.LEFT, 3) + return vLine + else: + hLine = wx.StaticLine(self,-1,wx.DefaultPosition, wx.Size(-1,1),wx.LI_HORIZONTAL) + self.vSizer.Add(hLine, 0, wx.EXPAND, 0) + return hLine + + def setData(self, peer_data): + # set bitmap, rating, title + + + #if self.data is None: + # oldpermid = None + #else: + # oldpermid = self.data['permid'] + + self.data = peer_data + # do not reload similar peers + if peer_data is not None and 
'coopdlstatus' in peer_data: + pass + # Arno, 2008-10-21: Make sure connected time is updated + #elif similarPeer(peer_data, self.datacopy): + # return + self.datacopy = copyPeer(peer_data) + + if peer_data is None: + peer_data = {} + + if peer_data.get('name'): + title = peer_data['name'][:self.titleLength] + self.title.Enable(True) + self.title.SetLabel(title) + self.title.Wrap(self.title.GetSize()[0]) + self.title.SetToolTipString(peer_data['ip']+':'+str(peer_data['port'])) + + # status issues + self.status.Enable(True) + label = peer2status(peer_data) + self.status.SetLabel(label) + + if 'coopdlstatus' in peer_data: + self.helping.SetLabel(peer_data['coopdlstatus']) + self.helping.SetToolTipString(peer_data['coopdlstatus']) + +# self.delete.Show() + self.tasteHeart.Show() + self.vLine1.Show() + self.vLine2.Show() + self.vLine3.Show() + else: + self.title.SetLabel('') + self.title.SetToolTipString('') + self.title.Enable(False) + self.status.SetLabel('') + self.helping.SetLabel('') +# self.delete.Hide() + self.tasteHeart.Hide() + self.vLine1.Hide() + self.vLine2.Hide() + self.vLine3.Hide() + + rank = peer_data.get('simRank',-1) + recommField = self.taste + if rank!=-1: + if rank == 1: + self.tasteHeart.SetToolTipString("%d" % rank + "st of top 20 of all discovered persons") + recommField.SetLabel("%d" % rank + "st") + elif rank == 2: + self.tasteHeart.SetToolTipString("%d" % rank + "nd of top 20 of all discovered persons") + recommField.SetLabel("%d" % rank + "nd") + elif rank == 3: + self.tasteHeart.SetToolTipString("%d" % rank + "rd of top 20 of all discovered persons") + recommField.SetLabel("%d" % rank + "rd") + else: + self.tasteHeart.SetToolTipString("%d" % rank + "th of top 20 of all discovered persons") + recommField.SetLabel("%d" % rank + "th") + self.tasteHeart.Show() + self.tasteHeart.setRank(rank) + else: + self.taste.SetLabel('') + self.tasteHeart.Hide() + + #if oldpermid is None or oldpermid != peer_data['permid']: + self.thumb.setData(peer_data, summary='') + + self.Layout() + self.Refresh() + #self.parent.Refresh() + + + + def select(self, rowIndex, colIndex, ignore1, ignore2, ignore3): + self.selected = True + colour = self.triblerStyles.selected(3) +# if colIndex == 0: +# self.vLine.Hide() +# else: +# self.vLine.Show() + self.thumb.setSelected(True) + self.title.SetBackgroundColour(colour) + self.status.SetBackgroundColour(colour) + self.helping.SetBackgroundColour(colour) + self.taste.SetBackgroundColour(colour) + self.tasteHeart.setBackground(colour) + self.SetBackgroundColour(colour) + self.toggleFriendsItemDetailsSummary(True) + self.guiUtility.standardOverview.selectedPeer = self.data['permid'] + self.Refresh() + self.SetFocus() + + def deselect(self, rowIndex, colIndex): + self.selected = False +# if colIndex == 0: +# self.vLine.Hide() +# else: +# self.vLine.Show() + if rowIndex % 2 == 0: + colour = self.triblerStyles.selected(1) + else: + colour = self.triblerStyles.selected(2) + + self.thumb.setSelected(False) + self.title.SetBackgroundColour(colour) + self.status.SetBackgroundColour(colour) + self.helping.SetBackgroundColour(colour) + self.taste.SetBackgroundColour(colour) + self.tasteHeart.setBackground(colour) + self.SetBackgroundColour(colour) + self.toggleFriendsItemDetailsSummary(False) + self.Refresh() + + def keyTyped(self, event): + if self.selected: + key = event.GetKeyCode() + if (key == wx.WXK_DELETE): + if self.data: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'contentpanel: deleting' + # 
self.guiUtility.deleteTorrent(self.data) + pass + event.Skip() + + def mouseAction(self, event): + event.Skip() + self.SetFocus() + if self.data: + self.guiUtility.selectPeer(self.data) + + if event.RightDown(): + self.rightMouseButton(event) + + + def rightMouseButton(self, event): + menu = self.guiUtility.OnRightMouseAction(event) + if menu is not None: + self.PopupMenu(menu, (-1,-1)) + + def getIdentifier(self): + if self.data: + return self.data['permid'] + + def toggleFriendsItemDetailsSummary(self, visible): + + if visible and not self.summary: + self.summary = PersonsItemDetailsSummary(self, mode='friends') + self.triblerStyles.setLightText(self.summary) + self.hSizerSummary.Add(self.summary, 1, wx.ALL|wx.EXPAND, 0) + self.SetMinSize((-1,140)) + + elif self.summary and not visible: + self.summary.Hide() + # the Thumb should be destoryed seperately because it has a different parent. + self.summary.thumbSummary.Destroy() + self.summary.DestroyChildren() + self.summary.Destroy() + self.summary = None + self.SetMinSize((-1,22)) + + + + + +class FriendThumbnailViewer(ThumbnailViewer): + def __init__(self, parent, mode, **kw): + self.parent = parent + ThumbnailViewer.__init__(self, parent, mode, **kw) + + def setThumbnail(self, data, summary=''): + # Get the file(s)data for this torrent + try: + width, height = self.GetSize() + bmp = None + # Check if we have already read the thumbnail and metadata information from this torrent file + if data.get('metadata') and data['metadata'].get('ThumbnailBitmapAsFriend'): + bmp = data['metadata'].get('ThumbnailBitmapAsFriend') + else: + self.GetParent().guiserver.add_task(lambda:self.loadMetadata(data,type="AsFriend"),0) + if not bmp: + bmp = self.iconsManager.get_default('friendsMode','DEFAULT_THUMB') + + self.setBitmap(bmp) + d = 1 + self.border = [wx.Point(0,d), wx.Point(width-d, d), wx.Point(width-d, height-d), wx.Point(d,height-d), wx.Point(d,0)] + self.Refresh() + + except: + print_exc(file=sys.stderr) + return {} + +#=============================================================================== +# def setBitmap(self, bmp, default=False): +# # Recalculate image placement +# w, h = self.GetSize() +# iw, ih = bmp.GetSize() +# +# self.dataBitmap = bmp +# self.xpos, self.ypos = (w-iw)/2, (h-ih)/2 +#=============================================================================== + + def OnPaint(self, evt): + dc = wx.BufferedPaintDC(self) + dc.SetBackground(wx.Brush(self.backgroundColor)) + dc.Clear() + + if self.dataBitmap: + dc.DrawBitmap(self.dataBitmap, self.xpos,self.ypos, True) +# if self.mouseOver: + if self.data is not None and type(self.data)==type({}) and self.data.get('permid'): + + rank = self.data.get('simRank',-1) + #because of the fact that hearts are coded so that lower index means higher ranking, then: + if rank > 0 and rank <= 5: + recomm = 0 + elif rank > 5 and rank <= 10: + recomm = 1 + elif rank > 10 and rank <= 15: + recomm = 2 + elif rank > 15 and rank <= 20: + recomm = 3 + else: + recomm = -1 + if self.mouseOver: + mask = self.iconsManager.get_default('friendsMode','MASK_BITMAP_OVERLAY') + y_pos = 0 + m_height = mask.GetSize()[1] + y_height = self.GetSize()[1] + while y_pos<y_height: + dc.DrawBitmap(mask,0 ,y_pos, True) + y_pos = y_pos + m_height + if rank>=0 or self.data.get('friend') or self.data.get('online'): + mask = self.iconsManager.get_default('personsMode','MASK_BITMAP') + dc.DrawBitmap(mask,0 ,62, True) + if recomm >=0: + dc.DrawBitmap(TasteHeart.BITMAPS[recomm],5 ,64, True) + dc.SetFont(wx.Font(FS_HEARTRANK, FONTFAMILY, FONTWEIGHT, wx.BOLD, False, FONTFACE)) + text = repr(rank) + dc.DrawText(text, 
22, 66) + if self.data.get('friend'): + friend = self.iconsManager.get_default('personsMode','MASK_BITMAP') + dc.DrawBitmap(friend,60 ,65, True) + if self.data.get('online'): + dc.SetFont(wx.Font(FS_ONLINE, FONTFAMILY, FONTWEIGHT, wx.BOLD, False, FONTFACE)) + dc.SetTextForeground('#007303') + dc.DrawText('online', 26, 66) + + +# dc.SetTextForeground(wx.WHITE) + #dc.DrawText('rating', 5, 60) + if (self.selected and self.border): + dc.SetPen(wx.Pen(wx.Colour(255,51,0), 2)) + dc.DrawLines(self.border) + + +def peer2status(peer): + label = peer2seenstatus(peer) + + # Friend status to show whether this is an approved friend, or not + fs = peer.get('friend',FS_NOFRIEND) + + #if fs == FS_NOFRIEND or fs == FS_MUTUAL: + # pass + #else: + fstext = fs2text(fs) + label = label+", "+fstext + return label + +def peer2seenstatus(peer): + if peer.get('online'): + label = 'online' + elif peer.get('last_connected') is not None: + if peer['last_connected'] < 0: + label = 'never seen' + else: + label = 'met %s' % friendly_time(peer['last_connected']) + else: + label = 'unknown' + return label + +def fs2text(fs): + if fs == FS_NOFRIEND: + return "no friend" + elif fs == FS_MUTUAL: + return "is friend" + elif fs == FS_I_INVITED: + return "pending" + elif fs == FS_HE_INVITED: + return "invited you" + elif fs == FS_I_DENIED: + return "you refused" + elif fs == FS_HE_DENIED: + return "refused" diff --git a/tribler-mod/Tribler/Main/vwxGUI/FriendshipManager.py b/tribler-mod/Tribler/Main/vwxGUI/FriendshipManager.py new file mode 100644 index 0000000..6d79ca6 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/FriendshipManager.py @@ -0,0 +1,112 @@ +from time import localtime, strftime +# Written by Ali Abbas, Arno Bakker, Tim Tucker +# see LICENSE.txt for license information + +######################################################################### +# Description : Ask whether or not to accept a friendship request +######################################################################### +import wx + +from Tribler.Core.API import * +from Tribler.Core.Utilities.utilities import show_permid_short + +from Tribler.Main.vwxGUI.IconsManager import IconsManager, data2wxBitmap + +class FriendshipManager: + + def __init__(self,utility,iconpath): + self.utility = utility + self.session = utility.session + self.iconpath = iconpath + self.iconsManager = IconsManager.getInstance() + + self.permids = [] + + self.session.set_friendship_callback(self.sesscb_friendship_callback) + + def sesscb_friendship_callback(self,permid,params): + """ Called by SessionThread """ + + # Find peer in DB, to get name + peerdb = self.utility.session.open_dbhandler(NTFY_PEERS) + peer = peerdb.getPeer(permid) + icon = peerdb.getPeerIcon(permid) + self.utility.session.close_dbhandler(peerdb) + + wx.CallAfter(self.gui_friendship_callback,permid,peer,icon) + + def gui_friendship_callback(self,permid,peer,icon): + + if permid in self.permids: + return + else: + self.permids.append(permid) + + if peer['name'] is None or peer['name'] == "": + name = show_permid_short(permid) + else: + name = peer['name'] + + defbm = self.iconsManager.get_default('personsMode','DEFAULT_THUMB') + if icon[0] is None: + bm = defbm + else: + bm = data2wxBitmap(icon[0],icon[1]) + if bm is None: + bm = defbm + + dial = FriendRequestDialog(None,self.utility,self.iconpath,name,bm) + returnValue = dial.ShowModal() + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","fm: displayReq: RETURN",returnValue + + self.permids.remove(permid) + if returnValue != 
wx.ID_CANCEL: + approved = returnValue == wx.ID_YES + # Send our response + self.session.send_friendship_message(permid,F_RESPONSE_MSG,approved=approved) + + + +class FriendRequestDialog(wx.Dialog): + + def __init__(self,parent,utility,iconpath,name,bm): + self.utility = utility + wx.Dialog.__init__(self,parent,-1,self.utility.lang.get('question')) + + # Set icons for Dialog + self.icons = wx.IconBundle() + self.icons.AddIconFromFile(iconpath,wx.BITMAP_TYPE_ICO) + self.SetIcons(self.icons) + + mainbox = wx.BoxSizer(wx.VERTICAL) + peerbox = wx.BoxSizer(wx.HORIZONTAL) + bb = wx.BitmapButton(self,-1,bm) + qtext = wx.StaticText(self, -1, self.utility.lang.get('addfriendfillin') % name) + peerbox.Add(bb, 1, wx.EXPAND|wx.ALL, 5) + peerbox.Add(qtext, 1, wx.EXPAND|wx.ALL , 5) + + # The standard stuff has problems: NO button doesn't return on ShowModal() + # buttonbox = self.CreateStdDialogButtonSizer(wx.YES_NO) + + self.yesbtn = wx.Button(self, -1, self.utility.lang.get('yes')) + self.Bind(wx.EVT_BUTTON, self.onYES, self.yesbtn) + self.nobtn = wx.Button(self, -1, self.utility.lang.get('no')) + self.Bind(wx.EVT_BUTTON, self.onNO, self.nobtn) + buttonbox = wx.BoxSizer( wx.HORIZONTAL ) + buttonbox.Add(self.yesbtn, 0, wx.ALL, 5) + buttonbox.Add(self.nobtn, 0, wx.ALL, 5) + + self.Bind(wx.EVT_CLOSE, self.onCloseWindow) + mainbox.Add(peerbox, 1, wx.EXPAND, 1) + mainbox.Add(buttonbox, 0, wx.ALIGN_BOTTOM|wx.EXPAND, 1) + self.SetSizerAndFit(mainbox) + + + def onYES(self, event = None): + self.EndModal(wx.ID_YES) + + def onNO(self, event = None): + self.EndModal(wx.ID_NO) + + def onCloseWindow(self, event = None): + self.EndModal(wx.ID_CANCEL) diff --git a/tribler-mod/Tribler/Main/vwxGUI/FriendshipManager.py.bak b/tribler-mod/Tribler/Main/vwxGUI/FriendshipManager.py.bak new file mode 100644 index 0000000..ca80fe1 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/FriendshipManager.py.bak @@ -0,0 +1,111 @@ +# Written by Ali Abbas, Arno Bakker, Tim Tucker +# see LICENSE.txt for license information + +######################################################################### +# Description : Ask whether or not to accept a friendship request +######################################################################### +import wx + +from Tribler.Core.API import * +from Tribler.Core.Utilities.utilities import show_permid_short + +from Tribler.Main.vwxGUI.IconsManager import IconsManager, data2wxBitmap + +class FriendshipManager: + + def __init__(self,utility,iconpath): + self.utility = utility + self.session = utility.session + self.iconpath = iconpath + self.iconsManager = IconsManager.getInstance() + + self.permids = [] + + self.session.set_friendship_callback(self.sesscb_friendship_callback) + + def sesscb_friendship_callback(self,permid,params): + """ Called by SessionThread """ + + # Find peer in DB, to get name + peerdb = self.utility.session.open_dbhandler(NTFY_PEERS) + peer = peerdb.getPeer(permid) + icon = peerdb.getPeerIcon(permid) + self.utility.session.close_dbhandler(peerdb) + + wx.CallAfter(self.gui_friendship_callback,permid,peer,icon) + + def gui_friendship_callback(self,permid,peer,icon): + + if permid in self.permids: + return + else: + self.permids.append(permid) + + if peer['name'] is None or peer['name'] == "": + name = show_permid_short(permid) + else: + name = peer['name'] + + defbm = self.iconsManager.get_default('personsMode','DEFAULT_THUMB') + if icon[0] is None: + bm = defbm + else: + bm = data2wxBitmap(icon[0],icon[1]) + if bm is None: + bm = defbm + + dial = 
FriendRequestDialog(None,self.utility,self.iconpath,name,bm) + returnValue = dial.ShowModal() + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","fm: displayReq: RETURN",returnValue + + self.permids.remove(permid) + if returnValue != wx.ID_CANCEL: + approved = returnValue == wx.ID_YES + # Send our response + self.session.send_friendship_message(permid,F_RESPONSE_MSG,approved=approved) + + + +class FriendRequestDialog(wx.Dialog): + + def __init__(self,parent,utility,iconpath,name,bm): + self.utility = utility + wx.Dialog.__init__(self,parent,-1,self.utility.lang.get('question')) + + # Set icons for Dialog + self.icons = wx.IconBundle() + self.icons.AddIconFromFile(iconpath,wx.BITMAP_TYPE_ICO) + self.SetIcons(self.icons) + + mainbox = wx.BoxSizer(wx.VERTICAL) + peerbox = wx.BoxSizer(wx.HORIZONTAL) + bb = wx.BitmapButton(self,-1,bm) + qtext = wx.StaticText(self, -1, self.utility.lang.get('addfriendfillin') % name) + peerbox.Add(bb, 1, wx.EXPAND|wx.ALL, 5) + peerbox.Add(qtext, 1, wx.EXPAND|wx.ALL , 5) + + # The standard stuff has problems: NO button doesn't return on ShowModal() + # buttonbox = self.CreateStdDialogButtonSizer(wx.YES_NO) + + self.yesbtn = wx.Button(self, -1, self.utility.lang.get('yes')) + self.Bind(wx.EVT_BUTTON, self.onYES, self.yesbtn) + self.nobtn = wx.Button(self, -1, self.utility.lang.get('no')) + self.Bind(wx.EVT_BUTTON, self.onNO, self.nobtn) + buttonbox = wx.BoxSizer( wx.HORIZONTAL ) + buttonbox.Add(self.yesbtn, 0, wx.ALL, 5) + buttonbox.Add(self.nobtn, 0, wx.ALL, 5) + + self.Bind(wx.EVT_CLOSE, self.onCloseWindow) + mainbox.Add(peerbox, 1, wx.EXPAND, 1) + mainbox.Add(buttonbox, 0, wx.ALIGN_BOTTOM|wx.EXPAND, 1) + self.SetSizerAndFit(mainbox) + + + def onYES(self, event = None): + self.EndModal(wx.ID_YES) + + def onNO(self, event = None): + self.EndModal(wx.ID_NO) + + def onCloseWindow(self, event = None): + self.EndModal(wx.ID_CANCEL) diff --git a/tribler-mod/Tribler/Main/vwxGUI/GridState.py b/tribler-mod/Tribler/Main/vwxGUI/GridState.py new file mode 100644 index 0000000..b8cd5d7 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/GridState.py @@ -0,0 +1,32 @@ +from time import localtime, strftime +# Written by Jelle Roozenburg, Maarten ten Brinke +# see LICENSE.txt for license information + +class GridState(object): + def __init__(self, db, category, sort, reverse = False, library = False): + self.db = db # Constant from simpledefs, f.i. 
NTFY_TORRENTS + self.category = category + self.sort = sort + self.reverse = reverse + self.library = library + def __str__(self): + return '(db: %s, cat: %s, sort: %s, rev: %s, lib: %s)' % (self.db,self.category,self.sort,self.reverse,self.library) + + def copy(self): + return GridState(self.db, self.category, self.sort, self.reverse, self.library) + + def setDefault(self, gs): + if gs: + if self.db is None: + self.db = gs.db + if self.category is None: + self.category = gs.category + if self.sort is None: + self.sort = gs.sort + if self.reverse is None: + self.reverse = gs.reverse + + def isValid(self): + return (self.db is not None and + self.sort is not None and + self.category is not None) \ No newline at end of file diff --git a/tribler-mod/Tribler/Main/vwxGUI/GridState.py.bak b/tribler-mod/Tribler/Main/vwxGUI/GridState.py.bak new file mode 100644 index 0000000..d7ef4a4 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/GridState.py.bak @@ -0,0 +1,31 @@ +# Written by Jelle Roozenburg, Maarten ten Brinke +# see LICENSE.txt for license information + +class GridState(object): + def __init__(self, db, category, sort, reverse = False, library = False): + self.db = db # Constant from simpledefs, f.i. NTFY_TORRENTS + self.category = category + self.sort = sort + self.reverse = reverse + self.library = library + def __str__(self): + return '(db: %s, cat: %s, sort: %s, rev: %s, lib: %s)' % (self.db,self.category,self.sort,self.reverse,self.library) + + def copy(self): + return GridState(self.db, self.category, self.sort, self.reverse, self.library) + + def setDefault(self, gs): + if gs: + if self.db is None: + self.db = gs.db + if self.category is None: + self.category = gs.category + if self.sort is None: + self.sort = gs.sort + if self.reverse is None: + self.reverse = gs.reverse + + def isValid(self): + return (self.db is not None and + self.sort is not None and + self.category is not None) \ No newline at end of file diff --git a/tribler-mod/Tribler/Main/vwxGUI/GuiUtility.py b/tribler-mod/Tribler/Main/vwxGUI/GuiUtility.py new file mode 100644 index 0000000..558eead --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/GuiUtility.py @@ -0,0 +1,1307 @@ +from time import localtime, strftime +# Written by Jelle Roozenburg, Maarten ten Brinke, Arno Bakker, Lucian Musat +# see LICENSE.txt for license information + +import wx, os +from wx import xrc +from traceback import print_exc +from threading import Event, Thread +import urllib +import webbrowser +from webbrowser import open_new + +from Tribler.Core.simpledefs import * +from Tribler.Core.Utilities.utilities import * + +from Tribler.TrackerChecking.TorrentChecking import TorrentChecking +#from Tribler.Subscriptions.rss_client import TorrentFeedThread +from Tribler.Category.Category import Category +from Tribler.Main.Dialogs.makefriends import MakeFriendsDialog, InviteFriendsDialog +from Tribler.Main.vwxGUI.bgPanel import * +from Tribler.Main.vwxGUI.GridState import GridState +from Tribler.Main.vwxGUI.SearchGridManager import TorrentSearchGridManager,PeerSearchGridManager +from Tribler.Main.Utility.constants import * + +from Tribler.Core.CacheDB.sqlitecachedb import bin2str + +from Tribler.Video.VideoPlayer import VideoPlayer + + + +DEBUG = False + +# fonts +if sys.platform == 'darwin': # mac os x + FONT_SIZE_SR_MSG=11 + FONT_SIZE_TOTAL_DOWN=9 + FONT_SIZE_TOTAL_UP=9 + FONT_SIZE_RESULTS=10 + FONT_SIZE_SETTINGS=10 + FONT_SIZE_MY_FILES=10 + FONT_SIZE_FAMILY_FILTER=10 + FONT_SIZE_FILES_FRIENDS=11 + FONT_SIZE_SHARING_REPUTATION=11 + 
FONT_SIZE_SEARCH_RESULTS=12 + FONT_SIZE_SEARCH=14 + +else: + + FONT_SIZE_SR_MSG=8 + FONT_SIZE_TOTAL_DOWN=7 + FONT_SIZE_TOTAL_UP=7 + FONT_SIZE_RESULTS=8 + FONT_SIZE_SETTINGS=8 + FONT_SIZE_MY_FILES=8 + FONT_SIZE_FAMILY_FILTER=8 + FONT_SIZE_FILES_FRIENDS=8 + FONT_SIZE_SHARING_REPUTATION=8 + FONT_SIZE_SEARCH_RESULTS=8 + FONT_SIZE_SEARCH=10 + + +class GUIUtility: + __single = None + + def __init__(self, utility = None, params = None): + if GUIUtility.__single: + raise RuntimeError, "GUIUtility is singleton" + GUIUtility.__single = self + # do other init + self.xrcResource = None + self.utility = utility + self.vwxGUI_path = os.path.join(self.utility.getPath(), 'Tribler', 'Main', 'vwxGUI') + self.utility.guiUtility = self + self.params = params + self.frame = None + self.selectedMainButton = None + self.standardOverview = None + self.reachable = False + self.DELETE_TORRENT_ASK = True + self.DELETE_TORRENT_ASK_OLD = True + self.DELETE_TORRENT_PREF = 1 # 1 : from Library + # 2 : from Library and Harddisk + + + # Moderation cast + self.moderatedinfohash = None + self.modcast_db = None + self.fakeButton = None + self.realButton = None + + # videoplayer + self.videoplayer = VideoPlayer.getInstance() + + # current GUI page + self.guiPage = None + + # standardGrid + self.standardGrid = None + + + # port number + self.port_number = None + + + + # firewall + self.firewall_restart = False # ie Tribler needs to restart for the port number to be updated + + + + # Arno: 2008-04-16: I want to keep this for searching, as an extension + # of the standardGrid.GridManager + self.torrentsearch_manager = TorrentSearchGridManager.getInstance(self) + self.peersearch_manager = PeerSearchGridManager.getInstance(self) + + self.guiOpen = Event() + + + self.gridViewMode = 'thumbnails' + self.thumbnailViewer = None +# self.standardOverview = standardOverview() + + self.selectedColour = wx.Colour(216,233,240) ## 155,200,187 + self.unselectedColour = wx.Colour(255,255,255) ## 102,102,102 + self.unselectedColour2 = wx.Colour(255,255,255) ## 230,230,230 + self.unselectedColourDownload = wx.Colour(198,226,147) + self.unselectedColour2Download = wx.Colour(190,209,139) + self.selectedColourDownload = wx.Colour(145,173,78) + self.selectedColourPending = wx.Colour(216,233,240) ## 208,251,244 + self.triblerRed = wx.Colour(255, 51, 0) + self.bgColour = wx.Colour(102,102,102) + self.darkTextColour = wx.Colour(51,51,51) + + self.max_remote_queries = 10 # max number of remote peers to query + self.remote_search_threshold = 20 # start remote search when results is less than this number + + + def getInstance(*args, **kw): + if GUIUtility.__single is None: + GUIUtility(*args, **kw) + return GUIUtility.__single + getInstance = staticmethod(getInstance) + + def open_dbs(self): + self.modcast_db = self.utility.session.open_dbhandler(NTFY_MODERATIONCAST) + + def buttonClicked(self, event): + "One of the buttons in the GUI has been clicked" + self.frame.SetFocus() + + event.Skip(True) #should let other handlers use this event!!!!!!! 
+ + name = "" + obj = event.GetEventObject() + + print 'tb > name of object that is clicked = %s' % obj.GetName() + + try: + name = obj.GetName() + except: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'GUIUtil: Error: Could not get name of buttonObject: %s' % obj + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'GUIUtil: Button clicked %s' % name + #print_stack() + + + if name == 'moreFileInfo': + self.standardFileDetailsOverview() + elif name == 'moreFileInfoPlaylist': + self.standardFileDetailsOverview() +# self.standardPlaylistOverview() + elif name == 'more info >': + self.standardPersonDetailsOverview() + elif name == 'backButton': + self.standardStartpage() + + elif name == 'All popular files': + self.standardFilesOverview() ## + + elif name == 'viewThumbs' or name == 'viewList': +# print 'currentpanel = %s' % self.standardOverview.currentPanel.GetName() +# self.viewThumbs = xrc.XRCCTRL(self.frame, "viewThumbs") +# self.viewList = xrc.XRCCTRL(self.frame, "viewList") + + grid = self.standardOverview.data[self.standardOverview.mode].get('grid') + if name == 'viewThumbs': + self.viewThumbs.setSelected(True) + self.viewList.setSelected(False) + grid.onViewModeChange(mode='thumbnails') + self.gridViewMode = 'thumbnails' + elif name == 'viewList': + self.viewThumbs.setSelected(False) + self.viewList.setSelected(True) + grid.onViewModeChange(mode='list') + self.gridViewMode = 'list' + + elif name.lower().find('detailstab') > -1: + self.detailsTabClicked(name) + elif name == 'refresh': + self.refreshTracker() + elif name == "addAsFriend" or name == 'deleteFriend': + self.standardDetails.addAsFriend() + + elif name in ('download', 'download1'): + self.standardDetails.download() + elif name == 'addFriend': + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","GUIUtil: buttonClicked: parent is",obj.GetParent().GetName() + dialog = MakeFriendsDialog(obj,self.utility) + ret = dialog.ShowModal() + dialog.Destroy() + elif name == 'inviteFriends': + self.emailFriend(event) + + #else: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","GUIUtil: buttonClicked: dlbooster: Torrent is None" + + elif name == 'browse': + self.standardOverview.currentPanel.sendClick(event) + + elif (name == 'edit' or name == "top10Sharers" or name.startswith('bgPanel')) and obj.GetParent().GetName() == "profileOverview": + self.standardOverview.currentPanel.sendClick(event) + self.detailsTabClicked(name) #a panel was clicked in the profile overview and this is the most elegant so far method of informing the others + elif name == "takeMeThere0" : #a button to go to preferences was clicked + panel_name = self.standardDetails.currentPanel.GetName() + if panel_name == "profileDetails_Files": + #self.utility.actions[ACTION_PREFERENCES].action() + self.utility.actions[ACTION_PREFERENCES].action(openname=self.utility.lang.get('triblersetting')) + self.selectData(self.standardDetails.getData()) + if panel_name == "profileDetails_Download": + #self.utility.actions[ACTION_PREFERENCES].action(openname=self.utility.lang.get('triblersetting')) + self.utility.actions[ACTION_PREFERENCES].action(openname=self.utility.lang.get('videosetting')) + self.selectData(self.standardDetails.getData()) + elif panel_name == "profileDetails_Presence": + self.emailFriend(event) + #self.mainButtonClicked( 'mainButtonPersons', self.frame.mainButtonPersons) + #generate event to change page -> this should be done as a parameter to action because is modal + #event 
= wx.TreeEvent(wx.EVT_TREE_ITEM_ACTIVATED) + #wx.PostEvent() + elif name == "takeMeThere1": #switch to another view + panel_name = self.standardDetails.currentPanel.GetName() + if panel_name == "profileDetails_Download": + self.emailFriend(event) + #self.mainButtonClicked( 'mainButtonPersons', self.frame.mainButtonPersons) + if panel_name == "profileDetails_Presence": + URL = 'http://www.tribler.org/' + webbrowser.open(URL) + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'GUIUtil: A button was clicked, but no action is defined for: %s' % name + + elif name == "takeMeThere2": #switch to another view + panel_name = self.standardDetails.currentPanel.GetName() + if panel_name == "profileDetails_Download": + URL = 'http://www.tribler.org/' + webbrowser.open(URL) + elif name == 'subscribe': + self.subscribe() + elif name == 'firewallStatus': + self.firewallStatusClick() + elif name == 'options': + self.standardDetails.rightMouseButton(event) + elif name == 'viewModus': + self.onChangeViewModus() + elif name == 'searchClear': + # this has to be a callafter to avoid segmentation fault + # otherwise the panel with the event generating button is destroyed + # in the execution of the event. + self.clearSearch() + + wx.CallAfter(self.standardOverview.toggleSearchDetailsPanel, False) + elif name == 'familyfilter': + catobj = Category.getInstance() + ff_enabled = not catobj.family_filter_enabled() + print 'Setting family filter to: %s' % ff_enabled + ccatobj.set_family_filter(ff_enabled) + self.familyButton.setToggled() +# obj.setToggled(ff_enabled) + for filtername in ['filesFilter', 'libraryFilter']: + filterCombo = xrc.XRCCTRL(self.frame, filtername) + if filterCombo: + filterCombo.refresh() + + elif name == 'familyFilterOn' or name == 'familyFilterOff': ## not used anymore + if ((self.familyButtonOn.isToggled() and name == 'familyFilterOff') or + (self.familyButtonOff.isToggled() and name == 'familyFilterOn')): + + catobj = Category.getInstance() + ff_enabled = not catobj.family_filter_enabled() + print 'Setting family filter to: %s' % ff_enabled + catobj.set_family_filter(ff_enabled) + self.familyButtonOn.setToggled() + self.familyButtonOff.setToggled() +# obj.setToggled(ff_enabled) + for filtername in ['filesFilter', 'libraryFilter']: + filterCombo = xrc.XRCCTRL(self.frame, filtername) + if filterCombo: + filterCombo.refresh() + + elif name == 'playAdd' or name == 'play' or name == 'playAdd1' or name == 'play1': + playableFiles = self.standardOverview.data['fileDetailsMode']['panel'].selectedFiles[:] + + if name == 'play' or name == 'play1': + self.standardDetails.addToPlaylist(name = '', add=False) + + for p in playableFiles: + if p != '': + self.standardDetails.addToPlaylist(name = p.GetLabel(), add=True) + + elif name == 'advancedFiltering': + if self.filterStandard.visible: + self.filterStandard.Hide() + self.filterStandard.visible = False + self.standardOverview.GetParent().Layout() + # self.frame.Refresh() + else: + self.filterStandard.Show() + self.filterStandard.visible = True + self.standardOverview.GetParent().Layout() + # self.frame.Refresh() + + elif name == 'fake': + self.realButton.setState(False) # disabled real button + moderation = self.modcast_db.getModeration(bin2str(self.moderatedinfohash)) + # ARNO50: Please turn DB records into dicts. Not doing this makes + # the whole code DB schema dependent! 
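+ # 'moderation' is a raw DB row (see the schema-dependence note above); index 0 is presumably the moderator identifier that blockModerator()/forwardModerator() expect.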
+ self.modcast_db.blockModerator(moderation[0]) + + elif name == 'real': + self.fakeButton.setState(False) # disable fake button + moderation = self.modcast_db.getModeration(bin2str(self.moderatedinfohash)) + self.modcast_db.forwardModerator(moderation[0]) + + + + elif name == 'remove': + + ##if self.DELETE_TORRENT_ASK: + ## xrcResource = os.path.join(self.vwxGUI_path, 'deleteTorrent.xrc') + ## res = xrc.XmlResource(xrcResource) + ## self.dialogFrame = res.LoadFrame(None, "torrentDialog") + + #self.dialogFrame.SetFocus() + ## self.dialogFrame.Centre() + ## self.dialogFrame.Show(True) + + ## self.dialogFrame.Library = xrc.XRCCTRL(self.dialogFrame,c "Library") + ## self.dialogFrame.LibraryHardDisk = xrc.XRCCTRL(self.dialogFrame, "LibraryHardDisk") + ## self.dialogFrame.Cancel = xrc.XRCCTRL(self.dialogFrame, "Cancel") + ## self.dialogFrame.checkbox = xrc.XRCCTRL(self.dialogFrame, "checkBox") + + + ## self.dialogFrame.Library.Bind(wx.EVT_BUTTON, self.LibraryClicked) + ## self.dialogFrame.LibraryHardDisk.Bind(wx.EVT_BUTTON, self.HardDiskClicked) + ## self.dialogFrame.Cancel.Bind(wx.EVT_BUTTON, self.CancelClicked) + ## self.dialogFrame.checkbox.Bind(wx.EVT_CHECKBOX, self.checkboxClicked) + + + + ##elif self.DELETE_TORRENT_PREF == 1: + ## self.onDeleteTorrentFromLibrary() + ##else: + ## self.onDeleteTorrentFromDisk() + self.onDeleteTorrentFromDisk() # default behaviour for preview 1 + + + + + + ##elif name == 'settings': + ## self.settingsOverview() + + + ##elif name == 'my_files': + ## self.standardLibraryOverview() + + elif name == 'edit': + self.standardOverview.currentPanel.sendClick(event) + self.detailsTabClicked(name) + + + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'GUIUtil: A button was clicked, but no action is defined for: %s' % name + + +# def mainButtonClicked(self, name, button): +# "One of the mainbuttons in the top has been clicked" +# +# if not button.isSelected(): +# if self.selectedMainButton: +# self.selectedMainButton.setSelected(False) +# button.setSelected(True) +# self.selectedMainButton = button +# +# if name == 'mainButtonStartpage': +# self.standardStartpage() +# if name == 'mainButtonStats': +# self.standardStats() +# elif name == 'mainButtonFiles': +# self.standardFilesOverview() +# elif name == 'mainButtonPersons': +# self.standardPersonsOverview() +# elif name == 'mainButtonProfile': +# self.standardProfileOverview() +# elif name == 'mainButtonLibrary': +# self.standardLibraryOverview() +# elif name == 'mainButtonFriends': +# self.standardFriendsOverview() +# elif name == 'mainButtonRss': +# self.standardSubscriptionsOverview() +# elif name == 'mainButtonFileDetails': +# self.standardFileDetailsOverview() +## print 'tb debug> guiUtility button press ready' +# elif name == 'mainButtonPersonDetails': +# self.standardPersonDetailsOverview() +# elif name == 'mainButtonMessages': +# self.standardMessagesOverview() +# elif DEBUG: +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","GUIUtil: MainButtonClicked: unhandled name",name + + + def LibraryClicked(self, event): + self.DELETE_TORRENT_ASK_OLD = self.DELETE_TORRENT_ASK + self.DELETE_TORRENT_PREF = 1 + self.dialogFrame.Close() + self.standardOverview.Refresh() + wx.CallAfter(self.onDeleteTorrentFromLibrary) + + def HardDiskClicked(self, event): + self.DELETE_TORRENT_ASK_OLD = self.DELETE_TORRENT_ASK + self.DELETE_TORRENT_PREF = 2 + self.dialogFrame.Close() + self.standardOverview.Refresh() + wx.CallAfter(self.onDeleteTorrentFromDisk) + + + def CancelClicked(self, 
event): + self.DELETE_TORRENT_ASK = self.DELETE_TORRENT_ASK_OLD + self.dialogFrame.Close() + self.standardOverview.Refresh() + + def checkboxClicked(self, event): + self.DELETE_TORRENT_ASK = not self.DELETE_TORRENT_ASK + + + def set_port_number(self, port_number): + self.port_number = port_number + + def get_port_number(self): + return self.port_number + + + + def OnResultsClicked(self): + if self.guiPage == None: + self.guiPage = 'search_results' + if self.guiPage != 'search_results': + self.guiPage = 'search_results' + if self.frame.top_bg.ag.IsPlaying(): + self.frame.top_bg.ag.Show() + + #self.standardGrid.deselectAll() + #self.standardGrid.clearAllData() + + self.standardFilesOverview() + if sys.platform != 'darwin': + self.frame.videoframe.show_videoframe() + self.frame.videoparentpanel.Show() + + if self.frame.videoframe.videopanel.vlcwin.is_animation_running(): + self.frame.videoframe.videopanel.vlcwin.show_loading() + + + #self.frame.top_bg.search_results.SetColour(wx.BLACK) + + wx.CallAfter(self.frame.top_bg.settings.SetForegroundColour,(255,51,0)) + wx.CallAfter(self.frame.top_bg.my_files.SetForegroundColour,(255,51,0)) + self.frame.top_bg.settings.SetFont(wx.Font(FONT_SIZE_SETTINGS, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + self.frame.top_bg.my_files.SetFont(wx.Font(FONT_SIZE_MY_FILES, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + + if sys.platform == 'win32': + self.frame.top_bg.Refresh() + + self.frame.pagerPanel.Show() + + + def toggleFamilyFilter(self, state = None): + catobj = Category.getInstance() + ff_enabled = not catobj.family_filter_enabled() + print 'Setting family filter to: %s' % ff_enabled + if state is not None: + ff_enabled = state + catobj.set_family_filter(ff_enabled) + + if sys.platform == 'win32': + self.frame.top_bg.familyfilter.setToggled(ff_enabled) + else: + if ff_enabled: + self.frame.top_bg.familyfilter.SetLabel('Family Filter:ON') + else: + self.frame.top_bg.familyfilter.SetLabel('Family Filter:OFF') + #obj.setToggled(ff_enabled) + for filtername in ['filesFilter', 'libraryFilter']: + filterCombo = xrc.XRCCTRL(self.frame, filtername) + if filterCombo: + filterCombo.refresh() + + + + + def standardStartpage(self, filters = ['','']): + ##self.frame.pageTitle.SetLabel('START PAGE') + filesDetailsList = [] + self.standardOverview.setMode('startpageMode') + ##self.frame.files_friends.Hide() + ##self.frame.ag.Hide() + ##self.frame.go.Hide() + ##self.frame.search.Hide() + ##self.standardOverview.searchCentre.SetFocus() + ##self.standardOverview.searchCentre.Bind(wx.EVT_KEY_DOWN, self.OnSearchKeyDow) + ##self.standardOverview.Refresh() + +# self.standardOverview.filterChanged(filters) +# self.standardDetails.setMode('fileDetails') + + def standardStats(self, filters = ['','']): + self.frame.pageTitle.SetLabel('STATS') +# filesDetailsList = [] + self.standardOverview.setMode('statsMode') + + def standardFilesOverview(self, filters = ['','']): + if self.guiPage != 'search_results': + self.guiPage = 'search_results' + if self.frame.top_bg.ag.IsPlaying(): + self.frame.top_bg.ag.Show() + + if sys.platform != 'darwin': + self.frame.videoframe.show_videoframe() + self.frame.videoparentpanel.Show() + + if self.frame.videoframe.videopanel.vlcwin.is_animation_running(): + self.frame.videoframe.videopanel.vlcwin.show_loading() + + + #self.frame.top_bg.search_results.SetColour(wx.BLACK) + + self.frame.top_bg.results.SetForegroundColour((0,105,156)) + self.frame.top_bg.settings.SetForegroundColour((255,51,0)) + 
self.frame.top_bg.my_files.SetForegroundColour((255,51,0)) + self.frame.top_bg.results.SetFont(wx.Font(FONT_SIZE_RESULTS+1, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + self.frame.top_bg.settings.SetFont(wx.Font(FONT_SIZE_SETTINGS, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + self.frame.top_bg.my_files.SetFont(wx.Font(FONT_SIZE_MY_FILES, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + + self.frame.top_bg.search_results.Show() + + if sys.platform == 'win32': + self.frame.top_bg.Refresh() + + self.frame.pagerPanel.Show() + + self.standardOverview.setMode('filesMode') + gridState = GridState('filesMode', 'all', 'rameezmetric') + + self.standardOverview.filterChanged(gridState) + try: + if self.standardDetails: + self.standardDetails.setMode('filesMode', None) + except: + pass + + + + def settingsOverview(self): + if self.guiPage != 'settings': + self.guiPage = 'settings' + if sys.platform == 'darwin': + self.frame.top_bg.ag.Stop() # only calling Hide() on mac isnt sufficient + self.frame.top_bg.ag.Hide() + if sys.platform == 'win32': + self.frame.top_bg.Layout() + + self.frame.top_bg.results.SetForegroundColour((255,51,0)) + self.frame.top_bg.settings.SetForegroundColour((0,105,156)) + self.frame.top_bg.my_files.SetForegroundColour((255,51,0)) + self.frame.top_bg.results.SetFont(wx.Font(FONT_SIZE_RESULTS, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + self.frame.top_bg.settings.SetFont(wx.Font(FONT_SIZE_SETTINGS+1, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + self.frame.top_bg.my_files.SetFont(wx.Font(FONT_SIZE_MY_FILES, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + + + self.frame.videoframe.hide_videoframe() + self.frame.videoparentpanel.Hide() + + if sys.platform == 'darwin': + self.frame.videoframe.videopanel.vlcwin.stop_animation() + + self.frame.pagerPanel.Hide() + if self.frame.top_bg.search_results.GetLabel() != '': + self.frame.top_bg.search_results.Hide() + ## self.frame.top_bg.search_results.SetLabel('Return to Results') + ## self.frame.top_bg.search_results.SetForegroundColour(wx.RED) + self.frame.Layout() + self.standardOverview.setMode('settingsMode') + #if self.standardOverview.firewallStatus.initDone == True: + # self.standardOverview.firewallStatus.setToggled(True) + + + + def standardPersonsOverview(self): + self.frame.pageTitle.SetLabel('TRIBLER') + self.standardOverview.setMode('personsMode') + if not self.standardOverview.getSorting(): + gridState = GridState('personsMode', 'all', 'last_connected', reverse=False) + self.standardOverview.filterChanged(gridState) + self.standardDetails.setMode('personsMode') + #self.standardOverview.clearSearch() + #self.standardOverview.toggleSearchDetailsPanel(False) + + def standardFriendsOverview(self): + self.frame.pageTitle.SetLabel('ALL FRIENDS') + self.standardOverview.setMode('friendsMode') + if not self.standardOverview.getSorting(): + gridState = GridState('friendsMode', 'all', 'name', reverse=True) + self.standardOverview.filterChanged(gridState) + self.standardDetails.setMode('friendsMode') + #self.standardOverview.clearSearch() + #self.standardOverview.toggleSearchDetailsPanel(False) + + def standardProfileOverview(self): + self.frame.pageTitle.SetLabel('PROFILE') + profileList = [] + panel = self.standardOverview.data['profileMode'].get('panel',None) + if panel is not None: + panel.seldomReloadData() + self.standardOverview.setMode('profileMode') + self.standardDetails.seldomReloadData() + self.standardDetails.setMode('profileMode') + + + def standardLibraryOverview(self, filters = None, refresh=False): + + setmode = 
refresh + if self.guiPage != 'my_files': + self.guiPage = 'my_files' + if sys.platform == 'darwin': + self.frame.top_bg.ag.Stop() + self.frame.top_bg.ag.Hide() + self.frame.top_bg.results.SetForegroundColour((255,51,0)) + self.frame.top_bg.settings.SetForegroundColour((255,51,0)) + self.frame.top_bg.my_files.SetForegroundColour((0,105,156)) + self.frame.top_bg.results.SetFont(wx.Font(FONT_SIZE_RESULTS, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + self.frame.top_bg.settings.SetFont(wx.Font(FONT_SIZE_SETTINGS, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + self.frame.top_bg.my_files.SetFont(wx.Font(FONT_SIZE_MY_FILES+1, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + + + #if self.standardGrid: + # self.standardGrid.deselectAll() + # self.standardGrid.clearAllData() + + + if sys.platform != 'darwin': + self.frame.videoframe.show_videoframe() + self.frame.videoparentpanel.Show() + + + if self.frame.top_bg.search_results.GetLabel() != '': + self.frame.top_bg.search_results.Hide() + ## self.frame.top_bg.search_results.SetLabel('Return to Results') + ## self.frame.top_bg.search_results.SetForegroundColour(wx.RED) + self.frame.top_bg.Layout() + + + self.frame.pagerPanel.Show() + + + setmode = True + + if setmode: + #self.frame.pageTitle.SetLabel('DOWNLOADS') + self.standardOverview.setMode('libraryMode',refreshGrid=refresh) + #gridState = self.standardOverview.getFilter().getState() + #if not gridState or not gridState.isValid(): + gridState = GridState('libraryMode', 'all', 'name') + self.standardOverview.filterChanged(gridState) + + if sys.platform != 'darwin': + wx.CallAfter(self.frame.videoframe.show_videoframe) + + self.standardDetails.setMode('libraryMode') + + wx.CallAfter(self.frame.standardPager.Show,self.standardOverview.getGrid().getGridManager().get_total_items()>0) + + + def standardSubscriptionsOverview(self): + self.frame.pageTitle.SetLabel('SUBSCRIPTIONS') + self.standardOverview.setMode('subscriptionsMode') + gridState = GridState('subscriptionMode', 'all', 'name') + self.standardOverview.filterChanged(gridState) + self.standardDetails.setMode('subscriptionsMode') + + def standardFileDetailsOverview(self, filters = ['','']): + filesDetailsList = [] + self.standardOverview.setMode('fileDetailsMode') +# print 'tb > self.standardOverview.GetSize() 1= %s ' % self.standardOverview.GetSize() +# print 'tb > self.frame = %s ' % self.frame.GetSize() + + frameSize = self.frame.GetSize() +# self.standardOverview.SetMinSize((1000,2000)) +# self.scrollWindow.FitInside() +# print 'tb > self.standardOverview.GetSize() 2= %s ' % self.standardOverview.GetSize() +# self.scrollWindow.SetScrollbars(1,1,1024,2000) + +## self.scrollWindow.SetScrollbars(1,1,frameSize[0],frameSize[1]) +# self.standardOverview.SetSize((-1, 2000)) +# print 'tb > self.standardOverview.GetSize() = %s' % self.standardOverview.GetSize() +# self.standardOverview.filterChanged(filters) +# self.standardDetails.setMode('fileDetails') + def standardPlaylistOverview(self, filters = ['','']): + filesDetailsList = [] + self.standardOverview.setMode('playlistMode') + + + def standardPersonDetailsOverview(self, filters = ['','']): + filesDetailsList = [] + self.standardOverview.setMode('personDetailsMode') + + def standardMessagesOverview(self): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'GUIUtil: standardMessagesOverview: Not yet implemented;' + + + def initStandardOverview(self, standardOverview): + "Called by standardOverview when ready with init" + self.standardOverview = standardOverview +# 
self.standardFilesOverview(filters = ['all', 'seeder']) + + + self.standardStartpage() + self.standardOverview.Show(True) + wx.CallAfter(self.refreshOnResize) + + # Preselect mainButtonFiles +# filesButton = xrc.XRCCTRL(self.frame, 'Start page') +# filesButton.setSelected(True) +# self.selectedMainButton = filesButton + + # init thumb / list view + self.gridViewMode = 'list' + + #self.filterStandard.Hide() ## hide the standardOverview at startup + + # Init family filter + ##self.familyButton = xrc.XRCCTRL(self.frame, 'familyfilter') + + # Family filter initialized from configuration file + catobj = Category.getInstance() + print >> sys.stderr , "FAMILY FILTER :" , self.utility.config.Read('family_filter', "boolean") + #catobj.set_family_filter(self.utility.config.Read('family_filter', "boolean")) + + + def initFilterStandard(self, filterStandard): + self.filterStandard = filterStandard + self.advancedFiltering = xrc.XRCCTRL(self.frame, "advancedFiltering") + + + def getOverviewElement(self): + """should get the last selected item for the current standard overview, or + the first one if none was previously selected""" + firstItem = self.standardOverview.getFirstItem() + return firstItem + + def initStandardDetails(self, standardDetails): + "Called by standardDetails when ready with init" + self.standardDetails = standardDetails + firstItem = self.standardOverview.getFirstItem() + self.standardDetails.setMode('filesMode', firstItem) +# self.standardDetails.Hide() + # init player here? + self.standardDetails.refreshStatusPanel(True) + self.guiOpen.set() + + def deleteSubscription(self,subscrip): + self.standardOverview.loadSubscriptionData() + self.standardOverview.refreshData() + + def addTorrentAsHelper(self): + if self.standardOverview.mode == 'libraryMode': + self.standardOverview.filterChanged(None) + #self.standardOverview.refreshData() + + def selectData(self, data): + "User clicked on item. Has to be selected in detailPanel" + self.standardDetails.setData(data) + self.standardOverview.updateSelection() + + def selectTorrent(self, torrent): + "User clicked on torrent. Has to be selected in detailPanel" + self.standardDetails.setData(torrent) + self.standardOverview.updateSelection() + + def selectPeer(self, peer_data): + "User clicked on peer. Has to be selected in detailPanel" + self.standardDetails.setData(peer_data) + self.standardOverview.updateSelection() + + def selectSubscription(self, sub_data): + "User clicked on subscription. 
Has to be selected in detailPanel" + self.standardDetails.setData(sub_data) + self.standardOverview.updateSelection() + + def detailsTabClicked(self, name): + "A tab in the detailsPanel was clicked" + self.standardDetails.tabClicked(name) + + def refreshOnResize(self): +# print 'tb > REFRESH ON RESIZE' +# print self.standardOverview.GetContainingSizer().GetItem(0) + +# self.standardOverview.GetContainingSizer().GetItem(self.standardOverview).SetProportion(1) +# self.standardOverview.SetProportion(1) + try: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'GuiUtility: explicit refresh' + self.mainSizer.FitInside(self.frame) + self.standardDetails.Refresh() + self.frame.topBackgroundRight.Refresh() + self.frame.topBackgroundRight.GetSizer().Layout() + self.frame.topBackgroundRight.GetContainingSizer().Layout() + self.updateSizeOfStandardOverview() + self.standardDetails.Layout() + self.standardDetail.GetContainingSizer.Layout() + self.standardOverview.Refresh() + + except: + pass # When resize is done before panels are loaded: no refresh + + def updateSizeOfStandardOverview(self): + print 'tb > SetProportion' + self.standardOverview.SetProportion(1) + + + if self.standardOverview.gridIsAutoResizing(): + #print 'size1: %d, size2: %d' % (self.frame.GetClientSize()[1], self.frame.window.GetClientSize()[1]) + margin = 10 + newSize = (-1, #self.scrollWindow.GetClientSize()[1] - + self.frame.GetClientSize()[1] - + 100 - # height of top bar + self.standardOverview.getPager().GetSize()[1] - + margin) + else: + newSize = self.standardOverview.GetSize() + + #print 'ClientSize: %s, virtual : %s' % (str(self.scrollWindow.GetClientSize()), str(self.scrollWindow.GetVirtualSize())) + #print 'Position: %s' % str(self.standardOverview.GetPosition()) + self.standardOverview.SetSize(newSize) + self.standardOverview.SetMinSize(newSize) + self.standardOverview.SetMaxSize(newSize) + #print 'Overview is now: %s' % str(self.standardOverview.GetSize()) + self.standardOverview.GetContainingSizer().Layout() + + def refreshTracker(self): + torrent = self.standardDetails.getData() + if not torrent: + return + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'GUIUtility: refresh ' + repr(torrent.get('content_name', 'no_name')) + if torrent: + check = TorrentChecking(torrent['infohash']) + check.start() + + + def refreshTorrentStats(self,dslist): + """ Called from ABCApp by MainThread to refresh statistics of downloading torrents""" + pass + ##try: + ## if self.guiOpen.isSet(): + ## self.standardDetails.refreshTorrentStats(dslist) + ##except: + ## print_exc() + + def refreshUploadStats(self, dslist): + pass + ##try: + ## if self.guiOpen.isSet(): + ## self.standardDetails.refreshUploadStats(dslist) + ##except: + ## print_exc() + + def emailFriend(self, event): + ip = self.utility.config.Read('bind') + if ip is None or ip == '': + ip = self.utility.session.get_external_ip() + mypermid = self.utility.session.get_permid() + + permid_txt = self.utility.lang.get('permid')+": "+show_permid(mypermid) + ip_txt = self.utility.lang.get('ipaddress')+": "+ip + + port = self.utility.session.get_listen_port() + port_txt = self.utility.lang.get('portnumber')+" "+str(port) + + subject = self.utility.lang.get('invitation_subject') + invitation_body = self.utility.lang.get('invitation_body') + invitation_body = invitation_body.replace('\\n', '\n') + invitation_body += ip_txt + '\n\r' + invitation_body += port_txt + '\n\r' + invitation_body += permid_txt + '\n\r\n\r\n\r' + + if 
sys.platform == "darwin": + body = invitation_body.replace('\\r','') + body = body.replace('\r','') + else: + body = urllib.quote(invitation_body) + mailToURL = 'mailto:%s?subject=%s&body=%s'%('', subject, body) + try: + webbrowser.open(mailToURL) + except: + text = invitation_body.split("\n") + InviteFriendsDialog(text) + + def get_nat_type(self, callback=None): + return self.utility.session.get_nat_type(callback=callback) + + def dosearch(self): + sf = self.frame.top_bg.searchField + #sf = self.standardOverview.getSearchField() + if sf is None: + return + input = sf.GetValue().strip() + if input == '': + return + + ##sizer = self.frame.search.GetContainingSizer() + ##self.frame.go.setToggled(True) + ##self.frame.go.SetMinSize((61,24)) + ##sizer.Layout() + ##self.frame.top_bg.Refresh() + ##self.frame.top_bg.Update() + + #self.standardOverview.toggleSearchDetailsPanel(True) + if self.standardOverview.mode in ["filesMode" ]: + self.searchFiles(self.standardOverview.mode, input) + elif self.standardOverview.mode in ["personsMode", 'friendsMode']: + self.searchPersons(self.standardOverview.mode, input) + + + + + def searchFiles(self, mode, input): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","GUIUtil: searchFiles:",input + low = input.lower() + wantkeywords = [i for i in low.split(' ') if i] + self.torrentsearch_manager.setSearchKeywords(wantkeywords, mode) + self.torrentsearch_manager.set_gridmgr(self.standardOverview.getGrid().getGridManager()) + #print "******** gui uti searchFiles", wantkeywords + gridstate = GridState(self.standardOverview.mode, 'all', 'rameezmetric') + self.standardOverview.filterChanged(gridstate) + + # + # Query the peers we are connected to + # + q = 'SIMPLE ' + for kw in wantkeywords: + q += kw+' ' + + self.utility.session.query_connected_peers(q,self.sesscb_got_remote_hits,self.max_remote_queries) + self.standardOverview.setSearchFeedback('remote', False, 0, wantkeywords,self.frame.top_bg.search_results) + + # + # Query YouTube, etc. + # + #web2on = self.utility.config.Read('enableweb2search',"boolean") + #if mode == 'filesMode' and web2on: + # self.torrentsearch_manager.searchWeb2(60) # 3 pages, TODO: calc from grid size + + def complete(self, term): + """autocompletes term.""" + completion = self.utility.session.open_dbhandler(NTFY_TERM).getTermsStartingWith(term, num=1) + if completion: + return completion[0][len(term):] + # boudewijn: may only return unicode compatible strings. While + # "" is unicode compatible it is better to return u"" to + # indicate that it must be unicode compatible. 
+ return u"" + + def sesscb_got_remote_hits(self,permid,query,hits): + # Called by SessionCallback thread + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","GUIUtil: sesscb_got_remote_hits",len(hits) + + kwstr = query[len('SIMPLE '):] + kws = kwstr.split() + wx.CallAfter(self.torrentsearch_manager.gotRemoteHits,permid,kws,hits,self.standardOverview.getMode()) + + def stopSearch(self): + self.frame.go.setToggled(False) + self.frame.top_bg.createBackgroundImage() + self.frame.top_bg.Refresh() + self.frame.top_bg.Update() + self.frame.search.SetFocus() + mode = self.standardOverview.getMode() + if mode == 'filesMode' or mode == 'libraryMode': + self.torrentsearch_manager.stopSearch() + if mode == 'personsMode' or mode == 'friendsMode': + self.peersearch_manager.stopSearch() + + def clearSearch(self): + mode = self.standardOverview.getMode() + self.standardOverview.data[mode]['search'].Clear() + if mode == 'filesMode' or mode == 'libraryMode': + self.torrentsearch_manager.setSearchKeywords([],mode) + gridState = self.standardOverview.getFilter().getState() + if not gridState or not gridState.isValid(): + gridState = GridState(mode, 'all', 'num_seeders') + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'GUIUtil: clearSearch, back to: %s' % gridState + self.standardOverview.filterChanged(gridState) + if mode == 'personsMode' or mode == 'friendsMode': + self.peersearch_manager.setSearchKeywords([],mode) + gridState = GridState(mode, 'all', 'last_connected', reverse=False) + self.standardOverview.filterChanged(gridState) + + def searchPersons(self, mode, input): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","GUIUtil: searchPersons:",input + low = input.lower().strip() + wantkeywords = low.split(' ') + + self.peersearch_manager.setSearchKeywords(wantkeywords, mode) + self.peersearch_manager.set_gridmgr(self.standardOverview.getGrid().getGridManager()) + #print "******** gui uti searchFiles", wantkeywords + gridstate = GridState(self.standardOverview.mode, 'all', 'last_connected') + self.standardOverview.filterChanged(gridstate) + + + def OnSearchKeyDown(self,event): + + keycode = event.GetKeyCode() + #if event.CmdDown(): + #print "OnSearchKeyDown: keycode",keycode + if keycode == wx.WXK_RETURN: + self.frame.Hide() + self.standardFilesOverview() + self.dosearch() + else: + event.Skip() + + def OnSubscribeKeyDown(self,event): + keycode = event.GetKeyCode() + if keycode == wx.WXK_RETURN: + self.subscribe() + event.Skip() + + def OnSubscribeMouseAction(self,event): + obj = event.GetEventObject() + + # TODO: smarter behavior + obj.SetSelection(-1,-1) + event.Skip() + + + """ + def subscribe(self): + rssurlctrl = self.standardOverview.getRSSUrlCtrl() + url = rssurlctrl.GetValue() + if not url: + return + if not "://" in url: + url = "http://" + url + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","GUIUtil: subscribe:",url + try: + stream = urllib2.urlopen(url) + stream.close() + except Exception,e: + dlg = wx.MessageDialog(self.standardOverview, "Could not resolve URL:\n\n"+str(e), 'Tribler Warning',wx.OK | wx.ICON_WARNING) + result = dlg.ShowModal() + dlg.Destroy() + return + + torrentfeed = TorrentFeedThread.getInstance() + torrentfeed.addURL(url) + self.standardOverview.loadSubscriptionData() + self.standardOverview.refreshData() + """ + + def set_firewall_restart(self,b): + self.firewall_restart = b + + + def firewallStatusClick(self,event=None): + title = 
self.utility.lang.get('tribler_information')
+        if self.firewall_restart:
+            type = wx.ICON_WARNING
+            msg = self.utility.lang.get('restart_tooltip')
+        elif self.isReachable():
+            type = wx.ICON_INFORMATION
+            msg = self.utility.lang.get('reachable_tooltip')
+        else:
+            type = wx.ICON_INFORMATION
+            msg = self.utility.lang.get('connecting_tooltip')
+
+        dlg = wx.MessageDialog(None, msg, title, wx.OK|type)
+        result = dlg.ShowModal()
+        dlg.Destroy()
+
+    def OnSearchMouseAction(self,event):
+        sf = self.standardOverview.getSearchField()
+        if sf is None:
+            return
+
+        eventType = event.GetEventType()
+        #print 'event: %s, double: %s, leftup: %s' % (eventType, wx.EVT_LEFT_DCLICK, wx.EVT_LEFT_UP)
+        #print 'value: "%s", 1: "%s", 2: "%s"' % (sf.GetValue(), self.utility.lang.get('filesdefaultsearchweb2txt'),self.utility.lang.get('filesdefaultsearchtxt'))
+        if event.LeftDClick() or \
+            ( event.LeftUp() and sf.GetValue() in [self.utility.lang.get('filesdefaultsearchweb2txt'),self.utility.lang.get('filesdefaultsearchtxt')]):
+            ##print 'select'
+            sf.SetSelection(-1,-1)
+
+        if not event.LeftDClick():
+            event.Skip()
+
+    def getSearchField(self,mode=None):
+        return self.standardOverview.getSearchField(mode=mode)
+
+    def isReachable(self):
+        return self.utility.session.get_externally_reachable()
+
+
+    def onChangeViewModus(self):
+        # clicked on changemodus button in title bar of overviewPanel
+        changeViewModus = wx.Menu()
+        self.utility.makePopup(changeViewModus, None, 'rChangeViewModusThumb', type="checkitem", status="active")
+        self.utility.makePopup(changeViewModus, None, 'rChangeViewModusList', type="checkitem")
+        return (changeViewModus)
+
+
+
+    def OnRightMouseAction(self,event):
+        # called from "*ItemPanel" or from "standardDetails"
+        item = self.standardDetails.getData()
+        if not item:
+            if DEBUG:
+                print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'GUIUtil: Used right mouse menu, but no item in DetailWindow'
+            return
+
+        rightMouse = wx.Menu()
+
+
+
+        if self.standardOverview.mode == "filesMode" and not item.get('myDownloadHistory', False):
+            self.utility.makePopup(rightMouse, None, 'rOptions')
+            if item.get('web2'):
+                self.utility.makePopup(rightMouse, self.onDownloadOpen, 'rPlay')
+            else:
+                #self.utility.makePopup(rightMouse, self.onRecommend, 'rRecommend')
+                #if secret:
+                self.utility.makePopup(rightMouse, self.onDownloadOpen, 'rDownloadOpenly')
+                #else:
+                #self.utility.makePopup(rightMouse, self.onDownloadSecret, 'rDownloadSecretly')
+
+        # if in library:
+        elif self.standardOverview.mode == "libraryMode" or item.get('myDownloadHistory'):
+            #self.utility.makePopup(rightMouse, self.onRecommend, 'rRecommend')
+            #rightMouse.AppendSeparator()
+            self.utility.makePopup(rightMouse, None, 'rLibraryOptions')
+            self.utility.makePopup(rightMouse, self.onOpenFileDest, 'rOpenfilename')
+            self.utility.makePopup(rightMouse, self.onOpenDest, 'rOpenfiledestination')
+            self.utility.makePopup(rightMouse, self.onDeleteTorrentFromLibrary, 'rRemoveFromList')
+            self.utility.makePopup(rightMouse, self.onDeleteTorrentFromDisk, 'rRemoveFromListAndHD')
+            #rightMouse.AppendSeparator()
+            #self.utility.makePopup(rightMouse, self.onAdvancedInfoInLibrary, 'rAdvancedInfo')
+        elif self.standardOverview.mode == "personsMode" or self.standardOverview.mode == "friendsMode":
+            self.utility.makePopup(rightMouse, None, 'rOptions')
+            fs = item.get('friend')
+            if fs == FS_MUTUAL or fs == FS_I_INVITED:
+                self.utility.makePopup(rightMouse, self.onChangeFriendStatus, 'rRemoveAsFriend')
+                self.utility.makePopup(rightMouse,
self.onChangeFriendInfo, 'rChangeInfo') + else: + self.utility.makePopup(rightMouse, self.onChangeFriendStatus, 'rAddAsFriend') + + # if in friends: +## if self.standardOverview.mode == "friendsMode": +## rightMouse.AppendSeparator() +## self.utility.makePopup(rightMouse, None, 'rFriendsOptions') +## self.utility.makePopup(rightMouse, None, 'rSendAMessage') + elif self.standardOverview.mode == "subscriptionsMode": + event.Skip() +## self.utility.makePopup(rightMouse, None, 'rOptions') +## self.utility.makePopup(rightMouse, None, 'rChangeSubscrTitle') +## self.utility.makePopup(rightMouse, None, 'rRemoveSubscr') + + + + return (rightMouse) + #self.PopupMenu(rightMouse, (-1,-1)) + +# ================== actions for rightMouse button ========================================== + def onOpenFileDest(self, event = None): + # open File + self.onOpenDest(event, openFile=True) + + def onOpenDest(self, event = None, openFile=False): + # open Destination + item = self.standardDetails.getData() + state = item.get('ds') + + if state: + dest = state.get_download().get_dest_dir() + if openFile: + destfiles = state.get_download().get_dest_files() + if len(destfiles) == 1: + dest = destfiles[0][1] + if sys.platform == 'darwin': + dest = 'file://%s' % dest + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","GUIUtil: onOpenDest",dest + complete = True + # check if destination exists + assert dest is not None and os.access(dest, os.R_OK), 'Could not retrieve destination' + try: + t = Thread(target = open_new, args=(str(dest),)) + t.setName( "FilesOpenNew"+t.getName() ) + t.setDaemon(True) + t.start() + except: + print_exc() + + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'GUIUtil: onOpenFileDest failed: no torrent selected' + + def onDeleteTorrentFromDisk(self, event = None): + item = self.standardDetails.getData() + + if item.get('ds'): + self.utility.session.remove_download(item['ds'].get_download(),removecontent = True) + + self.standardOverview.removeTorrentFromLibrary(item) + + + def onDeleteTorrentFromLibrary(self, event = None): + item = self.standardDetails.getData() + + if item.get('ds'): + self.utility.session.remove_download(item['ds'].get_download(),removecontent = False) + + self.standardOverview.removeTorrentFromLibrary(item) + + + def onAdvancedInfoInLibrary(self, event = None): + # open torrent details frame + item = self.standardDetails.getData() + abctorrent = item.get('abctorrent') + if abctorrent: + abctorrent.dialogs.advancedDetails(item) + + event.Skip() + + def onModerate(self, event = None): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'GUIUtil: ---tb--- Moderate event' + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",event + # todo + event.Skip() + + def onRecommend(self, event = None): + # todo + event.Skip() + + def onDownloadOpen(self, event = None): + self.standardDetails.download() + event.Skip() + + def onDownloadSecret(self, event = None): + self.standardDetails.download(secret=True) + event.Skip() + + def onChangeFriendStatus(self, event = None): + self.standardDetails.addAsFriend() + event.Skip() + + def onChangeFriendInfo(self, event = None): + item = self.standardDetails.getData() + dialog = MakeFriendsDialog(self.frame,self.utility, item) + ret = dialog.ShowModal() + dialog.Destroy() + event.Skip() + + + def getGuiElement(self, name): + if not self.elements.has_key(name) or not self.elements[name]: + return None + return self.elements[name] + + + + +# =========END 
========= actions for rightMouse button ========================================== + + def superRefresh(self, sizer): + print 'supersizer to the rescue' + for item in sizer.GetChildren(): + if item.IsSizer(): + self.superRefresh(item.GetSizer()) + item.GetSizer().Layout() + elif item.IsWindow(): + item.GetWindow().Refresh() diff --git a/tribler-mod/Tribler/Main/vwxGUI/GuiUtility.py.bak b/tribler-mod/Tribler/Main/vwxGUI/GuiUtility.py.bak new file mode 100644 index 0000000..43e5675 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/GuiUtility.py.bak @@ -0,0 +1,1306 @@ +# Written by Jelle Roozenburg, Maarten ten Brinke, Arno Bakker, Lucian Musat +# see LICENSE.txt for license information + +import wx, os +from wx import xrc +from traceback import print_exc +from threading import Event, Thread +import urllib +import webbrowser +from webbrowser import open_new + +from Tribler.Core.simpledefs import * +from Tribler.Core.Utilities.utilities import * + +from Tribler.TrackerChecking.TorrentChecking import TorrentChecking +#from Tribler.Subscriptions.rss_client import TorrentFeedThread +from Tribler.Category.Category import Category +from Tribler.Main.Dialogs.makefriends import MakeFriendsDialog, InviteFriendsDialog +from Tribler.Main.vwxGUI.bgPanel import * +from Tribler.Main.vwxGUI.GridState import GridState +from Tribler.Main.vwxGUI.SearchGridManager import TorrentSearchGridManager,PeerSearchGridManager +from Tribler.Main.Utility.constants import * + +from Tribler.Core.CacheDB.sqlitecachedb import bin2str + +from Tribler.Video.VideoPlayer import VideoPlayer + + + +DEBUG = False + +# fonts +if sys.platform == 'darwin': # mac os x + FONT_SIZE_SR_MSG=11 + FONT_SIZE_TOTAL_DOWN=9 + FONT_SIZE_TOTAL_UP=9 + FONT_SIZE_RESULTS=10 + FONT_SIZE_SETTINGS=10 + FONT_SIZE_MY_FILES=10 + FONT_SIZE_FAMILY_FILTER=10 + FONT_SIZE_FILES_FRIENDS=11 + FONT_SIZE_SHARING_REPUTATION=11 + FONT_SIZE_SEARCH_RESULTS=12 + FONT_SIZE_SEARCH=14 + +else: + + FONT_SIZE_SR_MSG=8 + FONT_SIZE_TOTAL_DOWN=7 + FONT_SIZE_TOTAL_UP=7 + FONT_SIZE_RESULTS=8 + FONT_SIZE_SETTINGS=8 + FONT_SIZE_MY_FILES=8 + FONT_SIZE_FAMILY_FILTER=8 + FONT_SIZE_FILES_FRIENDS=8 + FONT_SIZE_SHARING_REPUTATION=8 + FONT_SIZE_SEARCH_RESULTS=8 + FONT_SIZE_SEARCH=10 + + +class GUIUtility: + __single = None + + def __init__(self, utility = None, params = None): + if GUIUtility.__single: + raise RuntimeError, "GUIUtility is singleton" + GUIUtility.__single = self + # do other init + self.xrcResource = None + self.utility = utility + self.vwxGUI_path = os.path.join(self.utility.getPath(), 'Tribler', 'Main', 'vwxGUI') + self.utility.guiUtility = self + self.params = params + self.frame = None + self.selectedMainButton = None + self.standardOverview = None + self.reachable = False + self.DELETE_TORRENT_ASK = True + self.DELETE_TORRENT_ASK_OLD = True + self.DELETE_TORRENT_PREF = 1 # 1 : from Library + # 2 : from Library and Harddisk + + + # Moderation cast + self.moderatedinfohash = None + self.modcast_db = None + self.fakeButton = None + self.realButton = None + + # videoplayer + self.videoplayer = VideoPlayer.getInstance() + + # current GUI page + self.guiPage = None + + # standardGrid + self.standardGrid = None + + + # port number + self.port_number = None + + + + # firewall + self.firewall_restart = False # ie Tribler needs to restart for the port number to be updated + + + + # Arno: 2008-04-16: I want to keep this for searching, as an extension + # of the standardGrid.GridManager + self.torrentsearch_manager = TorrentSearchGridManager.getInstance(self) + 
self.peersearch_manager = PeerSearchGridManager.getInstance(self) + + self.guiOpen = Event() + + + self.gridViewMode = 'thumbnails' + self.thumbnailViewer = None +# self.standardOverview = standardOverview() + + self.selectedColour = wx.Colour(216,233,240) ## 155,200,187 + self.unselectedColour = wx.Colour(255,255,255) ## 102,102,102 + self.unselectedColour2 = wx.Colour(255,255,255) ## 230,230,230 + self.unselectedColourDownload = wx.Colour(198,226,147) + self.unselectedColour2Download = wx.Colour(190,209,139) + self.selectedColourDownload = wx.Colour(145,173,78) + self.selectedColourPending = wx.Colour(216,233,240) ## 208,251,244 + self.triblerRed = wx.Colour(255, 51, 0) + self.bgColour = wx.Colour(102,102,102) + self.darkTextColour = wx.Colour(51,51,51) + + self.max_remote_queries = 10 # max number of remote peers to query + self.remote_search_threshold = 20 # start remote search when results is less than this number + + + def getInstance(*args, **kw): + if GUIUtility.__single is None: + GUIUtility(*args, **kw) + return GUIUtility.__single + getInstance = staticmethod(getInstance) + + def open_dbs(self): + self.modcast_db = self.utility.session.open_dbhandler(NTFY_MODERATIONCAST) + + def buttonClicked(self, event): + "One of the buttons in the GUI has been clicked" + self.frame.SetFocus() + + event.Skip(True) #should let other handlers use this event!!!!!!! + + name = "" + obj = event.GetEventObject() + + print 'tb > name of object that is clicked = %s' % obj.GetName() + + try: + name = obj.GetName() + except: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'GUIUtil: Error: Could not get name of buttonObject: %s' % obj + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'GUIUtil: Button clicked %s' % name + #print_stack() + + + if name == 'moreFileInfo': + self.standardFileDetailsOverview() + elif name == 'moreFileInfoPlaylist': + self.standardFileDetailsOverview() +# self.standardPlaylistOverview() + elif name == 'more info >': + self.standardPersonDetailsOverview() + elif name == 'backButton': + self.standardStartpage() + + elif name == 'All popular files': + self.standardFilesOverview() ## + + elif name == 'viewThumbs' or name == 'viewList': +# print 'currentpanel = %s' % self.standardOverview.currentPanel.GetName() +# self.viewThumbs = xrc.XRCCTRL(self.frame, "viewThumbs") +# self.viewList = xrc.XRCCTRL(self.frame, "viewList") + + grid = self.standardOverview.data[self.standardOverview.mode].get('grid') + if name == 'viewThumbs': + self.viewThumbs.setSelected(True) + self.viewList.setSelected(False) + grid.onViewModeChange(mode='thumbnails') + self.gridViewMode = 'thumbnails' + elif name == 'viewList': + self.viewThumbs.setSelected(False) + self.viewList.setSelected(True) + grid.onViewModeChange(mode='list') + self.gridViewMode = 'list' + + elif name.lower().find('detailstab') > -1: + self.detailsTabClicked(name) + elif name == 'refresh': + self.refreshTracker() + elif name == "addAsFriend" or name == 'deleteFriend': + self.standardDetails.addAsFriend() + + elif name in ('download', 'download1'): + self.standardDetails.download() + elif name == 'addFriend': + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","GUIUtil: buttonClicked: parent is",obj.GetParent().GetName() + dialog = MakeFriendsDialog(obj,self.utility) + ret = dialog.ShowModal() + dialog.Destroy() + elif name == 'inviteFriends': + self.emailFriend(event) + + #else: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","GUIUtil: 
buttonClicked: dlbooster: Torrent is None" + + elif name == 'browse': + self.standardOverview.currentPanel.sendClick(event) + + elif (name == 'edit' or name == "top10Sharers" or name.startswith('bgPanel')) and obj.GetParent().GetName() == "profileOverview": + self.standardOverview.currentPanel.sendClick(event) + self.detailsTabClicked(name) #a panel was clicked in the profile overview and this is the most elegant so far method of informing the others + elif name == "takeMeThere0" : #a button to go to preferences was clicked + panel_name = self.standardDetails.currentPanel.GetName() + if panel_name == "profileDetails_Files": + #self.utility.actions[ACTION_PREFERENCES].action() + self.utility.actions[ACTION_PREFERENCES].action(openname=self.utility.lang.get('triblersetting')) + self.selectData(self.standardDetails.getData()) + if panel_name == "profileDetails_Download": + #self.utility.actions[ACTION_PREFERENCES].action(openname=self.utility.lang.get('triblersetting')) + self.utility.actions[ACTION_PREFERENCES].action(openname=self.utility.lang.get('videosetting')) + self.selectData(self.standardDetails.getData()) + elif panel_name == "profileDetails_Presence": + self.emailFriend(event) + #self.mainButtonClicked( 'mainButtonPersons', self.frame.mainButtonPersons) + #generate event to change page -> this should be done as a parameter to action because is modal + #event = wx.TreeEvent(wx.EVT_TREE_ITEM_ACTIVATED) + #wx.PostEvent() + elif name == "takeMeThere1": #switch to another view + panel_name = self.standardDetails.currentPanel.GetName() + if panel_name == "profileDetails_Download": + self.emailFriend(event) + #self.mainButtonClicked( 'mainButtonPersons', self.frame.mainButtonPersons) + if panel_name == "profileDetails_Presence": + URL = 'http://www.tribler.org/' + webbrowser.open(URL) + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'GUIUtil: A button was clicked, but no action is defined for: %s' % name + + elif name == "takeMeThere2": #switch to another view + panel_name = self.standardDetails.currentPanel.GetName() + if panel_name == "profileDetails_Download": + URL = 'http://www.tribler.org/' + webbrowser.open(URL) + elif name == 'subscribe': + self.subscribe() + elif name == 'firewallStatus': + self.firewallStatusClick() + elif name == 'options': + self.standardDetails.rightMouseButton(event) + elif name == 'viewModus': + self.onChangeViewModus() + elif name == 'searchClear': + # this has to be a callafter to avoid segmentation fault + # otherwise the panel with the event generating button is destroyed + # in the execution of the event. 
+            self.clearSearch()
+
+            wx.CallAfter(self.standardOverview.toggleSearchDetailsPanel, False)
+        elif name == 'familyfilter':
+            catobj = Category.getInstance()
+            ff_enabled = not catobj.family_filter_enabled()
+            print 'Setting family filter to: %s' % ff_enabled
+            catobj.set_family_filter(ff_enabled)
+            self.familyButton.setToggled()
+#            obj.setToggled(ff_enabled)
+            for filtername in ['filesFilter', 'libraryFilter']:
+                filterCombo = xrc.XRCCTRL(self.frame, filtername)
+                if filterCombo:
+                    filterCombo.refresh()
+
+        elif name == 'familyFilterOn' or name == 'familyFilterOff': ## not used anymore
+            if ((self.familyButtonOn.isToggled() and name == 'familyFilterOff') or
+                (self.familyButtonOff.isToggled() and name == 'familyFilterOn')):
+
+                catobj = Category.getInstance()
+                ff_enabled = not catobj.family_filter_enabled()
+                print 'Setting family filter to: %s' % ff_enabled
+                catobj.set_family_filter(ff_enabled)
+                self.familyButtonOn.setToggled()
+                self.familyButtonOff.setToggled()
+#                obj.setToggled(ff_enabled)
+                for filtername in ['filesFilter', 'libraryFilter']:
+                    filterCombo = xrc.XRCCTRL(self.frame, filtername)
+                    if filterCombo:
+                        filterCombo.refresh()
+
+        elif name == 'playAdd' or name == 'play' or name == 'playAdd1' or name == 'play1':
+            playableFiles = self.standardOverview.data['fileDetailsMode']['panel'].selectedFiles[:]
+
+            if name == 'play' or name == 'play1':
+                self.standardDetails.addToPlaylist(name = '', add=False)
+
+            for p in playableFiles:
+                if p != '':
+                    self.standardDetails.addToPlaylist(name = p.GetLabel(), add=True)
+
+        elif name == 'advancedFiltering':
+            if self.filterStandard.visible:
+                self.filterStandard.Hide()
+                self.filterStandard.visible = False
+                self.standardOverview.GetParent().Layout()
+                # self.frame.Refresh()
+            else:
+                self.filterStandard.Show()
+                self.filterStandard.visible = True
+                self.standardOverview.GetParent().Layout()
+                # self.frame.Refresh()
+
+        elif name == 'fake':
+            self.realButton.setState(False) # disabled real button
+            moderation = self.modcast_db.getModeration(bin2str(self.moderatedinfohash))
+            # ARNO50: Please turn DB records into dicts. Not doing this makes
+            # the whole code DB schema dependent!
+ self.modcast_db.blockModerator(moderation[0]) + + elif name == 'real': + self.fakeButton.setState(False) # disable fake button + moderation = self.modcast_db.getModeration(bin2str(self.moderatedinfohash)) + self.modcast_db.forwardModerator(moderation[0]) + + + + elif name == 'remove': + + ##if self.DELETE_TORRENT_ASK: + ## xrcResource = os.path.join(self.vwxGUI_path, 'deleteTorrent.xrc') + ## res = xrc.XmlResource(xrcResource) + ## self.dialogFrame = res.LoadFrame(None, "torrentDialog") + + #self.dialogFrame.SetFocus() + ## self.dialogFrame.Centre() + ## self.dialogFrame.Show(True) + + ## self.dialogFrame.Library = xrc.XRCCTRL(self.dialogFrame,c "Library") + ## self.dialogFrame.LibraryHardDisk = xrc.XRCCTRL(self.dialogFrame, "LibraryHardDisk") + ## self.dialogFrame.Cancel = xrc.XRCCTRL(self.dialogFrame, "Cancel") + ## self.dialogFrame.checkbox = xrc.XRCCTRL(self.dialogFrame, "checkBox") + + + ## self.dialogFrame.Library.Bind(wx.EVT_BUTTON, self.LibraryClicked) + ## self.dialogFrame.LibraryHardDisk.Bind(wx.EVT_BUTTON, self.HardDiskClicked) + ## self.dialogFrame.Cancel.Bind(wx.EVT_BUTTON, self.CancelClicked) + ## self.dialogFrame.checkbox.Bind(wx.EVT_CHECKBOX, self.checkboxClicked) + + + + ##elif self.DELETE_TORRENT_PREF == 1: + ## self.onDeleteTorrentFromLibrary() + ##else: + ## self.onDeleteTorrentFromDisk() + self.onDeleteTorrentFromDisk() # default behaviour for preview 1 + + + + + + ##elif name == 'settings': + ## self.settingsOverview() + + + ##elif name == 'my_files': + ## self.standardLibraryOverview() + + elif name == 'edit': + self.standardOverview.currentPanel.sendClick(event) + self.detailsTabClicked(name) + + + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'GUIUtil: A button was clicked, but no action is defined for: %s' % name + + +# def mainButtonClicked(self, name, button): +# "One of the mainbuttons in the top has been clicked" +# +# if not button.isSelected(): +# if self.selectedMainButton: +# self.selectedMainButton.setSelected(False) +# button.setSelected(True) +# self.selectedMainButton = button +# +# if name == 'mainButtonStartpage': +# self.standardStartpage() +# if name == 'mainButtonStats': +# self.standardStats() +# elif name == 'mainButtonFiles': +# self.standardFilesOverview() +# elif name == 'mainButtonPersons': +# self.standardPersonsOverview() +# elif name == 'mainButtonProfile': +# self.standardProfileOverview() +# elif name == 'mainButtonLibrary': +# self.standardLibraryOverview() +# elif name == 'mainButtonFriends': +# self.standardFriendsOverview() +# elif name == 'mainButtonRss': +# self.standardSubscriptionsOverview() +# elif name == 'mainButtonFileDetails': +# self.standardFileDetailsOverview() +## print 'tb debug> guiUtility button press ready' +# elif name == 'mainButtonPersonDetails': +# self.standardPersonDetailsOverview() +# elif name == 'mainButtonMessages': +# self.standardMessagesOverview() +# elif DEBUG: +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","GUIUtil: MainButtonClicked: unhandled name",name + + + def LibraryClicked(self, event): + self.DELETE_TORRENT_ASK_OLD = self.DELETE_TORRENT_ASK + self.DELETE_TORRENT_PREF = 1 + self.dialogFrame.Close() + self.standardOverview.Refresh() + wx.CallAfter(self.onDeleteTorrentFromLibrary) + + def HardDiskClicked(self, event): + self.DELETE_TORRENT_ASK_OLD = self.DELETE_TORRENT_ASK + self.DELETE_TORRENT_PREF = 2 + self.dialogFrame.Close() + self.standardOverview.Refresh() + wx.CallAfter(self.onDeleteTorrentFromDisk) + + + def CancelClicked(self, 
event): + self.DELETE_TORRENT_ASK = self.DELETE_TORRENT_ASK_OLD + self.dialogFrame.Close() + self.standardOverview.Refresh() + + def checkboxClicked(self, event): + self.DELETE_TORRENT_ASK = not self.DELETE_TORRENT_ASK + + + def set_port_number(self, port_number): + self.port_number = port_number + + def get_port_number(self): + return self.port_number + + + + def OnResultsClicked(self): + if self.guiPage == None: + self.guiPage = 'search_results' + if self.guiPage != 'search_results': + self.guiPage = 'search_results' + if self.frame.top_bg.ag.IsPlaying(): + self.frame.top_bg.ag.Show() + + #self.standardGrid.deselectAll() + #self.standardGrid.clearAllData() + + self.standardFilesOverview() + if sys.platform != 'darwin': + self.frame.videoframe.show_videoframe() + self.frame.videoparentpanel.Show() + + if self.frame.videoframe.videopanel.vlcwin.is_animation_running(): + self.frame.videoframe.videopanel.vlcwin.show_loading() + + + #self.frame.top_bg.search_results.SetColour(wx.BLACK) + + wx.CallAfter(self.frame.top_bg.settings.SetForegroundColour,(255,51,0)) + wx.CallAfter(self.frame.top_bg.my_files.SetForegroundColour,(255,51,0)) + self.frame.top_bg.settings.SetFont(wx.Font(FONT_SIZE_SETTINGS, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + self.frame.top_bg.my_files.SetFont(wx.Font(FONT_SIZE_MY_FILES, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + + if sys.platform == 'win32': + self.frame.top_bg.Refresh() + + self.frame.pagerPanel.Show() + + + def toggleFamilyFilter(self, state = None): + catobj = Category.getInstance() + ff_enabled = not catobj.family_filter_enabled() + print 'Setting family filter to: %s' % ff_enabled + if state is not None: + ff_enabled = state + catobj.set_family_filter(ff_enabled) + + if sys.platform == 'win32': + self.frame.top_bg.familyfilter.setToggled(ff_enabled) + else: + if ff_enabled: + self.frame.top_bg.familyfilter.SetLabel('Family Filter:ON') + else: + self.frame.top_bg.familyfilter.SetLabel('Family Filter:OFF') + #obj.setToggled(ff_enabled) + for filtername in ['filesFilter', 'libraryFilter']: + filterCombo = xrc.XRCCTRL(self.frame, filtername) + if filterCombo: + filterCombo.refresh() + + + + + def standardStartpage(self, filters = ['','']): + ##self.frame.pageTitle.SetLabel('START PAGE') + filesDetailsList = [] + self.standardOverview.setMode('startpageMode') + ##self.frame.files_friends.Hide() + ##self.frame.ag.Hide() + ##self.frame.go.Hide() + ##self.frame.search.Hide() + ##self.standardOverview.searchCentre.SetFocus() + ##self.standardOverview.searchCentre.Bind(wx.EVT_KEY_DOWN, self.OnSearchKeyDow) + ##self.standardOverview.Refresh() + +# self.standardOverview.filterChanged(filters) +# self.standardDetails.setMode('fileDetails') + + def standardStats(self, filters = ['','']): + self.frame.pageTitle.SetLabel('STATS') +# filesDetailsList = [] + self.standardOverview.setMode('statsMode') + + def standardFilesOverview(self, filters = ['','']): + if self.guiPage != 'search_results': + self.guiPage = 'search_results' + if self.frame.top_bg.ag.IsPlaying(): + self.frame.top_bg.ag.Show() + + if sys.platform != 'darwin': + self.frame.videoframe.show_videoframe() + self.frame.videoparentpanel.Show() + + if self.frame.videoframe.videopanel.vlcwin.is_animation_running(): + self.frame.videoframe.videopanel.vlcwin.show_loading() + + + #self.frame.top_bg.search_results.SetColour(wx.BLACK) + + self.frame.top_bg.results.SetForegroundColour((0,105,156)) + self.frame.top_bg.settings.SetForegroundColour((255,51,0)) + 
self.frame.top_bg.my_files.SetForegroundColour((255,51,0)) + self.frame.top_bg.results.SetFont(wx.Font(FONT_SIZE_RESULTS+1, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + self.frame.top_bg.settings.SetFont(wx.Font(FONT_SIZE_SETTINGS, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + self.frame.top_bg.my_files.SetFont(wx.Font(FONT_SIZE_MY_FILES, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + + self.frame.top_bg.search_results.Show() + + if sys.platform == 'win32': + self.frame.top_bg.Refresh() + + self.frame.pagerPanel.Show() + + self.standardOverview.setMode('filesMode') + gridState = GridState('filesMode', 'all', 'rameezmetric') + + self.standardOverview.filterChanged(gridState) + try: + if self.standardDetails: + self.standardDetails.setMode('filesMode', None) + except: + pass + + + + def settingsOverview(self): + if self.guiPage != 'settings': + self.guiPage = 'settings' + if sys.platform == 'darwin': + self.frame.top_bg.ag.Stop() # only calling Hide() on mac isnt sufficient + self.frame.top_bg.ag.Hide() + if sys.platform == 'win32': + self.frame.top_bg.Layout() + + self.frame.top_bg.results.SetForegroundColour((255,51,0)) + self.frame.top_bg.settings.SetForegroundColour((0,105,156)) + self.frame.top_bg.my_files.SetForegroundColour((255,51,0)) + self.frame.top_bg.results.SetFont(wx.Font(FONT_SIZE_RESULTS, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + self.frame.top_bg.settings.SetFont(wx.Font(FONT_SIZE_SETTINGS+1, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + self.frame.top_bg.my_files.SetFont(wx.Font(FONT_SIZE_MY_FILES, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + + + self.frame.videoframe.hide_videoframe() + self.frame.videoparentpanel.Hide() + + if sys.platform == 'darwin': + self.frame.videoframe.videopanel.vlcwin.stop_animation() + + self.frame.pagerPanel.Hide() + if self.frame.top_bg.search_results.GetLabel() != '': + self.frame.top_bg.search_results.Hide() + ## self.frame.top_bg.search_results.SetLabel('Return to Results') + ## self.frame.top_bg.search_results.SetForegroundColour(wx.RED) + self.frame.Layout() + self.standardOverview.setMode('settingsMode') + #if self.standardOverview.firewallStatus.initDone == True: + # self.standardOverview.firewallStatus.setToggled(True) + + + + def standardPersonsOverview(self): + self.frame.pageTitle.SetLabel('TRIBLER') + self.standardOverview.setMode('personsMode') + if not self.standardOverview.getSorting(): + gridState = GridState('personsMode', 'all', 'last_connected', reverse=False) + self.standardOverview.filterChanged(gridState) + self.standardDetails.setMode('personsMode') + #self.standardOverview.clearSearch() + #self.standardOverview.toggleSearchDetailsPanel(False) + + def standardFriendsOverview(self): + self.frame.pageTitle.SetLabel('ALL FRIENDS') + self.standardOverview.setMode('friendsMode') + if not self.standardOverview.getSorting(): + gridState = GridState('friendsMode', 'all', 'name', reverse=True) + self.standardOverview.filterChanged(gridState) + self.standardDetails.setMode('friendsMode') + #self.standardOverview.clearSearch() + #self.standardOverview.toggleSearchDetailsPanel(False) + + def standardProfileOverview(self): + self.frame.pageTitle.SetLabel('PROFILE') + profileList = [] + panel = self.standardOverview.data['profileMode'].get('panel',None) + if panel is not None: + panel.seldomReloadData() + self.standardOverview.setMode('profileMode') + self.standardDetails.seldomReloadData() + self.standardDetails.setMode('profileMode') + + + def standardLibraryOverview(self, filters = None, refresh=False): + + setmode = 
refresh + if self.guiPage != 'my_files': + self.guiPage = 'my_files' + if sys.platform == 'darwin': + self.frame.top_bg.ag.Stop() + self.frame.top_bg.ag.Hide() + self.frame.top_bg.results.SetForegroundColour((255,51,0)) + self.frame.top_bg.settings.SetForegroundColour((255,51,0)) + self.frame.top_bg.my_files.SetForegroundColour((0,105,156)) + self.frame.top_bg.results.SetFont(wx.Font(FONT_SIZE_RESULTS, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + self.frame.top_bg.settings.SetFont(wx.Font(FONT_SIZE_SETTINGS, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + self.frame.top_bg.my_files.SetFont(wx.Font(FONT_SIZE_MY_FILES+1, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + + + #if self.standardGrid: + # self.standardGrid.deselectAll() + # self.standardGrid.clearAllData() + + + if sys.platform != 'darwin': + self.frame.videoframe.show_videoframe() + self.frame.videoparentpanel.Show() + + + if self.frame.top_bg.search_results.GetLabel() != '': + self.frame.top_bg.search_results.Hide() + ## self.frame.top_bg.search_results.SetLabel('Return to Results') + ## self.frame.top_bg.search_results.SetForegroundColour(wx.RED) + self.frame.top_bg.Layout() + + + self.frame.pagerPanel.Show() + + + setmode = True + + if setmode: + #self.frame.pageTitle.SetLabel('DOWNLOADS') + self.standardOverview.setMode('libraryMode',refreshGrid=refresh) + #gridState = self.standardOverview.getFilter().getState() + #if not gridState or not gridState.isValid(): + gridState = GridState('libraryMode', 'all', 'name') + self.standardOverview.filterChanged(gridState) + + if sys.platform != 'darwin': + wx.CallAfter(self.frame.videoframe.show_videoframe) + + self.standardDetails.setMode('libraryMode') + + wx.CallAfter(self.frame.standardPager.Show,self.standardOverview.getGrid().getGridManager().get_total_items()>0) + + + def standardSubscriptionsOverview(self): + self.frame.pageTitle.SetLabel('SUBSCRIPTIONS') + self.standardOverview.setMode('subscriptionsMode') + gridState = GridState('subscriptionMode', 'all', 'name') + self.standardOverview.filterChanged(gridState) + self.standardDetails.setMode('subscriptionsMode') + + def standardFileDetailsOverview(self, filters = ['','']): + filesDetailsList = [] + self.standardOverview.setMode('fileDetailsMode') +# print 'tb > self.standardOverview.GetSize() 1= %s ' % self.standardOverview.GetSize() +# print 'tb > self.frame = %s ' % self.frame.GetSize() + + frameSize = self.frame.GetSize() +# self.standardOverview.SetMinSize((1000,2000)) +# self.scrollWindow.FitInside() +# print 'tb > self.standardOverview.GetSize() 2= %s ' % self.standardOverview.GetSize() +# self.scrollWindow.SetScrollbars(1,1,1024,2000) + +## self.scrollWindow.SetScrollbars(1,1,frameSize[0],frameSize[1]) +# self.standardOverview.SetSize((-1, 2000)) +# print 'tb > self.standardOverview.GetSize() = %s' % self.standardOverview.GetSize() +# self.standardOverview.filterChanged(filters) +# self.standardDetails.setMode('fileDetails') + def standardPlaylistOverview(self, filters = ['','']): + filesDetailsList = [] + self.standardOverview.setMode('playlistMode') + + + def standardPersonDetailsOverview(self, filters = ['','']): + filesDetailsList = [] + self.standardOverview.setMode('personDetailsMode') + + def standardMessagesOverview(self): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'GUIUtil: standardMessagesOverview: Not yet implemented;' + + + def initStandardOverview(self, standardOverview): + "Called by standardOverview when ready with init" + self.standardOverview = standardOverview +# 
self.standardFilesOverview(filters = ['all', 'seeder']) + + + self.standardStartpage() + self.standardOverview.Show(True) + wx.CallAfter(self.refreshOnResize) + + # Preselect mainButtonFiles +# filesButton = xrc.XRCCTRL(self.frame, 'Start page') +# filesButton.setSelected(True) +# self.selectedMainButton = filesButton + + # init thumb / list view + self.gridViewMode = 'list' + + #self.filterStandard.Hide() ## hide the standardOverview at startup + + # Init family filter + ##self.familyButton = xrc.XRCCTRL(self.frame, 'familyfilter') + + # Family filter initialized from configuration file + catobj = Category.getInstance() + print >> sys.stderr , "FAMILY FILTER :" , self.utility.config.Read('family_filter', "boolean") + #catobj.set_family_filter(self.utility.config.Read('family_filter', "boolean")) + + + def initFilterStandard(self, filterStandard): + self.filterStandard = filterStandard + self.advancedFiltering = xrc.XRCCTRL(self.frame, "advancedFiltering") + + + def getOverviewElement(self): + """should get the last selected item for the current standard overview, or + the first one if none was previously selected""" + firstItem = self.standardOverview.getFirstItem() + return firstItem + + def initStandardDetails(self, standardDetails): + "Called by standardDetails when ready with init" + self.standardDetails = standardDetails + firstItem = self.standardOverview.getFirstItem() + self.standardDetails.setMode('filesMode', firstItem) +# self.standardDetails.Hide() + # init player here? + self.standardDetails.refreshStatusPanel(True) + self.guiOpen.set() + + def deleteSubscription(self,subscrip): + self.standardOverview.loadSubscriptionData() + self.standardOverview.refreshData() + + def addTorrentAsHelper(self): + if self.standardOverview.mode == 'libraryMode': + self.standardOverview.filterChanged(None) + #self.standardOverview.refreshData() + + def selectData(self, data): + "User clicked on item. Has to be selected in detailPanel" + self.standardDetails.setData(data) + self.standardOverview.updateSelection() + + def selectTorrent(self, torrent): + "User clicked on torrent. Has to be selected in detailPanel" + self.standardDetails.setData(torrent) + self.standardOverview.updateSelection() + + def selectPeer(self, peer_data): + "User clicked on peer. Has to be selected in detailPanel" + self.standardDetails.setData(peer_data) + self.standardOverview.updateSelection() + + def selectSubscription(self, sub_data): + "User clicked on subscription. 
Has to be selected in detailPanel" + self.standardDetails.setData(sub_data) + self.standardOverview.updateSelection() + + def detailsTabClicked(self, name): + "A tab in the detailsPanel was clicked" + self.standardDetails.tabClicked(name) + + def refreshOnResize(self): +# print 'tb > REFRESH ON RESIZE' +# print self.standardOverview.GetContainingSizer().GetItem(0) + +# self.standardOverview.GetContainingSizer().GetItem(self.standardOverview).SetProportion(1) +# self.standardOverview.SetProportion(1) + try: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'GuiUtility: explicit refresh' + self.mainSizer.FitInside(self.frame) + self.standardDetails.Refresh() + self.frame.topBackgroundRight.Refresh() + self.frame.topBackgroundRight.GetSizer().Layout() + self.frame.topBackgroundRight.GetContainingSizer().Layout() + self.updateSizeOfStandardOverview() + self.standardDetails.Layout() + self.standardDetail.GetContainingSizer.Layout() + self.standardOverview.Refresh() + + except: + pass # When resize is done before panels are loaded: no refresh + + def updateSizeOfStandardOverview(self): + print 'tb > SetProportion' + self.standardOverview.SetProportion(1) + + + if self.standardOverview.gridIsAutoResizing(): + #print 'size1: %d, size2: %d' % (self.frame.GetClientSize()[1], self.frame.window.GetClientSize()[1]) + margin = 10 + newSize = (-1, #self.scrollWindow.GetClientSize()[1] - + self.frame.GetClientSize()[1] - + 100 - # height of top bar + self.standardOverview.getPager().GetSize()[1] - + margin) + else: + newSize = self.standardOverview.GetSize() + + #print 'ClientSize: %s, virtual : %s' % (str(self.scrollWindow.GetClientSize()), str(self.scrollWindow.GetVirtualSize())) + #print 'Position: %s' % str(self.standardOverview.GetPosition()) + self.standardOverview.SetSize(newSize) + self.standardOverview.SetMinSize(newSize) + self.standardOverview.SetMaxSize(newSize) + #print 'Overview is now: %s' % str(self.standardOverview.GetSize()) + self.standardOverview.GetContainingSizer().Layout() + + def refreshTracker(self): + torrent = self.standardDetails.getData() + if not torrent: + return + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'GUIUtility: refresh ' + repr(torrent.get('content_name', 'no_name')) + if torrent: + check = TorrentChecking(torrent['infohash']) + check.start() + + + def refreshTorrentStats(self,dslist): + """ Called from ABCApp by MainThread to refresh statistics of downloading torrents""" + pass + ##try: + ## if self.guiOpen.isSet(): + ## self.standardDetails.refreshTorrentStats(dslist) + ##except: + ## print_exc() + + def refreshUploadStats(self, dslist): + pass + ##try: + ## if self.guiOpen.isSet(): + ## self.standardDetails.refreshUploadStats(dslist) + ##except: + ## print_exc() + + def emailFriend(self, event): + ip = self.utility.config.Read('bind') + if ip is None or ip == '': + ip = self.utility.session.get_external_ip() + mypermid = self.utility.session.get_permid() + + permid_txt = self.utility.lang.get('permid')+": "+show_permid(mypermid) + ip_txt = self.utility.lang.get('ipaddress')+": "+ip + + port = self.utility.session.get_listen_port() + port_txt = self.utility.lang.get('portnumber')+" "+str(port) + + subject = self.utility.lang.get('invitation_subject') + invitation_body = self.utility.lang.get('invitation_body') + invitation_body = invitation_body.replace('\\n', '\n') + invitation_body += ip_txt + '\n\r' + invitation_body += port_txt + '\n\r' + invitation_body += permid_txt + '\n\r\n\r\n\r' + + if 
sys.platform == "darwin": + body = invitation_body.replace('\\r','') + body = body.replace('\r','') + else: + body = urllib.quote(invitation_body) + mailToURL = 'mailto:%s?subject=%s&body=%s'%('', subject, body) + try: + webbrowser.open(mailToURL) + except: + text = invitation_body.split("\n") + InviteFriendsDialog(text) + + def get_nat_type(self, callback=None): + return self.utility.session.get_nat_type(callback=callback) + + def dosearch(self): + sf = self.frame.top_bg.searchField + #sf = self.standardOverview.getSearchField() + if sf is None: + return + input = sf.GetValue().strip() + if input == '': + return + + ##sizer = self.frame.search.GetContainingSizer() + ##self.frame.go.setToggled(True) + ##self.frame.go.SetMinSize((61,24)) + ##sizer.Layout() + ##self.frame.top_bg.Refresh() + ##self.frame.top_bg.Update() + + #self.standardOverview.toggleSearchDetailsPanel(True) + if self.standardOverview.mode in ["filesMode" ]: + self.searchFiles(self.standardOverview.mode, input) + elif self.standardOverview.mode in ["personsMode", 'friendsMode']: + self.searchPersons(self.standardOverview.mode, input) + + + + + def searchFiles(self, mode, input): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","GUIUtil: searchFiles:",input + low = input.lower() + wantkeywords = [i for i in low.split(' ') if i] + self.torrentsearch_manager.setSearchKeywords(wantkeywords, mode) + self.torrentsearch_manager.set_gridmgr(self.standardOverview.getGrid().getGridManager()) + #print "******** gui uti searchFiles", wantkeywords + gridstate = GridState(self.standardOverview.mode, 'all', 'rameezmetric') + self.standardOverview.filterChanged(gridstate) + + # + # Query the peers we are connected to + # + q = 'SIMPLE ' + for kw in wantkeywords: + q += kw+' ' + + self.utility.session.query_connected_peers(q,self.sesscb_got_remote_hits,self.max_remote_queries) + self.standardOverview.setSearchFeedback('remote', False, 0, wantkeywords,self.frame.top_bg.search_results) + + # + # Query YouTube, etc. + # + #web2on = self.utility.config.Read('enableweb2search',"boolean") + #if mode == 'filesMode' and web2on: + # self.torrentsearch_manager.searchWeb2(60) # 3 pages, TODO: calc from grid size + + def complete(self, term): + """autocompletes term.""" + completion = self.utility.session.open_dbhandler(NTFY_TERM).getTermsStartingWith(term, num=1) + if completion: + return completion[0][len(term):] + # boudewijn: may only return unicode compatible strings. While + # "" is unicode compatible it is better to return u"" to + # indicate that it must be unicode compatible. 
+ return u"" + + def sesscb_got_remote_hits(self,permid,query,hits): + # Called by SessionCallback thread + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","GUIUtil: sesscb_got_remote_hits",len(hits) + + kwstr = query[len('SIMPLE '):] + kws = kwstr.split() + wx.CallAfter(self.torrentsearch_manager.gotRemoteHits,permid,kws,hits,self.standardOverview.getMode()) + + def stopSearch(self): + self.frame.go.setToggled(False) + self.frame.top_bg.createBackgroundImage() + self.frame.top_bg.Refresh() + self.frame.top_bg.Update() + self.frame.search.SetFocus() + mode = self.standardOverview.getMode() + if mode == 'filesMode' or mode == 'libraryMode': + self.torrentsearch_manager.stopSearch() + if mode == 'personsMode' or mode == 'friendsMode': + self.peersearch_manager.stopSearch() + + def clearSearch(self): + mode = self.standardOverview.getMode() + self.standardOverview.data[mode]['search'].Clear() + if mode == 'filesMode' or mode == 'libraryMode': + self.torrentsearch_manager.setSearchKeywords([],mode) + gridState = self.standardOverview.getFilter().getState() + if not gridState or not gridState.isValid(): + gridState = GridState(mode, 'all', 'num_seeders') + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'GUIUtil: clearSearch, back to: %s' % gridState + self.standardOverview.filterChanged(gridState) + if mode == 'personsMode' or mode == 'friendsMode': + self.peersearch_manager.setSearchKeywords([],mode) + gridState = GridState(mode, 'all', 'last_connected', reverse=False) + self.standardOverview.filterChanged(gridState) + + def searchPersons(self, mode, input): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","GUIUtil: searchPersons:",input + low = input.lower().strip() + wantkeywords = low.split(' ') + + self.peersearch_manager.setSearchKeywords(wantkeywords, mode) + self.peersearch_manager.set_gridmgr(self.standardOverview.getGrid().getGridManager()) + #print "******** gui uti searchFiles", wantkeywords + gridstate = GridState(self.standardOverview.mode, 'all', 'last_connected') + self.standardOverview.filterChanged(gridstate) + + + def OnSearchKeyDown(self,event): + + keycode = event.GetKeyCode() + #if event.CmdDown(): + #print "OnSearchKeyDown: keycode",keycode + if keycode == wx.WXK_RETURN: + self.frame.Hide() + self.standardFilesOverview() + self.dosearch() + else: + event.Skip() + + def OnSubscribeKeyDown(self,event): + keycode = event.GetKeyCode() + if keycode == wx.WXK_RETURN: + self.subscribe() + event.Skip() + + def OnSubscribeMouseAction(self,event): + obj = event.GetEventObject() + + # TODO: smarter behavior + obj.SetSelection(-1,-1) + event.Skip() + + + """ + def subscribe(self): + rssurlctrl = self.standardOverview.getRSSUrlCtrl() + url = rssurlctrl.GetValue() + if not url: + return + if not "://" in url: + url = "http://" + url + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","GUIUtil: subscribe:",url + try: + stream = urllib2.urlopen(url) + stream.close() + except Exception,e: + dlg = wx.MessageDialog(self.standardOverview, "Could not resolve URL:\n\n"+str(e), 'Tribler Warning',wx.OK | wx.ICON_WARNING) + result = dlg.ShowModal() + dlg.Destroy() + return + + torrentfeed = TorrentFeedThread.getInstance() + torrentfeed.addURL(url) + self.standardOverview.loadSubscriptionData() + self.standardOverview.refreshData() + """ + + def set_firewall_restart(self,b): + self.firewall_restart = b + + + def firewallStatusClick(self,event=None): + title = 
self.utility.lang.get('tribler_information') + if self.firewall_restart: + type = wx.ICON_WARNING + msg = self.utility.lang.get('restart_tooltip') + elif self.isReachable(): + type = wx.ICON_INFORMATION + msg = self.utility.lang.get('reachable_tooltip') + else: + type = wx.ICON_INFORMATION + msg = self.utility.lang.get('connecting_tooltip') + + dlg = wx.MessageDialog(None, msg, title, wx.OK|type) + result = dlg.ShowModal() + dlg.Destroy() + + def OnSearchMouseAction(self,event): + sf = self.standardOverview.getSearchField() + if sf is None: + return + + eventType = event.GetEventType() + #print 'event: %s, double: %s, leftup: %s' % (eventType, wx.EVT_LEFT_DCLICK, wx.EVT_LEFT_UP) + #print 'value: "%s", 1: "%s", 2: "%s"' % (sf.GetValue(), self.utility.lang.get('filesdefaultsearchweb2txt'),self.utility.lang.get('filesdefaultsearchtxt')) + if event.LeftDClick() or \ + ( event.LeftUp() and sf.GetValue() in [self.utility.lang.get('filesdefaultsearchweb2txt'),self.utility.lang.get('filesdefaultsearchtxt')]): + ##print 'select' + sf.SetSelection(-1,-1) + + if not event.LeftDClick(): + event.Skip() + + def getSearchField(self,mode=None): + return self.standardOverview.getSearchField(mode=mode) + + def isReachable(self): + return self.utility.session.get_externally_reachable() + + + def onChangeViewModus(self): + # clicked on changemodus button in title bar of overviewPanel + changeViewModus = wx.Menu() + self.utility.makePopup(changeViewModus, None, 'rChangeViewModusThumb', type="checkitem", status="active") + self.utility.makePopup(changeViewModus, None, 'rChangeViewModusList', type="checkitem") + return (changeViewMouse) + + + + def OnRightMouseAction(self,event): + # called from "*ItemPanel" or from "standardDetails" + item = self.standardDetails.getData() + if not item: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'GUIUtil: Used right mouse menu, but no item in DetailWindow' + return + + rightMouse = wx.Menu() + + + + if self.standardOverview.mode == "filesMode" and not item.get('myDownloadHistory', False): + self.utility.makePopup(rightMouse, None, 'rOptions') + if item.get('web2'): + self.utility.makePopup(rightMouse, self.onDownloadOpen, 'rPlay') + else: + #self.utility.makePopup(rightMouse, self.onRecommend, 'rRecommend') + #if secret: + self.utility.makePopup(rightMouse, self.onDownloadOpen, 'rDownloadOpenly') + #else: + #self.utility.makePopup(rightMouse, self.onDownloadSecret, 'rDownloadSecretly') + + # if in library: + elif self.standardOverview.mode == "libraryMode" or item.get('myDownloadHistory'): + #self.utility.makePopup(rightMouse, self.onRecommend, 'rRecommend') + #rightMouse.AppendSeparator() + self.utility.makePopup(rightMouse, None, 'rLibraryOptions') + self.utility.makePopup(rightMouse, self.onOpenFileDest, 'rOpenfilename') + self.utility.makePopup(rightMouse, self.onOpenDest, 'rOpenfiledestination') + self.utility.makePopup(rightMouse, self.onDeleteTorrentFromLibrary, 'rRemoveFromList') + self.utility.makePopup(rightMouse, self.onDeleteTorrentFromDisk, 'rRemoveFromListAndHD') + #rightMouse.AppendSeparator() + #self.utility.makePopup(rightMouse, self.onAdvancedInfoInLibrary, 'rAdvancedInfo') + elif self.standardOverview.mode == "personsMode" or self.standardOverview.mode == "friendsMode": + self.utility.makePopup(rightMouse, None, 'rOptions') + fs = item.get('friend') + if fs == FS_MUTUAL or fs == FS_I_INVITED: + self.utility.makePopup(rightMouse, self.onChangeFriendStatus, 'rRemoveAsFriend') + self.utility.makePopup(rightMouse, 
self.onChangeFriendInfo, 'rChangeInfo') + else: + self.utility.makePopup(rightMouse, self.onChangeFriendStatus, 'rAddAsFriend') + + # if in friends: +## if self.standardOverview.mode == "friendsMode": +## rightMouse.AppendSeparator() +## self.utility.makePopup(rightMouse, None, 'rFriendsOptions') +## self.utility.makePopup(rightMouse, None, 'rSendAMessage') + elif self.standardOverview.mode == "subscriptionsMode": + event.Skip() +## self.utility.makePopup(rightMouse, None, 'rOptions') +## self.utility.makePopup(rightMouse, None, 'rChangeSubscrTitle') +## self.utility.makePopup(rightMouse, None, 'rRemoveSubscr') + + + + return (rightMouse) + #self.PopupMenu(rightMouse, (-1,-1)) + +# ================== actions for rightMouse button ========================================== + def onOpenFileDest(self, event = None): + # open File + self.onOpenDest(event, openFile=True) + + def onOpenDest(self, event = None, openFile=False): + # open Destination + item = self.standardDetails.getData() + state = item.get('ds') + + if state: + dest = state.get_download().get_dest_dir() + if openFile: + destfiles = state.get_download().get_dest_files() + if len(destfiles) == 1: + dest = destfiles[0][1] + if sys.platform == 'darwin': + dest = 'file://%s' % dest + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","GUIUtil: onOpenDest",dest + complete = True + # check if destination exists + assert dest is not None and os.access(dest, os.R_OK), 'Could not retrieve destination' + try: + t = Thread(target = open_new, args=(str(dest),)) + t.setName( "FilesOpenNew"+t.getName() ) + t.setDaemon(True) + t.start() + except: + print_exc() + + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'GUIUtil: onOpenFileDest failed: no torrent selected' + + def onDeleteTorrentFromDisk(self, event = None): + item = self.standardDetails.getData() + + if item.get('ds'): + self.utility.session.remove_download(item['ds'].get_download(),removecontent = True) + + self.standardOverview.removeTorrentFromLibrary(item) + + + def onDeleteTorrentFromLibrary(self, event = None): + item = self.standardDetails.getData() + + if item.get('ds'): + self.utility.session.remove_download(item['ds'].get_download(),removecontent = False) + + self.standardOverview.removeTorrentFromLibrary(item) + + + def onAdvancedInfoInLibrary(self, event = None): + # open torrent details frame + item = self.standardDetails.getData() + abctorrent = item.get('abctorrent') + if abctorrent: + abctorrent.dialogs.advancedDetails(item) + + event.Skip() + + def onModerate(self, event = None): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'GUIUtil: ---tb--- Moderate event' + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",event + # todo + event.Skip() + + def onRecommend(self, event = None): + # todo + event.Skip() + + def onDownloadOpen(self, event = None): + self.standardDetails.download() + event.Skip() + + def onDownloadSecret(self, event = None): + self.standardDetails.download(secret=True) + event.Skip() + + def onChangeFriendStatus(self, event = None): + self.standardDetails.addAsFriend() + event.Skip() + + def onChangeFriendInfo(self, event = None): + item = self.standardDetails.getData() + dialog = MakeFriendsDialog(self.frame,self.utility, item) + ret = dialog.ShowModal() + dialog.Destroy() + event.Skip() + + + def getGuiElement(self, name): + if not self.elements.has_key(name) or not self.elements[name]: + return None + return self.elements[name] + + + + +# =========END 
========= actions for rightMouse button ========================================== + + def superRefresh(self, sizer): + print 'supersizer to the rescue' + for item in sizer.GetChildren(): + if item.IsSizer(): + self.superRefresh(item.GetSizer()) + item.GetSizer().Layout() + elif item.IsWindow(): + item.GetWindow().Refresh() diff --git a/tribler-mod/Tribler/Main/vwxGUI/IconsManager.py b/tribler-mod/Tribler/Main/vwxGUI/IconsManager.py new file mode 100644 index 0000000..0d12a4a --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/IconsManager.py @@ -0,0 +1,208 @@ +from time import localtime, strftime +# Written by Jelle Roozenburg, Maarten ten Brinke, Arno Bakker +# see LICENSE.txt for license information + +import wx, os +import cStringIO + +from Tribler.Core.API import * +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility + +ICON_MAX_DIM = 80 +SMALL_ICON_MAX_DIM = 32 + + +class IconsManager: + + __single = None + + def __init__(self): + + if IconsManager.__single: + raise RuntimeError, "IconsManager is singleton" + + self.guiUtility = GUIUtility.getInstance() + self.guiImagePath = os.path.join(self.guiUtility.utility.getPath(), 'Tribler', 'Main', 'vwxGUI', 'images') + self.defaults = {} + self.defaults['filesMode'] = {} + self.defaults['personsMode'] = {} + self.defaults['personsMode']['DEFAULT_THUMB'] = wx.Bitmap(os.path.join(self.guiImagePath, 'defaultThumbPeer.png')) + + + self.DOWNLOAD_BUTTON_DOWNLOAD = wx.Bitmap(os.path.join(self.guiImagePath, 'download.png')) + self.DOWNLOAD_BUTTON_DOWNLOAD_S = wx.Bitmap(os.path.join(self.guiImagePath, 'download_clicked.png')) + + self.categoryThumbs = {} + + # Added from mugshot manager to show items in left menu + ####################################################### + + + self.peer_db = self.guiUtility.utility.session.open_dbhandler(NTFY_PEERS) + + IconsManager.__single = self + + + + def getInstance(*args, **kw): + """ Returns the IconsManager singleton if it exists or otherwise + creates it first, in which case you need to pass the constructor + params. 
+ @return IconsManager.""" + if IconsManager.__single is None: + IconsManager(*args, **kw) + return IconsManager.__single + getInstance = staticmethod(getInstance) + + def get_default(self,mode,name): + return self.defaults[mode][name] + + def getCategoryIcon(self, mode, cat, thumbtype = 'normal', web2 = False): + #print "**** getCategoryIcon", mode, cat, thumbtype, web2 + + categoryConverter = {'picture':'other', + 'videoclips':'video', + 'document':'other'} + thumbType = {'normal':'defaultThumb_%s.png', + 'large':'defaultThumbL_%s.png', + 'small':'defaultThumbS_%s.png', + 'icon':'icon_%s.png' + } + if type(cat) == list: + cat = cat[0] + if web2: + cat = 'video' + elif cat == None: + return None + + cat = cat.lower() + + if cat in categoryConverter: + cat = categoryConverter[cat] + + + if self.categoryThumbs.get((cat, thumbtype)): + return self.categoryThumbs[(cat, thumbtype)] + else: + filename = thumbType[thumbtype] % cat + pathname = os.path.join(self.guiImagePath, filename) + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'iconm: Looking for category image:',pathname + if os.path.isfile(pathname): + bm = wx.Bitmap(pathname) + else: + bm = None + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'iconm: No thumb found for category: %s' % cat + self.categoryThumbs[(cat, thumbtype)] = bm + return bm + + + def getDownloadButton(self, mode): + if mode == 'play': + return self.DOWNLOAD_BUTTON_PLAY, self.DOWNLOAD_BUTTON_PLAY_S + elif mode == 'download': + return self.DOWNLOAD_BUTTON_DOWNLOAD, self.DOWNLOAD_BUTTON_DOWNLOAD_S + elif mode == 'library': + return self.DOWNLOAD_BUTTON_LIBRARY, self.DOWNLOAD_BUTTON_LIBRARY_S + else: + raise Exception('No such mode') + + def getSourceIcon(self, source): + if source == 'tribler': + return self.SOURCE_ICON_TRIBLER + elif source == 'youtube': + return self.SOURCE_ICON_YOUTUBE + elif source == 'liveleak': + return self.SOURCE_ICON_LIVELEAK + elif source == 'remote': + return self.SOURCE_ICON_REMOTE + elif not source: + return None + else: + raise Exception('No such source') + + + def create_wxImageList(self,peerswpermid,setindex=False): + """ peerswpermid is a list of dictionaries that contain the + name and permid of a peer + """ + if len(peerswpermid) == 0: + return None + + # scale default to proper size + defaultThumb = self.get_default('personsMode','DEFAULT_THUMB') + defaultThumb = wx.BitmapFromImage(defaultThumb.ConvertToImage().Scale(SMALL_ICON_MAX_DIM,SMALL_ICON_MAX_DIM)) + + list = [] + for peer in peerswpermid: + bm = self.load_wxBitmap(peer['permid'], SMALL_ICON_MAX_DIM) + if bm is None: + bm = defaultThumb + list.append(bm) + imgList = wx.ImageList(SMALL_ICON_MAX_DIM,SMALL_ICON_MAX_DIM) + if imgList is None: + return None + for peer in peerswpermid: + bm = list.pop(0) + index = imgList.Add(bm) + if setindex: + peer['tempiconindex'] = index + return imgList + + + def create_from_file(self,permid,srcfilename): + """ srcfilename must point to an image file processable by wx.Image """ + try: + sim = wx.Image(srcfilename).Scale(ICON_MAX_DIM,ICON_MAX_DIM) + sim.SaveFile(dstfilename,wx.BITMAP_TYPE_JPEG) + f = cStringIO.StringIO() + sim.SaveStream(f,wx.BITMAP_TYPE_JPEG) + self.peer_db.updatePeerIcon('image/jpeg',f.getvalue()) + f.close() + except: + if DEBUG: + print_exc() + + def load_wxBitmap(self,permid, dim = ICON_MAX_DIM): + [_mimetype,data] = self.peer_db.getPeerIcon(permid) + if data is None: + return None + else: + return data2wxBitmap('image/jpeg',data, dim) + + +def 
data2wxImage(type,data,dim=ICON_MAX_DIM): + try: + if data is None: + return None + + mi = cStringIO.StringIO(data) + # St*pid wx says "No handler for image/bmp defined" while this + # is the image handler that is guaranteed to always be there, + # according to the docs :-( + if type == 'image/bmp': + im = wx.ImageFromStream(mi,wx.BITMAP_TYPE_BMP) + else: + im = wx.ImageFromStreamMime(mi,type) + + return im.Scale(dim,dim) + except: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'data2wxImage called (%s, %s)' % (`type`,`dim`) + print_exc() + return None + + +def data2wxBitmap(type,data,dim=ICON_MAX_DIM): + try: + im = data2wxImage(type,data,dim=dim) + if im is None: + bm = None + else: + bm = wx.BitmapFromImage(im,-1) + + return bm + except: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'data2wxBitmap called (%s, %s)' % (`type`,`dim`) + print_exc() + return None + diff --git a/tribler-mod/Tribler/Main/vwxGUI/IconsManager.py.bak b/tribler-mod/Tribler/Main/vwxGUI/IconsManager.py.bak new file mode 100644 index 0000000..cb67e51 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/IconsManager.py.bak @@ -0,0 +1,207 @@ +# Written by Jelle Roozenburg, Maarten ten Brinke, Arno Bakker +# see LICENSE.txt for license information + +import wx, os +import cStringIO + +from Tribler.Core.API import * +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility + +ICON_MAX_DIM = 80 +SMALL_ICON_MAX_DIM = 32 + + +class IconsManager: + + __single = None + + def __init__(self): + + if IconsManager.__single: + raise RuntimeError, "IconsManager is singleton" + + self.guiUtility = GUIUtility.getInstance() + self.guiImagePath = os.path.join(self.guiUtility.utility.getPath(), 'Tribler', 'Main', 'vwxGUI', 'images') + self.defaults = {} + self.defaults['filesMode'] = {} + self.defaults['personsMode'] = {} + self.defaults['personsMode']['DEFAULT_THUMB'] = wx.Bitmap(os.path.join(self.guiImagePath, 'defaultThumbPeer.png')) + + + self.DOWNLOAD_BUTTON_DOWNLOAD = wx.Bitmap(os.path.join(self.guiImagePath, 'download.png')) + self.DOWNLOAD_BUTTON_DOWNLOAD_S = wx.Bitmap(os.path.join(self.guiImagePath, 'download_clicked.png')) + + self.categoryThumbs = {} + + # Added from mugshot manager to show items in left menu + ####################################################### + + + self.peer_db = self.guiUtility.utility.session.open_dbhandler(NTFY_PEERS) + + IconsManager.__single = self + + + + def getInstance(*args, **kw): + """ Returns the IconsManager singleton if it exists or otherwise + creates it first, in which case you need to pass the constructor + params. 
+ @return IconsManager.""" + if IconsManager.__single is None: + IconsManager(*args, **kw) + return IconsManager.__single + getInstance = staticmethod(getInstance) + + def get_default(self,mode,name): + return self.defaults[mode][name] + + def getCategoryIcon(self, mode, cat, thumbtype = 'normal', web2 = False): + #print "**** getCategoryIcon", mode, cat, thumbtype, web2 + + categoryConverter = {'picture':'other', + 'videoclips':'video', + 'document':'other'} + thumbType = {'normal':'defaultThumb_%s.png', + 'large':'defaultThumbL_%s.png', + 'small':'defaultThumbS_%s.png', + 'icon':'icon_%s.png' + } + if type(cat) == list: + cat = cat[0] + if web2: + cat = 'video' + elif cat == None: + return None + + cat = cat.lower() + + if cat in categoryConverter: + cat = categoryConverter[cat] + + + if self.categoryThumbs.get((cat, thumbtype)): + return self.categoryThumbs[(cat, thumbtype)] + else: + filename = thumbType[thumbtype] % cat + pathname = os.path.join(self.guiImagePath, filename) + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'iconm: Looking for category image:',pathname + if os.path.isfile(pathname): + bm = wx.Bitmap(pathname) + else: + bm = None + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'iconm: No thumb found for category: %s' % cat + self.categoryThumbs[(cat, thumbtype)] = bm + return bm + + + def getDownloadButton(self, mode): + if mode == 'play': + return self.DOWNLOAD_BUTTON_PLAY, self.DOWNLOAD_BUTTON_PLAY_S + elif mode == 'download': + return self.DOWNLOAD_BUTTON_DOWNLOAD, self.DOWNLOAD_BUTTON_DOWNLOAD_S + elif mode == 'library': + return self.DOWNLOAD_BUTTON_LIBRARY, self.DOWNLOAD_BUTTON_LIBRARY_S + else: + raise Exception('No such mode') + + def getSourceIcon(self, source): + if source == 'tribler': + return self.SOURCE_ICON_TRIBLER + elif source == 'youtube': + return self.SOURCE_ICON_YOUTUBE + elif source == 'liveleak': + return self.SOURCE_ICON_LIVELEAK + elif source == 'remote': + return self.SOURCE_ICON_REMOTE + elif not source: + return None + else: + raise Exception('No such source') + + + def create_wxImageList(self,peerswpermid,setindex=False): + """ peerswpermid is a list of dictionaries that contain the + name and permid of a peer + """ + if len(peerswpermid) == 0: + return None + + # scale default to proper size + defaultThumb = self.get_default('personsMode','DEFAULT_THUMB') + defaultThumb = wx.BitmapFromImage(defaultThumb.ConvertToImage().Scale(SMALL_ICON_MAX_DIM,SMALL_ICON_MAX_DIM)) + + list = [] + for peer in peerswpermid: + bm = self.load_wxBitmap(peer['permid'], SMALL_ICON_MAX_DIM) + if bm is None: + bm = defaultThumb + list.append(bm) + imgList = wx.ImageList(SMALL_ICON_MAX_DIM,SMALL_ICON_MAX_DIM) + if imgList is None: + return None + for peer in peerswpermid: + bm = list.pop(0) + index = imgList.Add(bm) + if setindex: + peer['tempiconindex'] = index + return imgList + + + def create_from_file(self,permid,srcfilename): + """ srcfilename must point to an image file processable by wx.Image """ + try: + sim = wx.Image(srcfilename).Scale(ICON_MAX_DIM,ICON_MAX_DIM) + sim.SaveFile(dstfilename,wx.BITMAP_TYPE_JPEG) + f = cStringIO.StringIO() + sim.SaveStream(f,wx.BITMAP_TYPE_JPEG) + self.peer_db.updatePeerIcon('image/jpeg',f.getvalue()) + f.close() + except: + if DEBUG: + print_exc() + + def load_wxBitmap(self,permid, dim = ICON_MAX_DIM): + [_mimetype,data] = self.peer_db.getPeerIcon(permid) + if data is None: + return None + else: + return data2wxBitmap('image/jpeg',data, dim) + + +def 
data2wxImage(type,data,dim=ICON_MAX_DIM): + try: + if data is None: + return None + + mi = cStringIO.StringIO(data) + # St*pid wx says "No handler for image/bmp defined" while this + # is the image handler that is guaranteed to always be there, + # according to the docs :-( + if type == 'image/bmp': + im = wx.ImageFromStream(mi,wx.BITMAP_TYPE_BMP) + else: + im = wx.ImageFromStreamMime(mi,type) + + return im.Scale(dim,dim) + except: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'data2wxImage called (%s, %s)' % (`type`,`dim`) + print_exc() + return None + + +def data2wxBitmap(type,data,dim=ICON_MAX_DIM): + try: + im = data2wxImage(type,data,dim=dim) + if im is None: + bm = None + else: + bm = wx.BitmapFromImage(im,-1) + + return bm + except: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'data2wxBitmap called (%s, %s)' % (`type`,`dim`) + print_exc() + return None + diff --git a/tribler-mod/Tribler/Main/vwxGUI/LibraryItemPanel.py b/tribler-mod/Tribler/Main/vwxGUI/LibraryItemPanel.py new file mode 100644 index 0000000..533a68b --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/LibraryItemPanel.py @@ -0,0 +1,857 @@ +from time import localtime, strftime +# Written by Jelle Roozenburg, Maarten ten Brinke, Arno Bakker +# see LICENSE.txt for license information + +import wx, math, time, os, sys, threading +from traceback import print_exc,print_stack +from copy import deepcopy +from wx.lib.stattext import GenStaticText as StaticText + +from Tribler.Core.API import * +from Tribler.Core.Utilities.unicode import * +from Tribler.Core.Utilities.utilities import * +# LAYERVIOLATION +from Tribler.Core.Overlay.MetadataHandler import get_filename + +from Tribler.Main.Utility.constants import * +from Tribler.Main.Utility import * +from Tribler.Main.vwxGUI.tribler_topButton import tribler_topButton, SwitchButton +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility +from Tribler.Main.vwxGUI.filesItemPanel import ThumbnailViewer, libraryModeThumbSize +from Tribler.Main.Dialogs.GUITaskQueue import GUITaskQueue +from Tribler.Main.Dialogs.dlhelperframe import DownloadHelperFrame +from Tribler.Main.vwxGUI.bgPanel import ImagePanel +from Tribler.Video.VideoPlayer import VideoPlayer +from Tribler.Video.Progress import ProgressBar +from Tribler.Video.utils import videoextdefaults +from bgPanel import * +from font import * +from Tribler.Main.vwxGUI.TriblerStyles import TriblerStyles + +from Tribler.Main.Utility.constants import * +from Tribler.Main.Utility import * + +DEBUG = False + +[ID_MENU_1418,ID_MENU_1419,ID_MENU_1420] = 1418,1419,1420 + +# font sizes + +#if sys.platform == 'darwin': +# FS_FRIENDTITLE = 11 +# FS_STATUS = 10 +# FS_SIMILARITY = 10 +# FS_HEARTRANK = 10 +# FS_ONLINE = 10 +#else: +# FS_FRIENDTITLE = 11 +# FS_STATUS = 9 +# FS_SIMILARITY = 10 +# FS_HEARTRANK = 7 +# FS_ONLINE = 8 + +if sys.platform == 'darwin': + FS_TITLE = 10 + FS_PERC = 9 + FS_SPEED = 9 + FS_PAUSE = 10 +elif sys.platform == 'win32': + FS_TITLE = 8 + FS_PERC = 6 + FS_SPEED = 7 + FS_PAUSE = 7 +else: + FS_TITLE = 8 + FS_PERC = 7 + FS_SPEED = 7 + FS_PAUSE = 7 + + +statusLibrary = {"downloading" : "LibStatus_downloading.png", + "stopped" : "LibStatus_stopped.png", + "boosting" : "LibStatus_boosting.png", + "completed" : "LibStatus_completed.png", + "seeding" : "LibSatus_seeding.png"} + + +class LibraryItemPanel(wx.Panel): + def __init__(self, parent, keyTypedFun = None, name='regular'): + + global TORRENTPANEL_BACKGROUND + + wx.Panel.__init__(self, parent, -1) + self.guiUtility = 
GUIUtility.getInstance() + self.utility = self.guiUtility.utility + self.parent = parent + if self.parent.GetName() == 'libraryGrid': + self.listItem = (self.parent.viewmode == 'list') +# self.guiserver = parent.guiserver + else: + self.listItem = True +# self.guiserver = GUIServer.getInstance() + + + self.guiserver = parent.guiserver + self.triblerGrey = wx.Colour(128,128,128) + + #self.statusTorrent = TorrentStatus(self) + self.ThumbnailViewer = ThumbnailViewer +# self.listItem = True # library always in listmode + self.data = None + self.status = None + self.rightMouse = None + self.titleLength = 40 # num characters + self.selected = False + self.warningMode = False + self.summary = None + self.oldCategoryLabel = None + self.name = name + self.torrentDetailsFrame = None + self.first = True + self.containsvideo = None # None means unknown, True=yes, False=no ;o) + + self.addComponents() + + #self.Bind(wx.EVT_RIGHT_DOWN, self.rightMouseButton) + +# self.Bind(wx.EVT_PAINT, self.OnPaint) + #self.Bind(wx.EVT_RIGHT_DOWN, self.rightMouseButton) + self.cache_progress = {} + self.gui_server = GUITaskQueue.getInstance() + +# self.SetMinSize((-1, 130)) + self.selected = False + self.Show() + self.Refresh() + self.Layout() + + + + self.triblerStyles = TriblerStyles.getInstance() + def addComponents(self): + + + self.Bind(wx.EVT_MOUSE_EVENTS, self.mouseAction) + self.Show(False) + + self.SetMinSize((660,22)) + + self.vSizerOverall = wx.BoxSizer(wx.VERTICAL) + + + imgpath = os.path.join(self.utility.getPath(),"Tribler","Main","vwxGUI","images","5.0","line3.png") + self.line_file = wx.Image(imgpath, wx.BITMAP_TYPE_ANY) + + self.hLine = wx.StaticBitmap(self, -1, wx.BitmapFromImage(self.line_file)) + + + + #self.hLine = wx.StaticLine(self,-1,wx.DefaultPosition, wx.Size(220,2),wx.LI_HORIZONTAL) + #self.hLine.SetBackgroundColour((255,0,0)) + self.vSizerOverall.Add(self.hLine, 0, wx.FIXED_MINSIZE|wx.EXPAND, 0) ## + + + self.hSizer = wx.BoxSizer(wx.HORIZONTAL) + + self.vSizerOverall.Add(self.hSizer, 0 , wx.EXPAND, 0) + + self.SetBackgroundColour(wx.WHITE) + + + # Add Spacer + self.hSizer.Add([5,0],0,wx.EXPAND|wx.FIXED_MINSIZE,0) + + + # Add thumb + #self.thumb = ThumbnailViewer(self, 'libraryMode') + #self.thumb.setBackground(wx.BLACK) + #self.thumb.SetSize(libraryModeThumbSize) + #self.thumb.Hide() + #self.hSizer.Add(self.thumb, 0, wx.ALL, 2) + + + # add play button + self.library_play = tribler_topButton(self, name="library_play") ## before libraryPlay + self.library_play.setBackground(wx.WHITE) + self.library_play.SetMinSize((17,17)) + self.library_play.SetSize((17,17)) + self.hSizer.Add(self.library_play, 0, wx.TOP|wx.ALIGN_RIGHT, 2) + self.library_play.Hide() + + + + self.hSizer.Add([5,0],0,wx.FIXED_MINSIZE,0) + + + # Add title + self.title = wx.StaticText(self,-1,"",wx.Point(0,0),wx.Size(250,14)) + self.title.SetBackgroundColour(wx.WHITE) + self.title.SetFont(wx.Font(FS_TITLE,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + self.title.SetMinSize((250,14)) + self.hSizer.Add(self.title,0,wx.TOP,3) + + self.hSizer.Add([20,0],0,wx.FIXED_MINSIZE,0) + + + + + # estimated time left + if sys.platform == 'win32': + self.eta = wx.StaticText(self,-1," ") + else: + self.eta = wx.StaticText(self,-1," ") + self.eta.SetForegroundColour((150,150,150)) + if sys.platform == 'win32': + self.eta.SetMinSize((120,14)) + else: + self.eta.SetMinSize((50,14)) + self.eta.SetFont(wx.Font(FS_PERC,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + #self.hSizer.Add(self.eta, 0, wx.FIXED_MINSIZE, 0) + + + self.pause_resume = 
wx.StaticText(self,-1,"Pause",wx.Point(0,0),wx.Size(50,14)) + self.pause_resume.SetForegroundColour((255,51,0)) + self.pause_resume.SetFont(wx.Font(FS_PAUSE,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + self.pause_resume.Bind(wx.EVT_LEFT_UP, self.pause_resume_clicked) + + self.hSizer.Add(self.pause_resume, 0, wx.TOP,3) + + + + # remove from library button + self.remove = tribler_topButton(self, name="remove") + self.remove.setBackground(wx.WHITE) + self.remove.SetMinSize((17,17)) + self.remove.SetSize((17,17)) + self.hSizer.Add(self.remove, 0, wx.TOP|wx.ALIGN_RIGHT, 2) + + if sys.platform == 'win32': + self.hSizer.Add([40,0],0,wx.FIXED_MINSIZE,0) + elif sys.platform == 'linux2': + self.hSizer.Add([55,0],0,wx.FIXED_MINSIZE,0) + else: + self.hSizer.Add([60,0],0,wx.FIXED_MINSIZE,0) + + + if sys.platform == 'win32': + size = 55 + elif sys.platform == 'linux2': + size = 45 + else: + size = 45 + self.speedDown2 = wx.StaticText(self,-1,"0.0 KB/s",wx.Point(274,3),wx.Size(size,12), wx.ST_NO_AUTORESIZE) + self.speedDown2.SetForegroundColour(wx.BLACK) + self.speedDown2.SetFont(wx.Font(FS_SPEED,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + self.speedDown2.SetMinSize((size,12)) + + + self.speedUp2 = wx.StaticText(self,-1,"0.0 KB/s",wx.Point(274,3),wx.Size(size,12), wx.ST_NO_AUTORESIZE) + self.speedUp2.SetForegroundColour(wx.BLACK) + self.speedUp2.SetFont(wx.Font(FS_SPEED,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + self.speedUp2.SetMinSize((size,12)) + + + self.hSizer.Add(self.speedDown2, 0, wx.TOP|wx.EXPAND, 4) + self.hSizer.Add([18,0],0,0,0) + self.hSizer.Add(self.speedUp2, 0, wx.TOP|wx.EXPAND, 4) + + if sys.platform == 'linux2': + self.hSizer.Add([25,0],0,wx.FIXED_MINSIZE,0) + elif sys.platform == 'darwin': + self.hSizer.Add([21,0],0,wx.FIXED_MINSIZE,0) + else: + self.hSizer.Add([30,0],0,wx.FIXED_MINSIZE,0) + self.pb = ProgressBar(self,pos=wx.Point(450,0),size=wx.Size(60,5)) + self.pb.SetMinSize((100,5)) + + self.pbSizer = wx.BoxSizer(wx.VERTICAL) + if sys.platform == 'win32': + self.pbSizer.Add([0,3],0,wx.FIXED_MINSIZE,0) + else: + self.pbSizer.Add([0,1], 0, wx.FIXED_MINSIZE, 0) + self.pbSizer.Add(self.pb,0,wx.FIXED_MINSIZE,0) + self.pbSizer.Add([0,1],0,wx.FIXED_MINSIZE,0) + self.pbSizer.Add(self.eta,0,wx.FIXED_MINSIZE,0) + + + + # Percentage + self.percentage = wx.StaticText(self,-1,"?%",wx.Point(800,0),wx.Size(40,14)) + self.percentage.SetForegroundColour(self.triblerGrey) + self.percentage.SetFont(wx.Font(FS_PERC,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + + + self.hSizer.Add(self.pbSizer, 0, wx.LEFT|wx.RIGHT|wx.EXPAND, 0) + self.hSizer.Add([9,0],0,wx.FIXED_MINSIZE,0) + self.hSizer.Add(self.percentage, 0, wx.LEFT|wx.RIGHT|wx.EXPAND, 0) + + # pause/stop button + #self.pause = SwitchButton(self, -1, wx.Point(542,3), wx.Size(16,16),name='pause' ) + #self.hSizer.Add(self.pause,0,wx.TOP|wx.FIXED_MINSIZE,2) + + # V Line + ## self.addLine() + + self.hSizer.Add([14,0],0,wx.FIXED_MINSIZE,0) + + + + # V Line + ## self.addLine() + + # Status Icon +## self.statusIcon = ImagePanel(self, -1, name="LibStatus_boosting") +## self.statusIcon.searchBitmap(name = statusLibrary["stopped"]) +## +## self.hSizer.Add(self.statusIcon, 0, wx.TOP|wx.RIGHT|wx.EXPAND, 2) + + # Status message + ##self.statusField = wx.StaticText(self, -1,'', wx.Point(),wx.Size()) + ##self.statusField.SetForegroundColour(self.triblerGrey) + ##self.statusField.SetFont(wx.Font(FS_SPEED,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + ##self.statusField.SetMinSize((60,12)) +# self.statusField.SetMinSize((125,12)) + 
##self.hSizer.Add(self.statusField, 0, wx.TOP, 4) + + ##self.hSizer.Add([20,20],0,wx.EXPAND|wx.FIXED_MINSIZE,0) + + # V Line + ## self.addLine() + + # Boost button + ##self.boost = SwitchButton(self, name="boost") + ##self.boost.setBackground(wx.WHITE) + ##self.boost.SetSize((50,16)) + ##self.boost.setEnabled(False) + ##self.hSizer.Add(self.boost, 0, wx.TOP|wx.ALIGN_RIGHT, 2) + ##self.hSizer.Add([2,20],0,wx.EXPAND|wx.FIXED_MINSIZE,0) + + # Play Fast + ##self.playFast = SwitchButton(self, name="playFast") + ##self.playFast.setBackground(wx.WHITE) + ##self.playFast.SetSize((39,16)) + ##self.playFast.setEnabled(False) + ##self.hSizer.Add(self.playFast, 0, wx.TOP|wx.ALIGN_RIGHT, 2) + ##self.hSizer.Add([2,20],0,wx.EXPAND|wx.FIXED_MINSIZE,0) + + # Play + ##self.playsmall = SwitchButton(self, name="playsmall") ## before libraryPlay + ##self.playsmall.setBackground(wx.WHITE) + ##self.playsmall.SetSize((16,16)) + ##self.playsmall.setEnabled(True) + ##self.hSizer.Add(self.playsmall, 1, wx.TOP|wx.ALIGN_RIGHT, 2) + + ##self.hSizerSummary = wx.BoxSizer(wx.HORIZONTAL) ## + ##self.vSizerOverall.Add(self.hSizerSummary, 1, wx.FIXED_MINSIZE|wx.EXPAND, 0) ## + + + + + # Add Refresh + self.SetSizer(self.vSizerOverall); + self.SetAutoLayout(1); + self.Layout(); + self.Refresh() + +# print 'tb > self.bgPanel size = %s' % self.titleBG.GetSize(), self.titleBG.GetPosition() + + # 2.8.4.2 return value of GetChildren changed + wl = [self] + for c in self.GetChildren(): + wl.append(c) + for window in wl: + window.Bind(wx.EVT_LEFT_UP, self.mouseAction) + #window.Bind(wx.EVT_LEFT_DCLICK, self.doubleClicked) + # Arno, 2009-03-05: this binding persists, so even when results page + # is shown after libView, hitting the delete button generates + # an event in this class. + #window.Bind(wx.EVT_KEY_UP, self.keyTyped) + window.Bind(wx.EVT_RIGHT_DOWN, self.mouseAction) + + + def getColumns(self): + if sys.platform == 'win32': + title = 'Down && Up Speed' + else: + title = 'Down &&&& Up Speed' + return [{'sort':'name', 'reverse':True, 'title':'Name', 'width':400,'weight':0,'tip':self.utility.lang.get('C_filename'), 'order':'down'}, + {'sort':'??', 'dummy':True, 'title':title,'width':130, 'tip':self.utility.lang.get('C_downupspeed')}, + {'sort':'progress', 'title':'Completion/ETA', 'width':120, 'tip':self.utility.lang.get('C_progress')} + ] + + # pause or resume a download + def pause_resume_clicked(self, event): + if event.LeftUp(): + if self.pause_resume.GetLabel() == 'Pause': + self.pauseDownload() + else: + self.resumeDownload() + else: + event.Skip() + + + def pauseDownload(self): + self.pause_resume.SetLabel('Resume') + if self.data.get('ds'): + ds = self.data.get('ds') + ds.get_download().stop() + + + + def resumeDownload(self): + self.pause_resume.SetLabel('Pause') + if self.data.get('ds'): + ds = self.data.get('ds') + ds.get_download().restart() + + + def refreshData(self): + self.setData(self.data) + + def addLine(self): + vLine = wx.StaticLine(self,-1,wx.DefaultPosition, wx.Size(2,0),wx.LI_VERTICAL) +# vLine.Show(False) + self.vSizer1.Add(vLine, 0, wx.LEFT|wx.RIGHT, 3) + + def updateProgress(self, infohash, progress): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'Lib: updateProgress: %s %s' % (self.title.GetLabel(), progress) + + if infohash not in self.cache_progress: + self.cache_progress[infohash] = 0 # progress + now = time() + if progress - self.cache_progress[infohash] > 1: + self.cache_progress[infohash] = progress + 
self.guiserver.add_task(lambda:self.updateProgressInDB(infohash,progress), 0) + + def updateProgressInDB(self, infohash, progress): + try: + mypref_db = self.utility.session.open_dbhandler(NTFY_MYPREFERENCES) + mypref_db.updateProgress(infohash, progress, commit=True) + except: + print_exc() # lock error + + def setData(self, torrent): + # set bitmap, rating, title + + #print_stack() + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","lip: setData called" + + #if torrent == None and self.library_play is not None: + # self.library_play.Destroy() + + if threading.currentThread().getName() != "MainThread": + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","lip: setData called by nonMainThread!",threading.currentThread().getName() + print_stack() + + if self.data is None: + oldinfohash = None + else: + oldinfohash = self.data['infohash'] + + self.data = torrent + + if torrent is None: + for child in self.GetChildren(): + child.Hide() + torrent = {} + else: + for child in self.GetChildren(): + child.Show() + + if torrent and oldinfohash != self.data['infohash']: + self.containsvideo = None + + if torrent.get('ds'): + #print '%s is an active torrent' % torrent['name'] + ds = torrent['ds'] + #abctorrent.setLibraryPanel(self) + + # Check if torrent just finished for resort + #abctorrent.status.checkJustFinished() + + + if ds.get_status() == DLSTATUS_STOPPED: + self.pause_resume.SetLabel('Resume') + + if ds.get_status() in (DLSTATUS_SEEDING, DLSTATUS_DOWNLOADING): + self.pause_resume.SetLabel('Pause') + + + #self.pb.setEnabled(True) + self.pb.Show() +# self.downSpeed.Show() + self.speedDown2.Show() +# self.upSpeed.Show() + self.speedUp2.Show() + + dls = ds.get_current_speed(DOWNLOAD)*1024 # speed_format needs byte/s + uls = ds.get_current_speed(UPLOAD)*1024 + + self.speedDown2.SetLabel(self.utility.speed_format_new(dls)) + self.speedUp2.SetLabel(self.utility.speed_format_new(uls)) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '%s %s %s' % (`ds.get_download().get_def().get_name()`, ds.get_progress(), dlstatus_strings[ds.get_status()]) + if ds.get_status() == DLSTATUS_STOPPED_ON_ERROR: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "ERROR IS",ds.get_error() + + finished = ds.get_progress() == 1.0 or ds.get_status() == DLSTATUS_SEEDING + progress = (ds.get_progress() or 0.0) * 100.0 + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '****** libraryitempanel:', torrent['torrent_id'], progress + self.updateProgress(torrent['infohash'], progress) + + self.percentage.SetLabel('%.1f%%' % progress) + eta = self.utility.eta_value(ds.get_eta(), truncate=2) + if finished: + eta = "Finished" + elif eta == '' or eta.find('unknown') != -1: + eta = '' + self.eta.SetLabel(eta) + self.eta.SetToolTipString(self.utility.lang.get('eta')+eta) + + havedigest = None + showPlayButton = False + + active = ds.get_status() in (DLSTATUS_SEEDING, DLSTATUS_DOWNLOADING) + + # Allow STOPPED_ON_ERROR, sometimes transient + startable = not ds.get_status() in [DLSTATUS_WAITING4HASHCHECK, DLSTATUS_ALLOCATING_DISKSPACE, DLSTATUS_HASHCHECKING] + if startable: + isVideo = bool(ds.get_download().get_def().get_files(exts=videoextdefaults)) + showPlayButton = isVideo + havedigest = ds.get_pieces_complete() + + if finished: + self.pb.reset(colour=2) # Show as complete + self.pb.Refresh() + elif havedigest: + self.pb.set_pieces(havedigest) + self.pb.Refresh() + elif progress > 0: + self.pb.reset(colour=1) # Show as having 
some + self.pb.Refresh() + else: + self.pb.reset(colour=0) # Show as having none + self.pb.Refresh() + + self.library_play.setEnabled(showPlayButton) + + elif torrent: # inactive torrent + + if not self.listItem: + #self.pb.setEnabled(False) + self.downSpeed2.Hide() + self.speedDown2.SetLabel('--') + self.upSpeed.Hide() + self.speedUp2.SetLabel('--') + self.library_play.setEnabled(False) + else: + if self.containsvideo is None: + self.async_check_torrentfile_contains_video(torrent) + if self.containsvideo is not None: + self.library_play.setEnabled(self.containsvideo) + else: + self.library_play.setEnabled(False) + + #self.eta.SetLabel('') + + if torrent.get('progress') != None: + self.percentage.SetLabel('%0.1f%%' % torrent['progress']) + self.pb.setNormalPercentage(torrent['progress']) + else: + self.percentage.SetLabel('?') + self.pb.reset() + + self.pb.Show() + self.pb.Refresh() + + if torrent and oldinfohash != self.data['infohash']: + if torrent.get('name'): + title = torrent['name'][:self.titleLength] + self.title.Show() + self.title.SetLabel(title) + self.title.Wrap(self.title.GetSize()[0]) + self.title.SetToolTipString(torrent['name']) + + # Only reload thumb when changing torrent displayed + ##if torrent['infohash'] != oldinfohash: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","REFRESH THUMBNAIL",`torrent['name']` + ##self.thumb.setTorrent(torrent) + + self.remove.SetToolTipString(self.utility.lang.get('C_remove')) + + + + else: + self.title.SetLabel('') + self.title.SetToolTipString('') + self.title.Hide() + + self.Layout() + self.Refresh() + self.GetContainingSizer().Layout() + self.parent.Refresh() + + def select(self, rowIndex, colIndex, ignore1, ignore2, ignore3): + self.selected = True + self.guiUtility.standardOverview.selectedTorrent = self.data['infohash'] + + def deselect(self, rowIndex, colIndex): + self.hLine.Show() + self.selected = False + + def keyTyped(self, event): + if self.selected: + key = event.GetKeyCode() + if (key == wx.WXK_DELETE): + if self.data: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'lib: deleting' + # Arno; 2007-05-11: TODO: use right method here, deleteTorrent does nothing at the + # moment, see below for right method + #self.guiUtility.deleteTorrent(self.data) + event.Skip() + + def mouseAction(self, event): + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","lip: mouseaction: name",event.GetEventObject().GetName() + + event.Skip() + + + if not self.data: + return + + obj = event.GetEventObject() + name = obj.GetName() + + + + self.SetFocus() + if self.data: + self.guiUtility.selectTorrent(self.data) + + # buttons that are switched off, should not generate events + try: + if not obj.isEnabled(): + #print 'Button %s was not enabled' % name + return + except: + pass + + + if self.data.get('ds'): + ds = self.data.get('ds') +# if name == 'deleteLibraryitem': +# removeFiles = False +# ds.get_download().stop() # ?? 
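Illustrative sketch (editor's addition, not part of the original patch): the 'pause' handling just below is, at its core, the same stop()/restart() toggle already used by pauseDownload()/resumeDownload() earlier in this class (the real branch additionally handles live streams and the button toggle state); roughly:
#   ds = self.data['ds']                       # DownloadState bound to this list item
#   d = ds.get_download()
#   if ds.get_status() in (DLSTATUS_STOPPED, DLSTATUS_STOPPED_ON_ERROR):
#       d.restart()                            # resume a stopped download
#   else:
#       d.stop()                               # pause an active one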
+# + if name == 'pause': + # ARNOCOMMENT: need to get/store/cache current status of Download somehow + if ds.get_status() == DLSTATUS_STOPPED or ds.get_status() == DLSTATUS_STOPPED_ON_ERROR: + if ds.get_download().get_def().get_live(): + self.switch_to_vod(ds) + else: + ds.get_download().restart() + obj.setToggled(False) + else: + ds.get_download().stop() + obj.setToggled(True) + + from Tribler.Video.VideoPlayer import VideoPlayer + videoplayer = VideoPlayer.getInstance() + stopd = ds.get_download() + playd = videoplayer.get_vod_download() + if stopd == playd: + videoplayer.close() + + elif name == 'library_play': + self.guiUtility.standardDetails.setVideodata(self.guiUtility.standardDetails.getData()) + self._get_videoplayer(exclude=ds).videoframe.get_videopanel().SetLoadingText(self.guiUtility.standardDetails.getVideodata()['name']) + if sys.platform == 'darwin': + self._get_videoplayer(exclude=ds).videoframe.show_videoframe() + self._get_videoplayer(exclude=ds).videoframe.get_videopanel().Refresh() + self._get_videoplayer(exclude=ds).videoframe.get_videopanel().Layout() + + self.play(ds) + + + + elif name == 'remove': + from Tribler.Video.VideoPlayer import VideoPlayer + videoplayer = VideoPlayer.getInstance() + playd = videoplayer.get_vod_download() + removed = ds.get_download() + if playd == removed: + self._get_videoplayer(exclude=ds).stop_playback() + + + + + #if self.guiUtility.standardDetails.getVideodata() is not None: + # if self.guiUtility.standardDetails.getVideodata()['name'] == self.guiUtility.standardDetails.getData()['name']: + # self.guiUtility.standardDetails.setVideodata(None) + # self._get_videoplayer(exclude=ds).stop_playback() + + + else: # no abctorrent + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","lip: mouseAction: No ds" + if name == 'pause': + #playbutton + dest_dir = self.data.get('destdir') + if dest_dir is not None: + # Start torrent again + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'lip: starting torrent %s with data in dir %s' % (repr(self.data['name']), dest_dir) + + if os.path.isfile(dest_dir): + # Arno: the 4.1 database values are wrong, also for + # single-file torrents the content_dir is + # "savedir"+torrentname. Try to componsate + dest_dir = os.path.dirname(dest_dir) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'lip: starting torrent %s with data in corrected 4.1 dir %s' % (repr(self.data['name']), dest_dir) + + self.guiUtility.standardDetails.download(self.data, dest = dest_dir, force = True) + + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'lip: Could not make abctorrent active, no destdir in dictionary: %s' % repr(self.data.get('name')) + + elif name == 'library_play': + # Todo: make non-abctorrent files playable. 
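Illustrative sketch (editor's addition, not part of the original patch): for an inactive torrent this branch restarts the download much like the 'pause' branch above, but passes vodmode so playback can start; a minimal form, reusing the dest_dir fallback shown below:
#   dest = self.data.get('destdir') or get_default_dest_dir()
#   self.guiUtility.standardDetails.download(self.data, dest=dest, force=True, vodmode=True)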
+ dest_dir = self.data.get('destdir') + + if dest_dir is None: # workaround for testing + dest_dir = get_default_dest_dir() + + if dest_dir is not None: + # Start torrent again + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'lip: starting torrent %s with data in dir %s' % (repr(self.data['name']), dest_dir) + self.guiUtility.standardDetails.download(self.data, dest = dest_dir, force = True, vodmode = True) + + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'lip: Could not make abctorrent active, no destdir in dictionary: %s' % repr(self.data.get('name')) + + if name == 'deleteLibraryitem': + # delete works for active and inactive torrents + self.guiUtility.onDeleteTorrentFromLibrary() + + if event.RightDown(): + self.rightMouseButton(event) + + + def rightMouseButton(self, event): + # Open right-click menu (windows menu key) + # >>makePopup(self, menu, event = None, label = "", extralabel = "", bindto = None): + + menu = self.guiUtility.OnRightMouseAction(event) + if menu is not None: + self.PopupMenu(menu, (-1,-1)) + + + def getIdentifier(self): + if self.data: + return self.data.get('infohash') + else: + return None + + def _get_videoplayer(self, exclude=None): + """ + Returns the VideoPlayer instance and ensures that it knows if + there are other downloads running. + """ + + # 22/08/08 Boudewijn: The videoplayer has to know if there are + # downloads running. + other_downloads = False + for ds in self.parent.gridManager.dslist: + if ds is not exclude and ds.get_status() not in (DLSTATUS_STOPPED, DLSTATUS_STOPPED_ON_ERROR): + other_downloads = True + break + + videoplayer = VideoPlayer.getInstance() + videoplayer.set_other_downloads(other_downloads) + return videoplayer + + def play(self,ds): + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","lip: play" + + self.pause_resume.SetLabel('Pause') + + self._get_videoplayer(exclude=ds).stop_playback() # stop current playback + + self._get_videoplayer(exclude=ds).videoframe.get_videopanel().vlcwin.agVideo.Show() + self._get_videoplayer(exclude=ds).videoframe.get_videopanel().vlcwin.agVideo.Play() + self._get_videoplayer(exclude=ds).videoframe.get_videopanel().vlcwin.Refresh() + + self._get_videoplayer(exclude=ds).play(ds) + + + def OnPaint(self, evt): + dc = wx.BufferedPaintDC(self) + dc.SetBackground(wx.Brush(wx.BLUE)) + + dc.Clear() + + if self.title: +# print 'tb > self.title.GetLabel() = %s' % self.title.GetLabel() + dc.SetFont(wx.Font(14,FONTFAMILY,FONTWEIGHT, wx.BOLD, False,FONTFACE)) + dc.SetTextForeground('#007303') +# dc.DrawText(self.title.GetLabel(), 0, 0) + dc.DrawText('online', 38, 64) + self.title.Hide() + + + def async_check_torrentfile_contains_video(self,torrent): + if 'torrent_file_name' in torrent and torrent['torrent_file_name'] != '': + torrent_dir = self.utility.session.get_torrent_collecting_dir() + torrent_filename = os.path.join(torrent_dir, torrent['torrent_file_name']) + + if DEBUG: + print "lip: Scheduling read videofiles from",`torrent['name']`,"from",torrent_filename + + def loadMetaDataNow(): + try: + self.guiservthread_loadMetadata(torrent,torrent_filename) + except wx.PyDeadObjectError: + pass + + try: + self.GetParent().guiserver.add_task(loadMetaDataNow,0) + except wx.PyDeadObjectError: + # ARNO: TODO: The FileItemPanels that use this ThumbnailViewer now get deleted, and thus + # also the ThumbnailViewer objects. Or at least the C++ part of them. 
As a result we + # can no longer schedule these loadMetadata callbacks on the GUITaskQueue thread. + # + # At the moment, the wx code protects us, and throws an exception that the C++ part + # of the ThumbnailViewer object is gone. But we should clean this up. + pass + else: + self.containsvideo = False + + def guiservthread_loadMetadata(self, torrent,torrent_filename): + """ Called by separate non-GUI thread """ + + isVideo = False + try: + if os.path.isfile(torrent_filename): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","lip: Reading",torrent_filename,"to see if contains video" + tdef = TorrentDef.load(torrent_filename) + isVideo = bool(tdef.get_files(exts=videoextdefaults)) + except: + print_exc() + + if torrent['infohash'] == self.data['infohash']: + self.containsvideo = isVideo + wx.CallAfter(self.metadata_loaded,torrent,None) + + + def metadata_loaded(self,torrent,metadata): + """ Called by GUI thread """ + try: + if torrent['infohash'] == self.data['infohash']: + self.library_play.setEnabled(self.containsvideo) + except wx.PyDeadObjectError: + pass diff --git a/tribler-mod/Tribler/Main/vwxGUI/LibraryItemPanel.py.bak b/tribler-mod/Tribler/Main/vwxGUI/LibraryItemPanel.py.bak new file mode 100644 index 0000000..641a0fd --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/LibraryItemPanel.py.bak @@ -0,0 +1,856 @@ +# Written by Jelle Roozenburg, Maarten ten Brinke, Arno Bakker +# see LICENSE.txt for license information + +import wx, math, time, os, sys, threading +from traceback import print_exc,print_stack +from copy import deepcopy +from wx.lib.stattext import GenStaticText as StaticText + +from Tribler.Core.API import * +from Tribler.Core.Utilities.unicode import * +from Tribler.Core.Utilities.utilities import * +# LAYERVIOLATION +from Tribler.Core.Overlay.MetadataHandler import get_filename + +from Tribler.Main.Utility.constants import * +from Tribler.Main.Utility import * +from Tribler.Main.vwxGUI.tribler_topButton import tribler_topButton, SwitchButton +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility +from Tribler.Main.vwxGUI.filesItemPanel import ThumbnailViewer, libraryModeThumbSize +from Tribler.Main.Dialogs.GUITaskQueue import GUITaskQueue +from Tribler.Main.Dialogs.dlhelperframe import DownloadHelperFrame +from Tribler.Main.vwxGUI.bgPanel import ImagePanel +from Tribler.Video.VideoPlayer import VideoPlayer +from Tribler.Video.Progress import ProgressBar +from Tribler.Video.utils import videoextdefaults +from bgPanel import * +from font import * +from Tribler.Main.vwxGUI.TriblerStyles import TriblerStyles + +from Tribler.Main.Utility.constants import * +from Tribler.Main.Utility import * + +DEBUG = False + +[ID_MENU_1418,ID_MENU_1419,ID_MENU_1420] = 1418,1419,1420 + +# font sizes + +#if sys.platform == 'darwin': +# FS_FRIENDTITLE = 11 +# FS_STATUS = 10 +# FS_SIMILARITY = 10 +# FS_HEARTRANK = 10 +# FS_ONLINE = 10 +#else: +# FS_FRIENDTITLE = 11 +# FS_STATUS = 9 +# FS_SIMILARITY = 10 +# FS_HEARTRANK = 7 +# FS_ONLINE = 8 + +if sys.platform == 'darwin': + FS_TITLE = 10 + FS_PERC = 9 + FS_SPEED = 9 + FS_PAUSE = 10 +elif sys.platform == 'win32': + FS_TITLE = 8 + FS_PERC = 6 + FS_SPEED = 7 + FS_PAUSE = 7 +else: + FS_TITLE = 8 + FS_PERC = 7 + FS_SPEED = 7 + FS_PAUSE = 7 + + +statusLibrary = {"downloading" : "LibStatus_downloading.png", + "stopped" : "LibStatus_stopped.png", + "boosting" : "LibStatus_boosting.png", + "completed" : "LibStatus_completed.png", + "seeding" : "LibSatus_seeding.png"} + + +class LibraryItemPanel(wx.Panel): + def 
__init__(self, parent, keyTypedFun = None, name='regular'): + + global TORRENTPANEL_BACKGROUND + + wx.Panel.__init__(self, parent, -1) + self.guiUtility = GUIUtility.getInstance() + self.utility = self.guiUtility.utility + self.parent = parent + if self.parent.GetName() == 'libraryGrid': + self.listItem = (self.parent.viewmode == 'list') +# self.guiserver = parent.guiserver + else: + self.listItem = True +# self.guiserver = GUIServer.getInstance() + + + self.guiserver = parent.guiserver + self.triblerGrey = wx.Colour(128,128,128) + + #self.statusTorrent = TorrentStatus(self) + self.ThumbnailViewer = ThumbnailViewer +# self.listItem = True # library always in listmode + self.data = None + self.status = None + self.rightMouse = None + self.titleLength = 40 # num characters + self.selected = False + self.warningMode = False + self.summary = None + self.oldCategoryLabel = None + self.name = name + self.torrentDetailsFrame = None + self.first = True + self.containsvideo = None # None means unknown, True=yes, False=no ;o) + + self.addComponents() + + #self.Bind(wx.EVT_RIGHT_DOWN, self.rightMouseButton) + +# self.Bind(wx.EVT_PAINT, self.OnPaint) + #self.Bind(wx.EVT_RIGHT_DOWN, self.rightMouseButton) + self.cache_progress = {} + self.gui_server = GUITaskQueue.getInstance() + +# self.SetMinSize((-1, 130)) + self.selected = False + self.Show() + self.Refresh() + self.Layout() + + + + self.triblerStyles = TriblerStyles.getInstance() + def addComponents(self): + + + self.Bind(wx.EVT_MOUSE_EVENTS, self.mouseAction) + self.Show(False) + + self.SetMinSize((660,22)) + + self.vSizerOverall = wx.BoxSizer(wx.VERTICAL) + + + imgpath = os.path.join(self.utility.getPath(),"Tribler","Main","vwxGUI","images","5.0","line3.png") + self.line_file = wx.Image(imgpath, wx.BITMAP_TYPE_ANY) + + self.hLine = wx.StaticBitmap(self, -1, wx.BitmapFromImage(self.line_file)) + + + + #self.hLine = wx.StaticLine(self,-1,wx.DefaultPosition, wx.Size(220,2),wx.LI_HORIZONTAL) + #self.hLine.SetBackgroundColour((255,0,0)) + self.vSizerOverall.Add(self.hLine, 0, wx.FIXED_MINSIZE|wx.EXPAND, 0) ## + + + self.hSizer = wx.BoxSizer(wx.HORIZONTAL) + + self.vSizerOverall.Add(self.hSizer, 0 , wx.EXPAND, 0) + + self.SetBackgroundColour(wx.WHITE) + + + # Add Spacer + self.hSizer.Add([5,0],0,wx.EXPAND|wx.FIXED_MINSIZE,0) + + + # Add thumb + #self.thumb = ThumbnailViewer(self, 'libraryMode') + #self.thumb.setBackground(wx.BLACK) + #self.thumb.SetSize(libraryModeThumbSize) + #self.thumb.Hide() + #self.hSizer.Add(self.thumb, 0, wx.ALL, 2) + + + # add play button + self.library_play = tribler_topButton(self, name="library_play") ## before libraryPlay + self.library_play.setBackground(wx.WHITE) + self.library_play.SetMinSize((17,17)) + self.library_play.SetSize((17,17)) + self.hSizer.Add(self.library_play, 0, wx.TOP|wx.ALIGN_RIGHT, 2) + self.library_play.Hide() + + + + self.hSizer.Add([5,0],0,wx.FIXED_MINSIZE,0) + + + # Add title + self.title = wx.StaticText(self,-1,"",wx.Point(0,0),wx.Size(250,14)) + self.title.SetBackgroundColour(wx.WHITE) + self.title.SetFont(wx.Font(FS_TITLE,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + self.title.SetMinSize((250,14)) + self.hSizer.Add(self.title,0,wx.TOP,3) + + self.hSizer.Add([20,0],0,wx.FIXED_MINSIZE,0) + + + + + # estimated time left + if sys.platform == 'win32': + self.eta = wx.StaticText(self,-1," ") + else: + self.eta = wx.StaticText(self,-1," ") + self.eta.SetForegroundColour((150,150,150)) + if sys.platform == 'win32': + self.eta.SetMinSize((120,14)) + else: + self.eta.SetMinSize((50,14)) + 
self.eta.SetFont(wx.Font(FS_PERC,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + #self.hSizer.Add(self.eta, 0, wx.FIXED_MINSIZE, 0) + + + self.pause_resume = wx.StaticText(self,-1,"Pause",wx.Point(0,0),wx.Size(50,14)) + self.pause_resume.SetForegroundColour((255,51,0)) + self.pause_resume.SetFont(wx.Font(FS_PAUSE,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + self.pause_resume.Bind(wx.EVT_LEFT_UP, self.pause_resume_clicked) + + self.hSizer.Add(self.pause_resume, 0, wx.TOP,3) + + + + # remove from library button + self.remove = tribler_topButton(self, name="remove") + self.remove.setBackground(wx.WHITE) + self.remove.SetMinSize((17,17)) + self.remove.SetSize((17,17)) + self.hSizer.Add(self.remove, 0, wx.TOP|wx.ALIGN_RIGHT, 2) + + if sys.platform == 'win32': + self.hSizer.Add([40,0],0,wx.FIXED_MINSIZE,0) + elif sys.platform == 'linux2': + self.hSizer.Add([55,0],0,wx.FIXED_MINSIZE,0) + else: + self.hSizer.Add([60,0],0,wx.FIXED_MINSIZE,0) + + + if sys.platform == 'win32': + size = 55 + elif sys.platform == 'linux2': + size = 45 + else: + size = 45 + self.speedDown2 = wx.StaticText(self,-1,"0.0 KB/s",wx.Point(274,3),wx.Size(size,12), wx.ST_NO_AUTORESIZE) + self.speedDown2.SetForegroundColour(wx.BLACK) + self.speedDown2.SetFont(wx.Font(FS_SPEED,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + self.speedDown2.SetMinSize((size,12)) + + + self.speedUp2 = wx.StaticText(self,-1,"0.0 KB/s",wx.Point(274,3),wx.Size(size,12), wx.ST_NO_AUTORESIZE) + self.speedUp2.SetForegroundColour(wx.BLACK) + self.speedUp2.SetFont(wx.Font(FS_SPEED,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + self.speedUp2.SetMinSize((size,12)) + + + self.hSizer.Add(self.speedDown2, 0, wx.TOP|wx.EXPAND, 4) + self.hSizer.Add([18,0],0,0,0) + self.hSizer.Add(self.speedUp2, 0, wx.TOP|wx.EXPAND, 4) + + if sys.platform == 'linux2': + self.hSizer.Add([25,0],0,wx.FIXED_MINSIZE,0) + elif sys.platform == 'darwin': + self.hSizer.Add([21,0],0,wx.FIXED_MINSIZE,0) + else: + self.hSizer.Add([30,0],0,wx.FIXED_MINSIZE,0) + self.pb = ProgressBar(self,pos=wx.Point(450,0),size=wx.Size(60,5)) + self.pb.SetMinSize((100,5)) + + self.pbSizer = wx.BoxSizer(wx.VERTICAL) + if sys.platform == 'win32': + self.pbSizer.Add([0,3],0,wx.FIXED_MINSIZE,0) + else: + self.pbSizer.Add([0,1], 0, wx.FIXED_MINSIZE, 0) + self.pbSizer.Add(self.pb,0,wx.FIXED_MINSIZE,0) + self.pbSizer.Add([0,1],0,wx.FIXED_MINSIZE,0) + self.pbSizer.Add(self.eta,0,wx.FIXED_MINSIZE,0) + + + + # Percentage + self.percentage = wx.StaticText(self,-1,"?%",wx.Point(800,0),wx.Size(40,14)) + self.percentage.SetForegroundColour(self.triblerGrey) + self.percentage.SetFont(wx.Font(FS_PERC,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + + + self.hSizer.Add(self.pbSizer, 0, wx.LEFT|wx.RIGHT|wx.EXPAND, 0) + self.hSizer.Add([9,0],0,wx.FIXED_MINSIZE,0) + self.hSizer.Add(self.percentage, 0, wx.LEFT|wx.RIGHT|wx.EXPAND, 0) + + # pause/stop button + #self.pause = SwitchButton(self, -1, wx.Point(542,3), wx.Size(16,16),name='pause' ) + #self.hSizer.Add(self.pause,0,wx.TOP|wx.FIXED_MINSIZE,2) + + # V Line + ## self.addLine() + + self.hSizer.Add([14,0],0,wx.FIXED_MINSIZE,0) + + + + # V Line + ## self.addLine() + + # Status Icon +## self.statusIcon = ImagePanel(self, -1, name="LibStatus_boosting") +## self.statusIcon.searchBitmap(name = statusLibrary["stopped"]) +## +## self.hSizer.Add(self.statusIcon, 0, wx.TOP|wx.RIGHT|wx.EXPAND, 2) + + # Status message + ##self.statusField = wx.StaticText(self, -1,'', wx.Point(),wx.Size()) + ##self.statusField.SetForegroundColour(self.triblerGrey) + 
##self.statusField.SetFont(wx.Font(FS_SPEED,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + ##self.statusField.SetMinSize((60,12)) +# self.statusField.SetMinSize((125,12)) + ##self.hSizer.Add(self.statusField, 0, wx.TOP, 4) + + ##self.hSizer.Add([20,20],0,wx.EXPAND|wx.FIXED_MINSIZE,0) + + # V Line + ## self.addLine() + + # Boost button + ##self.boost = SwitchButton(self, name="boost") + ##self.boost.setBackground(wx.WHITE) + ##self.boost.SetSize((50,16)) + ##self.boost.setEnabled(False) + ##self.hSizer.Add(self.boost, 0, wx.TOP|wx.ALIGN_RIGHT, 2) + ##self.hSizer.Add([2,20],0,wx.EXPAND|wx.FIXED_MINSIZE,0) + + # Play Fast + ##self.playFast = SwitchButton(self, name="playFast") + ##self.playFast.setBackground(wx.WHITE) + ##self.playFast.SetSize((39,16)) + ##self.playFast.setEnabled(False) + ##self.hSizer.Add(self.playFast, 0, wx.TOP|wx.ALIGN_RIGHT, 2) + ##self.hSizer.Add([2,20],0,wx.EXPAND|wx.FIXED_MINSIZE,0) + + # Play + ##self.playsmall = SwitchButton(self, name="playsmall") ## before libraryPlay + ##self.playsmall.setBackground(wx.WHITE) + ##self.playsmall.SetSize((16,16)) + ##self.playsmall.setEnabled(True) + ##self.hSizer.Add(self.playsmall, 1, wx.TOP|wx.ALIGN_RIGHT, 2) + + ##self.hSizerSummary = wx.BoxSizer(wx.HORIZONTAL) ## + ##self.vSizerOverall.Add(self.hSizerSummary, 1, wx.FIXED_MINSIZE|wx.EXPAND, 0) ## + + + + + # Add Refresh + self.SetSizer(self.vSizerOverall); + self.SetAutoLayout(1); + self.Layout(); + self.Refresh() + +# print 'tb > self.bgPanel size = %s' % self.titleBG.GetSize(), self.titleBG.GetPosition() + + # 2.8.4.2 return value of GetChildren changed + wl = [self] + for c in self.GetChildren(): + wl.append(c) + for window in wl: + window.Bind(wx.EVT_LEFT_UP, self.mouseAction) + #window.Bind(wx.EVT_LEFT_DCLICK, self.doubleClicked) + # Arno, 2009-03-05: this binding persists, so even when results page + # is shown after libView, hitting the delete button generates + # an event in this class. 
+ #window.Bind(wx.EVT_KEY_UP, self.keyTyped) + window.Bind(wx.EVT_RIGHT_DOWN, self.mouseAction) + + + def getColumns(self): + if sys.platform == 'win32': + title = 'Down && Up Speed' + else: + title = 'Down &&&& Up Speed' + return [{'sort':'name', 'reverse':True, 'title':'Name', 'width':400,'weight':0,'tip':self.utility.lang.get('C_filename'), 'order':'down'}, + {'sort':'??', 'dummy':True, 'title':title,'width':130, 'tip':self.utility.lang.get('C_downupspeed')}, + {'sort':'progress', 'title':'Completion/ETA', 'width':120, 'tip':self.utility.lang.get('C_progress')} + ] + + # pause or resume a download + def pause_resume_clicked(self, event): + if event.LeftUp(): + if self.pause_resume.GetLabel() == 'Pause': + self.pauseDownload() + else: + self.resumeDownload() + else: + event.Skip() + + + def pauseDownload(self): + self.pause_resume.SetLabel('Resume') + if self.data.get('ds'): + ds = self.data.get('ds') + ds.get_download().stop() + + + + def resumeDownload(self): + self.pause_resume.SetLabel('Pause') + if self.data.get('ds'): + ds = self.data.get('ds') + ds.get_download().restart() + + + def refreshData(self): + self.setData(self.data) + + def addLine(self): + vLine = wx.StaticLine(self,-1,wx.DefaultPosition, wx.Size(2,0),wx.LI_VERTICAL) +# vLine.Show(False) + self.vSizer1.Add(vLine, 0, wx.LEFT|wx.RIGHT, 3) + + def updateProgress(self, infohash, progress): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'Lib: updateProgress: %s %s' % (self.title.GetLabel(), progress) + + if infohash not in self.cache_progress: + self.cache_progress[infohash] = 0 # progress + now = time() + if progress - self.cache_progress[infohash] > 1: + self.cache_progress[infohash] = progress + self.guiserver.add_task(lambda:self.updateProgressInDB(infohash,progress), 0) + + def updateProgressInDB(self, infohash, progress): + try: + mypref_db = self.utility.session.open_dbhandler(NTFY_MYPREFERENCES) + mypref_db.updateProgress(infohash, progress, commit=True) + except: + print_exc() # lock error + + def setData(self, torrent): + # set bitmap, rating, title + + #print_stack() + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","lip: setData called" + + #if torrent == None and self.library_play is not None: + # self.library_play.Destroy() + + if threading.currentThread().getName() != "MainThread": + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","lip: setData called by nonMainThread!",threading.currentThread().getName() + print_stack() + + if self.data is None: + oldinfohash = None + else: + oldinfohash = self.data['infohash'] + + self.data = torrent + + if torrent is None: + for child in self.GetChildren(): + child.Hide() + torrent = {} + else: + for child in self.GetChildren(): + child.Show() + + if torrent and oldinfohash != self.data['infohash']: + self.containsvideo = None + + if torrent.get('ds'): + #print '%s is an active torrent' % torrent['name'] + ds = torrent['ds'] + #abctorrent.setLibraryPanel(self) + + # Check if torrent just finished for resort + #abctorrent.status.checkJustFinished() + + + if ds.get_status() == DLSTATUS_STOPPED: + self.pause_resume.SetLabel('Resume') + + if ds.get_status() in (DLSTATUS_SEEDING, DLSTATUS_DOWNLOADING): + self.pause_resume.SetLabel('Pause') + + + #self.pb.setEnabled(True) + self.pb.Show() +# self.downSpeed.Show() + self.speedDown2.Show() +# self.upSpeed.Show() + self.speedUp2.Show() + + dls = ds.get_current_speed(DOWNLOAD)*1024 # speed_format needs byte/s + uls = ds.get_current_speed(UPLOAD)*1024 + + 
self.speedDown2.SetLabel(self.utility.speed_format_new(dls)) + self.speedUp2.SetLabel(self.utility.speed_format_new(uls)) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '%s %s %s' % (`ds.get_download().get_def().get_name()`, ds.get_progress(), dlstatus_strings[ds.get_status()]) + if ds.get_status() == DLSTATUS_STOPPED_ON_ERROR: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "ERROR IS",ds.get_error() + + finished = ds.get_progress() == 1.0 or ds.get_status() == DLSTATUS_SEEDING + progress = (ds.get_progress() or 0.0) * 100.0 + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '****** libraryitempanel:', torrent['torrent_id'], progress + self.updateProgress(torrent['infohash'], progress) + + self.percentage.SetLabel('%.1f%%' % progress) + eta = self.utility.eta_value(ds.get_eta(), truncate=2) + if finished: + eta = "Finished" + elif eta == '' or eta.find('unknown') != -1: + eta = '' + self.eta.SetLabel(eta) + self.eta.SetToolTipString(self.utility.lang.get('eta')+eta) + + havedigest = None + showPlayButton = False + + active = ds.get_status() in (DLSTATUS_SEEDING, DLSTATUS_DOWNLOADING) + + # Allow STOPPED_ON_ERROR, sometimes transient + startable = not ds.get_status() in [DLSTATUS_WAITING4HASHCHECK, DLSTATUS_ALLOCATING_DISKSPACE, DLSTATUS_HASHCHECKING] + if startable: + isVideo = bool(ds.get_download().get_def().get_files(exts=videoextdefaults)) + showPlayButton = isVideo + havedigest = ds.get_pieces_complete() + + if finished: + self.pb.reset(colour=2) # Show as complete + self.pb.Refresh() + elif havedigest: + self.pb.set_pieces(havedigest) + self.pb.Refresh() + elif progress > 0: + self.pb.reset(colour=1) # Show as having some + self.pb.Refresh() + else: + self.pb.reset(colour=0) # Show as having none + self.pb.Refresh() + + self.library_play.setEnabled(showPlayButton) + + elif torrent: # inactive torrent + + if not self.listItem: + #self.pb.setEnabled(False) + self.downSpeed2.Hide() + self.speedDown2.SetLabel('--') + self.upSpeed.Hide() + self.speedUp2.SetLabel('--') + self.library_play.setEnabled(False) + else: + if self.containsvideo is None: + self.async_check_torrentfile_contains_video(torrent) + if self.containsvideo is not None: + self.library_play.setEnabled(self.containsvideo) + else: + self.library_play.setEnabled(False) + + #self.eta.SetLabel('') + + if torrent.get('progress') != None: + self.percentage.SetLabel('%0.1f%%' % torrent['progress']) + self.pb.setNormalPercentage(torrent['progress']) + else: + self.percentage.SetLabel('?') + self.pb.reset() + + self.pb.Show() + self.pb.Refresh() + + if torrent and oldinfohash != self.data['infohash']: + if torrent.get('name'): + title = torrent['name'][:self.titleLength] + self.title.Show() + self.title.SetLabel(title) + self.title.Wrap(self.title.GetSize()[0]) + self.title.SetToolTipString(torrent['name']) + + # Only reload thumb when changing torrent displayed + ##if torrent['infohash'] != oldinfohash: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","REFRESH THUMBNAIL",`torrent['name']` + ##self.thumb.setTorrent(torrent) + + self.remove.SetToolTipString(self.utility.lang.get('C_remove')) + + + + else: + self.title.SetLabel('') + self.title.SetToolTipString('') + self.title.Hide() + + self.Layout() + self.Refresh() + self.GetContainingSizer().Layout() + self.parent.Refresh() + + def select(self, rowIndex, colIndex, ignore1, ignore2, ignore3): + self.selected = True + self.guiUtility.standardOverview.selectedTorrent = 
self.data['infohash'] + + def deselect(self, rowIndex, colIndex): + self.hLine.Show() + self.selected = False + + def keyTyped(self, event): + if self.selected: + key = event.GetKeyCode() + if (key == wx.WXK_DELETE): + if self.data: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'lib: deleting' + # Arno; 2007-05-11: TODO: use right method here, deleteTorrent does nothing at the + # moment, see below for right method + #self.guiUtility.deleteTorrent(self.data) + event.Skip() + + def mouseAction(self, event): + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","lip: mouseaction: name",event.GetEventObject().GetName() + + event.Skip() + + + if not self.data: + return + + obj = event.GetEventObject() + name = obj.GetName() + + + + self.SetFocus() + if self.data: + self.guiUtility.selectTorrent(self.data) + + # buttons that are switched off, should not generate events + try: + if not obj.isEnabled(): + #print 'Button %s was not enabled' % name + return + except: + pass + + + if self.data.get('ds'): + ds = self.data.get('ds') +# if name == 'deleteLibraryitem': +# removeFiles = False +# ds.get_download().stop() # ?? +# + if name == 'pause': + # ARNOCOMMENT: need to get/store/cache current status of Download somehow + if ds.get_status() == DLSTATUS_STOPPED or ds.get_status() == DLSTATUS_STOPPED_ON_ERROR: + if ds.get_download().get_def().get_live(): + self.switch_to_vod(ds) + else: + ds.get_download().restart() + obj.setToggled(False) + else: + ds.get_download().stop() + obj.setToggled(True) + + from Tribler.Video.VideoPlayer import VideoPlayer + videoplayer = VideoPlayer.getInstance() + stopd = ds.get_download() + playd = videoplayer.get_vod_download() + if stopd == playd: + videoplayer.close() + + elif name == 'library_play': + self.guiUtility.standardDetails.setVideodata(self.guiUtility.standardDetails.getData()) + self._get_videoplayer(exclude=ds).videoframe.get_videopanel().SetLoadingText(self.guiUtility.standardDetails.getVideodata()['name']) + if sys.platform == 'darwin': + self._get_videoplayer(exclude=ds).videoframe.show_videoframe() + self._get_videoplayer(exclude=ds).videoframe.get_videopanel().Refresh() + self._get_videoplayer(exclude=ds).videoframe.get_videopanel().Layout() + + self.play(ds) + + + + elif name == 'remove': + from Tribler.Video.VideoPlayer import VideoPlayer + videoplayer = VideoPlayer.getInstance() + playd = videoplayer.get_vod_download() + removed = ds.get_download() + if playd == removed: + self._get_videoplayer(exclude=ds).stop_playback() + + + + + #if self.guiUtility.standardDetails.getVideodata() is not None: + # if self.guiUtility.standardDetails.getVideodata()['name'] == self.guiUtility.standardDetails.getData()['name']: + # self.guiUtility.standardDetails.setVideodata(None) + # self._get_videoplayer(exclude=ds).stop_playback() + + + else: # no abctorrent + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","lip: mouseAction: No ds" + if name == 'pause': + #playbutton + dest_dir = self.data.get('destdir') + if dest_dir is not None: + # Start torrent again + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'lip: starting torrent %s with data in dir %s' % (repr(self.data['name']), dest_dir) + + if os.path.isfile(dest_dir): + # Arno: the 4.1 database values are wrong, also for + # single-file torrents the content_dir is + # "savedir"+torrentname. 
Try to componsate + dest_dir = os.path.dirname(dest_dir) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'lip: starting torrent %s with data in corrected 4.1 dir %s' % (repr(self.data['name']), dest_dir) + + self.guiUtility.standardDetails.download(self.data, dest = dest_dir, force = True) + + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'lip: Could not make abctorrent active, no destdir in dictionary: %s' % repr(self.data.get('name')) + + elif name == 'library_play': + # Todo: make non-abctorrent files playable. + dest_dir = self.data.get('destdir') + + if dest_dir is None: # workaround for testing + dest_dir = get_default_dest_dir() + + if dest_dir is not None: + # Start torrent again + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'lip: starting torrent %s with data in dir %s' % (repr(self.data['name']), dest_dir) + self.guiUtility.standardDetails.download(self.data, dest = dest_dir, force = True, vodmode = True) + + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'lip: Could not make abctorrent active, no destdir in dictionary: %s' % repr(self.data.get('name')) + + if name == 'deleteLibraryitem': + # delete works for active and inactive torrents + self.guiUtility.onDeleteTorrentFromLibrary() + + if event.RightDown(): + self.rightMouseButton(event) + + + def rightMouseButton(self, event): + # Open right-click menu (windows menu key) + # >>makePopup(self, menu, event = None, label = "", extralabel = "", bindto = None): + + menu = self.guiUtility.OnRightMouseAction(event) + if menu is not None: + self.PopupMenu(menu, (-1,-1)) + + + def getIdentifier(self): + if self.data: + return self.data.get('infohash') + else: + return None + + def _get_videoplayer(self, exclude=None): + """ + Returns the VideoPlayer instance and ensures that it knows if + there are other downloads running. + """ + + # 22/08/08 Boudewijn: The videoplayer has to know if there are + # downloads running. 
+ other_downloads = False + for ds in self.parent.gridManager.dslist: + if ds is not exclude and ds.get_status() not in (DLSTATUS_STOPPED, DLSTATUS_STOPPED_ON_ERROR): + other_downloads = True + break + + videoplayer = VideoPlayer.getInstance() + videoplayer.set_other_downloads(other_downloads) + return videoplayer + + def play(self,ds): + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","lip: play" + + self.pause_resume.SetLabel('Pause') + + self._get_videoplayer(exclude=ds).stop_playback() # stop current playback + + self._get_videoplayer(exclude=ds).videoframe.get_videopanel().vlcwin.agVideo.Show() + self._get_videoplayer(exclude=ds).videoframe.get_videopanel().vlcwin.agVideo.Play() + self._get_videoplayer(exclude=ds).videoframe.get_videopanel().vlcwin.Refresh() + + self._get_videoplayer(exclude=ds).play(ds) + + + def OnPaint(self, evt): + dc = wx.BufferedPaintDC(self) + dc.SetBackground(wx.Brush(wx.BLUE)) + + dc.Clear() + + if self.title: +# print 'tb > self.title.GetLabel() = %s' % self.title.GetLabel() + dc.SetFont(wx.Font(14,FONTFAMILY,FONTWEIGHT, wx.BOLD, False,FONTFACE)) + dc.SetTextForeground('#007303') +# dc.DrawText(self.title.GetLabel(), 0, 0) + dc.DrawText('online', 38, 64) + self.title.Hide() + + + def async_check_torrentfile_contains_video(self,torrent): + if 'torrent_file_name' in torrent and torrent['torrent_file_name'] != '': + torrent_dir = self.utility.session.get_torrent_collecting_dir() + torrent_filename = os.path.join(torrent_dir, torrent['torrent_file_name']) + + if DEBUG: + print "lip: Scheduling read videofiles from",`torrent['name']`,"from",torrent_filename + + def loadMetaDataNow(): + try: + self.guiservthread_loadMetadata(torrent,torrent_filename) + except wx.PyDeadObjectError: + pass + + try: + self.GetParent().guiserver.add_task(loadMetaDataNow,0) + except wx.PyDeadObjectError: + # ARNO: TODO: The FileItemPanels that use this ThumbnailViewer now get deleted, and thus + # also the ThumbnailViewer objects. Or at least the C++ part of them. As a result we + # can no longer schedule these loadMetadata callbacks on the GUITaskQueue thread. + # + # At the moment, the wx code protects us, and throws an exception that the C++ part + # of the ThumbnailViewer object is gone. But we should clean this up. 
+ pass + else: + self.containsvideo = False + + def guiservthread_loadMetadata(self, torrent,torrent_filename): + """ Called by separate non-GUI thread """ + + isVideo = False + try: + if os.path.isfile(torrent_filename): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","lip: Reading",torrent_filename,"to see if contains video" + tdef = TorrentDef.load(torrent_filename) + isVideo = bool(tdef.get_files(exts=videoextdefaults)) + except: + print_exc() + + if torrent['infohash'] == self.data['infohash']: + self.containsvideo = isVideo + wx.CallAfter(self.metadata_loaded,torrent,None) + + + def metadata_loaded(self,torrent,metadata): + """ Called by GUI thread """ + try: + if torrent['infohash'] == self.data['infohash']: + self.library_play.setEnabled(self.containsvideo) + except wx.PyDeadObjectError: + pass diff --git a/tribler-mod/Tribler/Main/vwxGUI/LoadingDetails.py b/tribler-mod/Tribler/Main/vwxGUI/LoadingDetails.py new file mode 100644 index 0000000..1d9ef65 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/LoadingDetails.py @@ -0,0 +1,47 @@ +from time import localtime, strftime +# Written by Jelle Roozenburg, Maarten ten Brinke +# see LICENSE.txt for license information + +import wx +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility + +class LoadingDetailsPanel(wx.Panel): + + def __init__(self, parent): + wx.Panel.__init__(self, parent, -1) + self.guiUtility = GUIUtility.getInstance() + + self.addComponents() + + + def addComponents(self): + self.hSizer = wx.BoxSizer(wx.HORIZONTAL) + + self.hSizer.Add([8,5],0,wx.EXPAND|wx.FIXED_MINSIZE,0) + self.textPanel = wx.Panel(self) + sizer = wx.BoxSizer(wx.HORIZONTAL) + self.text = wx.StaticText(self.textPanel, -1, '') + self.text.SetForegroundColour(wx.Colour(255,255,255)) + sizer.Add(self.text, 1, wx.ALL, 0) + self.textPanel.SetSizer(sizer) + self.textPanel.SetAutoLayout(1) + self.textPanel.SetForegroundColour(wx.WHITE) + self.textPanel.SetBackgroundColour(wx.Colour(53,53,53)) + + self.text.SetSize((-1, 15)) + self.hSizer.Add(self.textPanel, 1, wx.TOP|wx.EXPAND, 3) + + self.SetSizer(self.hSizer); + self.SetAutoLayout(1); + self.SetMinSize((-1, 19)) + self.SetBackgroundColour(wx.Colour(53,53,53)) + self.hSizer.Layout() + self.Layout() + self.searchBusy = True #?? 
+ #self.Show(True) + self.results = {} + + def setMessage(self,msg): + self.text.SetLabel(msg) + + diff --git a/tribler-mod/Tribler/Main/vwxGUI/LoadingDetails.py.bak b/tribler-mod/Tribler/Main/vwxGUI/LoadingDetails.py.bak new file mode 100644 index 0000000..4223c8b --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/LoadingDetails.py.bak @@ -0,0 +1,46 @@ +# Written by Jelle Roozenburg, Maarten ten Brinke +# see LICENSE.txt for license information + +import wx +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility + +class LoadingDetailsPanel(wx.Panel): + + def __init__(self, parent): + wx.Panel.__init__(self, parent, -1) + self.guiUtility = GUIUtility.getInstance() + + self.addComponents() + + + def addComponents(self): + self.hSizer = wx.BoxSizer(wx.HORIZONTAL) + + self.hSizer.Add([8,5],0,wx.EXPAND|wx.FIXED_MINSIZE,0) + self.textPanel = wx.Panel(self) + sizer = wx.BoxSizer(wx.HORIZONTAL) + self.text = wx.StaticText(self.textPanel, -1, '') + self.text.SetForegroundColour(wx.Colour(255,255,255)) + sizer.Add(self.text, 1, wx.ALL, 0) + self.textPanel.SetSizer(sizer) + self.textPanel.SetAutoLayout(1) + self.textPanel.SetForegroundColour(wx.WHITE) + self.textPanel.SetBackgroundColour(wx.Colour(53,53,53)) + + self.text.SetSize((-1, 15)) + self.hSizer.Add(self.textPanel, 1, wx.TOP|wx.EXPAND, 3) + + self.SetSizer(self.hSizer); + self.SetAutoLayout(1); + self.SetMinSize((-1, 19)) + self.SetBackgroundColour(wx.Colour(53,53,53)) + self.hSizer.Layout() + self.Layout() + self.searchBusy = True #?? + #self.Show(True) + self.results = {} + + def setMessage(self,msg): + self.text.SetLabel(msg) + + diff --git a/tribler-mod/Tribler/Main/vwxGUI/MainFrame.py b/tribler-mod/Tribler/Main/vwxGUI/MainFrame.py new file mode 100644 index 0000000..2424f3a --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/MainFrame.py @@ -0,0 +1,713 @@ +from time import localtime, strftime +######################################################################### +# +# Author : Choopan RATTANAPOKA, Jie Yang, Arno Bakker +# +# Description : Main ABC [Yet Another Bittorrent Client] python script. +# you can run from source code by using +# >python abc.py +# need Python, WxPython in order to run from source code. +# +# see LICENSE.txt for license information +######################################################################### + +import os,sys + +# TODO: cleanup imports + +# Arno, 2008-03-21: see what happens when we disable this locale thing. 
Gives +# errors on Vista in "Regional and Language Settings Options" different from +# "English[United Kingdom]" +#import locale +import signal +import commands +import pickle + +try: + import wxversion + wxversion.select('2.8') +except: + pass +import wx +from wx import xrc +#import hotshot + +from threading import Thread, Event,currentThread,enumerate +import time +from traceback import print_exc, print_stack +from cStringIO import StringIO +import urllib + +from Tribler.Main.Utility.utility import Utility +from Tribler.Main.Utility.constants import * #IGNORE:W0611 +import Tribler.Main.vwxGUI.font as font +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility +from Tribler.Main.Dialogs.GUITaskQueue import GUITaskQueue +from Tribler.Main.Dialogs.systray import ABCTaskBarIcon +from Tribler.Main.notification import init as notification_init +from Tribler.Main.globals import DefaultDownloadStartupConfig,get_default_dscfg_filename +from Tribler.Video.VideoPlayer import VideoPlayer +from Tribler.Video.defs import * +from Tribler.Video.VideoFrame import VideoFrame +from Tribler.Video.utils import videoextdefaults + +from Tribler.Category.Category import Category + + +from Tribler.Core.simpledefs import * +from Tribler.Core.API import * +from Tribler.Core.Utilities.utilities import show_permid + +DEBUG = False + + +################################################################ +# +# Class: FileDropTarget +# +# To enable drag and drop for ABC list in main menu +# +################################################################ +class FileDropTarget(wx.FileDropTarget): + def __init__(self, frame): + # Initialize the wsFileDropTarget Object + wx.FileDropTarget.__init__(self) + # Store the Object Reference for dropped files + self.frame = frame + + def OnDropFiles(self, x, y, filenames): + for filename in filenames: + self.frame.startDownload(filename) + return True + + + +# Custom class loaded by XRC +class MainFrame(wx.Frame): + def __init__(self, *args): + self.firewallStatus = None + self.utility = None + + if len(args) == 0: + pre = wx.PreFrame() + # the Create step is done by XRC. 
+ self.PostCreate(pre) + self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate) + else: + wx.Frame.__init__(self, args[0], args[1], args[2], args[3]) + self._PostInit() + self.wxapp = None + + def OnCreate(self, event): + self.Unbind(wx.EVT_WINDOW_CREATE) + wx.CallAfter(self._PostInit) + event.Skip() + return True + + def _PostInit(self): + # Do all init here + self.guiUtility = GUIUtility.getInstance() + self.utility = self.guiUtility.utility + self.params = self.guiUtility.params + self.utility.frame = self + self.torrentfeed = None + self.category = Category.getInstance() + + title = self.utility.lang.get('title') + \ + " " + \ + self.utility.lang.get('version') + + # Get window size and position from config file + size, position = self.getWindowSettings() + style = wx.DEFAULT_FRAME_STYLE | wx.CLIP_CHILDREN + + self.SetSize(size) + self.SetPosition(position) + self.SetTitle(title) + tt = self.GetToolTip() + if tt is not None: + tt.SetTip('') + + #wx.Frame.__init__(self, None, -1, title, position, size, style = style) + + self.doneflag = Event() + + dragdroplist = FileDropTarget(self) + self.SetDropTarget(dragdroplist) + + self.tbicon = None + + try: + self.SetIcon(self.utility.icon) + except: + pass + + # Don't update GUI as often when iconized + self.GUIupdate = True + self.oldframe = None + self.window = self.GetChildren()[0] + self.window.utility = self.utility + self.buddyFrame = None + self.fileFrame = None + self.buddyFrame_page = 0 + self.buddyFrame_size = (800, 500) + self.buddyFrame_pos = None + self.fileFrame_size = (800, 500) + self.fileFrame_pos = None + + # Menu Events + ############################ + + self.Bind(wx.EVT_CLOSE, self.OnCloseWindow) +# self.Bind(wx.EVT_MENU, self.OnMenuExit, id = wx.ID_CLOSE) + + # leaving here for the time being: + # wxMSW apparently sends the event to the App object rather than + # the top-level Frame, but there seemed to be some possibility of + # change + self.Bind(wx.EVT_QUERY_END_SESSION, self.OnCloseWindow) + self.Bind(wx.EVT_END_SESSION, self.OnCloseWindow) + + try: + self.tbicon = ABCTaskBarIcon(self) + except: + print_exc() + self.Bind(wx.EVT_ICONIZE, self.onIconify) + self.Bind(wx.EVT_SET_FOCUS, self.onFocus) + self.Bind(wx.EVT_SIZE, self.onSize) + self.Bind(wx.EVT_MAXIMIZE, self.onSize) + #self.Bind(wx.EVT_IDLE, self.onIdle) + + + # transparency + # self.SetTransparent(240) + + + # Init video player + self.videoFrame = None + sys.stdout.write('GUI Complete.\n') + + ##self.standardOverview.Show(True) + self.Show(True) + + + # Just for debugging: add test permids and display top 5 peers from which the most is downloaded in bartercastdb +# bartercastdb = self.utility.session.open_dbhandler(NTFY_BARTERCAST) +# mypermid = bartercastdb.my_permid +# +# if DEBUG: +# +# top = bartercastdb.getTopNPeers(5)['top'] +# +# print 'My Permid: ', show_permid(mypermid) +# +# print 'Top 5 BarterCast peers:' +# print '=======================' +# +# i = 1 +# for (permid, up, down) in top: +# print '%2d: %15s - %10d up %10d down' % (i, bartercastdb.getName(permid), up, down) +# i += 1 + + self.checkVersion() + + # If the user passed a torrentfile on the cmdline, load it. 
+ wx.CallAfter(self.startCMDLineTorrent) + + def startCMDLineTorrent(self): + if self.params[0] != "": + torrentfilename = self.params[0] + self.startDownload(torrentfilename,cmdline=True,vodmode=True) + self.guiUtility.standardLibraryOverview(refresh=True) + + + def startDownload(self,torrentfilename,destdir=None,tdef = None,cmdline=False,clicklog=None,name=None,vodmode=False): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","mainframe: startDownload:",torrentfilename,destdir,tdef + try: + if tdef is None: + tdef = TorrentDef.load(torrentfilename) + defaultDLConfig = DefaultDownloadStartupConfig.getInstance() + dscfg = defaultDLConfig.copy() + if destdir is not None: + dscfg.set_dest_dir(destdir) + + videofiles = tdef.get_files(exts=videoextdefaults) + if vodmode and len(videofiles) == 0: + vodmode = False + + if vodmode or tdef.get_live(): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'MainFrame: startDownload: Starting in VOD mode' + videoplayer = VideoPlayer.getInstance() + result = videoplayer.start_and_play(tdef,dscfg) + + # 02/03/09 boudewijn: feedback to the user when there + # are no playable files in the torrent + if not result: + dlg = wx.MessageDialog(None, + self.utility.lang.get("invalid_torrent_no_playable_files_msg"), + self.utility.lang.get("invalid_torrent_no_playable_files_title"), + wx.OK|wx.ICON_INFORMATION) + dlg.ShowModal() + dlg.Destroy() + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'MainFrame: startDownload: Starting in DL mode' + result = self.utility.session.start_download(tdef,dscfg) + if result: + # ARNO50: Richard will look at this + self.guiserver = GUITaskQueue.getInstance() + self.guiserver.add_task(lambda:wx.CallAfter(self.show_saved), 0.2) + + # store result because we want to store clicklog data + # right after download was started, then return result + if clicklog is not None: + mypref = self.utility.session.open_dbhandler(NTFY_MYPREFERENCES) + mypref.addClicklogToMyPreference(tdef.get_infohash(), clicklog) + + return result + + except DuplicateDownloadException: + # show nice warning dialog + dlg = wx.MessageDialog(None, + self.utility.lang.get('duplicate_download_msg'), + self.utility.lang.get('duplicate_download_title'), + wx.OK|wx.ICON_INFORMATION) + result = dlg.ShowModal() + dlg.Destroy() + + # If there is something on the cmdline, all other torrents start + # in STOPPED state. 
Restart + if cmdline: + dlist = self.utility.session.get_downloads() + for d in dlist: + if d.get_def().get_infohash() == tdef.get_infohash(): + d.restart() + break + + except Exception,e: + print_exc() + self.onWarning(e) + return None + + + def show_saved(self): + + self.guiUtility.frame.top_bg.newFile.Show(True) + self.guiUtility.frame.top_bg.Layout() + self.guiserver.add_task(lambda:wx.CallAfter(self.hide_saved), 5.0) + + + def hide_saved(self): + self.guiUtility.frame.top_bg.newFile.Show(False) + + + def checkVersion(self): + guiserver = GUITaskQueue.getInstance() + guiserver.add_task(self._checkVersion,10.0) + + def _checkVersion(self): + # Called by GUITaskQueue thread + my_version = self.utility.getVersion() + try: + curr_status = urllib.urlopen('http://tribler.org/version/').readlines() + line1 = curr_status[0] + if len(curr_status) > 1: + self.update_url = curr_status[1].strip() + else: + self.update_url = 'http://tribler.org' + _curr_status = line1.split() + self.curr_version = _curr_status[0] + if self.newversion(self.curr_version, my_version): + # Arno: we are a separate thread, delegate GUI updates to MainThread + self.upgradeCallback() + + # Also check new version of web2definitions for youtube etc. search + ##Web2Updater(self.utility).checkUpdate() + except Exception,e: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Tribler: Version check failed", time.ctime(time.time()), str(e) + #print_exc() + + def newversion(self, curr_version, my_version): + curr = curr_version.split('.') + my = my_version.split('.') + if len(my) >= len(curr): + nversion = len(my) + else: + nversion = len(curr) + for i in range(nversion): + if i < len(my): + my_v = int(my[i]) + else: + my_v = 0 + if i < len(curr): + curr_v = int(curr[i]) + else: + curr_v = 0 + if curr_v > my_v: + return True + elif curr_v < my_v: + return False + return False + + def upgradeCallback(self): + wx.CallAfter(self.OnUpgrade) + # TODO: warn multiple times? + + def OnUpgrade(self, event=None): + self.setActivity(NTFY_ACT_NEW_VERSION) + guiserver = GUITaskQueue.getInstance() + guiserver.add_task(self.upgradeCallback,10.0) + + def onFocus(self, event = None): + if event is not None: + event.Skip() + #self.window.getSelectedList(event).SetFocus() + + def setGUIupdate(self, update): + oldval = self.GUIupdate + self.GUIupdate = update + + if self.GUIupdate and not oldval: + # Force an update of all torrents + for torrent in self.utility.torrents["all"]: + torrent.updateColumns() + torrent.updateColor() + + + def taskbarCallback(self): + wx.CallAfter(self.onTaskBarActivate) + + + ####################################### + # minimize to tray bar control + ####################################### + def onTaskBarActivate(self, event = None): + self.Iconize(False) + self.Show(True) + self.Raise() + + if self.tbicon is not None: + self.tbicon.updateIcon(False) + + #self.window.list.SetFocus() + + # Resume updating GUI + self.setGUIupdate(True) + + def onIconify(self, event = None): + # This event handler is called both when being minimalized + # and when being restored. 
+ if DEBUG: + if event is not None: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: onIconify(",event.Iconized() + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: onIconify event None" + if event.Iconized(): + videoplayer = VideoPlayer.getInstance() + videoplayer.videoframe.get_videopanel().Pause() # when minimzed pause playback + + if (self.utility.config.Read('mintray', "int") > 0 + and self.tbicon is not None): + self.tbicon.updateIcon(True) + self.Show(False) + + # Don't update GUI while minimized + self.setGUIupdate(False) + else: + videoplayer = VideoPlayer.getInstance() + embed = videoplayer.videoframe.get_videopanel() + if embed.GetState() == MEDIASTATE_PAUSED: + embed.ppbtn.setToggled(False) + embed.vlcwin.setloadingtext('') + embed.vlcwrap.resume() + self.setGUIupdate(True) + if event is not None: + event.Skip() + + def onSize(self, event = None): + # Arno: On Windows when I enable the tray icon and then change + # virtual desktop (see MS DeskmanPowerToySetup.exe) + # I get a onIconify(event.Iconized()==True) event, but when + # I switch back, I don't get an event. As a result the GUIupdate + # remains turned off. The wxWidgets wiki on the TaskBarIcon suggests + # catching the onSize event. + if DEBUG: + if event is not None: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: onSize:",self.GetSize() + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: onSize: None" + self.setGUIupdate(True) + if event is not None: + if event.GetEventType() == wx.EVT_MAXIMIZE: + self.window.SetClientSize(self.GetClientSize()) + event.Skip() + + + # Refresh subscreens + self.refreshNeeded = True + #self.guiUtility.refreshOnResize() + + def onIdle(self, event = None): + """ + Only refresh screens (especially detailsPanel) when resizes are finished + This gives less flickering, but doesnt look pretty, so i commented it out + """ + if self.refreshNeeded: + self.guiUtility.refreshOnResize() + self.refreshNeeded = False + + def getWindowSettings(self): + width = self.utility.config.Read("window_width") + height = self.utility.config.Read("window_height") + try: + size = wx.Size(int(width), int(height)) + except: + size = wx.Size(710, 400) + + x = self.utility.config.Read("window_x") + y = self.utility.config.Read("window_y") + if (x == "" or y == ""): + #position = wx.DefaultPosition + + # On Mac, the default position will be underneath the menu bar, so lookup (top,left) of + # the primary display + primarydisplay = wx.Display(0) + dsize = primarydisplay.GetClientArea() + position = dsize.GetTopLeft() + + # Decrease size to fit on screen, if needed + width = min( size.GetWidth(), dsize.GetWidth() ) + height = min( size.GetHeight(), dsize.GetHeight() ) + size = wx.Size( width, height ) + else: + position = wx.Point(int(x), int(y)) + + return size, position + + def saveWindowSettings(self): + width, height = self.GetSizeTuple() + x, y = self.GetPositionTuple() + self.utility.config.Write("window_width", width) + self.utility.config.Write("window_height", height) + self.utility.config.Write("window_x", x) + self.utility.config.Write("window_y", y) + + self.utility.config.Flush() + + ################################## + # Close Program + ################################## + + def OnCloseWindow(self, event = None): + found = False + if event != None: + nr = event.GetEventType() + lookup = { wx.EVT_CLOSE.evtType[0]: "EVT_CLOSE", wx.EVT_QUERY_END_SESSION.evtType[0]: "EVT_QUERY_END_SESSION", 
wx.EVT_END_SESSION.evtType[0]: "EVT_END_SESSION" } + if nr in lookup: + nr = lookup[nr] + found = True + + print "mainframe: Closing due to event ",nr,`event` + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","mainframe: Closing due to event ",nr,`event` + else: + print "mainframe: Closing untriggered by event" + + + # Don't do anything if the event gets called twice for some reason + if self.utility.abcquitting: + return + + # Check to see if we can veto the shutdown + # (might not be able to in case of shutting down windows) + if event is not None: + try: + if isinstance(event,wx.CloseEvent) and event.CanVeto() and self.utility.config.Read('confirmonclose', "boolean") and not event.GetEventType() == wx.EVT_QUERY_END_SESSION.evtType[0]: + dialog = wx.MessageDialog(None, self.utility.lang.get('confirmmsg'), self.utility.lang.get('confirm'), wx.OK|wx.CANCEL) + result = dialog.ShowModal() + dialog.Destroy() + if result != wx.ID_OK: + event.Veto() + return + except: + print_exc() + + self.utility.abcquitting = True + self.GUIupdate = False + + videoplayer = VideoPlayer.getInstance() + videoplayer.stop_playback() + + self.guiUtility.guiOpen.clear() + + try: + # Restore the window before saving size and position + # (Otherwise we'll get the size of the taskbar button and a negative position) + self.onTaskBarActivate() + self.saveWindowSettings() + except: + print_exc() + + try: + if self.buddyFrame is not None: + self.buddyFrame.Destroy() + if self.fileFrame is not None: + self.fileFrame.Destroy() + if self.videoFrame is not None: + self.videoFrame.Destroy() + except: + pass + + try: + if self.tbicon is not None: + self.tbicon.RemoveIcon() + self.tbicon.Destroy() + self.Destroy() + except: + print_exc() + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","mainframe: OnCloseWindow END" + + if DEBUG: + ts = enumerate() + for t in ts: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","mainframe: Thread still running",t.getName(),"daemon",t.isDaemon() + + if not found or sys.platform =="darwin": + # On Linux with wx 2.8.7.1 this method gets sometimes called with + # a CommandEvent instead of EVT_CLOSE, wx.EVT_QUERY_END_SESSION or + # wx.EVT_END_SESSION + self.quit() + + + def onWarning(self,exc): + msg = self.utility.lang.get('tribler_startup_nonfatalerror') + msg += str(exc.__class__)+':'+str(exc) + dlg = wx.MessageDialog(None, msg, self.utility.lang.get('tribler_warning'), wx.OK|wx.ICON_WARNING) + result = dlg.ShowModal() + dlg.Destroy() + + def onUPnPError(self,upnp_type,listenport,error_type,exc=None,listenproto='TCP'): + + if error_type == 0: + errormsg = unicode(' UPnP mode '+str(upnp_type)+' ')+self.utility.lang.get('tribler_upnp_error1') + elif error_type == 1: + errormsg = unicode(' UPnP mode '+str(upnp_type)+' ')+self.utility.lang.get('tribler_upnp_error2')+unicode(str(exc))+self.utility.lang.get('tribler_upnp_error2_postfix') + elif error_type == 2: + errormsg = unicode(' UPnP mode '+str(upnp_type)+' ')+self.utility.lang.get('tribler_upnp_error3') + else: + errormsg = unicode(' UPnP mode '+str(upnp_type)+' Unknown error') + + msg = self.utility.lang.get('tribler_upnp_error_intro') + msg += listenproto+' ' + msg += str(listenport) + msg += self.utility.lang.get('tribler_upnp_error_intro_postfix') + msg += errormsg + msg += self.utility.lang.get('tribler_upnp_error_extro') + + dlg = wx.MessageDialog(None, msg, self.utility.lang.get('tribler_warning'), wx.OK|wx.ICON_WARNING) + result = dlg.ShowModal() + dlg.Destroy() + + + 
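The newversion() check above treats the two dotted version strings as lists of integer components, pads the shorter list with zeros, and returns True only when the remote version is strictly greater than the local one. A minimal standalone sketch of the same comparison, outside the patch (function and variable names here are illustrative, not part of Tribler):

    def is_newer(remote_version, local_version):
        # Compare dotted version strings component-wise, e.g. "5.0" vs "4.9.2".
        remote = [int(x) for x in remote_version.split('.')]
        local = [int(x) for x in local_version.split('.')]
        width = max(len(remote), len(local))
        remote += [0] * (width - len(remote))   # pad the shorter list with zeros
        local += [0] * (width - len(local))
        for rv, lv in zip(remote, local):
            if rv != lv:
                return rv > lv
        return False   # equal versions are not considered newer

    # e.g. is_newer("5.0", "4.9.2") -> True, is_newer("4.9", "4.9.0") -> False
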
+ def setActivity(self,type,msg=u'',arg2=None): + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","MainFrame: setActivity: t",type,"m",msg,"a2",arg2 + + if self.utility is None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","MainFrame: setActivity: Cannot display: t",type,"m",msg,"a2",arg2 + return + + if currentThread().getName() != "MainThread": + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: setActivity thread",currentThread().getName(),"is NOT MAIN THREAD" + print_stack() + + if type == NTFY_ACT_NONE: + prefix = msg + msg = u'' + elif type == NTFY_ACT_ACTIVE: + prefix = u"" + if msg == "no network": + text = "%s\nLast activity: %.1f seconds ago" % (msg, arg2) + self.SetTitle(text) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Activity",`text` + + elif type == NTFY_ACT_UPNP: + prefix = self.utility.lang.get('act_upnp') + elif type == NTFY_ACT_REACHABLE: + prefix = self.utility.lang.get('act_reachable') + elif type == NTFY_ACT_GET_EXT_IP_FROM_PEERS: + prefix = self.utility.lang.get('act_get_ext_ip_from_peers') + elif type == NTFY_ACT_MEET: + prefix = self.utility.lang.get('act_meet') + elif type == NTFY_ACT_GOT_METADATA: + prefix = self.utility.lang.get('act_got_metadata') + + if self.category.family_filter_enabled() and arg2 == 7: # XXX category + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","MainFrame: setActivity: Hiding XXX torrent",msg + return + + elif type == NTFY_ACT_RECOMMEND: + prefix = self.utility.lang.get('act_recommend') + elif type == NTFY_ACT_DISK_FULL: + prefix = self.utility.lang.get('act_disk_full') + elif type == NTFY_ACT_NEW_VERSION: + prefix = self.utility.lang.get('act_new_version') + if msg == u'': + text = prefix + else: + text = unicode( prefix+u' '+msg) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Activity",`text` + #self.messageField.SetLabel(text) + + def set_player_status(self,s): + """ Called by VideoServer when using an external player """ + if self.videoFrame is not None: + self.videoFrame.set_player_status(s) + + def set_wxapp(self,wxapp): + self.wxapp = wxapp + + def quit(self): + if self.wxapp is not None: + self.wxapp.ExitMainLoop() + + + +class PlayerFrame(VideoFrame): + """ + Wrapper around VideoFrame that allows us to catch the Close event. On + that event we should notify tribler such that it can stop any live torrents, + and restart others that may have been stopped. 
+ """ + def __init__(self,parent,title,iconpath,vlcwrap,logopath): + VideoFrame.__init__(self,parent,title,iconpath,vlcwrap,logopath) + self.parent = parent + self.closed = False + + self.Bind(wx.EVT_CLOSE, self.OnCloseWindow) + + def show_videoframe(self): + self.closed = False + VideoFrame.show_videoframe(self) + + def OnCloseWindow(self, event = None): + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PlayerFrame: ON CLOSE WINDOW" + if not self.closed: + self.closed = True + VideoFrame.OnCloseWindow(self,event) + + if self.parent.wxapp is not None: + self.parent.wxapp.OnClosingVideoFrameOrExtPlayer() + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PlayerFrame: Closing done" + + diff --git a/tribler-mod/Tribler/Main/vwxGUI/MainFrame.py.bak b/tribler-mod/Tribler/Main/vwxGUI/MainFrame.py.bak new file mode 100644 index 0000000..56fbd6b --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/MainFrame.py.bak @@ -0,0 +1,712 @@ +######################################################################### +# +# Author : Choopan RATTANAPOKA, Jie Yang, Arno Bakker +# +# Description : Main ABC [Yet Another Bittorrent Client] python script. +# you can run from source code by using +# >python abc.py +# need Python, WxPython in order to run from source code. +# +# see LICENSE.txt for license information +######################################################################### + +import os,sys + +# TODO: cleanup imports + +# Arno, 2008-03-21: see what happens when we disable this locale thing. Gives +# errors on Vista in "Regional and Language Settings Options" different from +# "English[United Kingdom]" +#import locale +import signal +import commands +import pickle + +try: + import wxversion + wxversion.select('2.8') +except: + pass +import wx +from wx import xrc +#import hotshot + +from threading import Thread, Event,currentThread,enumerate +import time +from traceback import print_exc, print_stack +from cStringIO import StringIO +import urllib + +from Tribler.Main.Utility.utility import Utility +from Tribler.Main.Utility.constants import * #IGNORE:W0611 +import Tribler.Main.vwxGUI.font as font +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility +from Tribler.Main.Dialogs.GUITaskQueue import GUITaskQueue +from Tribler.Main.Dialogs.systray import ABCTaskBarIcon +from Tribler.Main.notification import init as notification_init +from Tribler.Main.globals import DefaultDownloadStartupConfig,get_default_dscfg_filename +from Tribler.Video.VideoPlayer import VideoPlayer +from Tribler.Video.defs import * +from Tribler.Video.VideoFrame import VideoFrame +from Tribler.Video.utils import videoextdefaults + +from Tribler.Category.Category import Category + + +from Tribler.Core.simpledefs import * +from Tribler.Core.API import * +from Tribler.Core.Utilities.utilities import show_permid + +DEBUG = False + + +################################################################ +# +# Class: FileDropTarget +# +# To enable drag and drop for ABC list in main menu +# +################################################################ +class FileDropTarget(wx.FileDropTarget): + def __init__(self, frame): + # Initialize the wsFileDropTarget Object + wx.FileDropTarget.__init__(self) + # Store the Object Reference for dropped files + self.frame = frame + + def OnDropFiles(self, x, y, filenames): + for filename in filenames: + self.frame.startDownload(filename) + return True + + + +# Custom class loaded by XRC +class MainFrame(wx.Frame): + def __init__(self, *args): + self.firewallStatus 
= None + self.utility = None + + if len(args) == 0: + pre = wx.PreFrame() + # the Create step is done by XRC. + self.PostCreate(pre) + self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate) + else: + wx.Frame.__init__(self, args[0], args[1], args[2], args[3]) + self._PostInit() + self.wxapp = None + + def OnCreate(self, event): + self.Unbind(wx.EVT_WINDOW_CREATE) + wx.CallAfter(self._PostInit) + event.Skip() + return True + + def _PostInit(self): + # Do all init here + self.guiUtility = GUIUtility.getInstance() + self.utility = self.guiUtility.utility + self.params = self.guiUtility.params + self.utility.frame = self + self.torrentfeed = None + self.category = Category.getInstance() + + title = self.utility.lang.get('title') + \ + " " + \ + self.utility.lang.get('version') + + # Get window size and position from config file + size, position = self.getWindowSettings() + style = wx.DEFAULT_FRAME_STYLE | wx.CLIP_CHILDREN + + self.SetSize(size) + self.SetPosition(position) + self.SetTitle(title) + tt = self.GetToolTip() + if tt is not None: + tt.SetTip('') + + #wx.Frame.__init__(self, None, -1, title, position, size, style = style) + + self.doneflag = Event() + + dragdroplist = FileDropTarget(self) + self.SetDropTarget(dragdroplist) + + self.tbicon = None + + try: + self.SetIcon(self.utility.icon) + except: + pass + + # Don't update GUI as often when iconized + self.GUIupdate = True + self.oldframe = None + self.window = self.GetChildren()[0] + self.window.utility = self.utility + self.buddyFrame = None + self.fileFrame = None + self.buddyFrame_page = 0 + self.buddyFrame_size = (800, 500) + self.buddyFrame_pos = None + self.fileFrame_size = (800, 500) + self.fileFrame_pos = None + + # Menu Events + ############################ + + self.Bind(wx.EVT_CLOSE, self.OnCloseWindow) +# self.Bind(wx.EVT_MENU, self.OnMenuExit, id = wx.ID_CLOSE) + + # leaving here for the time being: + # wxMSW apparently sends the event to the App object rather than + # the top-level Frame, but there seemed to be some possibility of + # change + self.Bind(wx.EVT_QUERY_END_SESSION, self.OnCloseWindow) + self.Bind(wx.EVT_END_SESSION, self.OnCloseWindow) + + try: + self.tbicon = ABCTaskBarIcon(self) + except: + print_exc() + self.Bind(wx.EVT_ICONIZE, self.onIconify) + self.Bind(wx.EVT_SET_FOCUS, self.onFocus) + self.Bind(wx.EVT_SIZE, self.onSize) + self.Bind(wx.EVT_MAXIMIZE, self.onSize) + #self.Bind(wx.EVT_IDLE, self.onIdle) + + + # transparency + # self.SetTransparent(240) + + + # Init video player + self.videoFrame = None + sys.stdout.write('GUI Complete.\n') + + ##self.standardOverview.Show(True) + self.Show(True) + + + # Just for debugging: add test permids and display top 5 peers from which the most is downloaded in bartercastdb +# bartercastdb = self.utility.session.open_dbhandler(NTFY_BARTERCAST) +# mypermid = bartercastdb.my_permid +# +# if DEBUG: +# +# top = bartercastdb.getTopNPeers(5)['top'] +# +# print 'My Permid: ', show_permid(mypermid) +# +# print 'Top 5 BarterCast peers:' +# print '=======================' +# +# i = 1 +# for (permid, up, down) in top: +# print '%2d: %15s - %10d up %10d down' % (i, bartercastdb.getName(permid), up, down) +# i += 1 + + self.checkVersion() + + # If the user passed a torrentfile on the cmdline, load it. 
+ wx.CallAfter(self.startCMDLineTorrent) + + def startCMDLineTorrent(self): + if self.params[0] != "": + torrentfilename = self.params[0] + self.startDownload(torrentfilename,cmdline=True,vodmode=True) + self.guiUtility.standardLibraryOverview(refresh=True) + + + def startDownload(self,torrentfilename,destdir=None,tdef = None,cmdline=False,clicklog=None,name=None,vodmode=False): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","mainframe: startDownload:",torrentfilename,destdir,tdef + try: + if tdef is None: + tdef = TorrentDef.load(torrentfilename) + defaultDLConfig = DefaultDownloadStartupConfig.getInstance() + dscfg = defaultDLConfig.copy() + if destdir is not None: + dscfg.set_dest_dir(destdir) + + videofiles = tdef.get_files(exts=videoextdefaults) + if vodmode and len(videofiles) == 0: + vodmode = False + + if vodmode or tdef.get_live(): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'MainFrame: startDownload: Starting in VOD mode' + videoplayer = VideoPlayer.getInstance() + result = videoplayer.start_and_play(tdef,dscfg) + + # 02/03/09 boudewijn: feedback to the user when there + # are no playable files in the torrent + if not result: + dlg = wx.MessageDialog(None, + self.utility.lang.get("invalid_torrent_no_playable_files_msg"), + self.utility.lang.get("invalid_torrent_no_playable_files_title"), + wx.OK|wx.ICON_INFORMATION) + dlg.ShowModal() + dlg.Destroy() + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'MainFrame: startDownload: Starting in DL mode' + result = self.utility.session.start_download(tdef,dscfg) + if result: + # ARNO50: Richard will look at this + self.guiserver = GUITaskQueue.getInstance() + self.guiserver.add_task(lambda:wx.CallAfter(self.show_saved), 0.2) + + # store result because we want to store clicklog data + # right after download was started, then return result + if clicklog is not None: + mypref = self.utility.session.open_dbhandler(NTFY_MYPREFERENCES) + mypref.addClicklogToMyPreference(tdef.get_infohash(), clicklog) + + return result + + except DuplicateDownloadException: + # show nice warning dialog + dlg = wx.MessageDialog(None, + self.utility.lang.get('duplicate_download_msg'), + self.utility.lang.get('duplicate_download_title'), + wx.OK|wx.ICON_INFORMATION) + result = dlg.ShowModal() + dlg.Destroy() + + # If there is something on the cmdline, all other torrents start + # in STOPPED state. 
Restart + if cmdline: + dlist = self.utility.session.get_downloads() + for d in dlist: + if d.get_def().get_infohash() == tdef.get_infohash(): + d.restart() + break + + except Exception,e: + print_exc() + self.onWarning(e) + return None + + + def show_saved(self): + + self.guiUtility.frame.top_bg.newFile.Show(True) + self.guiUtility.frame.top_bg.Layout() + self.guiserver.add_task(lambda:wx.CallAfter(self.hide_saved), 5.0) + + + def hide_saved(self): + self.guiUtility.frame.top_bg.newFile.Show(False) + + + def checkVersion(self): + guiserver = GUITaskQueue.getInstance() + guiserver.add_task(self._checkVersion,10.0) + + def _checkVersion(self): + # Called by GUITaskQueue thread + my_version = self.utility.getVersion() + try: + curr_status = urllib.urlopen('http://tribler.org/version/').readlines() + line1 = curr_status[0] + if len(curr_status) > 1: + self.update_url = curr_status[1].strip() + else: + self.update_url = 'http://tribler.org' + _curr_status = line1.split() + self.curr_version = _curr_status[0] + if self.newversion(self.curr_version, my_version): + # Arno: we are a separate thread, delegate GUI updates to MainThread + self.upgradeCallback() + + # Also check new version of web2definitions for youtube etc. search + ##Web2Updater(self.utility).checkUpdate() + except Exception,e: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Tribler: Version check failed", time.ctime(time.time()), str(e) + #print_exc() + + def newversion(self, curr_version, my_version): + curr = curr_version.split('.') + my = my_version.split('.') + if len(my) >= len(curr): + nversion = len(my) + else: + nversion = len(curr) + for i in range(nversion): + if i < len(my): + my_v = int(my[i]) + else: + my_v = 0 + if i < len(curr): + curr_v = int(curr[i]) + else: + curr_v = 0 + if curr_v > my_v: + return True + elif curr_v < my_v: + return False + return False + + def upgradeCallback(self): + wx.CallAfter(self.OnUpgrade) + # TODO: warn multiple times? + + def OnUpgrade(self, event=None): + self.setActivity(NTFY_ACT_NEW_VERSION) + guiserver = GUITaskQueue.getInstance() + guiserver.add_task(self.upgradeCallback,10.0) + + def onFocus(self, event = None): + if event is not None: + event.Skip() + #self.window.getSelectedList(event).SetFocus() + + def setGUIupdate(self, update): + oldval = self.GUIupdate + self.GUIupdate = update + + if self.GUIupdate and not oldval: + # Force an update of all torrents + for torrent in self.utility.torrents["all"]: + torrent.updateColumns() + torrent.updateColor() + + + def taskbarCallback(self): + wx.CallAfter(self.onTaskBarActivate) + + + ####################################### + # minimize to tray bar control + ####################################### + def onTaskBarActivate(self, event = None): + self.Iconize(False) + self.Show(True) + self.Raise() + + if self.tbicon is not None: + self.tbicon.updateIcon(False) + + #self.window.list.SetFocus() + + # Resume updating GUI + self.setGUIupdate(True) + + def onIconify(self, event = None): + # This event handler is called both when being minimalized + # and when being restored. 
+ if DEBUG: + if event is not None: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: onIconify(",event.Iconized() + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: onIconify event None" + if event.Iconized(): + videoplayer = VideoPlayer.getInstance() + videoplayer.videoframe.get_videopanel().Pause() # when minimzed pause playback + + if (self.utility.config.Read('mintray', "int") > 0 + and self.tbicon is not None): + self.tbicon.updateIcon(True) + self.Show(False) + + # Don't update GUI while minimized + self.setGUIupdate(False) + else: + videoplayer = VideoPlayer.getInstance() + embed = videoplayer.videoframe.get_videopanel() + if embed.GetState() == MEDIASTATE_PAUSED: + embed.ppbtn.setToggled(False) + embed.vlcwin.setloadingtext('') + embed.vlcwrap.resume() + self.setGUIupdate(True) + if event is not None: + event.Skip() + + def onSize(self, event = None): + # Arno: On Windows when I enable the tray icon and then change + # virtual desktop (see MS DeskmanPowerToySetup.exe) + # I get a onIconify(event.Iconized()==True) event, but when + # I switch back, I don't get an event. As a result the GUIupdate + # remains turned off. The wxWidgets wiki on the TaskBarIcon suggests + # catching the onSize event. + if DEBUG: + if event is not None: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: onSize:",self.GetSize() + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: onSize: None" + self.setGUIupdate(True) + if event is not None: + if event.GetEventType() == wx.EVT_MAXIMIZE: + self.window.SetClientSize(self.GetClientSize()) + event.Skip() + + + # Refresh subscreens + self.refreshNeeded = True + #self.guiUtility.refreshOnResize() + + def onIdle(self, event = None): + """ + Only refresh screens (especially detailsPanel) when resizes are finished + This gives less flickering, but doesnt look pretty, so i commented it out + """ + if self.refreshNeeded: + self.guiUtility.refreshOnResize() + self.refreshNeeded = False + + def getWindowSettings(self): + width = self.utility.config.Read("window_width") + height = self.utility.config.Read("window_height") + try: + size = wx.Size(int(width), int(height)) + except: + size = wx.Size(710, 400) + + x = self.utility.config.Read("window_x") + y = self.utility.config.Read("window_y") + if (x == "" or y == ""): + #position = wx.DefaultPosition + + # On Mac, the default position will be underneath the menu bar, so lookup (top,left) of + # the primary display + primarydisplay = wx.Display(0) + dsize = primarydisplay.GetClientArea() + position = dsize.GetTopLeft() + + # Decrease size to fit on screen, if needed + width = min( size.GetWidth(), dsize.GetWidth() ) + height = min( size.GetHeight(), dsize.GetHeight() ) + size = wx.Size( width, height ) + else: + position = wx.Point(int(x), int(y)) + + return size, position + + def saveWindowSettings(self): + width, height = self.GetSizeTuple() + x, y = self.GetPositionTuple() + self.utility.config.Write("window_width", width) + self.utility.config.Write("window_height", height) + self.utility.config.Write("window_x", x) + self.utility.config.Write("window_y", y) + + self.utility.config.Flush() + + ################################## + # Close Program + ################################## + + def OnCloseWindow(self, event = None): + found = False + if event != None: + nr = event.GetEventType() + lookup = { wx.EVT_CLOSE.evtType[0]: "EVT_CLOSE", wx.EVT_QUERY_END_SESSION.evtType[0]: "EVT_QUERY_END_SESSION", 
wx.EVT_END_SESSION.evtType[0]: "EVT_END_SESSION" } + if nr in lookup: + nr = lookup[nr] + found = True + + print "mainframe: Closing due to event ",nr,`event` + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","mainframe: Closing due to event ",nr,`event` + else: + print "mainframe: Closing untriggered by event" + + + # Don't do anything if the event gets called twice for some reason + if self.utility.abcquitting: + return + + # Check to see if we can veto the shutdown + # (might not be able to in case of shutting down windows) + if event is not None: + try: + if isinstance(event,wx.CloseEvent) and event.CanVeto() and self.utility.config.Read('confirmonclose', "boolean") and not event.GetEventType() == wx.EVT_QUERY_END_SESSION.evtType[0]: + dialog = wx.MessageDialog(None, self.utility.lang.get('confirmmsg'), self.utility.lang.get('confirm'), wx.OK|wx.CANCEL) + result = dialog.ShowModal() + dialog.Destroy() + if result != wx.ID_OK: + event.Veto() + return + except: + print_exc() + + self.utility.abcquitting = True + self.GUIupdate = False + + videoplayer = VideoPlayer.getInstance() + videoplayer.stop_playback() + + self.guiUtility.guiOpen.clear() + + try: + # Restore the window before saving size and position + # (Otherwise we'll get the size of the taskbar button and a negative position) + self.onTaskBarActivate() + self.saveWindowSettings() + except: + print_exc() + + try: + if self.buddyFrame is not None: + self.buddyFrame.Destroy() + if self.fileFrame is not None: + self.fileFrame.Destroy() + if self.videoFrame is not None: + self.videoFrame.Destroy() + except: + pass + + try: + if self.tbicon is not None: + self.tbicon.RemoveIcon() + self.tbicon.Destroy() + self.Destroy() + except: + print_exc() + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","mainframe: OnCloseWindow END" + + if DEBUG: + ts = enumerate() + for t in ts: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","mainframe: Thread still running",t.getName(),"daemon",t.isDaemon() + + if not found or sys.platform =="darwin": + # On Linux with wx 2.8.7.1 this method gets sometimes called with + # a CommandEvent instead of EVT_CLOSE, wx.EVT_QUERY_END_SESSION or + # wx.EVT_END_SESSION + self.quit() + + + def onWarning(self,exc): + msg = self.utility.lang.get('tribler_startup_nonfatalerror') + msg += str(exc.__class__)+':'+str(exc) + dlg = wx.MessageDialog(None, msg, self.utility.lang.get('tribler_warning'), wx.OK|wx.ICON_WARNING) + result = dlg.ShowModal() + dlg.Destroy() + + def onUPnPError(self,upnp_type,listenport,error_type,exc=None,listenproto='TCP'): + + if error_type == 0: + errormsg = unicode(' UPnP mode '+str(upnp_type)+' ')+self.utility.lang.get('tribler_upnp_error1') + elif error_type == 1: + errormsg = unicode(' UPnP mode '+str(upnp_type)+' ')+self.utility.lang.get('tribler_upnp_error2')+unicode(str(exc))+self.utility.lang.get('tribler_upnp_error2_postfix') + elif error_type == 2: + errormsg = unicode(' UPnP mode '+str(upnp_type)+' ')+self.utility.lang.get('tribler_upnp_error3') + else: + errormsg = unicode(' UPnP mode '+str(upnp_type)+' Unknown error') + + msg = self.utility.lang.get('tribler_upnp_error_intro') + msg += listenproto+' ' + msg += str(listenport) + msg += self.utility.lang.get('tribler_upnp_error_intro_postfix') + msg += errormsg + msg += self.utility.lang.get('tribler_upnp_error_extro') + + dlg = wx.MessageDialog(None, msg, self.utility.lang.get('tribler_warning'), wx.OK|wx.ICON_WARNING) + result = dlg.ShowModal() + dlg.Destroy() + + + 
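setActivity() below only logs a warning when it is invoked from a thread other than MainThread, since wx widgets may only be touched from the GUI thread. A minimal sketch of the usual remedy, hopping to the GUI thread with wx.CallAfter; this is a hypothetical helper, not part of the patch, and it assumes NTFY_ACT_NONE is the constant pulled in by the wildcard simpledefs import above:

    import threading
    import wx

    def set_activity_threadsafe(frame, msg):
        # frame is the MainFrame instance; setActivity(NTFY_ACT_NONE, ...) passes msg through as the activity text.
        if threading.currentThread().getName() == "MainThread":
            frame.setActivity(NTFY_ACT_NONE, msg)
        else:
            # Schedule the call on the wx main loop instead of touching widgets from this thread.
            wx.CallAfter(frame.setActivity, NTFY_ACT_NONE, msg)
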
+ def setActivity(self,type,msg=u'',arg2=None): + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","MainFrame: setActivity: t",type,"m",msg,"a2",arg2 + + if self.utility is None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","MainFrame: setActivity: Cannot display: t",type,"m",msg,"a2",arg2 + return + + if currentThread().getName() != "MainThread": + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: setActivity thread",currentThread().getName(),"is NOT MAIN THREAD" + print_stack() + + if type == NTFY_ACT_NONE: + prefix = msg + msg = u'' + elif type == NTFY_ACT_ACTIVE: + prefix = u"" + if msg == "no network": + text = "%s\nLast activity: %.1f seconds ago" % (msg, arg2) + self.SetTitle(text) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Activity",`text` + + elif type == NTFY_ACT_UPNP: + prefix = self.utility.lang.get('act_upnp') + elif type == NTFY_ACT_REACHABLE: + prefix = self.utility.lang.get('act_reachable') + elif type == NTFY_ACT_GET_EXT_IP_FROM_PEERS: + prefix = self.utility.lang.get('act_get_ext_ip_from_peers') + elif type == NTFY_ACT_MEET: + prefix = self.utility.lang.get('act_meet') + elif type == NTFY_ACT_GOT_METADATA: + prefix = self.utility.lang.get('act_got_metadata') + + if self.category.family_filter_enabled() and arg2 == 7: # XXX category + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","MainFrame: setActivity: Hiding XXX torrent",msg + return + + elif type == NTFY_ACT_RECOMMEND: + prefix = self.utility.lang.get('act_recommend') + elif type == NTFY_ACT_DISK_FULL: + prefix = self.utility.lang.get('act_disk_full') + elif type == NTFY_ACT_NEW_VERSION: + prefix = self.utility.lang.get('act_new_version') + if msg == u'': + text = prefix + else: + text = unicode( prefix+u' '+msg) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Activity",`text` + #self.messageField.SetLabel(text) + + def set_player_status(self,s): + """ Called by VideoServer when using an external player """ + if self.videoFrame is not None: + self.videoFrame.set_player_status(s) + + def set_wxapp(self,wxapp): + self.wxapp = wxapp + + def quit(self): + if self.wxapp is not None: + self.wxapp.ExitMainLoop() + + + +class PlayerFrame(VideoFrame): + """ + Wrapper around VideoFrame that allows us to catch the Close event. On + that event we should notify tribler such that it can stop any live torrents, + and restart others that may have been stopped. 
+ """ + def __init__(self,parent,title,iconpath,vlcwrap,logopath): + VideoFrame.__init__(self,parent,title,iconpath,vlcwrap,logopath) + self.parent = parent + self.closed = False + + self.Bind(wx.EVT_CLOSE, self.OnCloseWindow) + + def show_videoframe(self): + self.closed = False + VideoFrame.show_videoframe(self) + + def OnCloseWindow(self, event = None): + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PlayerFrame: ON CLOSE WINDOW" + if not self.closed: + self.closed = True + VideoFrame.OnCloseWindow(self,event) + + if self.parent.wxapp is not None: + self.parent.wxapp.OnClosingVideoFrameOrExtPlayer() + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PlayerFrame: Closing done" + + diff --git a/tribler-mod/Tribler/Main/vwxGUI/MyFrame.xrc b/tribler-mod/Tribler/Main/vwxGUI/MyFrame.xrc new file mode 100644 index 0000000..ea2ccdf --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/MyFrame.xrc @@ -0,0 +1,154 @@ + + + + + 1 + 1,1 + 3,2 + + #d4d0c8 + + + 0,0 + 1100,683 + #FFFFFF + + wxHORIZONTAL + + + wxVERTICAL + + + 1024,768 + + wxVERTICAL + + + 1000,90 + + + wxLEFT|wxRIGHT|wxFIXED_MINSIZE + 10 + 1000,90 + + + wxBOTTOM|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 0 + + + wxHORIZONTAL + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + wxVERTICAL + + + 230,224 + 523,559 + #FFFFFF + + + wxTOP|wxLEFT|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 10 + + + + + 5,0 + + + + wxVERTICAL + + wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + + 709,172 + 0,0 + #FFFFFF + + + + + wxEXPAND + + + + wxVERTICAL + + 0,10 + + + + 360,500 + #FFFFFF + + + + + wxEXPAND + + + 0,0 + wxRIGHT + -30 + + + + + + wxHORIZONTAL + + 10,0 + + + + 669,20 + #E6E6E6 + + wxHORIZONTAL + + 270,28 + wxEXPAND|wxFIXED_MINSIZE + 3 + + + + wxTOP|wxBOTTOM|wxEXPAND|wxFIXED_MINSIZE + 2 + + + 370,460 + 382,20 + + 382,20 + + + 10,28 + wxEXPAND|wxFIXED_MINSIZE + 0 + + + + + + + + wxEXPAND + + + #FFFFFF + + + wxALIGN_CENTRE_HORIZONTAL + 1024,768 + + + + wxALIGN_CENTRE_HORIZONTAL + + + + + \ No newline at end of file diff --git a/tribler-mod/Tribler/Main/vwxGUI/MyPlayer.xrc b/tribler-mod/Tribler/Main/vwxGUI/MyPlayer.xrc new file mode 100644 index 0000000..a41f340 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/MyPlayer.xrc @@ -0,0 +1,168 @@ + + + + 0,0 + 800,600 + + wxVERTICAL + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + wxHORIZONTAL + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + 0,0 + 20,20 + + + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + 20,0 + 54,30 + + wxHORIZONTAL + + 10,10 + wxEXPAND|wxFIXED_MINSIZE + 3 + + + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + 111,0 + 20,20 + + + + + + + + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + 0,0 + 588,475 + + + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + wxHORIZONTAL + + wxTOP|wxLEFT|wxBOTTOM|wxRIGHT|wxEXPAND|wxALIGN_CENTER_HORIZONTAL|wxFIXED_MINSIZE + 3 + + + 3,487 + 20,85 + + wxHORIZONTAL + + 80,60 + wxEXPAND|wxFIXED_MINSIZE + 3 + + + + wxTOP|wxLEFT|wxBOTTOM|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 3 + + + wxVERTICAL + + 20,40 + wxEXPAND|wxFIXED_MINSIZE + 3 + + + + wxFIXED_MINSIZE + 3 + + + 98,43 + 290,24 + + + + + + wxFIXED_MINSIZE + 3 + + + 356,0 + 75,85 + + + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + 361,0 + 70,85 + + + + wxTOP|wxLEFT|wxBOTTOM|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 3 + + + wxVERTICAL + + 20,40 + wxEXPAND|wxFIXED_MINSIZE + 3 + + + + wxFIXED_MINSIZE + 3 + + + 504,43 + 150,24 + + + + + + 80,60 + wxEXPAND|wxFIXED_MINSIZE + 3 + + + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + 713,0 + 70,85 + + + + + + + + + + diff --git a/tribler-mod/Tribler/Main/vwxGUI/MyText.py b/tribler-mod/Tribler/Main/vwxGUI/MyText.py new file mode 100644 index 0000000..992b0e6 --- 
/dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/MyText.py @@ -0,0 +1,70 @@ +from time import localtime, strftime +import wx + +class MyText(wx.Panel): + def __init__(self, parent, label, colour, font): + wx.Panel.__init__(self, parent, -1) + self._PostInit(parent, label, colour, font) + + def _PostInit(self, parent, label, colour, font): + self.parent = parent + self.label = label + self.colour = colour + self.font = font + + #self.bitmap = wx.Bitmap('../../icons/download.gif', wx.BITMAP_TYPE_ANY) + self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM) + #self.SetBackgroundColour(wx.NullColour) + self.Bind(wx.EVT_PAINT, self.onPaint) + self.Bind(wx.EVT_ERASE_BACKGROUND, self.onErase) +# def HasTransparentBackground(selfs): +# return False + + + def onPaint(self, event): + print 'Paint MyText' + dc = wx.PaintDC(self) + + x, y = self.GetPositionTuple() + l, h = dc.GetTextExtent(self.label) + self.SetSize((l, h)) + dc2 = wx.BufferedPaintDC(self.parent) + dc.Blit(0, 0, l, h, dc2, x, y) + + #dc.FloodFill(0,0, wx.RED) + + #dc.SetBackgroundMode(wx.TRANSPARENT) + dc.SetTextBackground(wx.NullColour) + dc.SetTextForeground(self.colour) + #dc.DrawBitmap(self.bitmap, 10,10, True) + + #dc.DrawRectangle(0,0,l,h) + #dc.GradientFillLinear((0,0,l,h),wx.RED,wx.BLUE,wx.WEST) + dc.SetFont(self.font) + dc.DrawText(self.label , 0, 0) + #wx.StaticText.OnPaint(self, event) + #event.Skip() + + + def onErase(self, event): + dc = event.GetDC() + dc.Clear() + + def SetText(self, text): + self.label = text + wx.EVT_PAINT(self,self.onPaint) + #self.Refresh() + + def SetFont(self, font): + self.font = font + wx.EVT_PAINT(self,self.onPaint) + + def SetColour(self, colour): + self.colour = colour + wx.EVT_PAINT(self,self.onPaint) + + def refresh(self): + wx.EVT_PAINT(self,self.onPaint) + + + diff --git a/tribler-mod/Tribler/Main/vwxGUI/MyText.py.bak b/tribler-mod/Tribler/Main/vwxGUI/MyText.py.bak new file mode 100644 index 0000000..2c7393c --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/MyText.py.bak @@ -0,0 +1,69 @@ +import wx + +class MyText(wx.Panel): + def __init__(self, parent, label, colour, font): + wx.Panel.__init__(self, parent, -1) + self._PostInit(parent, label, colour, font) + + def _PostInit(self, parent, label, colour, font): + self.parent = parent + self.label = label + self.colour = colour + self.font = font + + #self.bitmap = wx.Bitmap('../../icons/download.gif', wx.BITMAP_TYPE_ANY) + self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM) + #self.SetBackgroundColour(wx.NullColour) + self.Bind(wx.EVT_PAINT, self.onPaint) + self.Bind(wx.EVT_ERASE_BACKGROUND, self.onErase) +# def HasTransparentBackground(selfs): +# return False + + + def onPaint(self, event): + print 'Paint MyText' + dc = wx.PaintDC(self) + + x, y = self.GetPositionTuple() + l, h = dc.GetTextExtent(self.label) + self.SetSize((l, h)) + dc2 = wx.BufferedPaintDC(self.parent) + dc.Blit(0, 0, l, h, dc2, x, y) + + #dc.FloodFill(0,0, wx.RED) + + #dc.SetBackgroundMode(wx.TRANSPARENT) + dc.SetTextBackground(wx.NullColour) + dc.SetTextForeground(self.colour) + #dc.DrawBitmap(self.bitmap, 10,10, True) + + #dc.DrawRectangle(0,0,l,h) + #dc.GradientFillLinear((0,0,l,h),wx.RED,wx.BLUE,wx.WEST) + dc.SetFont(self.font) + dc.DrawText(self.label , 0, 0) + #wx.StaticText.OnPaint(self, event) + #event.Skip() + + + def onErase(self, event): + dc = event.GetDC() + dc.Clear() + + def SetText(self, text): + self.label = text + wx.EVT_PAINT(self,self.onPaint) + #self.Refresh() + + def SetFont(self, font): + self.font = font + wx.EVT_PAINT(self,self.onPaint) + + def SetColour(self, 
colour): + self.colour = colour + wx.EVT_PAINT(self,self.onPaint) + + def refresh(self): + wx.EVT_PAINT(self,self.onPaint) + + + diff --git a/tribler-mod/Tribler/Main/vwxGUI/NewStaticText.py b/tribler-mod/Tribler/Main/vwxGUI/NewStaticText.py new file mode 100644 index 0000000..5302895 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/NewStaticText.py @@ -0,0 +1,53 @@ +from time import localtime, strftime +import wx + +class NewStaticText(wx.StaticText): + def __init__(self, parent, label, colour, font): + wx.Panel.__init__(self, parent, -1) + self.parent = parent + self.label = label + self.font = font + self.colour = colour + #self.SetLabel(label) + self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM) + self.Bind(wx.EVT_PAINT, self.OnPaint) + + def OnPaint(self, event): + x, y = self.GetPositionTuple() + dc = wx.PaintDC(self) + l, h = dc.GetTextExtent(self.label) + self.SetSize((l, h)) + dc = wx.PaintDC(self) + dc2 = wx.BufferedPaintDC(self.parent) + dc.Blit(0, 0, l, h, dc2, x, y) + dc.SetTextBackground(wx.NullColour) + dc.SetTextForeground(self.colour) + ##dc.SetBackgroundMode(wx.TRANSPARENT) + ##dc.SetBrush(wx.Brush((0,0,0),wx.TRANSPARENT)) + dc.SetFont(self.font) + dc.DrawText(self.label, 0, 0) + + + def SetColour(self, colour): + self.colour = colour + wx.EVT_PAINT(self,self.OnPaint) + + + def SetText(self, text): + self.label = text + #self.Refresh() + + wx.EVT_PAINT(self,self.OnPaint) + + + + def OnErase(self, evt): + pass + + + def Paint(self, evt): + dc = wx.PaintDC(self) + dc.Clear() + dc.SetTextForeground((0,105,156)) + dc.SetFont(self.font) + dc.DrawText(self.label, 0, 0) diff --git a/tribler-mod/Tribler/Main/vwxGUI/NewStaticText.py.bak b/tribler-mod/Tribler/Main/vwxGUI/NewStaticText.py.bak new file mode 100644 index 0000000..3dfee08 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/NewStaticText.py.bak @@ -0,0 +1,52 @@ +import wx + +class NewStaticText(wx.StaticText): + def __init__(self, parent, label, colour, font): + wx.Panel.__init__(self, parent, -1) + self.parent = parent + self.label = label + self.font = font + self.colour = colour + #self.SetLabel(label) + self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM) + self.Bind(wx.EVT_PAINT, self.OnPaint) + + def OnPaint(self, event): + x, y = self.GetPositionTuple() + dc = wx.PaintDC(self) + l, h = dc.GetTextExtent(self.label) + self.SetSize((l, h)) + dc = wx.PaintDC(self) + dc2 = wx.BufferedPaintDC(self.parent) + dc.Blit(0, 0, l, h, dc2, x, y) + dc.SetTextBackground(wx.NullColour) + dc.SetTextForeground(self.colour) + ##dc.SetBackgroundMode(wx.TRANSPARENT) + ##dc.SetBrush(wx.Brush((0,0,0),wx.TRANSPARENT)) + dc.SetFont(self.font) + dc.DrawText(self.label, 0, 0) + + + def SetColour(self, colour): + self.colour = colour + wx.EVT_PAINT(self,self.OnPaint) + + + def SetText(self, text): + self.label = text + #self.Refresh() + + wx.EVT_PAINT(self,self.OnPaint) + + + + def OnErase(self, evt): + pass + + + def Paint(self, evt): + dc = wx.PaintDC(self) + dc.Clear() + dc.SetTextForeground((0,105,156)) + dc.SetFont(self.font) + dc.DrawText(self.label, 0, 0) diff --git a/tribler-mod/Tribler/Main/vwxGUI/PersonsItemDetailsSummary.py b/tribler-mod/Tribler/Main/vwxGUI/PersonsItemDetailsSummary.py new file mode 100644 index 0000000..c76587a --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/PersonsItemDetailsSummary.py @@ -0,0 +1,116 @@ +from time import localtime, strftime +import wx, os +from Tribler.Main.vwxGUI.tribler_topButton import tribler_topButton, SwitchButton +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility +from 
Tribler.Main.vwxGUI.TriblerStyles import TriblerStyles +from Tribler.Main.vwxGUI.TextButton import * + + +class PersonsItemDetailsSummary(wx.Panel): + + def __init__(self, parent, mode): + wx.Panel.__init__(self, parent, -1) + self.guiUtility = GUIUtility.getInstance() + self.utility = self.guiUtility.utility + + self.mode = mode + self.addComponents() + + self.setData() + + + def addComponents(self): + self.triblerStyles = TriblerStyles.getInstance() + self.SetMinSize((300,40)) + self.hSizer = wx.BoxSizer(wx.HORIZONTAL) + + self.vSizer = wx.BoxSizer(wx.VERTICAL) + + if self.mode == 'persons': + self.thumbSummary = self.Parent.ThumbnailViewer(self.Parent, 'personsItemSummary') + elif self.mode == 'friends': + self.thumbSummary = self.Parent.FriendThumbnailViewer(self.Parent, 'personsItemSummary') + self.thumbSummary.setBackground(wx.BLACK) + self.thumbSummary.SetSize((80,80)) + + self.vSizer.Add(self.thumbSummary, 0, wx.LEFT, 10) + self.hSizer.Add(self.vSizer, 0, wx.TOP, 25) + + self.hSizer.Add([5,1],1,wx.ALL,0) + + self.vSizer2 = wx.BoxSizer(wx.VERTICAL) + self.vSizer2.Add([100,1],0,wx.ALL,0) + self.discFiles = wx.StaticText(self, -1, 'Discovered files:') + self.discPersons = wx.StaticText(self, -1, 'Discovered persons:') + self.numberDownloads = wx.StaticText(self, -1, 'Number of downloads') + + self.triblerStyles.setDarkText(self.discFiles) + self.triblerStyles.setDarkText(self.discPersons) + self.triblerStyles.setDarkText(self.numberDownloads) + +# self.vSizer2.Add([100,10], 0, wx.BOTTOM|wx.FIXED_MINSIZE, 1) + self.vSizer2.Add(self.discFiles, 0, wx.BOTTOM, 1) + self.vSizer2.Add(self.discPersons, 0, wx.BOTTOM, 1) + self.vSizer2.Add(self.numberDownloads, 0, wx.BOTTOM, 1) + + self.hSizer.Add(self.vSizer2, 0, wx.TOP|wx.RIGHT|wx.LEFT|wx.EXPAND, 3) + + self.vSizer3 = wx.BoxSizer(wx.VERTICAL) + self.vSizer3.Add([50,1],0,wx.ALL,0) + self.theDiscFiles = wx.StaticText(self, -1, 'good (DVD)', wx.Point(0,0),wx.Size(50,-1), wx.ALIGN_RIGHT | wx.ST_NO_AUTORESIZE ) + self.theDiscPersons = wx.StaticText(self, -1, 'English', wx.Point(0,0),wx.Size(50,-1), wx.ALIGN_RIGHT | wx.ST_NO_AUTORESIZE ) + self.theNumberDownloads = wx.StaticText(self, -1, '13 included',wx.Point(0,0),wx.Size(50,-1), wx.ALIGN_RIGHT | wx.ST_NO_AUTORESIZE ) + self.moreInfo = TextButton(self, name = "more info >") + + self.triblerStyles.setDarkText(self.theDiscFiles) + self.triblerStyles.setDarkText(self.theDiscPersons) + self.triblerStyles.setDarkText(self.theNumberDownloads) + +# self.vSizer3.Add([100,10], 0, wx.BOTTOM|wx.FIXED_MINSIZE, 1) + self.vSizer3.Add(self.theDiscFiles, 0, wx.BOTTOM, 1) + self.vSizer3.Add(self.theDiscPersons, 0, wx.BOTTOM, 1) + self.vSizer3.Add(self.theNumberDownloads, 0, wx.BOTTOM, 1) + self.vSizer3.Add(self.moreInfo, 0, wx.BOTTOM|wx.EXPAND, 1) + self.hSizer.Add(self.vSizer3, 0, wx.TOP|wx.RIGHT|wx.EXPAND, 3) + + self.SetSizer(self.hSizer) + self.SetAutoLayout(1); + self.Layout() + + def setData(self): + item = self.Parent.data +# descriptionText = self.Parent.data['metadata'].get('Description') +# if descriptionText != None: +# self.Description.SetLabel(descriptionText) +# self.Description.Wrap(-1) +## self.Description.Wrap(300) +# else: +# self.Description.SetLabel('no description available') +#npeers +#ntorrents +#nprefs + self.thumbSummary.setData(self.Parent.data, summary='filesItemSummary') + if 'npeers' in item: + n = unicode(item['npeers']) + if not n or n=='0': + n = '?' + self.theDiscFiles.SetLabel(n) + if 'ntorrents' in item: + n = unicode(item['ntorrents']) + if not n or n == '0': + n = '?' 
+ self.theDiscPersons.SetLabel(n) +# if 'nprefs' in item: + + + permid = item['permid'] + hash_list = self.guiUtility.peer_manager.getPeerHistFiles(permid) + nprefs = max(item.get('nprefs',0), len(hash_list)) + + print 'tb> hashlist = %s' % len(hash_list) + print 'tb> npref = %s' % item.get('nprefs',0) + print 'tb> nprefs = %s' % nprefs + + self.theNumberDownloads.SetLabel(str(nprefs)) + + diff --git a/tribler-mod/Tribler/Main/vwxGUI/PersonsItemDetailsSummary.py.bak b/tribler-mod/Tribler/Main/vwxGUI/PersonsItemDetailsSummary.py.bak new file mode 100644 index 0000000..12c841b --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/PersonsItemDetailsSummary.py.bak @@ -0,0 +1,115 @@ +import wx, os +from Tribler.Main.vwxGUI.tribler_topButton import tribler_topButton, SwitchButton +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility +from Tribler.Main.vwxGUI.TriblerStyles import TriblerStyles +from Tribler.Main.vwxGUI.TextButton import * + + +class PersonsItemDetailsSummary(wx.Panel): + + def __init__(self, parent, mode): + wx.Panel.__init__(self, parent, -1) + self.guiUtility = GUIUtility.getInstance() + self.utility = self.guiUtility.utility + + self.mode = mode + self.addComponents() + + self.setData() + + + def addComponents(self): + self.triblerStyles = TriblerStyles.getInstance() + self.SetMinSize((300,40)) + self.hSizer = wx.BoxSizer(wx.HORIZONTAL) + + self.vSizer = wx.BoxSizer(wx.VERTICAL) + + if self.mode == 'persons': + self.thumbSummary = self.Parent.ThumbnailViewer(self.Parent, 'personsItemSummary') + elif self.mode == 'friends': + self.thumbSummary = self.Parent.FriendThumbnailViewer(self.Parent, 'personsItemSummary') + self.thumbSummary.setBackground(wx.BLACK) + self.thumbSummary.SetSize((80,80)) + + self.vSizer.Add(self.thumbSummary, 0, wx.LEFT, 10) + self.hSizer.Add(self.vSizer, 0, wx.TOP, 25) + + self.hSizer.Add([5,1],1,wx.ALL,0) + + self.vSizer2 = wx.BoxSizer(wx.VERTICAL) + self.vSizer2.Add([100,1],0,wx.ALL,0) + self.discFiles = wx.StaticText(self, -1, 'Discovered files:') + self.discPersons = wx.StaticText(self, -1, 'Discovered persons:') + self.numberDownloads = wx.StaticText(self, -1, 'Number of downloads') + + self.triblerStyles.setDarkText(self.discFiles) + self.triblerStyles.setDarkText(self.discPersons) + self.triblerStyles.setDarkText(self.numberDownloads) + +# self.vSizer2.Add([100,10], 0, wx.BOTTOM|wx.FIXED_MINSIZE, 1) + self.vSizer2.Add(self.discFiles, 0, wx.BOTTOM, 1) + self.vSizer2.Add(self.discPersons, 0, wx.BOTTOM, 1) + self.vSizer2.Add(self.numberDownloads, 0, wx.BOTTOM, 1) + + self.hSizer.Add(self.vSizer2, 0, wx.TOP|wx.RIGHT|wx.LEFT|wx.EXPAND, 3) + + self.vSizer3 = wx.BoxSizer(wx.VERTICAL) + self.vSizer3.Add([50,1],0,wx.ALL,0) + self.theDiscFiles = wx.StaticText(self, -1, 'good (DVD)', wx.Point(0,0),wx.Size(50,-1), wx.ALIGN_RIGHT | wx.ST_NO_AUTORESIZE ) + self.theDiscPersons = wx.StaticText(self, -1, 'English', wx.Point(0,0),wx.Size(50,-1), wx.ALIGN_RIGHT | wx.ST_NO_AUTORESIZE ) + self.theNumberDownloads = wx.StaticText(self, -1, '13 included',wx.Point(0,0),wx.Size(50,-1), wx.ALIGN_RIGHT | wx.ST_NO_AUTORESIZE ) + self.moreInfo = TextButton(self, name = "more info >") + + self.triblerStyles.setDarkText(self.theDiscFiles) + self.triblerStyles.setDarkText(self.theDiscPersons) + self.triblerStyles.setDarkText(self.theNumberDownloads) + +# self.vSizer3.Add([100,10], 0, wx.BOTTOM|wx.FIXED_MINSIZE, 1) + self.vSizer3.Add(self.theDiscFiles, 0, wx.BOTTOM, 1) + self.vSizer3.Add(self.theDiscPersons, 0, wx.BOTTOM, 1) + self.vSizer3.Add(self.theNumberDownloads, 0, wx.BOTTOM, 
1) + self.vSizer3.Add(self.moreInfo, 0, wx.BOTTOM|wx.EXPAND, 1) + self.hSizer.Add(self.vSizer3, 0, wx.TOP|wx.RIGHT|wx.EXPAND, 3) + + self.SetSizer(self.hSizer) + self.SetAutoLayout(1); + self.Layout() + + def setData(self): + item = self.Parent.data +# descriptionText = self.Parent.data['metadata'].get('Description') +# if descriptionText != None: +# self.Description.SetLabel(descriptionText) +# self.Description.Wrap(-1) +## self.Description.Wrap(300) +# else: +# self.Description.SetLabel('no description available') +#npeers +#ntorrents +#nprefs + self.thumbSummary.setData(self.Parent.data, summary='filesItemSummary') + if 'npeers' in item: + n = unicode(item['npeers']) + if not n or n=='0': + n = '?' + self.theDiscFiles.SetLabel(n) + if 'ntorrents' in item: + n = unicode(item['ntorrents']) + if not n or n == '0': + n = '?' + self.theDiscPersons.SetLabel(n) +# if 'nprefs' in item: + + + permid = item['permid'] + hash_list = self.guiUtility.peer_manager.getPeerHistFiles(permid) + nprefs = max(item.get('nprefs',0), len(hash_list)) + + print 'tb> hashlist = %s' % len(hash_list) + print 'tb> npref = %s' % item.get('nprefs',0) + print 'tb> nprefs = %s' % nprefs + + self.theNumberDownloads.SetLabel(str(nprefs)) + + diff --git a/tribler-mod/Tribler/Main/vwxGUI/PersonsItemPanel.py b/tribler-mod/Tribler/Main/vwxGUI/PersonsItemPanel.py new file mode 100644 index 0000000..7658cc4 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/PersonsItemPanel.py @@ -0,0 +1,705 @@ +from time import localtime, strftime +# Written by Jelle Roozenburg, Maarten ten Brinke, Arno Bakker +# see LICENSE.txt for license information + +import wx, time, sys +import random +from traceback import print_exc + +from Tribler.Core.simpledefs import * +from Tribler.Core.Utilities.utilities import * +from Tribler.Core.Utilities.unicode import * +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility +from Tribler.Main.Utility.utility import copyPeer, similarPeer +from Tribler.Main.vwxGUI.TriblerStyles import TriblerStyles + +from Tribler.Main.vwxGUI.TextButton import TextButtonLeft +from Tribler.Main.vwxGUI.PersonsItemDetailsSummary import PersonsItemDetailsSummary +from Tribler.Main.vwxGUI.bgPanel import ImagePanel +from Tribler.Main.vwxGUI.filesItemPanel import getResizedBitmapFromImage +from Tribler.Main.vwxGUI.IconsManager import IconsManager, data2wxBitmap +from font import * +## import TasteHeart + +DEBUG = False + +# font sizes +if sys.platform == 'darwin': + FS_PERSONSTITLE = 10 + FS_SIMILARITY = 10 + FS_HEARTRANK = 8 + FS_ONLINE = 10 + FS_DISCOVERED = 8 +else: + FS_PERSONSTITLE = 8 + FS_SIMILARITY = 10 + FS_HEARTRANK = 7 + FS_ONLINE = 8 + FS_DISCOVERED = 7 + +class PersonsItemPanel(wx.Panel): + """ + PersonsItemPanel shows one persons item inside the PersonsGridPanel + """ + def __init__(self, parent, keyTypedFun=None): + global TORRENTPANEL_BACKGROUND + + wx.Panel.__init__(self, parent, -1) + self.guiUtility = GUIUtility.getInstance() + self.utility = self.guiUtility.utility + self.parent = parent + self.listItem = (self.parent.cols == 1) + self.data = None + self.summary = None + self.datacopy = {} + self.titleLength = 137 # num characters + self.ThumbnailViewer = ThumbnailViewer + self.triblerGrey = wx.Colour(128,128,128) + self.selected = False + self.warningMode = False + self.oldCategoryLabel = None + self.guiserver = parent.guiserver + self.selected = False + self.superpeer_db = parent.superpeer_db + self.keyTypedFun = keyTypedFun + self.addComponents() + self.Show() + self.Refresh() + self.Layout() + 
self.triblerStyles = TriblerStyles.getInstance() + + + def addComponents(self): + self.Show(False) + + self.selectedColour = wx.Colour(255,200,187) + self.unselectedColour = wx.WHITE + +# self.SetBackgroundColour(self.unselectedColour) + self.Bind(wx.EVT_LEFT_UP, self.mouseAction) + self.Bind(wx.EVT_KEY_UP, self.keyTyped) + + if not self.listItem: + self.SetMinSize((80,140)) +# # Add spacer + self.hSizer = wx.BoxSizer(wx.HORIZONTAL) + self.hSizer.Add([10,5],0,wx.EXPAND|wx.FIXED_MINSIZE,3) + self.vSizer = wx.BoxSizer(wx.VERTICAL) + # Add thumb + self.thumb = ThumbnailViewer(self, 'personsMode') + self.thumb.setBackground(wx.BLACK) + self.thumb.SetSize((80,80)) + self.vSizer.Add(self.thumb, 0, wx.ALL, 0) + # Add title + self.title =wx.StaticText(self,-1,"",wx.Point(0,0),wx.Size(80,15)) + self.title.SetMinSize((80,30)) + self.triblerStyles.setLightText(self.title) + self.vSizer.Add(self.title, 0, wx.BOTTOM, 3) + + self.moreInfo = TextButtonLeft(self, name = "more info >") + self.moreInfo.SetMinSize((60,20)) + self.vSizer.Add(self.moreInfo, 0, wx.BOTTOM|wx.EXPAND|wx.ALIGN_RIGHT, 3) + # + self.hSizer.Add(self.vSizer,0,wx.ALL,0) + self.hSizer.Add([5,5],0,wx.EXPAND|wx.FIXED_MINSIZE,3) + self.SetSizer(self.hSizer); + + else: #list item + self.SetMinSize((670,22)) + + self.vSizerOverall = wx.BoxSizer(wx.VERTICAL) + self.hSizer = wx.BoxSizer(wx.HORIZONTAL) + self.hSizer.Add([10,5],0,wx.EXPAND|wx.FIXED_MINSIZE,3) + self.vSizerOverall.Add(self.hSizer, 0, wx.FIXED|wx.EXPAND, 0) + + self.thumb = ThumbnailViewer(self, 'personsMode') + self.thumb.setBackground(wx.BLACK) + self.thumb.SetSize((18,18)) + self.hSizer.Add(self.thumb, 0, wx.ALL, 2) + # Add title + self.title =wx.StaticText(self,-1,"",wx.Point(0,0),wx.Size(105,18), wx.ST_NO_AUTORESIZE) + self.title.SetMinSize((105,14)) + self.triblerStyles.setLightText(self.title) + self.hSizer.Add(self.title, 1,wx.TOP|wx.BOTTOM, 2) + # V Line + self.vLine3 = self.addLine() + # Add status + self.status= wx.StaticText(self,-1,"10",wx.Point(0,0),wx.Size(110,18), wx.ALIGN_RIGHT | wx.ST_NO_AUTORESIZE) + self.status.SetMinSize((165,18)) + self.triblerStyles.setLightText(self.status) + self.hSizer.Add(self.status, 0,wx.TOP|wx.BOTTOM, 2) + # V Line + self.vLine1 = self.addLine() + # Add discovered Files + self.discFiles = wx.StaticText(self,-1,"110000",wx.Point(0,0),wx.Size(75,18), wx.ALIGN_RIGHT | wx.ST_NO_AUTORESIZE) + self.discFiles.SetMinSize((40,18)) + self.triblerStyles.setLightText(self.discFiles) + self.hSizer.Add(self.discFiles, 0,wx.TOP, 3) + # V Line + self.vLine2 = self.addLine() + # Add discovered Persons + self.discPersons= wx.StaticText(self,-1,"100000",wx.Point(0,0),wx.Size(110,18), wx.ALIGN_RIGHT | wx.ST_NO_AUTORESIZE) + self.discPersons.SetMinSize((40,18)) + self.triblerStyles.setLightText(self.discPersons) + self.hSizer.Add(self.discPersons, 0,wx.TOP,3) + # V Line + self.vLine4 = self.addLine() + # Add Taste Heart - Add Spacer to keep space occupied when no heart available + self.vSizer2 = wx.BoxSizer(wx.VERTICAL) + self.vSizer2.Add([60,2],0,wx.EXPAND|wx.FIXED_MINSIZE,3) + self.hSizer2 = wx.BoxSizer(wx.HORIZONTAL) + self.tasteHeart = TasteHeart.TasteHeart(self, -1, wx.DefaultPosition, wx.Size(14,14),name='TasteHeart') + self.hSizer2.Add(self.tasteHeart, 0, wx.TOP, 0) + # Add Taste similarity + self.taste =wx.StaticText(self,-1,"",wx.Point(0,0),wx.Size(40,15)) + self.taste.SetBackgroundColour(wx.WHITE) + self.taste.SetFont(wx.Font(FS_HEARTRANK,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + self.taste.SetMinSize((40,15)) + self.taste.SetLabel('') + 
self.hSizer2.Add(self.taste, 0, wx.LEFT, 0) + self.vSizer2.Add(self.hSizer2,0, wx.EXPAND|wx.FIXED_MINSIZE, 0) + self.hSizer.Add(self.vSizer2,0,wx.EXPAND|wx.FIXED_MINSIZE, 0) + # V Line + self.vLine5 = self.addLine() + # Add Friends Icon + self.vSizer3 = wx.BoxSizer(wx.VERTICAL) + self.vSizer3.Add([22,2],0,wx.FIXED_MINSIZE,3) + self.friendsIcon = ImagePanel(self) + self.friendsIcon.setBackground(wx.WHITE) +# self.friendsIcon.SetMinSize((22,-1)) +# self.friendsIcon.SetSize((22,-1)) + self.friendsIcon.Hide() + self.vSizer3.Add(self.friendsIcon,0, wx.FIXED_MINSIZE, 0) + self.hSizer.Add(self.vSizer3, 0, wx.TOP|wx.RIGHT, 0) + + + self.hSizerSummary = wx.BoxSizer(wx.HORIZONTAL) + self.vSizerOverall.Add(self.hSizerSummary, 1, wx.FIXED_MINSIZE|wx.EXPAND, 0) + + self.SetSizer(self.vSizerOverall) +# self.hSizer.Add([10,5],0,wx.EXPAND|wx.FIXED_MINSIZE,3) +# self.SetSizer(self.hSizer); + + + + self.SetAutoLayout(1); + self.Layout(); + self.Refresh() + + for window in self.GetChildren(): + if window.GetName() != 'more info >': + window.Bind(wx.EVT_LEFT_UP, self.mouseAction) + window.Bind(wx.EVT_KEY_UP, self.keyTyped) + window.Bind(wx.EVT_RIGHT_DOWN, self.mouseAction) + else: + window.Bind(wx.EVT_LEFT_UP, self.moreInfoClicked) + + def getColumns(self): + return [{'sort':'name', 'reverse':True, 'title':'name', 'weight':1,'tip':self.utility.lang.get('C_personname') }, + {'sort':'last_connected', 'reverse': False, 'title':'status', 'width':165, 'tip':self.utility.lang.get('C_status'), 'order':'down'}, + {'sort':'num_torrents', 'reverse':False, 'pic':'iconDiscFiles','width':40, 'tip':self.utility.lang.get('C_discfiles')}, + {'sort':'num_peers', 'reverse':False, 'pic':'iconDiscPersons', 'width':40, 'tip':self.utility.lang.get('C_discpersons')}, + {'sort':'similarity', 'reverse':False, 'pic':'heartSmall', 'width':60, 'tip':self.utility.lang.get('C_recommpersons')}, + {'sort':'friend', 'reverse':True, 'pic':'iconfriends', 'width':22, 'tip':self.utility.lang.get('C_friends')} + ] + + def setData(self, peer_data): + # set bitmap, rating, title + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","pip: setData:",peer_data + + self.data = peer_data + + # do not reload similar peers + if similarPeer(self.data, self.datacopy): + return + self.datacopy = copyPeer(peer_data) + + + if peer_data is None: + peer_data = {} + + if peer_data.get('permid'): + title = peer_data['name'][:self.titleLength] + self.title.Enable(True) + self.title.SetLabel(title) + + if not self.listItem: + self.title.Wrap(self.title.GetSize()[0]) + + try: + ipport = peer_data['ip']+':'+str(peer_data['port']) + except: + ipport = peer_data['name'] + self.title.SetToolTipString(ipport) + + if self.listItem: + # self.discFiles.Enable(True) + # self.discFiles.SetLabel(peer_data['??']) + # self.discPersons.Enable(True) + # self.discPersons.SetLabel(peer_data['??']) + + self.vLine1.Show() + self.vLine2.Show() + self.vLine3.Show() + self.vLine4.Show() + self.vLine5.Show() + + # -- status issues + self.status.Enable(True) + #self.status.SetLabel(peer_data['last_connected']) + statusPeer = peer_data['last_connected'] + + if peer_data.get('online'): + self.status.SetLabel('online') + elif statusPeer is not None: + if statusPeer < 0: + self.status.SetLabel('never seen') + else: + self.status.SetLabel('conn. %s' % friendly_time(statusPeer)) + else: + self.status.SetLabel( 'unknown') + + # number of Discovered files and persons + n = peer_data.get('num_peers') + if n is None: + n = '?' 
+ self.discPersons.SetLabel(unicode(n)) + + t = peer_data.get('num_torrents') + if t is None: + t = '?' + self.discFiles.SetLabel(unicode(t)) + + # -- taste issues + rank = peer_data.get('simRank',-1) + recommField = self.taste + if rank!=-1: + if rank == 1: + self.tasteHeart.SetToolTipString("%d" % rank + "st of top 20 of all discovered persons") + recommField.SetLabel("%d" % rank + "st") + elif rank == 2: + self.tasteHeart.SetToolTipString("%d" % rank + "nd of top 20 of all discovered persons") + recommField.SetLabel("%d" % rank + "nd") + elif rank == 3: + self.tasteHeart.SetToolTipString("%d" % rank + "rd of top 20 of all discovered persons") + recommField.SetLabel("%d" % rank + "rd") + else: + self.tasteHeart.SetToolTipString("%d" % rank + "th of top 20 of all discovered persons") + recommField.SetLabel("%d" % rank + "th") + self.tasteHeart.Show() + self.tasteHeart.setRank(rank) + else: + self.tasteHeart.Hide() + self.taste.SetLabel('') + + # -- friend issues + fs = self.data.get('friend') + if fs == FS_MUTUAL or fs == FS_I_INVITED: + if self.data.get('online'): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'pip: friend online' + self.friendsIcon.setBitmapFromFile('friend') + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'pip: friend offline' + self.friendsIcon.setBitmapFromFile('friend_offline') + self.friendsIcon.Show() + else: + self.friendsIcon.Hide() + + else: + self.title.SetLabel('') + self.title.SetToolTipString('') + self.title.Enable(False) + + if self.listItem: + self.discFiles.SetLabel('') + self.discPersons.SetLabel('') + self.status.SetLabel('') + self.taste.SetLabel('') + self.tasteHeart.Hide() + self.friendsIcon.Hide() + self.vLine1.Hide() + self.vLine2.Hide() + self.vLine3.Hide() + else: + self.moreInfo.Show() + self.vLine4.Hide() + self.vLine5.Hide() + + self.thumb.setData(peer_data) + + self.Layout() + self.Refresh() + #self.parent.Refresh() + + + def addLine(self): + vLine = wx.StaticLine(self,-1,wx.DefaultPosition, wx.Size(2,22),wx.LI_VERTICAL) + self.hSizer.Add(vLine, 0, wx.RIGHT|wx.LEFT|wx.EXPAND, 3) + return vLine + + def select(self, rowIndex, colIndex, ignore1, ignore2, ignore3): + if DEBUG: + print 'pip: person selected' + colour = self.guiUtility.selectedColour + + self.selected = True + self.thumb.setSelected(True) + self.title.SetBackgroundColour(colour) + + if self.listItem: + self.SetBackgroundColour(colour) + self.discFiles.SetBackgroundColour(colour) + self.discPersons.SetBackgroundColour(colour) + self.status.SetBackgroundColour(colour) + self.tasteHeart.setBackground(colour) + self.taste.SetBackgroundColour(colour) + self.friendsIcon.setBackground(colour) + else: + self.moreInfo.Hide() + + self.Refresh() + + def deselect(self, rowIndex, colIndex): + + if rowIndex % 2 == 0 or not self.listItem: + colour = self.guiUtility.unselectedColour + else: + colour = self.guiUtility.unselectedColour2 + + self.selected = False + self.thumb.setSelected(False) + self.title.SetBackgroundColour(colour) + + if self.listItem: + self.SetBackgroundColour(colour) + self.discFiles.SetBackgroundColour(colour) + self.discPersons.SetBackgroundColour(colour) + self.status.SetBackgroundColour(colour) + self.tasteHeart.setBackground(colour) + self.togglePersonsItemDetailsSummary(True) + self.guiUtility.standardOverview.selectedPeer = self.data['permid'] + self.taste.SetBackgroundColour(colour) + self.friendsIcon.setBackground(colour) + + self.Refresh() + + def keyTyped(self, event): + if self.selected: + 
key = event.GetKeyCode() + if (key == wx.WXK_DELETE): + if self.data: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'pip: deleting' +# self.guiUtility.deleteTorrent(self.data) + event.Skip() + + def mouseAction(self, event): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","pip: set focus" + self.SetFocus() + if self.data: + self.guiUtility.selectPeer(self.data) + + self.togglePersonsItemDetailsSummary(False) + if event.RightDown(): + self.rightMouseButton(event) + + + def rightMouseButton(self, event): + menu = self.guiUtility.OnRightMouseAction(event) + if menu is not None: + self.PopupMenu(menu, (-1,-1)) + + def getIdentifier(self): + if self.data: + return self.data['permid'] + +class ThumbnailViewer(wx.Panel): + """ + Show thumbnail and mast with info on mouseOver + """ + + def __init__(self, *args, **kw): + self.triblerGrey = wx.Colour(128,128,128) + self.triblerLightGrey = wx.Colour(203,203,203) + if len(args) == 0: + pre = wx.PrePanel() + # the Create step is done by XRC. + self.PostCreate(pre) + self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate) + else: + wx.Panel.__init__(self, *args, **kw) + self._PostInit() + + def OnCreate(self, event): + self.Unbind(wx.EVT_WINDOW_CREATE) + + def togglePersonsItemDetailsSummary(self, visible): + + if visible and not self.summary: + self.summary = PersonsItemDetailsSummary(self, mode='persons') + self.triblerStyles.setLightText(self.summary) + self.hSizerSummary.Add(self.summary, 1, wx.ALL|wx.EXPAND, 0) + self.SetMinSize((-1,140)) + + elif self.summary and not visible: + self.summary.Hide() + # the Thumb should be destoryed seperately because it has a different parent. + self.summary.thumbSummary.Destroy() + self.summary.DestroyChildren() + self.summary.Destroy() + self.summary = None + self.SetMinSize((-1,22)) + + def moreInfoClicked(self, event): + event.Skip() + self.guiUtility.standardOverview.selectedPeer = self.data['permid'] + self.guiUtility.buttonClicked(event) + wx.CallAfter(self._PostInit) + event.Skip() + return True + + def _PostInit(self): + # Do all init here +# self.backgroundColor = wx.WHITE + self.dataBitmap = self.maskBitmap = None + self.data = None + self.mouseOver = False + self.triblerGrey = wx.Colour(128,128,128) + self.triblerLightGrey = wx.Colour(203,203,203) + self.guiUtility = GUIUtility.getInstance() + self.utility = self.guiUtility.utility + self.Bind(wx.EVT_MOUSE_EVENTS, self.mouseAction) + self.Bind(wx.EVT_LEFT_UP, self.guiUtility.buttonClicked) + self.Bind(wx.EVT_PAINT, self.OnPaint) + self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnErase) + self.selected = False + self.border = None + #create the heart + #I will use TasteHeart.BITMAPS to paint the right one + self.peer_db = self.utility.session.open_dbhandler(NTFY_PEERS) + self.superpeer_db = self.utility.session.open_dbhandler(NTFY_SUPERPEERS) + self.iconsManager = IconsManager.getInstance() + + + def setData(self, data, summary=''): + + if not data: + self.Hide() + self.Refresh() + return + + if not self.IsShown(): + self.Show() + #if data != self.data: + self.data = data + self.setThumbnail(data) + + def setThumbnail(self, data, summary=''): + # Get the file(s)data for this torrent + try: + + listItem = self.GetParent().listItem + if listItem and summary != 'filesItemSummary': + defThumb = 'DEFAULT_THUMB_SMALL' + else: + defThumb = 'DEFAULT_THUMB' + + bmp_default = self.iconsManager.get_default('personsMode',defThumb) + # Check if we have already read the thumbnail and metadata information from this 
torrent file + if data.get('metadata'): + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","pip: Reusing Bitmap",`data['name']` + + bmp = data['metadata'].get('ThumbnailBitmap') + tt = data['metadata'].get('tried_time') + if not bmp: + now = time() + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","BMP IS NONE",data['name'] + if now > tt+(15*60.0): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","REFRESH OF PEER IMAGE SCHEDULED" + self.GetParent().guiserver.add_task(lambda:self.loadMetadata(data),0) + else: + bmp_default = bmp + else: + self.GetParent().guiserver.add_task(lambda:self.loadMetadata(data),0) + + self.setBitmap(bmp_default) + width, height = self.GetSize() + d = 1 + self.border = [wx.Point(0,d), wx.Point(width-d, d), wx.Point(width-d, height-d), wx.Point(d,height-d), wx.Point(d,0)] + self.Refresh() + #wx.Yield() + + except: + print_exc() + return {} + + + def setBitmap(self, bmp): + # Recalculate image placement + w, h = self.GetSize() + img = bmp.ConvertToImage() + bmp = getResizedBitmapFromImage(img, (w,h)) + + self.dataBitmap = bmp + iw, ih = bmp.GetSize() + self.xpos, self.ypos = (w-iw)/2, (h-ih)/2 + + + def loadMetadata(self,data,type=''): + """ Called by non-GUI thread """ + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","pip: ThumbnailViewer: loadMetadata: Peer",show_permid_short(data['permid']),data['name'] + + # We can't do any wx stuff here apparently, so the only thing we can do is to + # read the data from the file and create the wxBitmap in the GUI callback. + [mimetype,bmpdata] = self.peer_db.getPeerIcon(data['permid']) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PersonsItemPanel: ThumbnailViewer: loadMetadata: Got",show_permid_short(data['permid']),mimetype + + wx.CallAfter(self.metadata_thread_gui_callback,data,mimetype,bmpdata,type) + + def metadata_thread_gui_callback(self,data,mimetype,bmpdata,type=''): + """ Called by GUI thread """ + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","pip: ThumbnailViewer: GUI callback" + + metadata = {} + metadata['tried_time'] = time()+(random.random()*100) + if mimetype is not None: + metadata['ThumbnailBitmap'] = data2wxBitmap(mimetype,bmpdata) + else: + superpeers = self.superpeer_db.getSuperPeers() + + """ + if data['name'].lower().startswith("superpeer"): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","pip: Name is superpeer",data['name'],"permid",show_permid_short(data['permid']) + for speer in superpeers: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","pip: Comparing to superpeer",show_permid_short(speer) + """ + if data['permid'] in superpeers: + bm = self.iconsManager.get_default('personsMode','SUPERPEER_BITMAP') + metadata['ThumbnailBitmap'] = bm + else: + metadata['ThumbnailBitmap'] = None + + if type and metadata['ThumbnailBitmap'] is not None: + iw, ih = metadata['ThumbnailBitmap'].GetSize() + w, h = self.GetSize() + if (iw/float(ih)) > (w/float(h)): + nw = w + nh = int(ih * w/float(iw)) + else: + nh = h + nw = int(iw * h/float(ih)) + if nw != iw or nh != ih: + #print 'Rescale from (%d, %d) to (%d, %d)' % (iw, ih, nw, nh) + img = wx.ImageFromBitmap(metadata['ThumbnailBitmap']) + img.Rescale(nw, nh) + metadata['ThumbnailBitmap'+type] = wx.BitmapFromImage(img) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","pip: Netresult is",metadata['ThumbnailBitmap'] + + if DEBUG: + print >>sys.stderr, 
strftime("%d-%m-%Y %H:%M:%S", localtime())," ","pip: ThumbnailViewer: Setting metadata" + data['metadata'] = metadata + + # This item may be displaying another person right now, only show the icon + # when it's still the same person + if data['permid'] == self.data['permid']: + thumb_type = 'ThumbnailBitmap' + if type: + thumb_type = thumb_type+type + if thumb_type in metadata and metadata[thumb_type] is not None: + self.setBitmap(metadata[thumb_type]) + self.Refresh() + + + def OnErase(self, event): + pass + #event.Skip() + + def setSelected(self, sel): + self.selected = sel + self.Refresh() + + def isSelected(self): + return self.selected + + def mouseAction(self, event): + if event.Entering(): + if DEBUG: + print 'pip: enter' + self.mouseOver = True + self.Refresh() + elif event.Leaving(): + self.mouseOver = False + if DEBUG: + print 'pip: leave' + self.Refresh() +# elif event.ButtonUp(): +# self.ClickedButton() + #event.Skip() + """ + def ClickedButton(self): + print 'Click' + """ + + def setBackground(self, wxColor): + self.backgroundColor = wxColor + + def OnPaint(self, evt): + dc = wx.BufferedPaintDC(self) + dc.SetBackground(wx.Brush(self.backgroundColor)) + dc.Clear() + + if self.dataBitmap: + dc.DrawBitmap(self.dataBitmap, self.xpos,self.ypos, True) +# if self.mouseOver: + if self.data is not None and type(self.data)==dict and self.data.get('permid'): + rank = self.data['simRank'] + #because of the fact that hearts are coded so that lower index means higher ranking, then: + heartBitmap = TasteHeart.getHeartBitmap(rank) + if self.mouseOver: + mask = self.iconsManager.get_default('personsMode','MASK_BITMAP_CLEAN') + y_pos = 0 + m_height = mask.GetSize()[1] + y_height = self.GetSize()[1] + while y_pos>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","pip: setData:",peer_data + + self.data = peer_data + + # do not reload similar peers + if similarPeer(self.data, self.datacopy): + return + self.datacopy = copyPeer(peer_data) + + + if peer_data is None: + peer_data = {} + + if peer_data.get('permid'): + title = peer_data['name'][:self.titleLength] + self.title.Enable(True) + self.title.SetLabel(title) + + if not self.listItem: + self.title.Wrap(self.title.GetSize()[0]) + + try: + ipport = peer_data['ip']+':'+str(peer_data['port']) + except: + ipport = peer_data['name'] + self.title.SetToolTipString(ipport) + + if self.listItem: + # self.discFiles.Enable(True) + # self.discFiles.SetLabel(peer_data['??']) + # self.discPersons.Enable(True) + # self.discPersons.SetLabel(peer_data['??']) + + self.vLine1.Show() + self.vLine2.Show() + self.vLine3.Show() + self.vLine4.Show() + self.vLine5.Show() + + # -- status issues + self.status.Enable(True) + #self.status.SetLabel(peer_data['last_connected']) + statusPeer = peer_data['last_connected'] + + if peer_data.get('online'): + self.status.SetLabel('online') + elif statusPeer is not None: + if statusPeer < 0: + self.status.SetLabel('never seen') + else: + self.status.SetLabel('conn. %s' % friendly_time(statusPeer)) + else: + self.status.SetLabel( 'unknown') + + # number of Discovered files and persons + n = peer_data.get('num_peers') + if n is None: + n = '?' + self.discPersons.SetLabel(unicode(n)) + + t = peer_data.get('num_torrents') + if t is None: + t = '?' 
+ self.discFiles.SetLabel(unicode(t)) + + # -- taste issues + rank = peer_data.get('simRank',-1) + recommField = self.taste + if rank!=-1: + if rank == 1: + self.tasteHeart.SetToolTipString("%d" % rank + "st of top 20 of all discovered persons") + recommField.SetLabel("%d" % rank + "st") + elif rank == 2: + self.tasteHeart.SetToolTipString("%d" % rank + "nd of top 20 of all discovered persons") + recommField.SetLabel("%d" % rank + "nd") + elif rank == 3: + self.tasteHeart.SetToolTipString("%d" % rank + "rd of top 20 of all discovered persons") + recommField.SetLabel("%d" % rank + "rd") + else: + self.tasteHeart.SetToolTipString("%d" % rank + "th of top 20 of all discovered persons") + recommField.SetLabel("%d" % rank + "th") + self.tasteHeart.Show() + self.tasteHeart.setRank(rank) + else: + self.tasteHeart.Hide() + self.taste.SetLabel('') + + # -- friend issues + fs = self.data.get('friend') + if fs == FS_MUTUAL or fs == FS_I_INVITED: + if self.data.get('online'): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'pip: friend online' + self.friendsIcon.setBitmapFromFile('friend') + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'pip: friend offline' + self.friendsIcon.setBitmapFromFile('friend_offline') + self.friendsIcon.Show() + else: + self.friendsIcon.Hide() + + else: + self.title.SetLabel('') + self.title.SetToolTipString('') + self.title.Enable(False) + + if self.listItem: + self.discFiles.SetLabel('') + self.discPersons.SetLabel('') + self.status.SetLabel('') + self.taste.SetLabel('') + self.tasteHeart.Hide() + self.friendsIcon.Hide() + self.vLine1.Hide() + self.vLine2.Hide() + self.vLine3.Hide() + else: + self.moreInfo.Show() + self.vLine4.Hide() + self.vLine5.Hide() + + self.thumb.setData(peer_data) + + self.Layout() + self.Refresh() + #self.parent.Refresh() + + + def addLine(self): + vLine = wx.StaticLine(self,-1,wx.DefaultPosition, wx.Size(2,22),wx.LI_VERTICAL) + self.hSizer.Add(vLine, 0, wx.RIGHT|wx.LEFT|wx.EXPAND, 3) + return vLine + + def select(self, rowIndex, colIndex, ignore1, ignore2, ignore3): + if DEBUG: + print 'pip: person selected' + colour = self.guiUtility.selectedColour + + self.selected = True + self.thumb.setSelected(True) + self.title.SetBackgroundColour(colour) + + if self.listItem: + self.SetBackgroundColour(colour) + self.discFiles.SetBackgroundColour(colour) + self.discPersons.SetBackgroundColour(colour) + self.status.SetBackgroundColour(colour) + self.tasteHeart.setBackground(colour) + self.taste.SetBackgroundColour(colour) + self.friendsIcon.setBackground(colour) + else: + self.moreInfo.Hide() + + self.Refresh() + + def deselect(self, rowIndex, colIndex): + + if rowIndex % 2 == 0 or not self.listItem: + colour = self.guiUtility.unselectedColour + else: + colour = self.guiUtility.unselectedColour2 + + self.selected = False + self.thumb.setSelected(False) + self.title.SetBackgroundColour(colour) + + if self.listItem: + self.SetBackgroundColour(colour) + self.discFiles.SetBackgroundColour(colour) + self.discPersons.SetBackgroundColour(colour) + self.status.SetBackgroundColour(colour) + self.tasteHeart.setBackground(colour) + self.togglePersonsItemDetailsSummary(True) + self.guiUtility.standardOverview.selectedPeer = self.data['permid'] + self.taste.SetBackgroundColour(colour) + self.friendsIcon.setBackground(colour) + + self.Refresh() + + def keyTyped(self, event): + if self.selected: + key = event.GetKeyCode() + if (key == wx.WXK_DELETE): + if self.data: + if DEBUG: + print >>sys.stderr, 
strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'pip: deleting' +# self.guiUtility.deleteTorrent(self.data) + event.Skip() + + def mouseAction(self, event): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","pip: set focus" + self.SetFocus() + if self.data: + self.guiUtility.selectPeer(self.data) + + self.togglePersonsItemDetailsSummary(False) + if event.RightDown(): + self.rightMouseButton(event) + + + def rightMouseButton(self, event): + menu = self.guiUtility.OnRightMouseAction(event) + if menu is not None: + self.PopupMenu(menu, (-1,-1)) + + def getIdentifier(self): + if self.data: + return self.data['permid'] + +class ThumbnailViewer(wx.Panel): + """ + Show thumbnail and mast with info on mouseOver + """ + + def __init__(self, *args, **kw): + self.triblerGrey = wx.Colour(128,128,128) + self.triblerLightGrey = wx.Colour(203,203,203) + if len(args) == 0: + pre = wx.PrePanel() + # the Create step is done by XRC. + self.PostCreate(pre) + self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate) + else: + wx.Panel.__init__(self, *args, **kw) + self._PostInit() + + def OnCreate(self, event): + self.Unbind(wx.EVT_WINDOW_CREATE) + + def togglePersonsItemDetailsSummary(self, visible): + + if visible and not self.summary: + self.summary = PersonsItemDetailsSummary(self, mode='persons') + self.triblerStyles.setLightText(self.summary) + self.hSizerSummary.Add(self.summary, 1, wx.ALL|wx.EXPAND, 0) + self.SetMinSize((-1,140)) + + elif self.summary and not visible: + self.summary.Hide() + # the Thumb should be destoryed seperately because it has a different parent. + self.summary.thumbSummary.Destroy() + self.summary.DestroyChildren() + self.summary.Destroy() + self.summary = None + self.SetMinSize((-1,22)) + + def moreInfoClicked(self, event): + event.Skip() + self.guiUtility.standardOverview.selectedPeer = self.data['permid'] + self.guiUtility.buttonClicked(event) + wx.CallAfter(self._PostInit) + event.Skip() + return True + + def _PostInit(self): + # Do all init here +# self.backgroundColor = wx.WHITE + self.dataBitmap = self.maskBitmap = None + self.data = None + self.mouseOver = False + self.triblerGrey = wx.Colour(128,128,128) + self.triblerLightGrey = wx.Colour(203,203,203) + self.guiUtility = GUIUtility.getInstance() + self.utility = self.guiUtility.utility + self.Bind(wx.EVT_MOUSE_EVENTS, self.mouseAction) + self.Bind(wx.EVT_LEFT_UP, self.guiUtility.buttonClicked) + self.Bind(wx.EVT_PAINT, self.OnPaint) + self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnErase) + self.selected = False + self.border = None + #create the heart + #I will use TasteHeart.BITMAPS to paint the right one + self.peer_db = self.utility.session.open_dbhandler(NTFY_PEERS) + self.superpeer_db = self.utility.session.open_dbhandler(NTFY_SUPERPEERS) + self.iconsManager = IconsManager.getInstance() + + + def setData(self, data, summary=''): + + if not data: + self.Hide() + self.Refresh() + return + + if not self.IsShown(): + self.Show() + #if data != self.data: + self.data = data + self.setThumbnail(data) + + def setThumbnail(self, data, summary=''): + # Get the file(s)data for this torrent + try: + + listItem = self.GetParent().listItem + if listItem and summary != 'filesItemSummary': + defThumb = 'DEFAULT_THUMB_SMALL' + else: + defThumb = 'DEFAULT_THUMB' + + bmp_default = self.iconsManager.get_default('personsMode',defThumb) + # Check if we have already read the thumbnail and metadata information from this torrent file + if data.get('metadata'): + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", 
localtime())," ","pip: Reusing Bitmap",`data['name']` + + bmp = data['metadata'].get('ThumbnailBitmap') + tt = data['metadata'].get('tried_time') + if not bmp: + now = time() + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","BMP IS NONE",data['name'] + if now > tt+(15*60.0): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","REFRESH OF PEER IMAGE SCHEDULED" + self.GetParent().guiserver.add_task(lambda:self.loadMetadata(data),0) + else: + bmp_default = bmp + else: + self.GetParent().guiserver.add_task(lambda:self.loadMetadata(data),0) + + self.setBitmap(bmp_default) + width, height = self.GetSize() + d = 1 + self.border = [wx.Point(0,d), wx.Point(width-d, d), wx.Point(width-d, height-d), wx.Point(d,height-d), wx.Point(d,0)] + self.Refresh() + #wx.Yield() + + except: + print_exc() + return {} + + + def setBitmap(self, bmp): + # Recalculate image placement + w, h = self.GetSize() + img = bmp.ConvertToImage() + bmp = getResizedBitmapFromImage(img, (w,h)) + + self.dataBitmap = bmp + iw, ih = bmp.GetSize() + self.xpos, self.ypos = (w-iw)/2, (h-ih)/2 + + + def loadMetadata(self,data,type=''): + """ Called by non-GUI thread """ + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","pip: ThumbnailViewer: loadMetadata: Peer",show_permid_short(data['permid']),data['name'] + + # We can't do any wx stuff here apparently, so the only thing we can do is to + # read the data from the file and create the wxBitmap in the GUI callback. + [mimetype,bmpdata] = self.peer_db.getPeerIcon(data['permid']) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PersonsItemPanel: ThumbnailViewer: loadMetadata: Got",show_permid_short(data['permid']),mimetype + + wx.CallAfter(self.metadata_thread_gui_callback,data,mimetype,bmpdata,type) + + def metadata_thread_gui_callback(self,data,mimetype,bmpdata,type=''): + """ Called by GUI thread """ + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","pip: ThumbnailViewer: GUI callback" + + metadata = {} + metadata['tried_time'] = time()+(random.random()*100) + if mimetype is not None: + metadata['ThumbnailBitmap'] = data2wxBitmap(mimetype,bmpdata) + else: + superpeers = self.superpeer_db.getSuperPeers() + + """ + if data['name'].lower().startswith("superpeer"): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","pip: Name is superpeer",data['name'],"permid",show_permid_short(data['permid']) + for speer in superpeers: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","pip: Comparing to superpeer",show_permid_short(speer) + """ + if data['permid'] in superpeers: + bm = self.iconsManager.get_default('personsMode','SUPERPEER_BITMAP') + metadata['ThumbnailBitmap'] = bm + else: + metadata['ThumbnailBitmap'] = None + + if type and metadata['ThumbnailBitmap'] is not None: + iw, ih = metadata['ThumbnailBitmap'].GetSize() + w, h = self.GetSize() + if (iw/float(ih)) > (w/float(h)): + nw = w + nh = int(ih * w/float(iw)) + else: + nh = h + nw = int(iw * h/float(ih)) + if nw != iw or nh != ih: + #print 'Rescale from (%d, %d) to (%d, %d)' % (iw, ih, nw, nh) + img = wx.ImageFromBitmap(metadata['ThumbnailBitmap']) + img.Rescale(nw, nh) + metadata['ThumbnailBitmap'+type] = wx.BitmapFromImage(img) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","pip: Netresult is",metadata['ThumbnailBitmap'] + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","pip: ThumbnailViewer: Setting metadata" + 
data['metadata'] = metadata + + # This item may be displaying another person right now, only show the icon + # when it's still the same person + if data['permid'] == self.data['permid']: + thumb_type = 'ThumbnailBitmap' + if type: + thumb_type = thumb_type+type + if thumb_type in metadata and metadata[thumb_type] is not None: + self.setBitmap(metadata[thumb_type]) + self.Refresh() + + + def OnErase(self, event): + pass + #event.Skip() + + def setSelected(self, sel): + self.selected = sel + self.Refresh() + + def isSelected(self): + return self.selected + + def mouseAction(self, event): + if event.Entering(): + if DEBUG: + print 'pip: enter' + self.mouseOver = True + self.Refresh() + elif event.Leaving(): + self.mouseOver = False + if DEBUG: + print 'pip: leave' + self.Refresh() +# elif event.ButtonUp(): +# self.ClickedButton() + #event.Skip() + """ + def ClickedButton(self): + print 'Click' + """ + + def setBackground(self, wxColor): + self.backgroundColor = wxColor + + def OnPaint(self, evt): + dc = wx.BufferedPaintDC(self) + dc.SetBackground(wx.Brush(self.backgroundColor)) + dc.Clear() + + if self.dataBitmap: + dc.DrawBitmap(self.dataBitmap, self.xpos,self.ypos, True) +# if self.mouseOver: + if self.data is not None and type(self.data)==dict and self.data.get('permid'): + rank = self.data['simRank'] + #because of the fact that hearts are coded so that lower index means higher ranking, then: + heartBitmap = TasteHeart.getHeartBitmap(rank) + if self.mouseOver: + mask = self.iconsManager.get_default('personsMode','MASK_BITMAP_CLEAN') + y_pos = 0 + m_height = mask.GetSize()[1] + y_height = self.GetSize()[1] + while y_pos>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'SearchGridManager: Could not import web2' + print_exc() + + +DEBUG = False + +SEARCHMODE_STOPPED = 1 +SEARCHMODE_SEARCHING = 2 +SEARCHMODE_NONE = 3 + +class TorrentSearchGridManager: + # Code to make this a singleton + __single = None + + def __init__(self,guiUtility): + if TorrentSearchGridManager.__single: + raise RuntimeError, "TorrentSearchGridManager is singleton" + TorrentSearchGridManager.__single = self + + self.guiUtility = guiUtility + + # Contains all matches for keywords in DB, not filtered by category + self.hits = [] + # Remote results for current keywords + self.remoteHits = {} + self.stopped = False + self.dod = None + # Jelle's word filter + self.searchmgr = None + self.torrent_db = None + self.pref_db = None # Nic: for rerankers + self.mypref_db = None + self.search_db = None + # For asking for a refresh when remote results came in + self.gridmgr = None + + self.standardOverview = None + self.searchkeywords = {'filesMode':[], 'libraryMode':[]} + self.rerankingStrategy = {'filesMode':DefaultTorrentReranker(), 'libraryMode':DefaultTorrentReranker()} + self.oldsearchkeywords = {'filesMode':[], 'libraryMode':[]} # previous query + + self.category = Category.getInstance() + + def getInstance(*args, **kw): + if TorrentSearchGridManager.__single is None: + TorrentSearchGridManager(*args, **kw) + return TorrentSearchGridManager.__single + getInstance = staticmethod(getInstance) + + def register(self,torrent_db,pref_db,mypref_db,search_db): + self.torrent_db = torrent_db + self.pref_db = pref_db + self.mypref_db = mypref_db + self.search_db = search_db + self.searchmgr = SearchManager(torrent_db) + + def set_gridmgr(self,gridmgr): + self.gridmgr = gridmgr + + def getHitsInCategory(self,mode,categorykey,range,sort,reverse): + begintime = time() + # mode is 'filesMode', 'libraryMode' + # categorykey can be 
'all', 'Video', 'Document', ... + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentSearchGridManager: getHitsInCategory:",mode,categorykey,range + + categorykey = categorykey.lower() + enabledcattuples = self.category.getCategoryNames() + enabledcatslow = ["other"] + for catname,displayname in enabledcattuples: + enabledcatslow.append(catname.lower()) + + if not self.standardOverview: + self.standardOverview = self.guiUtility.standardOverview + + + + # TODO: do all filtering in DB query + def torrentFilter(torrent): + library = (mode == 'libraryMode') + okLibrary = not library or (torrent.get('myDownloadHistory', False) and torrent.get('destdir',"") != "") + + okCategory = False + categories = torrent.get("category", []) + if not categories: + categories = ["other"] + if categorykey == 'all': + for torcat in categories: + if torcat.lower() in enabledcatslow: + okCategory = True + break + elif categorykey in [cat.lower() for cat in categories]: + okCategory = True + + okGood = torrent['status'] == 'good' or torrent.get('myDownloadHistory', False) + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","FILTER: lib",okLibrary,"cat",okCategory,"good",okGood + return okLibrary and okCategory and okGood + + # 1. Local search puts hits in self.hits + new_local_hits = self.searchLocalDatabase(mode) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'TorrentSearchGridManager: getHitsInCat: search found: %d items' % len(self.hits) + + if new_local_hits: + # 2. Filter self.hits on category and status + self.hits = filter(torrentFilter,self.hits) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'TorrentSearchGridManager: getHitsInCat: torrentFilter after filter found: %d items' % len(self.hits) + + self.standardOverview.setSearchFeedback('web2', self.stopped, -1, self.searchkeywords[mode]) + self.standardOverview.setSearchFeedback('remote', self.stopped, -1, self.searchkeywords[mode]) + if mode == 'filesMode': + self.standardOverview.setSearchFeedback('torrent', self.stopped, len(self.hits), self.searchkeywords[mode]) + elif mode == 'libraryMode': + # set finished true and use other string + self.standardOverview.setSearchFeedback('library', True, len(self.hits), self.searchkeywords[mode]) + + + # 3. Add remote hits that may apply. TODO: double filtering, could + # add remote hits to self.hits before filter(torrentFilter,...) + + if mode != 'libraryMode': + self.addStoredRemoteResults(mode, categorykey) + self.addStoredWeb2Results(mode,categorykey,range) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'TorrentSearchGridManager: getHitsInCat: found after remote search: %d items' % len(self.hits) + + + +# if self.getSearchMode(mode) == SEARCHMODE_SEARCHING: +# self.standardOverview.setSearchFeedback('torrent', True, len(self.hits)) + + if range[0] > len(self.hits): + return [0,None] + elif range[1] > len(self.hits): + end = len(self.hits) + else: + end = range[1] + begin = range[0] + beginsort = time() + + if sort == 'rameezmetric': + self.sort() + else: + # Sort on columns in list view + cmpfunc = lambda a,b:torrent_cmp(a,b,sort) + self.hits.sort(cmpfunc,reverse=reverse) + + # Nic: Ok this is somewhat diagonal to the previous sorting algorithms + # eventually, these should probably be combined + # since for now, however, my reranking is very tame (exchanging first and second place under certain circumstances) + # this should be fine... 
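# Editor's sketch (illustrative, not in the original patch): the paging
# contract used a few lines above, shown in isolation. Given the full hit
# list and the requested (begin, end) range, getHitsInCategory clamps `end`
# to the number of hits and returns [0, None] when `begin` lies past the
# last hit. `page_hits` is a hypothetical name mirroring that logic.
def page_hits(hits, range):   # parameter named `range` to mirror the code above
    if range[0] > len(hits):
        return [0, None]
    end = min(range[1], len(hits))
    return [len(hits), hits[range[0]:end]]
# e.g. page_hits(['a', 'b', 'c'], (0, 25)) == [3, ['a', 'b', 'c']]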
+ + self.rerankingStrategy[mode] = getTorrentReranker() + self.hits = self.rerankingStrategy[mode].rerank(self.hits, self.searchkeywords[mode], self.torrent_db, + self.pref_db, self.mypref_db, self.search_db) + + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'getHitsInCat took: %s of which search %s' % ((time() - begintime), (time()-beginsort)) + return [len(self.hits),self.hits[begin:end]] + + + def setSearchKeywords(self,wantkeywords, mode): + self.stopped = False +# if len(wantkeywords) == 0: +# print_stack() + + self.searchkeywords[mode] = wantkeywords + if mode == 'filesMode': + self.remoteHits = {} + if self.dod: + self.dod.clear() + + def getSearchMode(self, mode): + # Return searching, stopped, or no search + if self.standardOverview is None: + if self.searchkeywords.get(mode): + return SEARCHMODE_SEARCHING + else: + if self.searchkeywords.get(mode): + if self.standardOverview.getSearchBusy(): + return SEARCHMODE_SEARCHING + else: + return SEARCHMODE_STOPPED + return SEARCHMODE_NONE + + + def stopSearch(self): + self.stopped = True + if self.dod: + self.dod.stop() + + def getCurrentHitsLen(self): + return len(self.hits) + + def searchLocalDatabase(self,mode): + """ Called by GetHitsInCategory() to search local DB. Caches previous query result. """ + if self.searchkeywords[mode] == self.oldsearchkeywords[mode] and len(self.hits) > 0: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentSearchGridManager: searchLocalDB: returning old hit list",len(self.hits) + return False + + self.oldsearchkeywords[mode] = self.searchkeywords[mode] + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentSearchGridManager: searchLocalDB: Want",self.searchkeywords[mode] + + if len(self.searchkeywords[mode]) == 0 or len(self.searchkeywords[mode]) == 1 and self.searchkeywords[mode][0] == '': + return False + + self.hits = self.searchmgr.search(self.searchkeywords[mode]) + + return True + + def addStoredRemoteResults(self, mode, cat): + """ Called by GetHitsInCategory() to add remote results to self.hits """ + if len(self.remoteHits) > 0: + numResults = 0 + def catFilter(item): + icat = item.get('category') + if type(icat) == list: + icat = icat[0].lower() + elif type(icat) == str: + icat = icat.lower() + else: + return False + return icat == cat or cat == 'all' + + catResults = filter(catFilter, self.remoteHits.values()) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentSearchGridManager: remote: Adding %d remote results (%d in category)" % (len(self.remoteHits), len(catResults)) + + + for remoteItem in catResults: + known = False + for item in self.hits: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentSearchGridManager: remote: Should we add",`remoteItem['name']` + if item['infohash'] == remoteItem['infohash']: + known = True + break + if not known: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentSearchGridManager: remote: Adding",`remoteItem['name']` + self.hits.append(remoteItem) + numResults+=1 + self.standardOverview.setSearchFeedback('remote', self.stopped, numResults, self.searchkeywords[mode]) + + def gotRemoteHits(self,permid,kws,answers,mode): + """ Called by GUIUtil when hits come in. 
""" + try: + #if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentSearchGridManager: gotRemoteHist: got",len(answers),"for",kws + + # Always store the results, only display when in filesMode + # We got some replies. First check if they are for the current query + if self.searchkeywords['filesMode'] == kws: + numResults = 0 + catobj = Category.getInstance() + for key,value in answers.iteritems(): + + if self.torrent_db.hasTorrent(key): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentSearchGridManager: gotRemoteHist: Ignoring hit for",`value['content_name']`,"already got it" + continue # do not show results we have ourselves + + # Convert answer fields as per + # Session.query_connected_peers() spec. to NEWDB format + newval = {} + newval['name'] = value['content_name'] + newval['infohash'] = key + newval['torrent_file_name'] = '' + newval['length'] = value['length'] + newval['creation_date'] = time() # None gives '?' in GUI + newval['relevance'] = 0 + newval['source'] = 'RQ' + newval['category'] = value['category'][0] + # We trust the peer + newval['status'] = 'good' + newval['num_seeders'] = value['seeder'] + newval['num_leechers'] = value['leecher'] + + # OLPROTO_VER_NINE includes a torrent_size. Set to + # -1 when not available. + if 'torrent_size' in value: + newval['torrent_size'] = value['torrent_size'] + else: + newval['torrent_size'] = -1 + + # Extra fiedl: Set from which peer this info originates + newval['query_permids'] = [permid] + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentSearchGridManager: gotRemoteHist: appending hit",`newval['name']` + #value['name'] = 'REMOTE '+value['name'] + + # Filter out results from unwanted categories + flag = False + for cat in value['category']: + rank = catobj.getCategoryRank(cat) + if rank == -1: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentSearchGridManager: gotRemoteHits: Got",`newval['name']`,"from banned category",cat,", discarded it." 
+ flag = True + break + if flag: + continue + + if newval['infohash'] in self.remoteHits: + # merge this result with previous results + oldval = self.remoteHits[newval['infohash']] + for query_permid in newval['query_permids']: + if not query_permid in oldval['query_permids']: + oldval['query_permids'].append(query_permid) + else: + self.remoteHits[newval['infohash']] = newval + numResults +=1 + # if numResults % 5 == 0: + # self.refreshGrid() + + if numResults > 0 and mode == 'filesMode': # and self.standardOverview.getSearchBusy(): + self.refreshGrid() + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'TorrentSearchGridManager: gotRemoteHits: Refresh grid after new remote torrent hits came in' + return True + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentSearchGridManager: gotRemoteHits: got hits for",kws,"but current search is for",self.searchkeywords[mode] + return False + except: + print_exc() + return False + + def refreshGrid(self): + if self.gridmgr is not None: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentSearchGridManager: refreshGrid: gridmgr refresh" + self.gridmgr.refresh() + + + # + # Move to Web2SearchGridManager + # + def searchWeb2(self,initialnum): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentSearchGridManager: searchWeb2:",initialnum + + if self.dod: + self.dod.stop() + self.dod = web2.DataOnDemandWeb2(" ".join(self.searchkeywords['filesMode']),guiutil=self.guiUtility) + self.dod.request(initialnum) + self.dod.register(self.tthread_gotWeb2Hit) + + def tthread_gotWeb2Hit(self,item): + """ Called by Web2DBSearchThread*s* """ + #if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentSearchGridManager: tthread_gotWeb2Hit",`item['content_name']` + + wx.CallAfter(self.refreshGrid) + + def web2tonewdb(self,value): + try: + # Added protection against missing values + newval = {} + newval['infohash'] = value['infohash'] + newval['name'] = value['content_name'] + newval['status'] = value.get('status','unknown') + newval['description'] = value.get('description','') + newval['tags'] = value.get('tags',[]) + newval['url'] = value.get('url','') + newval['num_leechers'] = value.get('leecher',1) + newval['num_seeders'] = value.get('views',1) + newval['creation_date'] = value.get('date','') + newval['views'] = value.get('views',0) + newval['web2'] = value.get('web2',True) + newval['length'] = value.get('length',1) + if 'preview' in value: # Apparently not always present + newval['preview'] = value['preview'] + return newval + except: + print_exc() + return None + + def addStoredWeb2Results(self,mode,categorykey,range): + web2on = self.guiUtility.utility.config.Read('enableweb2search',"boolean") + + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentSearchGridManager: getCategory: mode",mode,"webon",web2on,"insearch",self.getSearchMode(mode),"catekey",categorykey + + if mode == 'filesMode' and web2on and self.getSearchMode(mode) == SEARCHMODE_SEARCHING and \ + categorykey in ['video', 'all']: + # if we are searching in filesmode + #self.standardOverview.setSearchFeedback('web2', False, 0) + + if self.dod: + # Arno: ask for more when needed (=only one page left to display) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentSearchManager: web2: requestMore?",range[1],self.dod.getNumRequested() + pagesize = range[1] - range[0] + #diff 
= self.dod.getNumRequested() - range[1] + #if diff <= pagesize: + # JelleComment: above code doesnt work, because other search results are also on pages + # so we might have 100 pages of local search results. If range is related to 80th page + # websearch will try to get 80xpagesize youtube videos + # Set it steady to 3 pages + if self.dod.getNumRequested() < 3*pagesize: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentSearchManager: web2: requestMore diff",pagesize + self.dod.requestMore(pagesize) + + data = self.dod.getData() + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentSearchManager: getHitsInCat: web2: Got total",len(data) + numResults = 0 + for value in data: + + # Translate to NEWDB/FileItemPanel format, doing this in + # web2/video/genericsearch.py breaks something + newval = self.web2tonewdb(value) + if newval is None: + continue + + known = False + for item in self.hits: + if item['infohash'] == newval['infohash']: + known = True + break + if not known: + self.hits.append(newval) + numResults += 1 + + self.standardOverview.setSearchFeedback('web2', self.stopped, numResults, self.searchkeywords[mode]) + # else: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentSearchManager: No web2 hits, no self.dod" + + #else: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentSearchManager: No web2 hits, mode",mode,"web2on",web2on,"in search",self.getSearchMode(mode),"catkey",categorykey + + #Rameez: The following code will call normalization functions and then + #sort and merge the combine torrent and youtube results + def sort(self): + self.normalizeResults() + self.statisticalNormalization() + #Rameez: now sort combined (i.e after the above two normalization procedures) + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'SearchGridMan: Search res: %s' % [a.get('normScore',0) for a in self.hits] + def cmp(a,b): + # normScores can be small, so multiply + # No normscore gives negative 1000, because should be less than 0 (mean) + return int(1000000.0 * (b.get('normScore',-1000) - a.get('normScore',-1000))) + self.hits.sort(cmp) + + + + + def normalizeResults(self): + torrent_total = 0 + youtube_total = 0 + KEY_NORMSCORE = 'normScore' + + #Rameez: normalize torrent results + #Rameez: normalize youtube results + for hit in self.hits: + if not hit.has_key('views'): + torrent_total += hit.get('num_seeders',0) + elif hit['views'] != 'unknown': + youtube_total += int(hit['views']) + + if torrent_total == 0: # if zero, set to one for divZeroExc. 
we can do this, cause nominator will also be zero in following division + torrent_total = 1 + if youtube_total == 0: + youtube_total = 1 + + for hit in self.hits: + if not hit.has_key('views'): + hit[KEY_NORMSCORE] = hit.get('num_seeders',0)/float(torrent_total) + elif hit['views'] != 'unknown': + hit[KEY_NORMSCORE] = int(hit['views'])/float(youtube_total) + + + + + def statisticalNormalization(self): + youtube_hits = [hit for hit in self.hits if (hit.get('views', 'unknown') != "unknown" + and hit.has_key('normScore'))] + torrent_hits = [hit for hit in self.hits if (not hit.has_key('views') + and hit.has_key('normScore'))] + self.doStatNormalization(youtube_hits) + self.doStatNormalization(torrent_hits) + + def doStatNormalization(self, hits): + #Rameez: statistically normalize torrent results + + count = 0 + tot = 0 + + for hit in hits: + tot += hit['normScore'] + count +=1 + + if count > 0: + mean = tot/count + else: + mean = 0 + + sum = 0 + for hit in hits: + temp = hit['normScore'] - mean + temp = temp * temp + sum += temp + + if count > 1: + dev = sum /(count-1) + else: + dev = 0 + + stdDev = sqrt(dev) + + for hit in hits: + if stdDev > 0: + hit['normScore'] = (hit['normScore']-mean)/ stdDev + + + + +class PeerSearchGridManager: + + # Code to make this a singleton + __single = None + + def __init__(self,guiUtility): + if PeerSearchGridManager.__single: + raise RuntimeError, "PeerSearchGridManager is singleton" + PeerSearchGridManager.__single = self + + self.guiUtility = guiUtility + + # Contains all matches for keywords in DB, not filtered by category + self.hits = [] + # Jelle's word filter + self.psearchmgr = None + self.fsearchmgr = None + self.stopped = False # not stopped by default + self.gridmgr = None + + self.standardOverview = None + self.searchkeywords = {'personsMode':[], 'friendsMode':[]} + self.oldsearchkeywords = {'personsMode':[], 'friendsMode':[]} # previous query + + + def getInstance(*args, **kw): + if PeerSearchGridManager.__single is None: + PeerSearchGridManager(*args, **kw) + return PeerSearchGridManager.__single + getInstance = staticmethod(getInstance) + + def register(self,peer_db,friend_db): + self.psearchmgr = SearchManager(peer_db) + self.fsearchmgr = SearchManager(friend_db) + + def set_gridmgr(self,gridmgr): + self.gridmgr = gridmgr + + + def getHits(self,mode,range): + # mode is 'personsMode', 'friendsMode' + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PeerSearchGridManager: getHitsIn:",mode,range + + if not self.standardOverview: + self.standardOverview = self.guiUtility.standardOverview + + # Local search puts hits in self.hits + self.searchLocalDatabase(mode) + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PeerSearchGridManager: searchLocalDB: GOT HITS",self.hits + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'PeerSearchGridManager: getHitsInCat: search found: %d items' % len(self.hits) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'PeerSearchGridManager: getHitsInCat: torrentFilter after search found: %d items' % len(self.hits) + + if mode == 'personsMode': + searchType = 'peers' + elif mode == 'friendsMode': + searchType = 'friends' + self.standardOverview.setSearchFeedback(searchType, True, len(self.hits), self.searchkeywords[mode]) + + if range[0] > len(self.hits): + return [0,None] + elif range[1] > len(self.hits): + end = len(self.hits) + else: + end = range[1] + begin = range[0] + + return 
[len(self.hits),self.hits[begin:end]] + + + def setSearchKeywords(self,wantkeywords, mode): + self.stopped = False +# if len(wantkeywords) == 0: +# print_stack() + + self.searchkeywords[mode] = wantkeywords + + def getSearchMode(self, mode): + if bool(self.searchkeywords[mode]): + if not self.stopped: + mode = SEARCHMODE_SEARCHING + else: + mode = SEARCHMODE_STOPPED + else: + mode = SEARCHMODE_NONE + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PeerSearchGridManager: getSearchMode?",mode + return mode + + def stopSearch(self): + print_stack() + self.stopped = True + + def searchLocalDatabase(self,mode): + """ Called by getHits() to search local DB. Caches previous query result. """ + if self.searchkeywords[mode] == self.oldsearchkeywords[mode] and len(self.hits) > 0: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PeerSearchGridManager: searchLocalDB: returning old hit list",len(self.hits) + return self.hits + + self.oldsearchkeywords[mode] = self.searchkeywords[mode] + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PeerSearchGridManager: searchLocalDB: Want",self.searchkeywords[mode] + + if len(self.searchkeywords[mode]) == 0 or len(self.searchkeywords[mode]) == 1 and self.searchkeywords[mode][0] == '': + return self.hits + + if mode == 'personsMode': + self.hits = self.psearchmgr.search(self.searchkeywords[mode]) + else: # friends + self.hits = self.fsearchmgr.search(self.searchkeywords[mode]) + + return self.hits + + +def torrent_cmp(a,b,sort): + """ Compare torrent db records based on key "sort" """ + vala = a.get(sort,0) + valb = b.get(sort,0) + if vala == valb: + return 0 + elif vala < valb: + return -1 + else: + return 1 + + diff --git a/tribler-mod/Tribler/Main/vwxGUI/SearchGridManager.py.bak b/tribler-mod/Tribler/Main/vwxGUI/SearchGridManager.py.bak new file mode 100644 index 0000000..4503c14 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/SearchGridManager.py.bak @@ -0,0 +1,674 @@ +# Written by Jelle Roozenburg, Maarten ten Brinke, Lucan Musat, Arno Bakker +# see LICENSE.txt for license information + +import sys +import wx +from traceback import print_exc, print_stack +from time import time + +from Tribler.Category.Category import Category +from Tribler.Core.Search.SearchManager import SearchManager +from Tribler.Core.Search.Reranking import getTorrentReranker, DefaultTorrentReranker + +from math import sqrt +try: + import web2 +except ImportError: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'SearchGridManager: Could not import web2' + print_exc() + + +DEBUG = False + +SEARCHMODE_STOPPED = 1 +SEARCHMODE_SEARCHING = 2 +SEARCHMODE_NONE = 3 + +class TorrentSearchGridManager: + # Code to make this a singleton + __single = None + + def __init__(self,guiUtility): + if TorrentSearchGridManager.__single: + raise RuntimeError, "TorrentSearchGridManager is singleton" + TorrentSearchGridManager.__single = self + + self.guiUtility = guiUtility + + # Contains all matches for keywords in DB, not filtered by category + self.hits = [] + # Remote results for current keywords + self.remoteHits = {} + self.stopped = False + self.dod = None + # Jelle's word filter + self.searchmgr = None + self.torrent_db = None + self.pref_db = None # Nic: for rerankers + self.mypref_db = None + self.search_db = None + # For asking for a refresh when remote results came in + self.gridmgr = None + + self.standardOverview = None + self.searchkeywords = {'filesMode':[], 
'libraryMode':[]} + self.rerankingStrategy = {'filesMode':DefaultTorrentReranker(), 'libraryMode':DefaultTorrentReranker()} + self.oldsearchkeywords = {'filesMode':[], 'libraryMode':[]} # previous query + + self.category = Category.getInstance() + + def getInstance(*args, **kw): + if TorrentSearchGridManager.__single is None: + TorrentSearchGridManager(*args, **kw) + return TorrentSearchGridManager.__single + getInstance = staticmethod(getInstance) + + def register(self,torrent_db,pref_db,mypref_db,search_db): + self.torrent_db = torrent_db + self.pref_db = pref_db + self.mypref_db = mypref_db + self.search_db = search_db + self.searchmgr = SearchManager(torrent_db) + + def set_gridmgr(self,gridmgr): + self.gridmgr = gridmgr + + def getHitsInCategory(self,mode,categorykey,range,sort,reverse): + begintime = time() + # mode is 'filesMode', 'libraryMode' + # categorykey can be 'all', 'Video', 'Document', ... + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentSearchGridManager: getHitsInCategory:",mode,categorykey,range + + categorykey = categorykey.lower() + enabledcattuples = self.category.getCategoryNames() + enabledcatslow = ["other"] + for catname,displayname in enabledcattuples: + enabledcatslow.append(catname.lower()) + + if not self.standardOverview: + self.standardOverview = self.guiUtility.standardOverview + + + + # TODO: do all filtering in DB query + def torrentFilter(torrent): + library = (mode == 'libraryMode') + okLibrary = not library or (torrent.get('myDownloadHistory', False) and torrent.get('destdir',"") != "") + + okCategory = False + categories = torrent.get("category", []) + if not categories: + categories = ["other"] + if categorykey == 'all': + for torcat in categories: + if torcat.lower() in enabledcatslow: + okCategory = True + break + elif categorykey in [cat.lower() for cat in categories]: + okCategory = True + + okGood = torrent['status'] == 'good' or torrent.get('myDownloadHistory', False) + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","FILTER: lib",okLibrary,"cat",okCategory,"good",okGood + return okLibrary and okCategory and okGood + + # 1. Local search puts hits in self.hits + new_local_hits = self.searchLocalDatabase(mode) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'TorrentSearchGridManager: getHitsInCat: search found: %d items' % len(self.hits) + + if new_local_hits: + # 2. Filter self.hits on category and status + self.hits = filter(torrentFilter,self.hits) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'TorrentSearchGridManager: getHitsInCat: torrentFilter after filter found: %d items' % len(self.hits) + + self.standardOverview.setSearchFeedback('web2', self.stopped, -1, self.searchkeywords[mode]) + self.standardOverview.setSearchFeedback('remote', self.stopped, -1, self.searchkeywords[mode]) + if mode == 'filesMode': + self.standardOverview.setSearchFeedback('torrent', self.stopped, len(self.hits), self.searchkeywords[mode]) + elif mode == 'libraryMode': + # set finished true and use other string + self.standardOverview.setSearchFeedback('library', True, len(self.hits), self.searchkeywords[mode]) + + + # 3. Add remote hits that may apply. TODO: double filtering, could + # add remote hits to self.hits before filter(torrentFilter,...) 
+ + if mode != 'libraryMode': + self.addStoredRemoteResults(mode, categorykey) + self.addStoredWeb2Results(mode,categorykey,range) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'TorrentSearchGridManager: getHitsInCat: found after remote search: %d items' % len(self.hits) + + + +# if self.getSearchMode(mode) == SEARCHMODE_SEARCHING: +# self.standardOverview.setSearchFeedback('torrent', True, len(self.hits)) + + if range[0] > len(self.hits): + return [0,None] + elif range[1] > len(self.hits): + end = len(self.hits) + else: + end = range[1] + begin = range[0] + beginsort = time() + + if sort == 'rameezmetric': + self.sort() + else: + # Sort on columns in list view + cmpfunc = lambda a,b:torrent_cmp(a,b,sort) + self.hits.sort(cmpfunc,reverse=reverse) + + # Nic: Ok this is somewhat diagonal to the previous sorting algorithms + # eventually, these should probably be combined + # since for now, however, my reranking is very tame (exchanging first and second place under certain circumstances) + # this should be fine... + + self.rerankingStrategy[mode] = getTorrentReranker() + self.hits = self.rerankingStrategy[mode].rerank(self.hits, self.searchkeywords[mode], self.torrent_db, + self.pref_db, self.mypref_db, self.search_db) + + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'getHitsInCat took: %s of which search %s' % ((time() - begintime), (time()-beginsort)) + return [len(self.hits),self.hits[begin:end]] + + + def setSearchKeywords(self,wantkeywords, mode): + self.stopped = False +# if len(wantkeywords) == 0: +# print_stack() + + self.searchkeywords[mode] = wantkeywords + if mode == 'filesMode': + self.remoteHits = {} + if self.dod: + self.dod.clear() + + def getSearchMode(self, mode): + # Return searching, stopped, or no search + if self.standardOverview is None: + if self.searchkeywords.get(mode): + return SEARCHMODE_SEARCHING + else: + if self.searchkeywords.get(mode): + if self.standardOverview.getSearchBusy(): + return SEARCHMODE_SEARCHING + else: + return SEARCHMODE_STOPPED + return SEARCHMODE_NONE + + + def stopSearch(self): + self.stopped = True + if self.dod: + self.dod.stop() + + def getCurrentHitsLen(self): + return len(self.hits) + + def searchLocalDatabase(self,mode): + """ Called by GetHitsInCategory() to search local DB. Caches previous query result. 
""" + if self.searchkeywords[mode] == self.oldsearchkeywords[mode] and len(self.hits) > 0: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentSearchGridManager: searchLocalDB: returning old hit list",len(self.hits) + return False + + self.oldsearchkeywords[mode] = self.searchkeywords[mode] + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentSearchGridManager: searchLocalDB: Want",self.searchkeywords[mode] + + if len(self.searchkeywords[mode]) == 0 or len(self.searchkeywords[mode]) == 1 and self.searchkeywords[mode][0] == '': + return False + + self.hits = self.searchmgr.search(self.searchkeywords[mode]) + + return True + + def addStoredRemoteResults(self, mode, cat): + """ Called by GetHitsInCategory() to add remote results to self.hits """ + if len(self.remoteHits) > 0: + numResults = 0 + def catFilter(item): + icat = item.get('category') + if type(icat) == list: + icat = icat[0].lower() + elif type(icat) == str: + icat = icat.lower() + else: + return False + return icat == cat or cat == 'all' + + catResults = filter(catFilter, self.remoteHits.values()) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentSearchGridManager: remote: Adding %d remote results (%d in category)" % (len(self.remoteHits), len(catResults)) + + + for remoteItem in catResults: + known = False + for item in self.hits: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentSearchGridManager: remote: Should we add",`remoteItem['name']` + if item['infohash'] == remoteItem['infohash']: + known = True + break + if not known: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentSearchGridManager: remote: Adding",`remoteItem['name']` + self.hits.append(remoteItem) + numResults+=1 + self.standardOverview.setSearchFeedback('remote', self.stopped, numResults, self.searchkeywords[mode]) + + def gotRemoteHits(self,permid,kws,answers,mode): + """ Called by GUIUtil when hits come in. """ + try: + #if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentSearchGridManager: gotRemoteHist: got",len(answers),"for",kws + + # Always store the results, only display when in filesMode + # We got some replies. First check if they are for the current query + if self.searchkeywords['filesMode'] == kws: + numResults = 0 + catobj = Category.getInstance() + for key,value in answers.iteritems(): + + if self.torrent_db.hasTorrent(key): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentSearchGridManager: gotRemoteHist: Ignoring hit for",`value['content_name']`,"already got it" + continue # do not show results we have ourselves + + # Convert answer fields as per + # Session.query_connected_peers() spec. to NEWDB format + newval = {} + newval['name'] = value['content_name'] + newval['infohash'] = key + newval['torrent_file_name'] = '' + newval['length'] = value['length'] + newval['creation_date'] = time() # None gives '?' in GUI + newval['relevance'] = 0 + newval['source'] = 'RQ' + newval['category'] = value['category'][0] + # We trust the peer + newval['status'] = 'good' + newval['num_seeders'] = value['seeder'] + newval['num_leechers'] = value['leecher'] + + # OLPROTO_VER_NINE includes a torrent_size. Set to + # -1 when not available. 
+ if 'torrent_size' in value: + newval['torrent_size'] = value['torrent_size'] + else: + newval['torrent_size'] = -1 + + # Extra fiedl: Set from which peer this info originates + newval['query_permids'] = [permid] + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentSearchGridManager: gotRemoteHist: appending hit",`newval['name']` + #value['name'] = 'REMOTE '+value['name'] + + # Filter out results from unwanted categories + flag = False + for cat in value['category']: + rank = catobj.getCategoryRank(cat) + if rank == -1: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentSearchGridManager: gotRemoteHits: Got",`newval['name']`,"from banned category",cat,", discarded it." + flag = True + break + if flag: + continue + + if newval['infohash'] in self.remoteHits: + # merge this result with previous results + oldval = self.remoteHits[newval['infohash']] + for query_permid in newval['query_permids']: + if not query_permid in oldval['query_permids']: + oldval['query_permids'].append(query_permid) + else: + self.remoteHits[newval['infohash']] = newval + numResults +=1 + # if numResults % 5 == 0: + # self.refreshGrid() + + if numResults > 0 and mode == 'filesMode': # and self.standardOverview.getSearchBusy(): + self.refreshGrid() + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'TorrentSearchGridManager: gotRemoteHits: Refresh grid after new remote torrent hits came in' + return True + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentSearchGridManager: gotRemoteHits: got hits for",kws,"but current search is for",self.searchkeywords[mode] + return False + except: + print_exc() + return False + + def refreshGrid(self): + if self.gridmgr is not None: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentSearchGridManager: refreshGrid: gridmgr refresh" + self.gridmgr.refresh() + + + # + # Move to Web2SearchGridManager + # + def searchWeb2(self,initialnum): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentSearchGridManager: searchWeb2:",initialnum + + if self.dod: + self.dod.stop() + self.dod = web2.DataOnDemandWeb2(" ".join(self.searchkeywords['filesMode']),guiutil=self.guiUtility) + self.dod.request(initialnum) + self.dod.register(self.tthread_gotWeb2Hit) + + def tthread_gotWeb2Hit(self,item): + """ Called by Web2DBSearchThread*s* """ + #if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentSearchGridManager: tthread_gotWeb2Hit",`item['content_name']` + + wx.CallAfter(self.refreshGrid) + + def web2tonewdb(self,value): + try: + # Added protection against missing values + newval = {} + newval['infohash'] = value['infohash'] + newval['name'] = value['content_name'] + newval['status'] = value.get('status','unknown') + newval['description'] = value.get('description','') + newval['tags'] = value.get('tags',[]) + newval['url'] = value.get('url','') + newval['num_leechers'] = value.get('leecher',1) + newval['num_seeders'] = value.get('views',1) + newval['creation_date'] = value.get('date','') + newval['views'] = value.get('views',0) + newval['web2'] = value.get('web2',True) + newval['length'] = value.get('length',1) + if 'preview' in value: # Apparently not always present + newval['preview'] = value['preview'] + return newval + except: + print_exc() + return None + + def addStoredWeb2Results(self,mode,categorykey,range): + web2on = 
self.guiUtility.utility.config.Read('enableweb2search',"boolean") + + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentSearchGridManager: getCategory: mode",mode,"webon",web2on,"insearch",self.getSearchMode(mode),"catekey",categorykey + + if mode == 'filesMode' and web2on and self.getSearchMode(mode) == SEARCHMODE_SEARCHING and \ + categorykey in ['video', 'all']: + # if we are searching in filesmode + #self.standardOverview.setSearchFeedback('web2', False, 0) + + if self.dod: + # Arno: ask for more when needed (=only one page left to display) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentSearchManager: web2: requestMore?",range[1],self.dod.getNumRequested() + pagesize = range[1] - range[0] + #diff = self.dod.getNumRequested() - range[1] + #if diff <= pagesize: + # JelleComment: above code doesnt work, because other search results are also on pages + # so we might have 100 pages of local search results. If range is related to 80th page + # websearch will try to get 80xpagesize youtube videos + # Set it steady to 3 pages + if self.dod.getNumRequested() < 3*pagesize: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentSearchManager: web2: requestMore diff",pagesize + self.dod.requestMore(pagesize) + + data = self.dod.getData() + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentSearchManager: getHitsInCat: web2: Got total",len(data) + numResults = 0 + for value in data: + + # Translate to NEWDB/FileItemPanel format, doing this in + # web2/video/genericsearch.py breaks something + newval = self.web2tonewdb(value) + if newval is None: + continue + + known = False + for item in self.hits: + if item['infohash'] == newval['infohash']: + known = True + break + if not known: + self.hits.append(newval) + numResults += 1 + + self.standardOverview.setSearchFeedback('web2', self.stopped, numResults, self.searchkeywords[mode]) + # else: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentSearchManager: No web2 hits, no self.dod" + + #else: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TorrentSearchManager: No web2 hits, mode",mode,"web2on",web2on,"in search",self.getSearchMode(mode),"catkey",categorykey + + #Rameez: The following code will call normalization functions and then + #sort and merge the combine torrent and youtube results + def sort(self): + self.normalizeResults() + self.statisticalNormalization() + #Rameez: now sort combined (i.e after the above two normalization procedures) + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'SearchGridMan: Search res: %s' % [a.get('normScore',0) for a in self.hits] + def cmp(a,b): + # normScores can be small, so multiply + # No normscore gives negative 1000, because should be less than 0 (mean) + return int(1000000.0 * (b.get('normScore',-1000) - a.get('normScore',-1000))) + self.hits.sort(cmp) + + + + + def normalizeResults(self): + torrent_total = 0 + youtube_total = 0 + KEY_NORMSCORE = 'normScore' + + #Rameez: normalize torrent results + #Rameez: normalize youtube results + for hit in self.hits: + if not hit.has_key('views'): + torrent_total += hit.get('num_seeders',0) + elif hit['views'] != 'unknown': + youtube_total += int(hit['views']) + + if torrent_total == 0: # if zero, set to one for divZeroExc. 
we can do this, cause nominator will also be zero in following division + torrent_total = 1 + if youtube_total == 0: + youtube_total = 1 + + for hit in self.hits: + if not hit.has_key('views'): + hit[KEY_NORMSCORE] = hit.get('num_seeders',0)/float(torrent_total) + elif hit['views'] != 'unknown': + hit[KEY_NORMSCORE] = int(hit['views'])/float(youtube_total) + + + + + def statisticalNormalization(self): + youtube_hits = [hit for hit in self.hits if (hit.get('views', 'unknown') != "unknown" + and hit.has_key('normScore'))] + torrent_hits = [hit for hit in self.hits if (not hit.has_key('views') + and hit.has_key('normScore'))] + self.doStatNormalization(youtube_hits) + self.doStatNormalization(torrent_hits) + + def doStatNormalization(self, hits): + #Rameez: statistically normalize torrent results + + count = 0 + tot = 0 + + for hit in hits: + tot += hit['normScore'] + count +=1 + + if count > 0: + mean = tot/count + else: + mean = 0 + + sum = 0 + for hit in hits: + temp = hit['normScore'] - mean + temp = temp * temp + sum += temp + + if count > 1: + dev = sum /(count-1) + else: + dev = 0 + + stdDev = sqrt(dev) + + for hit in hits: + if stdDev > 0: + hit['normScore'] = (hit['normScore']-mean)/ stdDev + + + + +class PeerSearchGridManager: + + # Code to make this a singleton + __single = None + + def __init__(self,guiUtility): + if PeerSearchGridManager.__single: + raise RuntimeError, "PeerSearchGridManager is singleton" + PeerSearchGridManager.__single = self + + self.guiUtility = guiUtility + + # Contains all matches for keywords in DB, not filtered by category + self.hits = [] + # Jelle's word filter + self.psearchmgr = None + self.fsearchmgr = None + self.stopped = False # not stopped by default + self.gridmgr = None + + self.standardOverview = None + self.searchkeywords = {'personsMode':[], 'friendsMode':[]} + self.oldsearchkeywords = {'personsMode':[], 'friendsMode':[]} # previous query + + + def getInstance(*args, **kw): + if PeerSearchGridManager.__single is None: + PeerSearchGridManager(*args, **kw) + return PeerSearchGridManager.__single + getInstance = staticmethod(getInstance) + + def register(self,peer_db,friend_db): + self.psearchmgr = SearchManager(peer_db) + self.fsearchmgr = SearchManager(friend_db) + + def set_gridmgr(self,gridmgr): + self.gridmgr = gridmgr + + + def getHits(self,mode,range): + # mode is 'personsMode', 'friendsMode' + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PeerSearchGridManager: getHitsIn:",mode,range + + if not self.standardOverview: + self.standardOverview = self.guiUtility.standardOverview + + # Local search puts hits in self.hits + self.searchLocalDatabase(mode) + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PeerSearchGridManager: searchLocalDB: GOT HITS",self.hits + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'PeerSearchGridManager: getHitsInCat: search found: %d items' % len(self.hits) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'PeerSearchGridManager: getHitsInCat: torrentFilter after search found: %d items' % len(self.hits) + + if mode == 'personsMode': + searchType = 'peers' + elif mode == 'friendsMode': + searchType = 'friends' + self.standardOverview.setSearchFeedback(searchType, True, len(self.hits), self.searchkeywords[mode]) + + if range[0] > len(self.hits): + return [0,None] + elif range[1] > len(self.hits): + end = len(self.hits) + else: + end = range[1] + begin = range[0] + + return 
[len(self.hits),self.hits[begin:end]] + + + def setSearchKeywords(self,wantkeywords, mode): + self.stopped = False +# if len(wantkeywords) == 0: +# print_stack() + + self.searchkeywords[mode] = wantkeywords + + def getSearchMode(self, mode): + if bool(self.searchkeywords[mode]): + if not self.stopped: + mode = SEARCHMODE_SEARCHING + else: + mode = SEARCHMODE_STOPPED + else: + mode = SEARCHMODE_NONE + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PeerSearchGridManager: getSearchMode?",mode + return mode + + def stopSearch(self): + print_stack() + self.stopped = True + + def searchLocalDatabase(self,mode): + """ Called by getHits() to search local DB. Caches previous query result. """ + if self.searchkeywords[mode] == self.oldsearchkeywords[mode] and len(self.hits) > 0: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PeerSearchGridManager: searchLocalDB: returning old hit list",len(self.hits) + return self.hits + + self.oldsearchkeywords[mode] = self.searchkeywords[mode] + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PeerSearchGridManager: searchLocalDB: Want",self.searchkeywords[mode] + + if len(self.searchkeywords[mode]) == 0 or len(self.searchkeywords[mode]) == 1 and self.searchkeywords[mode][0] == '': + return self.hits + + if mode == 'personsMode': + self.hits = self.psearchmgr.search(self.searchkeywords[mode]) + else: # friends + self.hits = self.fsearchmgr.search(self.searchkeywords[mode]) + + return self.hits + + +def torrent_cmp(a,b,sort): + """ Compare torrent db records based on key "sort" """ + vala = a.get(sort,0) + valb = b.get(sort,0) + if vala == valb: + return 0 + elif vala < valb: + return -1 + else: + return 1 + + diff --git a/tribler-mod/Tribler/Main/vwxGUI/SubscriptionsItemPanel.py b/tribler-mod/Tribler/Main/vwxGUI/SubscriptionsItemPanel.py new file mode 100644 index 0000000..a3f3e91 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/SubscriptionsItemPanel.py @@ -0,0 +1,506 @@ +from time import localtime, strftime +import wx, sys +from traceback import print_exc + +from Tribler.Core.API import * +from Tribler.Core.Utilities.unicode import * +from Tribler.Core.Utilities.utilities import * +from Tribler.Core.Utilities.timeouturlopen import urlOpenTimeout + +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility +from Tribler.Main.vwxGUI.IconsManager import IconsManager,data2wxBitmap + + +from font import * +from Tribler.Main.vwxGUI.TriblerStyles import TriblerStyles +from tribler_topButton import * +## import TasteHeart + +DEBUG = False + +# font sizes +if sys.platform == 'darwin': + FS_SUBSCRTITLE = 10 + FS_TOTALNUMBER = 10 +# FS_SIMILARITY = 10 +# FS_HEARTRANK = 10 +# FS_ONLINE = 10 +else: + FS_SUBSCRTITLE = 8 + FS_TOTALNUMBER = 9 +# FS_SIMILARITY = 10 +# FS_HEARTRANK = 7 +# FS_ONLINE = 8 + + +class SubscriptionsItemPanel(wx.Panel): + """ + PersonsItemPanel shows one persons item inside the PersonsGridPanel + """ + def __init__(self, parent, keyTypedFun = None): + global TORRENTPANEL_BACKGROUND + + wx.Panel.__init__(self, parent, -1) + self.guiUtility = GUIUtility.getInstance() + self.utility = self.guiUtility.utility + self.parent = parent + self.data = None + self.datacopy = None + self.titleLength = 172 # num characters + self.selected = False + self.warningMode = False + self.guiserver = parent.guiserver + self.torrentfeed = parent.torrentfeed + self.iconsManager = IconsManager.getInstance() + + self.oldCategoryLabel = None + self.addComponents() + self.Show() 
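# Editor's sketch (not part of the original patch): how
# FavicoThumbnailViewer.loadMetadata, defined further down in this file,
# derives the favicon location for a subscription feed. It keeps only the
# scheme and host of the feed URL, appends /favicon.ico, and fetches the
# result with a short timeout. `favicon_url_for` is a hypothetical name.
import urlparse   # Python 2 stdlib module, as used by loadMetadata below

def favicon_url_for(feed_url):
    t = urlparse.urlparse(feed_url)
    return t[0] + '://' + t[1] + '/' + 'favicon.ico'
# e.g. favicon_url_for('http://example.com/rss/all.xml') == 'http://example.com/favicon.ico'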
+ self.Refresh() + self.Layout() + self.triblerStyles = TriblerStyles.getInstance() + + def addComponents(self): + self.Show(False) + #self.SetMinSize((50,50)) + self.selectedColour = wx.Colour(255,200,187) + self.unselectedColour = wx.WHITE + + self.SetBackgroundColour(self.unselectedColour) + self.hSizer = wx.BoxSizer(wx.HORIZONTAL) + + self.Bind(wx.EVT_LEFT_UP, self.mouseAction) + self.Bind(wx.EVT_KEY_UP, self.keyTyped) + + # Add Spacer + self.hSizer.Add([8,22],0,wx.EXPAND|wx.FIXED_MINSIZE,0) + + # Add Checkbox turn on/off + self.cB = wx.CheckBox(self,-1,"",wx.Point(8,128),wx.Size(18,18)) + self.cB.SetForegroundColour(wx.Colour(0,0,0)) + self.hSizer.Add(self.cB, 0, wx.TOP|wx.LEFT|wx.RIGHT, 3) + + # Add Spacer + self.hSizer.Add([0,20],0,wx.EXPAND|wx.FIXED_MINSIZE,0) + + # Add thumb / favicon from website? + self.thumb = FavicoThumbnailViewer(self) + self.thumb.setBackground(wx.BLACK) + self.thumb.SetSize((16,16)) + self.hSizer.Add(self.thumb, 0, wx.TOP|wx.RIGHT, 3) + + # Add title + self.title =wx.StaticText(self,-1,"Tribler discovery through other Tribler Users",wx.Point(0,0),wx.Size(800,20)) + self.triblerStyles.setLightText(self.title) + self.hSizer.Add(self.title, 1, wx.TOP|wx.RIGHT, 5) + + """ + # Add total number of received files + self.totalnumber =wx.StaticText(self,-1,"",wx.Point(0,0),wx.Size(130,12)) + self.totalnumber.SetBackgroundColour(wx.WHITE) + self.totalnumber.SetFont(wx.Font(FS_TOTALNUMBER,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + self.totalnumber.SetForegroundColour(wx.Colour(128,128,128)) + self.totalnumber.SetMinSize((60,12)) + self.totalnumber.SetLabel('') + self.hSizer.Add(self.totalnumber,0,wx.TOP|wx.EXPAND,3) + + # Add left vertical line + self.vLine2 = self.addLine() + """ + + # Add Spacer to keep space occupied when no delete button is available + self.vSizer = wx.BoxSizer(wx.VERTICAL) + self.vSizer.Add([20,1],0,wx.FIXED_MINSIZE,0) + + # Add delete button + self.delete = tribler_topButton(self, -1, wx.Point(0,0), wx.Size(16,16),name='deleteSubscriptionItem') + self.vSizer.Add(self.delete, 0, wx.TOP, 3) + + self.hSizer.Add(self.vSizer, 0, wx.LEFT|wx.RIGHT|wx.TOP, 0) + + + # Add Spacer + self.hSizer.Add([8,20],0,wx.EXPAND|wx.FIXED_MINSIZE,0) + + self.SetSizer(self.hSizer); + self.SetAutoLayout(1); + self.Layout(); + self.Refresh() + + self.Bind(wx.EVT_CHECKBOX, self.checkboxAction, self.cB) + + # 2.8.4.2 return value of GetChildren changed + wl = [] + for c in self.GetChildren(): + wl.append(c) + for window in wl: + window.Bind(wx.EVT_LEFT_UP, self.mouseAction) + window.Bind(wx.EVT_KEY_UP, self.keyTyped) + window.Bind(wx.EVT_RIGHT_DOWN, self.mouseAction) + + def addLine(self, vertical=True): + if vertical: + vLine = wx.StaticLine(self,-1,wx.DefaultPosition, wx.Size(2,22),wx.LI_VERTICAL) + self.hSizer.Add(vLine, 0, wx.RIGHT|wx.EXPAND, 3) + return vLine + else: + hLine = wx.StaticLine(self,-1,wx.DefaultPosition, wx.Size(-1,1),wx.LI_HORIZONTAL) + self.vSizer.Add(hLine, 0, wx.EXPAND, 0) + return hLine + + def setData(self, peer_data): + # set bitmap, rating, title + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subip: setData called",peer_data + + if peer_data is not None and 'content_name' in peer_data: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subip: ERROR! setData called with torrent data!" 
+ peer_data = None + + if peer_data is None: + self.datacopy = None + + if self.datacopy is not None and self.datacopy['url'] == peer_data['url']: + if (self.datacopy['status'] == peer_data['status']): + return + + self.data = peer_data + + if peer_data is not None: + # deepcopy no longer works with 'ThumnailBitmap' on board + self.datacopy = {} + self.datacopy['url'] = peer_data['url'] + self.datacopy['status'] = peer_data['status'] + else: + peer_data = {} + + if peer_data is None: + peer_data = {} + + if peer_data.get('url'): + title = peer_data['url'][:self.titleLength] + self.title.Enable(True) + self.title.SetLabel(title) + self.title.Wrap(self.title.GetSize()[0]) + #self.title.SetToolTipString(peer_data['url']) + self.cB.Show() + self.cB.SetValue(peer_data['status'] == "active") +# self.vLine1.Show() + #self.vLine2.Show() + if 'persistent' in self.data: + self.delete.Hide() + else: + self.delete.Show() + else: + self.title.SetLabel('') + #self.title.SetToolTipString('') + self.title.Enable(False) + self.cB.SetValue(False) + self.cB.Hide() +# self.vLine1.Hide() + #self.vLine2.Hide() + #self.delete.Enable(False) + self.delete.Hide() + + self.thumb.setData(peer_data) + + self.Layout() + self.Refresh() + #self.parent.Refresh() + + + + def select(self, rowIndex, colIndex): + + colour = self.triblerStyles.selected(3) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'subip: selected' + self.thumb.setSelected(True) + self.SetBackgroundColour(colour) + self.title.SetBackgroundColour(colour) + #self.totalnumber.SetBackgroundColour(self.selectedColour) + self.Refresh() + + def deselect(self, rowIndex, colIndex): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'subip: deselected',self.data + if rowIndex % 2 == 0: + colour = self.triblerStyles.selected(1) + else: + colour = self.triblerStyles.selected(2) + + self.thumb.setSelected(False) + self.SetBackgroundColour(colour) + self.title.SetBackgroundColour(colour) + #self.totalnumber.SetBackgroundColour(colour) + self.Refresh() + + def keyTyped(self, event): + if self.selected: + key = event.GetKeyCode() + if (key == wx.WXK_DELETE): + if self.data: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'subip: deleting' + self.guiUtility.deleteSubscription(self.data) + event.Skip() + + def toggleStatus(self,newstatus): + if not self.data: + return + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subip: new status: ",newstatus + + self.guiUtility.selectSubscription(self.data) + + if 'persistent' in self.data: + if self.data['persistent'] == 'BC': + self.toggleBuddycast(newstatus) + elif self.data['persistent'] == 'Web2.0': + self.toggleWeb2Search(newstatus) + else: + self.torrentfeed.setURLStatus(self.data['url'],newstatus) + + def checkboxAction(self,event): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subip: checkboxAction" + + newstatus = self.cB.GetValue() + self.toggleStatus( newstatus ) + + event.Skip() + + def mouseAction(self, event): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subip: mouseAction" + obj = event.GetEventObject() + name = obj.GetName() + #print "subip: mouseAction: name is",name + + #self.SetFocus() + if self.data: + self.guiUtility.selectSubscription(self.data) + + if self.data is not None: + if name == 'deleteSubscriptionItem': + self.torrentfeed.deleteURL(self.data['url']) + self.guiUtility.deleteSubscription(self.data) + if 
event.RightDown(): + self.rightMouseButton(event) + + event.Skip() + + def rightMouseButton(self, event): + menu = self.guiUtility.OnRightMouseAction(event) + if menu is not None: + self.PopupMenu(menu, (-1,-1)) + + def getIdentifier(self): + if self.data: + return self.data['url'] + + def toggleBuddycast(self,status): + try: + # Save SessionStartupConfig + state_dir = self.utility.session.get_state_dir() + cfgfilename = Session.get_default_config_filename(state_dir) + scfg = SessionStartupConfig.load(cfgfilename) + + for target in [scfg,self.utility.session]: + try: + target.set_start_recommender(status) + except: + print_exc() + + scfg.save(cfgfilename) + except: + print_exc() + + def toggleWeb2Search(self,status): + if status and sys.platform == 'linux2': + dlg = wx.MessageDialog(None, self.utility.lang.get('vlc_linux_start_bug') ,self.utility.lang.get('vlc_linux_start_bug_title'), wx.OK|wx.ICON_INFORMATION) + dlg.ShowModal() + dlg.Destroy() + + self.utility.config.Write('enableweb2search',status, "boolean") + search = self.guiUtility.getSearchField(mode='filesMode') + if status: + txt = self.utility.lang.get('filesdefaultsearchweb2txt') + else: + txt = self.utility.lang.get('filesdefaultsearchtxt') + search.SetValue(txt) + + + +class FavicoThumbnailViewer(wx.Panel): + """ + Show thumbnail and mast with info on mouseOver + """ + + def __init__(self, *args, **kw): + if len(args) == 0: + pre = wx.PrePanel() + # the Create step is done by XRC. + self.PostCreate(pre) + self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate) + else: + wx.Panel.__init__(self, *args, **kw) + self._PostInit() + + def OnCreate(self, event): + self.Unbind(wx.EVT_WINDOW_CREATE) + wx.CallAfter(self._PostInit) + event.Skip() + return True + + def _PostInit(self): + # Do all init here + self.backgroundColor = wx.WHITE + self.dataBitmap = self.maskBitmap = None + self.data = None + self.mouseOver = False + self.guiUtility = GUIUtility.getInstance() + self.utility = self.guiUtility.utility + self.Bind(wx.EVT_PAINT, self.OnPaint) + self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnErase) + self.selected = False + self.border = None + + self.iconsManager = IconsManager.getInstance() + + def setData(self, data): + + if not data: + self.Hide() + self.Refresh() + return + + if not self.IsShown(): + self.Show() + if data != self.data: + self.data = data + self.setThumbnail(data) + + def setThumbnail(self, data): + # Get the file(s)data for this torrent + try: + bmp = self.iconsManager.get_default('subscriptionsMode','DEFAULT_THUMB') + # Check if we have already read the thumbnail and metadata information from this torrent file + if data.get('metadata'): + bmp = data['metadata'].get('ThumbnailBitmap') + if not bmp: + bmp = self.iconsManager.get_default('subscriptionMode','DEFAULT_THUMB') + else: + self.GetParent().guiserver.add_task(lambda:self.loadMetadata(data),0) + + self.setBitmap(bmp) + width, height = self.GetSize() + d = 1 + self.border = [wx.Point(0,d), wx.Point(width-d, d), wx.Point(width-d, height-d), wx.Point(d,height-d), wx.Point(d,0)] + self.Refresh() + + except: + print_exc() + return {} + + + def setBitmap(self, bmp): + # Recalculate image placement + w, h = self.GetSize() + iw, ih = bmp.GetSize() + + self.dataBitmap = bmp + self.xpos, self.ypos = (w-iw)/2, (h-ih)/2 + + + def loadMetadata(self,data): + """ Called by non-GUI thread """ + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subip: ThumbnailViewer: loadMetadata: url",data['url'] + mimetype = None + bmpdata = None + if not ('persistent' 
in data): + try: + t = urlparse.urlparse(data['url']) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subip: ThumbnailViewer: loadMetadata: parsed url",t + newurl = t[0]+'://'+t[1]+'/'+'favicon.ico' + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subip: ThumbnailViewer: loadMetadata: newurl",newurl + stream = urlOpenTimeout(newurl,timeout=5) + mimetype = 'image/x-ico' # 'image/vnd.microsoft.icon' # 'image/ico' + bmpdata = stream.read() + stream.close() + except: + print_exc() + + wx.CallAfter(self.metadata_thread_gui_callback,data,mimetype,bmpdata) + + def metadata_thread_gui_callback(self,data,mimetype,bmpdata): + """ Called by GUI thread """ + + if DEBUG: + print "subip: ThumbnailViewer: GUI callback" + + metadata = {} + if 'persistent' in data: + metadata['ThumbnailBitmap'] = self.iconsManager.get_default('subscriptionsMode','BUDDYCAST_THUMB') + else: + if mimetype is not None: + metadata['ThumbnailBitmap'] = data2wxBitmap(mimetype,bmpdata,dim=16) + else: + metadata['ThumbnailBitmap'] = None + + data['metadata'] = metadata + + # This item may be displaying another subscription right now, only show the icon + # when it's still the same person + if data['url'] == self.data['url']: + if 'ThumbnailBitmap' in metadata and metadata['ThumbnailBitmap'] is not None: + self.setBitmap(metadata['ThumbnailBitmap']) + self.Refresh() + + + def OnErase(self, event): + pass + #event.Skip() + + def setSelected(self, sel): + self.selected = sel + self.Refresh() + + def isSelected(self): + return self.selected + + def mouseAction(self, event): + if event.Entering(): + if DEBUG: + print 'subip: enter' + self.mouseOver = True + self.Refresh() + elif event.Leaving(): + self.mouseOver = False + if DEBUG: + print 'subip: leave' + self.Refresh() + elif event.ButtonUp(): + self.ClickedButton() + #event.Skip() + + def ClickedButton(self): + if DEBUG: + print 'subip: Click' + pass + + def setBackground(self, wxColor): + self.backgroundColor = wxColor + + def OnPaint(self, evt): + dc = wx.BufferedPaintDC(self) + dc.SetBackground(wx.Brush(self.backgroundColor)) + dc.Clear() + + if self.dataBitmap: + #dc.DrawBitmap(self.dataBitmap, self.xpos,self.ypos, True) + dc.DrawBitmap(self.dataBitmap, 0, 0, True) diff --git a/tribler-mod/Tribler/Main/vwxGUI/SubscriptionsItemPanel.py.bak b/tribler-mod/Tribler/Main/vwxGUI/SubscriptionsItemPanel.py.bak new file mode 100644 index 0000000..d4afd5d --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/SubscriptionsItemPanel.py.bak @@ -0,0 +1,505 @@ +import wx, sys +from traceback import print_exc + +from Tribler.Core.API import * +from Tribler.Core.Utilities.unicode import * +from Tribler.Core.Utilities.utilities import * +from Tribler.Core.Utilities.timeouturlopen import urlOpenTimeout + +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility +from Tribler.Main.vwxGUI.IconsManager import IconsManager,data2wxBitmap + + +from font import * +from Tribler.Main.vwxGUI.TriblerStyles import TriblerStyles +from tribler_topButton import * +## import TasteHeart + +DEBUG = False + +# font sizes +if sys.platform == 'darwin': + FS_SUBSCRTITLE = 10 + FS_TOTALNUMBER = 10 +# FS_SIMILARITY = 10 +# FS_HEARTRANK = 10 +# FS_ONLINE = 10 +else: + FS_SUBSCRTITLE = 8 + FS_TOTALNUMBER = 9 +# FS_SIMILARITY = 10 +# FS_HEARTRANK = 7 +# FS_ONLINE = 8 + + +class SubscriptionsItemPanel(wx.Panel): + """ + PersonsItemPanel shows one persons item inside the PersonsGridPanel + """ + def __init__(self, parent, keyTypedFun = None): + global TORRENTPANEL_BACKGROUND + 
+ wx.Panel.__init__(self, parent, -1) + self.guiUtility = GUIUtility.getInstance() + self.utility = self.guiUtility.utility + self.parent = parent + self.data = None + self.datacopy = None + self.titleLength = 172 # num characters + self.selected = False + self.warningMode = False + self.guiserver = parent.guiserver + self.torrentfeed = parent.torrentfeed + self.iconsManager = IconsManager.getInstance() + + self.oldCategoryLabel = None + self.addComponents() + self.Show() + self.Refresh() + self.Layout() + self.triblerStyles = TriblerStyles.getInstance() + + def addComponents(self): + self.Show(False) + #self.SetMinSize((50,50)) + self.selectedColour = wx.Colour(255,200,187) + self.unselectedColour = wx.WHITE + + self.SetBackgroundColour(self.unselectedColour) + self.hSizer = wx.BoxSizer(wx.HORIZONTAL) + + self.Bind(wx.EVT_LEFT_UP, self.mouseAction) + self.Bind(wx.EVT_KEY_UP, self.keyTyped) + + # Add Spacer + self.hSizer.Add([8,22],0,wx.EXPAND|wx.FIXED_MINSIZE,0) + + # Add Checkbox turn on/off + self.cB = wx.CheckBox(self,-1,"",wx.Point(8,128),wx.Size(18,18)) + self.cB.SetForegroundColour(wx.Colour(0,0,0)) + self.hSizer.Add(self.cB, 0, wx.TOP|wx.LEFT|wx.RIGHT, 3) + + # Add Spacer + self.hSizer.Add([0,20],0,wx.EXPAND|wx.FIXED_MINSIZE,0) + + # Add thumb / favicon from website? + self.thumb = FavicoThumbnailViewer(self) + self.thumb.setBackground(wx.BLACK) + self.thumb.SetSize((16,16)) + self.hSizer.Add(self.thumb, 0, wx.TOP|wx.RIGHT, 3) + + # Add title + self.title =wx.StaticText(self,-1,"Tribler discovery through other Tribler Users",wx.Point(0,0),wx.Size(800,20)) + self.triblerStyles.setLightText(self.title) + self.hSizer.Add(self.title, 1, wx.TOP|wx.RIGHT, 5) + + """ + # Add total number of received files + self.totalnumber =wx.StaticText(self,-1,"",wx.Point(0,0),wx.Size(130,12)) + self.totalnumber.SetBackgroundColour(wx.WHITE) + self.totalnumber.SetFont(wx.Font(FS_TOTALNUMBER,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + self.totalnumber.SetForegroundColour(wx.Colour(128,128,128)) + self.totalnumber.SetMinSize((60,12)) + self.totalnumber.SetLabel('') + self.hSizer.Add(self.totalnumber,0,wx.TOP|wx.EXPAND,3) + + # Add left vertical line + self.vLine2 = self.addLine() + """ + + # Add Spacer to keep space occupied when no delete button is available + self.vSizer = wx.BoxSizer(wx.VERTICAL) + self.vSizer.Add([20,1],0,wx.FIXED_MINSIZE,0) + + # Add delete button + self.delete = tribler_topButton(self, -1, wx.Point(0,0), wx.Size(16,16),name='deleteSubscriptionItem') + self.vSizer.Add(self.delete, 0, wx.TOP, 3) + + self.hSizer.Add(self.vSizer, 0, wx.LEFT|wx.RIGHT|wx.TOP, 0) + + + # Add Spacer + self.hSizer.Add([8,20],0,wx.EXPAND|wx.FIXED_MINSIZE,0) + + self.SetSizer(self.hSizer); + self.SetAutoLayout(1); + self.Layout(); + self.Refresh() + + self.Bind(wx.EVT_CHECKBOX, self.checkboxAction, self.cB) + + # 2.8.4.2 return value of GetChildren changed + wl = [] + for c in self.GetChildren(): + wl.append(c) + for window in wl: + window.Bind(wx.EVT_LEFT_UP, self.mouseAction) + window.Bind(wx.EVT_KEY_UP, self.keyTyped) + window.Bind(wx.EVT_RIGHT_DOWN, self.mouseAction) + + def addLine(self, vertical=True): + if vertical: + vLine = wx.StaticLine(self,-1,wx.DefaultPosition, wx.Size(2,22),wx.LI_VERTICAL) + self.hSizer.Add(vLine, 0, wx.RIGHT|wx.EXPAND, 3) + return vLine + else: + hLine = wx.StaticLine(self,-1,wx.DefaultPosition, wx.Size(-1,1),wx.LI_HORIZONTAL) + self.vSizer.Add(hLine, 0, wx.EXPAND, 0) + return hLine + + def setData(self, peer_data): + # set bitmap, rating, title + + if DEBUG: + print 
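# Sketch of the child-binding loop that closes addComponents above: every
# child widget forwards its mouse and key events to the row panel's handlers,
# so a click anywhere in the row selects it (the explicit list copy works
# around the wxPython 2.8.4.2 change in GetChildren noted above). The helper
# name bind_row_events is illustrative.
import wx

def bind_row_events(panel, on_mouse, on_key):
    children = list(panel.GetChildren())
    for child in children:
        child.Bind(wx.EVT_LEFT_UP, on_mouse)
        child.Bind(wx.EVT_KEY_UP, on_key)
        child.Bind(wx.EVT_RIGHT_DOWN, on_mouse)

# e.g. at the end of addComponents:
#   bind_row_events(self, self.mouseAction, self.keyTyped)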
>>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subip: setData called",peer_data + + if peer_data is not None and 'content_name' in peer_data: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subip: ERROR! setData called with torrent data!" + peer_data = None + + if peer_data is None: + self.datacopy = None + + if self.datacopy is not None and self.datacopy['url'] == peer_data['url']: + if (self.datacopy['status'] == peer_data['status']): + return + + self.data = peer_data + + if peer_data is not None: + # deepcopy no longer works with 'ThumnailBitmap' on board + self.datacopy = {} + self.datacopy['url'] = peer_data['url'] + self.datacopy['status'] = peer_data['status'] + else: + peer_data = {} + + if peer_data is None: + peer_data = {} + + if peer_data.get('url'): + title = peer_data['url'][:self.titleLength] + self.title.Enable(True) + self.title.SetLabel(title) + self.title.Wrap(self.title.GetSize()[0]) + #self.title.SetToolTipString(peer_data['url']) + self.cB.Show() + self.cB.SetValue(peer_data['status'] == "active") +# self.vLine1.Show() + #self.vLine2.Show() + if 'persistent' in self.data: + self.delete.Hide() + else: + self.delete.Show() + else: + self.title.SetLabel('') + #self.title.SetToolTipString('') + self.title.Enable(False) + self.cB.SetValue(False) + self.cB.Hide() +# self.vLine1.Hide() + #self.vLine2.Hide() + #self.delete.Enable(False) + self.delete.Hide() + + self.thumb.setData(peer_data) + + self.Layout() + self.Refresh() + #self.parent.Refresh() + + + + def select(self, rowIndex, colIndex): + + colour = self.triblerStyles.selected(3) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'subip: selected' + self.thumb.setSelected(True) + self.SetBackgroundColour(colour) + self.title.SetBackgroundColour(colour) + #self.totalnumber.SetBackgroundColour(self.selectedColour) + self.Refresh() + + def deselect(self, rowIndex, colIndex): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'subip: deselected',self.data + if rowIndex % 2 == 0: + colour = self.triblerStyles.selected(1) + else: + colour = self.triblerStyles.selected(2) + + self.thumb.setSelected(False) + self.SetBackgroundColour(colour) + self.title.SetBackgroundColour(colour) + #self.totalnumber.SetBackgroundColour(colour) + self.Refresh() + + def keyTyped(self, event): + if self.selected: + key = event.GetKeyCode() + if (key == wx.WXK_DELETE): + if self.data: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'subip: deleting' + self.guiUtility.deleteSubscription(self.data) + event.Skip() + + def toggleStatus(self,newstatus): + if not self.data: + return + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subip: new status: ",newstatus + + self.guiUtility.selectSubscription(self.data) + + if 'persistent' in self.data: + if self.data['persistent'] == 'BC': + self.toggleBuddycast(newstatus) + elif self.data['persistent'] == 'Web2.0': + self.toggleWeb2Search(newstatus) + else: + self.torrentfeed.setURLStatus(self.data['url'],newstatus) + + def checkboxAction(self,event): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subip: checkboxAction" + + newstatus = self.cB.GetValue() + self.toggleStatus( newstatus ) + + event.Skip() + + def mouseAction(self, event): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subip: mouseAction" + obj = event.GetEventObject() + name = obj.GetName() + #print "subip: mouseAction: 
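# Sketch of the change-detection idea behind self.datacopy in setData above:
# only the fields that affect rendering ('url' and 'status') are copied,
# because a deep copy would fail once a wx bitmap is stored in the item dict,
# and the expensive re-layout is skipped when neither field changed. The
# helper name needs_refresh is illustrative.
def needs_refresh(datacopy, peer_data):
    if peer_data is None or datacopy is None:
        return True
    return (datacopy.get('url') != peer_data.get('url') or
            datacopy.get('status') != peer_data.get('status'))

# After refreshing, keep only the comparable fields:
#   self.datacopy = {'url': peer_data['url'], 'status': peer_data['status']}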
name is",name + + #self.SetFocus() + if self.data: + self.guiUtility.selectSubscription(self.data) + + if self.data is not None: + if name == 'deleteSubscriptionItem': + self.torrentfeed.deleteURL(self.data['url']) + self.guiUtility.deleteSubscription(self.data) + if event.RightDown(): + self.rightMouseButton(event) + + event.Skip() + + def rightMouseButton(self, event): + menu = self.guiUtility.OnRightMouseAction(event) + if menu is not None: + self.PopupMenu(menu, (-1,-1)) + + def getIdentifier(self): + if self.data: + return self.data['url'] + + def toggleBuddycast(self,status): + try: + # Save SessionStartupConfig + state_dir = self.utility.session.get_state_dir() + cfgfilename = Session.get_default_config_filename(state_dir) + scfg = SessionStartupConfig.load(cfgfilename) + + for target in [scfg,self.utility.session]: + try: + target.set_start_recommender(status) + except: + print_exc() + + scfg.save(cfgfilename) + except: + print_exc() + + def toggleWeb2Search(self,status): + if status and sys.platform == 'linux2': + dlg = wx.MessageDialog(None, self.utility.lang.get('vlc_linux_start_bug') ,self.utility.lang.get('vlc_linux_start_bug_title'), wx.OK|wx.ICON_INFORMATION) + dlg.ShowModal() + dlg.Destroy() + + self.utility.config.Write('enableweb2search',status, "boolean") + search = self.guiUtility.getSearchField(mode='filesMode') + if status: + txt = self.utility.lang.get('filesdefaultsearchweb2txt') + else: + txt = self.utility.lang.get('filesdefaultsearchtxt') + search.SetValue(txt) + + + +class FavicoThumbnailViewer(wx.Panel): + """ + Show thumbnail and mast with info on mouseOver + """ + + def __init__(self, *args, **kw): + if len(args) == 0: + pre = wx.PrePanel() + # the Create step is done by XRC. + self.PostCreate(pre) + self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate) + else: + wx.Panel.__init__(self, *args, **kw) + self._PostInit() + + def OnCreate(self, event): + self.Unbind(wx.EVT_WINDOW_CREATE) + wx.CallAfter(self._PostInit) + event.Skip() + return True + + def _PostInit(self): + # Do all init here + self.backgroundColor = wx.WHITE + self.dataBitmap = self.maskBitmap = None + self.data = None + self.mouseOver = False + self.guiUtility = GUIUtility.getInstance() + self.utility = self.guiUtility.utility + self.Bind(wx.EVT_PAINT, self.OnPaint) + self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnErase) + self.selected = False + self.border = None + + self.iconsManager = IconsManager.getInstance() + + def setData(self, data): + + if not data: + self.Hide() + self.Refresh() + return + + if not self.IsShown(): + self.Show() + if data != self.data: + self.data = data + self.setThumbnail(data) + + def setThumbnail(self, data): + # Get the file(s)data for this torrent + try: + bmp = self.iconsManager.get_default('subscriptionsMode','DEFAULT_THUMB') + # Check if we have already read the thumbnail and metadata information from this torrent file + if data.get('metadata'): + bmp = data['metadata'].get('ThumbnailBitmap') + if not bmp: + bmp = self.iconsManager.get_default('subscriptionMode','DEFAULT_THUMB') + else: + self.GetParent().guiserver.add_task(lambda:self.loadMetadata(data),0) + + self.setBitmap(bmp) + width, height = self.GetSize() + d = 1 + self.border = [wx.Point(0,d), wx.Point(width-d, d), wx.Point(width-d, height-d), wx.Point(d,height-d), wx.Point(d,0)] + self.Refresh() + + except: + print_exc() + return {} + + + def setBitmap(self, bmp): + # Recalculate image placement + w, h = self.GetSize() + iw, ih = bmp.GetSize() + + self.dataBitmap = bmp + self.xpos, self.ypos = (w-iw)/2, 
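# Sketch of the persist-and-apply pattern used by toggleBuddycast above: the
# flag is pushed both into the running Session and into the on-disk
# SessionStartupConfig so the choice survives a restart. Session and
# SessionStartupConfig come from Tribler.Core.API, which this file imports
# with a star import; the helper name persist_recommender_flag is illustrative.
from traceback import print_exc
from Tribler.Core.API import Session, SessionStartupConfig

def persist_recommender_flag(session, status):
    state_dir = session.get_state_dir()
    cfgfilename = Session.get_default_config_filename(state_dir)
    scfg = SessionStartupConfig.load(cfgfilename)
    for target in (scfg, session):
        try:
            target.set_start_recommender(status)
        except Exception:
            print_exc()
    scfg.save(cfgfilename)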
(h-ih)/2 + + + def loadMetadata(self,data): + """ Called by non-GUI thread """ + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subip: ThumbnailViewer: loadMetadata: url",data['url'] + mimetype = None + bmpdata = None + if not ('persistent' in data): + try: + t = urlparse.urlparse(data['url']) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subip: ThumbnailViewer: loadMetadata: parsed url",t + newurl = t[0]+'://'+t[1]+'/'+'favicon.ico' + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subip: ThumbnailViewer: loadMetadata: newurl",newurl + stream = urlOpenTimeout(newurl,timeout=5) + mimetype = 'image/x-ico' # 'image/vnd.microsoft.icon' # 'image/ico' + bmpdata = stream.read() + stream.close() + except: + print_exc() + + wx.CallAfter(self.metadata_thread_gui_callback,data,mimetype,bmpdata) + + def metadata_thread_gui_callback(self,data,mimetype,bmpdata): + """ Called by GUI thread """ + + if DEBUG: + print "subip: ThumbnailViewer: GUI callback" + + metadata = {} + if 'persistent' in data: + metadata['ThumbnailBitmap'] = self.iconsManager.get_default('subscriptionsMode','BUDDYCAST_THUMB') + else: + if mimetype is not None: + metadata['ThumbnailBitmap'] = data2wxBitmap(mimetype,bmpdata,dim=16) + else: + metadata['ThumbnailBitmap'] = None + + data['metadata'] = metadata + + # This item may be displaying another subscription right now, only show the icon + # when it's still the same person + if data['url'] == self.data['url']: + if 'ThumbnailBitmap' in metadata and metadata['ThumbnailBitmap'] is not None: + self.setBitmap(metadata['ThumbnailBitmap']) + self.Refresh() + + + def OnErase(self, event): + pass + #event.Skip() + + def setSelected(self, sel): + self.selected = sel + self.Refresh() + + def isSelected(self): + return self.selected + + def mouseAction(self, event): + if event.Entering(): + if DEBUG: + print 'subip: enter' + self.mouseOver = True + self.Refresh() + elif event.Leaving(): + self.mouseOver = False + if DEBUG: + print 'subip: leave' + self.Refresh() + elif event.ButtonUp(): + self.ClickedButton() + #event.Skip() + + def ClickedButton(self): + if DEBUG: + print 'subip: Click' + pass + + def setBackground(self, wxColor): + self.backgroundColor = wxColor + + def OnPaint(self, evt): + dc = wx.BufferedPaintDC(self) + dc.SetBackground(wx.Brush(self.backgroundColor)) + dc.Clear() + + if self.dataBitmap: + #dc.DrawBitmap(self.dataBitmap, self.xpos,self.ypos, True) + dc.DrawBitmap(self.dataBitmap, 0, 0, True) diff --git a/tribler-mod/Tribler/Main/vwxGUI/Tab_graphs.xrc b/tribler-mod/Tribler/Main/vwxGUI/Tab_graphs.xrc new file mode 100644 index 0000000..81639c0 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/Tab_graphs.xrc @@ -0,0 +1,21 @@ + + + + 0,0 + 300,348 + #ffffff + + wxVERTICAL + + wxLEFT|wxEXPAND|wxFIXED_MINSIZE + 3 + + + 3,3 + 300,300 + #ffffff + + + + + diff --git a/tribler-mod/Tribler/Main/vwxGUI/TextButton.py b/tribler-mod/Tribler/Main/vwxGUI/TextButton.py new file mode 100644 index 0000000..496f5ba --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/TextButton.py @@ -0,0 +1,279 @@ +from time import localtime, strftime +# Written by Jelle Roozenburg, Maarten ten Brinke +# see LICENSE.txt for license information + +import wx, os, sys +from Tribler.Main.vwxGUI.TriblerStyles import TriblerStyles +from Tribler.Main.vwxGUI.bgPanel import ImagePanel +from Tribler.Main.vwxGUI.IconsManager import IconsManager +from Tribler.Main.vwxGUI.TextEdit import TextEdit +from font import * +from 
traceback import print_exc +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility +#from wx.lib.stattext import GenStaticText + +DEBUG = False + +class TextButtonBasic(wx.Panel): + """ + Button that changes the image shown if you move your mouse over it. + It redraws the background of the parent Panel, if this is an imagepanel with + a variable self.bitmap. + """ + + def __init__(self, menuItem, *args, **kw): + self.selected = False + self.menuItem = menuItem + self.triblerStyles = TriblerStyles.getInstance() + if len(args) == 0: + pre = wx.PrePanel() + # the Create step is done by XRC. + self.PostCreate(pre) + self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate) + else: + wx.Panel.__init__(self, *args, **kw) + self._PostInit() + + def OnCreate(self, event): + self.Unbind(wx.EVT_WINDOW_CREATE) + wx.CallAfter(self._PostInit) + event.Skip() + return True + + def _PostInit(self): + # Do all init here + self.guiUtility = GUIUtility.getInstance() + self.Bind(wx.EVT_MOUSE_EVENTS, self.mouseAction) + self.Bind(wx.EVT_PAINT, self.OnPaint) + if not self.menuItem: + self.Bind(wx.EVT_LEFT_UP, self.ClickedButton) + + self.dcRollOver = 0 + + if self.GetParent().GetName() == 'filterStandard': + self.SetMinSize((-1,15)) + self.SetSize((-1,15)) +# self.SetMinSize((60,17)) + else: + self.SetMinSize(self.GetSize()) + + self.GetParent().Layout() + self.Refresh(True) + self.Update() + + + def setSelected(self, sel): + if sel != self.selected: + self.selected = sel + if self.menuItem: + self.dcRollOver = 0 + self.Refresh() + + def isSelected(self): + return self.selected + + def mouseAction(self, event): + event.Skip() + if event.Entering() and not self.selected: + self.dcRollOver = 1 + self.Refresh() + elif event.Leaving() and not self.selected: + self.dcRollOver = 0 + self.Refresh() + + def ClickedButton(self, event): + name = self.GetName() + event.Skip() + self.guiUtility.buttonClicked(event) +# self.guiUtility.detailsTabClicked(name) + + def OnPaint(self, evt): + dc = wx.BufferedPaintDC(self) + if self.dcRollOver == 0 : + dc.SetBackground(wx.Brush(self.triblerStyles.textButtonLeft(style = 'bgColour'))) + if self.dcRollOver == 1 or self.selected == True: + dc.SetBackground(wx.Brush(self.triblerStyles.textButtonLeft(style = 'bgColour2'))) + + dc.SetTextForeground(self.triblerStyles.textButtonLeft(style = 'textColour')) + dc.SetFont(self.triblerStyles.textButtonLeft(style = 'font')) + + dc.Clear() + + dc.DrawText(self.GetName(), 2, 2) + +class TextButton(TextButtonBasic): + def __init__(self, *args, **kw): + menuItem = False + TextButtonBasic.__init__(self, menuItem, *args, **kw) + +class TextButtonFilter(TextButtonBasic): + def __init__(self, *args, **kw): + menuItem = False + TextButtonBasic.__init__(self, menuItem, *args, **kw) + + +class TextButtonLeft(TextButtonBasic): + def __init__(self, parent, addItem = False, icon=False, *args, **kw): + + self.mm = IconsManager.getInstance() + self.icon = None + self.leftBtnMenuIcon = None + + + menuItem = True + self.addItem = addItem + self.extraMenu = False + + + TextButtonBasic.__init__(self, menuItem, parent, *args, **kw) + if icon: +# if self.GetName() == 'Highspeed': + self.AddLeftButtonIcon(True) + +# self.AddLeftButtonMenuIcon(False) + + + def AddLeftButtonIcon(self, False): + print 'tb> addleftButtonIcon' + print self.GetName() + if self.GetName() == 'Start page': + self.icon = self.mm.MENUICONHOME + elif self.GetName() == 'Stats': + self.icon = self.mm.MENUICONSTATS + elif self.GetName() == 'Profile': + self.icon = self.mm.MENUICONPROFILE + elif self.GetName() 
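# A stripped-down sketch of the two-phase-creation skeleton that
# TextButtonBasic (and the other panels in these files) follow: when the
# widget is produced by XRC there are no constructor arguments, so the real
# window is attached via wx.PrePanel/PostCreate and initialisation is deferred
# until EVT_WINDOW_CREATE fires; when constructed directly, __init__ calls
# _PostInit itself. This is the standard wxPython 2.8 idiom, not something
# specific to this patch.
import wx

class TwoPhasePanel(wx.Panel):
    def __init__(self, *args, **kw):
        if len(args) == 0:
            pre = wx.PrePanel()                 # real window created later by XRC
            self.PostCreate(pre)
            self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate)
        else:
            wx.Panel.__init__(self, *args, **kw)
            self._PostInit()

    def OnCreate(self, event):
        self.Unbind(wx.EVT_WINDOW_CREATE)
        wx.CallAfter(self._PostInit)
        event.Skip()

    def _PostInit(self):
        pass  # real widgets, bindings and styles are created here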
== 'All Downloads': + self.icon = self.mm.MENUICONALLDOWNLOADS + elif self.GetName() == 'Highspeed': + self.icon = self.mm.MENUICONPLAYLIST + elif self.GetName() == 'All Friends': + self.icon = self.mm.MENUICONALLFRIENDS + elif self.GetName() == 'All Favorites': + self.icon = self.mm.MENUICONPLAYLIST + elif self.GetName() == 'All Subscriptions': + self.icon = self.mm.MENUICONALLSUBSCRIPTIONS + elif self.GetName() == 'Tribler 5': + self.icon = self.mm.MENUICONGROUPS + elif self.GetName() == 'Tribler 4': + self.icon = self.mm.MENUICONGROUPS + elif self.GetName() == '< Tribler 4': + self.icon = self.mm.MENUICONGROUPS + + + + def AddLeftButtonMenuIcon(self, enabled): + if enabled: + self.leftBtnMenuIcon = self.mm.LEFTBUTTONMENU + else: + self.leftBtnMenuIcon = None +# if enabled: +# self.expanded = True +# self.enabled = self.mm.H1EXPANDEDTRUE +# else: +# self.expanded = False +# self.enabled = self.mm.H1EXPANDEDFALSE + + + + def AddButtonLeftMenu(self, active): + self.active = active + if self.active: + self.buttonIcon = self.mm.ADDMENUITEM + else: + self.buttonIcon = None + + self.Refresh() + + + def OnPaint(self, evt): + # overriding the OnPaint funcion in TextButton + dc = wx.BufferedPaintDC(self) + + if self.dcRollOver == 0 : +# dc.SetBrush(wx.Brush(wx.BLACK)) + + dc.SetBackground(wx.Brush(self.triblerStyles.textButtonLeft(style = 'bgColour'))) + dc.Clear() + + if self.dcRollOver == 1 or self.selected == True: + dc.SetBackground(wx.Brush(self.triblerStyles.textButtonLeft(style = 'bgColour'))) +# dc.SetBackground(wx.Brush(self.triblerStyles.textButtonLeft(style = 'bgColour2'))) + dc.Clear() +# + dc.SetBrush(wx.Brush(self.triblerStyles.textButtonLeft(style = 'bgColour2'))) + dc.DrawRectangle(0, 0, 200, 20) + +# dc.SetBackground(wx.Brush(self.triblerStyles.textButtonLeft(style = 'bgColour2'))) + + if self.addItem: + dc.SetFont(self.triblerStyles.textButtonLeft(style = 'fontAdd')) + dc.SetTextForeground(self.triblerStyles.textButtonLeft(style = 'textColourAdd')) + else: + dc.SetFont(self.triblerStyles.textButtonLeft(style = 'font')) + dc.SetTextForeground(self.triblerStyles.textButtonLeft(style = 'textColour')) + + dc.DrawText(self.GetName(), 48, 3) +# if self.leftBtnMenuIcon != None: +# dc.DrawBitmap(self.leftBtnMenuIcon, 140, 2, True) + if self.icon != None: + dc.DrawBitmap(self.icon, 20, 0, True) + + +class TextButtonLeftH1(TextButtonBasic): + def __init__(self, *args, **kw): + self.mm = IconManager.getInstance() + self.enabled = True + self.Enabled(self.enabled) + self.active = False + menuItem = True + self.buttonIcon = None + + TextButtonBasic.__init__(self, menuItem, *args, **kw) + +# self.AddButtonIcon(False) + +# def AddButtonIcon(self, active): +# self.active = active +# if self.active: +# self.buttonIcon = self.mm.ADDMENUITEM +# else: +# self.buttonIcon = None +# +# self.Refresh() + + def Enabled(self, enabled): + if enabled: + self.expanded = True + self.enabled = self.mm.H1EXPANDEDTRUE + else: + self.expanded = False + self.enabled = self.mm.H1EXPANDEDFALSE + + + + def OnPaint(self, evt): + # overriding the OnPaint funcion in TextButton + dc = wx.BufferedPaintDC(self) + if self.dcRollOver == 0 : + dc.SetBackground(wx.Brush(self.triblerStyles.textButtonLeftH1(style = 'bgColour'))) + if self.dcRollOver == 1 or self.selected == True: + dc.SetBackground(wx.Brush(self.triblerStyles.textButtonLeftH1(style = 'bgColour'))) + + if self.expanded: + dc.SetTextForeground(self.triblerStyles.textButtonLeftH1(style = 'textColour')) + else: + 
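# The elif chain in AddLeftButtonIcon above is one long name-to-icon table; an
# equivalent, more compact way to express it is a dict lookup, sketched here.
# The MENUICON* attributes are the IconsManager fields already used above; the
# function name icon_for_menu_entry is illustrative.
def icon_for_menu_entry(mm, name):
    table = {
        'Start page': mm.MENUICONHOME,
        'Stats': mm.MENUICONSTATS,
        'Profile': mm.MENUICONPROFILE,
        'All Downloads': mm.MENUICONALLDOWNLOADS,
        'Highspeed': mm.MENUICONPLAYLIST,
        'All Friends': mm.MENUICONALLFRIENDS,
        'All Favorites': mm.MENUICONPLAYLIST,
        'All Subscriptions': mm.MENUICONALLSUBSCRIPTIONS,
        'Tribler 5': mm.MENUICONGROUPS,
        'Tribler 4': mm.MENUICONGROUPS,
        '< Tribler 4': mm.MENUICONGROUPS,
    }
    return table.get(name)  # None when the entry has no icon

# e.g.  self.icon = icon_for_menu_entry(self.mm, self.GetName())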
dc.SetTextForeground(self.triblerStyles.textButtonLeftH1(style = 'textColour2')) + + dc.SetFont(self.triblerStyles.textButtonLeftH1(style = 'font')) + dc.Clear() + + dc.DrawText(self.GetName(), 18, 2) + dc.DrawBitmap(self.enabled, 5, 5, True) + + if self.buttonIcon != None: + print 'tb' +# dc.DrawBitmap(self.buttonIcon, 140, 2, True) + + + + diff --git a/tribler-mod/Tribler/Main/vwxGUI/TextButton.py.bak b/tribler-mod/Tribler/Main/vwxGUI/TextButton.py.bak new file mode 100644 index 0000000..ac36a30 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/TextButton.py.bak @@ -0,0 +1,278 @@ +# Written by Jelle Roozenburg, Maarten ten Brinke +# see LICENSE.txt for license information + +import wx, os, sys +from Tribler.Main.vwxGUI.TriblerStyles import TriblerStyles +from Tribler.Main.vwxGUI.bgPanel import ImagePanel +from Tribler.Main.vwxGUI.IconsManager import IconsManager +from Tribler.Main.vwxGUI.TextEdit import TextEdit +from font import * +from traceback import print_exc +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility +#from wx.lib.stattext import GenStaticText + +DEBUG = False + +class TextButtonBasic(wx.Panel): + """ + Button that changes the image shown if you move your mouse over it. + It redraws the background of the parent Panel, if this is an imagepanel with + a variable self.bitmap. + """ + + def __init__(self, menuItem, *args, **kw): + self.selected = False + self.menuItem = menuItem + self.triblerStyles = TriblerStyles.getInstance() + if len(args) == 0: + pre = wx.PrePanel() + # the Create step is done by XRC. + self.PostCreate(pre) + self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate) + else: + wx.Panel.__init__(self, *args, **kw) + self._PostInit() + + def OnCreate(self, event): + self.Unbind(wx.EVT_WINDOW_CREATE) + wx.CallAfter(self._PostInit) + event.Skip() + return True + + def _PostInit(self): + # Do all init here + self.guiUtility = GUIUtility.getInstance() + self.Bind(wx.EVT_MOUSE_EVENTS, self.mouseAction) + self.Bind(wx.EVT_PAINT, self.OnPaint) + if not self.menuItem: + self.Bind(wx.EVT_LEFT_UP, self.ClickedButton) + + self.dcRollOver = 0 + + if self.GetParent().GetName() == 'filterStandard': + self.SetMinSize((-1,15)) + self.SetSize((-1,15)) +# self.SetMinSize((60,17)) + else: + self.SetMinSize(self.GetSize()) + + self.GetParent().Layout() + self.Refresh(True) + self.Update() + + + def setSelected(self, sel): + if sel != self.selected: + self.selected = sel + if self.menuItem: + self.dcRollOver = 0 + self.Refresh() + + def isSelected(self): + return self.selected + + def mouseAction(self, event): + event.Skip() + if event.Entering() and not self.selected: + self.dcRollOver = 1 + self.Refresh() + elif event.Leaving() and not self.selected: + self.dcRollOver = 0 + self.Refresh() + + def ClickedButton(self, event): + name = self.GetName() + event.Skip() + self.guiUtility.buttonClicked(event) +# self.guiUtility.detailsTabClicked(name) + + def OnPaint(self, evt): + dc = wx.BufferedPaintDC(self) + if self.dcRollOver == 0 : + dc.SetBackground(wx.Brush(self.triblerStyles.textButtonLeft(style = 'bgColour'))) + if self.dcRollOver == 1 or self.selected == True: + dc.SetBackground(wx.Brush(self.triblerStyles.textButtonLeft(style = 'bgColour2'))) + + dc.SetTextForeground(self.triblerStyles.textButtonLeft(style = 'textColour')) + dc.SetFont(self.triblerStyles.textButtonLeft(style = 'font')) + + dc.Clear() + + dc.DrawText(self.GetName(), 2, 2) + +class TextButton(TextButtonBasic): + def __init__(self, *args, **kw): + menuItem = False + TextButtonBasic.__init__(self, menuItem, *args, 
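# Sketch of the rollover-drawing pattern the TextButton classes share: the
# mouse handler only flips a flag and calls Refresh(); all drawing happens in
# OnPaint through a wx.BufferedPaintDC, choosing a background per state. The
# colours here are placeholders for the TriblerStyles values used above.
import wx

class RolloverLabel(wx.Panel):
    def __init__(self, parent, label):
        wx.Panel.__init__(self, parent, -1, size=(120, 20), name=label)
        self.hover = False
        self.Bind(wx.EVT_MOUSE_EVENTS, self.OnMouse)
        self.Bind(wx.EVT_PAINT, self.OnPaint)

    def OnMouse(self, event):
        event.Skip()
        if event.Entering():
            self.hover = True
            self.Refresh()
        elif event.Leaving():
            self.hover = False
            self.Refresh()

    def OnPaint(self, event):
        dc = wx.BufferedPaintDC(self)
        bg = wx.Colour(230, 230, 230) if self.hover else wx.WHITE
        dc.SetBackground(wx.Brush(bg))
        dc.Clear()
        dc.DrawText(self.GetName(), 2, 2)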
**kw) + +class TextButtonFilter(TextButtonBasic): + def __init__(self, *args, **kw): + menuItem = False + TextButtonBasic.__init__(self, menuItem, *args, **kw) + + +class TextButtonLeft(TextButtonBasic): + def __init__(self, parent, addItem = False, icon=False, *args, **kw): + + self.mm = IconsManager.getInstance() + self.icon = None + self.leftBtnMenuIcon = None + + + menuItem = True + self.addItem = addItem + self.extraMenu = False + + + TextButtonBasic.__init__(self, menuItem, parent, *args, **kw) + if icon: +# if self.GetName() == 'Highspeed': + self.AddLeftButtonIcon(True) + +# self.AddLeftButtonMenuIcon(False) + + + def AddLeftButtonIcon(self, False): + print 'tb> addleftButtonIcon' + print self.GetName() + if self.GetName() == 'Start page': + self.icon = self.mm.MENUICONHOME + elif self.GetName() == 'Stats': + self.icon = self.mm.MENUICONSTATS + elif self.GetName() == 'Profile': + self.icon = self.mm.MENUICONPROFILE + elif self.GetName() == 'All Downloads': + self.icon = self.mm.MENUICONALLDOWNLOADS + elif self.GetName() == 'Highspeed': + self.icon = self.mm.MENUICONPLAYLIST + elif self.GetName() == 'All Friends': + self.icon = self.mm.MENUICONALLFRIENDS + elif self.GetName() == 'All Favorites': + self.icon = self.mm.MENUICONPLAYLIST + elif self.GetName() == 'All Subscriptions': + self.icon = self.mm.MENUICONALLSUBSCRIPTIONS + elif self.GetName() == 'Tribler 5': + self.icon = self.mm.MENUICONGROUPS + elif self.GetName() == 'Tribler 4': + self.icon = self.mm.MENUICONGROUPS + elif self.GetName() == '< Tribler 4': + self.icon = self.mm.MENUICONGROUPS + + + + def AddLeftButtonMenuIcon(self, enabled): + if enabled: + self.leftBtnMenuIcon = self.mm.LEFTBUTTONMENU + else: + self.leftBtnMenuIcon = None +# if enabled: +# self.expanded = True +# self.enabled = self.mm.H1EXPANDEDTRUE +# else: +# self.expanded = False +# self.enabled = self.mm.H1EXPANDEDFALSE + + + + def AddButtonLeftMenu(self, active): + self.active = active + if self.active: + self.buttonIcon = self.mm.ADDMENUITEM + else: + self.buttonIcon = None + + self.Refresh() + + + def OnPaint(self, evt): + # overriding the OnPaint funcion in TextButton + dc = wx.BufferedPaintDC(self) + + if self.dcRollOver == 0 : +# dc.SetBrush(wx.Brush(wx.BLACK)) + + dc.SetBackground(wx.Brush(self.triblerStyles.textButtonLeft(style = 'bgColour'))) + dc.Clear() + + if self.dcRollOver == 1 or self.selected == True: + dc.SetBackground(wx.Brush(self.triblerStyles.textButtonLeft(style = 'bgColour'))) +# dc.SetBackground(wx.Brush(self.triblerStyles.textButtonLeft(style = 'bgColour2'))) + dc.Clear() +# + dc.SetBrush(wx.Brush(self.triblerStyles.textButtonLeft(style = 'bgColour2'))) + dc.DrawRectangle(0, 0, 200, 20) + +# dc.SetBackground(wx.Brush(self.triblerStyles.textButtonLeft(style = 'bgColour2'))) + + if self.addItem: + dc.SetFont(self.triblerStyles.textButtonLeft(style = 'fontAdd')) + dc.SetTextForeground(self.triblerStyles.textButtonLeft(style = 'textColourAdd')) + else: + dc.SetFont(self.triblerStyles.textButtonLeft(style = 'font')) + dc.SetTextForeground(self.triblerStyles.textButtonLeft(style = 'textColour')) + + dc.DrawText(self.GetName(), 48, 3) +# if self.leftBtnMenuIcon != None: +# dc.DrawBitmap(self.leftBtnMenuIcon, 140, 2, True) + if self.icon != None: + dc.DrawBitmap(self.icon, 20, 0, True) + + +class TextButtonLeftH1(TextButtonBasic): + def __init__(self, *args, **kw): + self.mm = IconManager.getInstance() + self.enabled = True + self.Enabled(self.enabled) + self.active = False + menuItem = True + self.buttonIcon = None + + 
TextButtonBasic.__init__(self, menuItem, *args, **kw) + +# self.AddButtonIcon(False) + +# def AddButtonIcon(self, active): +# self.active = active +# if self.active: +# self.buttonIcon = self.mm.ADDMENUITEM +# else: +# self.buttonIcon = None +# +# self.Refresh() + + def Enabled(self, enabled): + if enabled: + self.expanded = True + self.enabled = self.mm.H1EXPANDEDTRUE + else: + self.expanded = False + self.enabled = self.mm.H1EXPANDEDFALSE + + + + def OnPaint(self, evt): + # overriding the OnPaint funcion in TextButton + dc = wx.BufferedPaintDC(self) + if self.dcRollOver == 0 : + dc.SetBackground(wx.Brush(self.triblerStyles.textButtonLeftH1(style = 'bgColour'))) + if self.dcRollOver == 1 or self.selected == True: + dc.SetBackground(wx.Brush(self.triblerStyles.textButtonLeftH1(style = 'bgColour'))) + + if self.expanded: + dc.SetTextForeground(self.triblerStyles.textButtonLeftH1(style = 'textColour')) + else: + dc.SetTextForeground(self.triblerStyles.textButtonLeftH1(style = 'textColour2')) + + dc.SetFont(self.triblerStyles.textButtonLeftH1(style = 'font')) + dc.Clear() + + dc.DrawText(self.GetName(), 18, 2) + dc.DrawBitmap(self.enabled, 5, 5, True) + + if self.buttonIcon != None: + print 'tb' +# dc.DrawBitmap(self.buttonIcon, 140, 2, True) + + + + diff --git a/tribler-mod/Tribler/Main/vwxGUI/TextEdit.py b/tribler-mod/Tribler/Main/vwxGUI/TextEdit.py new file mode 100644 index 0000000..04178ac --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/TextEdit.py @@ -0,0 +1,124 @@ +from time import localtime, strftime +import wx, os, sys +#import wx, math, time, os, sys, threading +import wx, os +from font import * +from traceback import print_exc +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility +from Tribler.Main.vwxGUI.TriblerStyles import TriblerStyles +#from wx.lib.stattext import GenStaticText + +DEBUG = False + +class TextEdit(wx.Panel): + """ + Text item that is used for moderations. + """ + + def __init__(self, *args, **kw): + self.selected = False + self.colours = [wx.Colour(102,102,102), wx.WHITE] + if len(args) == 0: + pre = wx.PrePanel() + # the Create step is done by XRC. 
+ self.PostCreate(pre) + self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate) + else: + wx.Panel.__init__(self, *args, **kw) + self.OnCreate() + self._PostInit() + + def OnCreate(self, event= None): + print 'tb > OnCreate TEXTEDIT' + self.Unbind(wx.EVT_WINDOW_CREATE) + self.triblerStyles = TriblerStyles.getInstance() + self.addComponents() + self.editState = True + self.editSetToggle(False) + + + + wx.CallAfter(self._PostInit) + if event != None: + event.Skip() + return True + + def addComponents(self): + self.sizer = wx.BoxSizer(wx.VERTICAL) + self.textctrl = wx.TextCtrl(self, -1, style = wx.TE_MULTILINE|wx.NO_BORDER|wx.TE_RICH ) + + self.triblerStyles.setLightText(self.textctrl) +# self.textctrl.SetForegroundColour(wx.Colour(180,180,180)) +# self.textctrl.SetBackgroundColour(wx.Colour(102,102,102)) +# self.triblerStyles.setLightText(self.sizeField, text= '---') +# wx.TE_NO_VSCROLL + +# self.textctrl = wx.StaticText(self, -1, label) + self.textctrl.Bind(wx.EVT_MOUSE_EVENTS, self.mouseAction) + self.textctrl.Bind(wx.EVT_LEFT_UP, self.ClickedButton) + + + self.sizer.Add(self.textctrl, 1, wx.ALIGN_LEFT|wx.EXPAND, 5) +# self.SetMinSize((60,200)) + self.SetBackgroundColour(self.colours[int(self.selected)]) + + self.SetSizer(self.sizer) + + self.SetAutoLayout(1) + self.Layout() + self.Refresh(True) + self.Update() + + + def _PostInit(self): + # Do all init here + self.guiUtility = GUIUtility.getInstance() +# self.Bind(wx.EVT_MOUSE_EVENTS, self.mouseAction) + self.Bind(wx.EVT_LEFT_UP, self.ClickedButton) + + + #self.Show() + + def setText(self, text=''): + self.textctrl.SetValue(text) + + + + def editSetToggle(self, newState): + + if newState != self.editState: + if newState == True: + # enable Edit + colour = self.triblerStyles.colours(2) + self.textctrl.SetEditable(True) + + elif newState == False: + # disable Edit + colour = self.triblerStyles.colours(1) + self.textctrl.SetEditable(False) + + self.SetBackgroundColour(colour) + self.textctrl.SetBackgroundColour(colour) + + self.editState = newState + + + def isSelected(self): + return self.selected + + def mouseAction(self, event): + event.Skip() + if event.Entering() and not self.selected: + #print 'TextButton: enter' + self.SetBackgroundColour(self.colours[1]) + self.Refresh() + elif event.Leaving() and not self.selected: + #print 'TextButton: leaving' + self.SetBackgroundColour(self.colours[0]) + self.Refresh() + + def ClickedButton(self, event): + name = self.GetName() + event.Skip() + #self.guiUtility.buttonClicked(event) + self.guiUtility.detailsTabClicked(name) diff --git a/tribler-mod/Tribler/Main/vwxGUI/TextEdit.py.bak b/tribler-mod/Tribler/Main/vwxGUI/TextEdit.py.bak new file mode 100644 index 0000000..a50e2b3 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/TextEdit.py.bak @@ -0,0 +1,123 @@ +import wx, os, sys +#import wx, math, time, os, sys, threading +import wx, os +from font import * +from traceback import print_exc +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility +from Tribler.Main.vwxGUI.TriblerStyles import TriblerStyles +#from wx.lib.stattext import GenStaticText + +DEBUG = False + +class TextEdit(wx.Panel): + """ + Text item that is used for moderations. + """ + + def __init__(self, *args, **kw): + self.selected = False + self.colours = [wx.Colour(102,102,102), wx.WHITE] + if len(args) == 0: + pre = wx.PrePanel() + # the Create step is done by XRC. 
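# Minimal sketch of the read-only/editable switch implemented by
# editSetToggle above: SetEditable() controls whether the user can type and
# the background colour signals the state. The colours are placeholders for
# the TriblerStyles.colours() values used in the real code; the helper name
# set_editable is illustrative.
import wx

def set_editable(panel, textctrl, editable):
    colour = wx.WHITE if editable else wx.Colour(230, 230, 230)
    textctrl.SetEditable(editable)
    textctrl.SetBackgroundColour(colour)
    panel.SetBackgroundColour(colour)
    panel.Refresh()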
+ self.PostCreate(pre) + self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate) + else: + wx.Panel.__init__(self, *args, **kw) + self.OnCreate() + self._PostInit() + + def OnCreate(self, event= None): + print 'tb > OnCreate TEXTEDIT' + self.Unbind(wx.EVT_WINDOW_CREATE) + self.triblerStyles = TriblerStyles.getInstance() + self.addComponents() + self.editState = True + self.editSetToggle(False) + + + + wx.CallAfter(self._PostInit) + if event != None: + event.Skip() + return True + + def addComponents(self): + self.sizer = wx.BoxSizer(wx.VERTICAL) + self.textctrl = wx.TextCtrl(self, -1, style = wx.TE_MULTILINE|wx.NO_BORDER|wx.TE_RICH ) + + self.triblerStyles.setLightText(self.textctrl) +# self.textctrl.SetForegroundColour(wx.Colour(180,180,180)) +# self.textctrl.SetBackgroundColour(wx.Colour(102,102,102)) +# self.triblerStyles.setLightText(self.sizeField, text= '---') +# wx.TE_NO_VSCROLL + +# self.textctrl = wx.StaticText(self, -1, label) + self.textctrl.Bind(wx.EVT_MOUSE_EVENTS, self.mouseAction) + self.textctrl.Bind(wx.EVT_LEFT_UP, self.ClickedButton) + + + self.sizer.Add(self.textctrl, 1, wx.ALIGN_LEFT|wx.EXPAND, 5) +# self.SetMinSize((60,200)) + self.SetBackgroundColour(self.colours[int(self.selected)]) + + self.SetSizer(self.sizer) + + self.SetAutoLayout(1) + self.Layout() + self.Refresh(True) + self.Update() + + + def _PostInit(self): + # Do all init here + self.guiUtility = GUIUtility.getInstance() +# self.Bind(wx.EVT_MOUSE_EVENTS, self.mouseAction) + self.Bind(wx.EVT_LEFT_UP, self.ClickedButton) + + + #self.Show() + + def setText(self, text=''): + self.textctrl.SetValue(text) + + + + def editSetToggle(self, newState): + + if newState != self.editState: + if newState == True: + # enable Edit + colour = self.triblerStyles.colours(2) + self.textctrl.SetEditable(True) + + elif newState == False: + # disable Edit + colour = self.triblerStyles.colours(1) + self.textctrl.SetEditable(False) + + self.SetBackgroundColour(colour) + self.textctrl.SetBackgroundColour(colour) + + self.editState = newState + + + def isSelected(self): + return self.selected + + def mouseAction(self, event): + event.Skip() + if event.Entering() and not self.selected: + #print 'TextButton: enter' + self.SetBackgroundColour(self.colours[1]) + self.Refresh() + elif event.Leaving() and not self.selected: + #print 'TextButton: leaving' + self.SetBackgroundColour(self.colours[0]) + self.Refresh() + + def ClickedButton(self, event): + name = self.GetName() + event.Skip() + #self.guiUtility.buttonClicked(event) + self.guiUtility.detailsTabClicked(name) diff --git a/tribler-mod/Tribler/Main/vwxGUI/TopSearchPanel.py b/tribler-mod/Tribler/Main/vwxGUI/TopSearchPanel.py new file mode 100644 index 0000000..d29288c --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/TopSearchPanel.py @@ -0,0 +1,653 @@ +from time import localtime, strftime +# generated by wx.Glade 0.6.3 on Thu Feb 05 15:42:50 2009 +# +# Arno: please edit TopSearchPanel.xrc in some XRC editor, then generate +# code for it using wxGlade (single python file mode), and copy the +# relevant parts from it into this file, see "MAINLY GENERATED" line below. +# +# We need this procedure as there is a bug in wxPython 2.8.x on Win32 that +# cause the painting/fitting of the panel to fail. All elements wind up in +# the topleft corner. This is a wx bug as it also happens in XRCED when you +# display the panel twice. 
+# + +import sys +import wx +import os +import math +from traceback import print_exc + +# begin wx.Glade: extracode +# end wx.Glade + +from bgPanel import bgPanel +from tribler_topButton import * +from GuiUtility import GUIUtility +from Tribler.Main.Utility.utility import Utility +from Tribler.__init__ import LIBRARYNAME +from NewStaticText import NewStaticText +from MyText import MyText + + + +wx.SystemOptions_SetOption("msw.remap","1") + + + +# fonts +if sys.platform == 'darwin': # mac os x + FONT_SIZE_SR_MSG=11 + FONT_SIZE_TOTAL_DOWN=9 + FONT_SIZE_TOTAL_UP=9 + FONT_SIZE_RESULTS=10 + FONT_SIZE_SETTINGS=10 + FONT_SIZE_MY_FILES=10 + FONT_SIZE_FAMILY_FILTER=10 + FONT_SIZE_FILES_FRIENDS=11 + FONT_SIZE_SHARING_REPUTATION=11 + FONT_SIZE_SEARCH_RESULTS=12 + FONT_SIZE_SEARCH=14 + +elif sys.platform == 'win32': + + FONT_SIZE_SR_MSG=8 + FONT_SIZE_TOTAL_DOWN=7 + FONT_SIZE_TOTAL_UP=7 + FONT_SIZE_RESULTS=8 + FONT_SIZE_SETTINGS=8 + FONT_SIZE_MY_FILES=8 + FONT_SIZE_FAMILY_FILTER=8 + FONT_SIZE_FILES_FRIENDS=8 + FONT_SIZE_SHARING_REPUTATION=8 + FONT_SIZE_SEARCH_RESULTS=8 + FONT_SIZE_SEARCH=10 + +else: + + FONT_SIZE_SR_MSG=8 + FONT_SIZE_TOTAL_DOWN=7 + FONT_SIZE_TOTAL_UP=7 + FONT_SIZE_RESULTS=8 + FONT_SIZE_SETTINGS=8 + FONT_SIZE_MY_FILES=8 + FONT_SIZE_FAMILY_FILTER=8 + FONT_SIZE_FILES_FRIENDS=8 + FONT_SIZE_SHARING_REPUTATION=8 + FONT_SIZE_SEARCH_RESULTS=8 + FONT_SIZE_SEARCH=10 + + +DEBUG = False + +class TopSearchPanel(bgPanel): + def __init__(self, *args, **kwds): + if DEBUG: + print >> sys.stderr , "TopSearchPanel: __init__" + bgPanel.__init__(self,*args,**kwds) + self.guiUtility = GUIUtility.getInstance() + self.utility = self.guiUtility.utility + self.installdir = self.utility.getPath() + self.frame = None + self.first = True + self.rep = 0 + self.count=0 + self.sr_msg = None + + def set_frame(self,frame): + self.frame = frame + + def custom_init(self): + # animated gif for search results + if sys.platform != 'darwin': + if sys.platform == 'win32': + ag_fname = os.path.join(self.utility.getPath(),'Tribler','Main','vwxGUI','images','5.0','search_new_windows.gif') + else: + ag_fname = os.path.join(self.utility.getPath(),'Tribler','Main','vwxGUI','images','5.0','search_new.gif') + self.ag = wx.animate.GIFAnimationCtrl(self.go.GetParent(), -1, ag_fname) + vsizer = wx.BoxSizer(wx.VERTICAL) + vsizer.AddSpacer(wx.Size(0,5)) + vsizer.Add(self.ag,0, 0, 0) + hsizer = self.go.GetContainingSizer() + hsizer.Add(vsizer,0,0,0) + hsizer.Layout() + + hide_names = [self.ag,self.newFile,self.seperator] + for name in hide_names: + name.Hide() + + + # family filter + #print >> sys.stderr , "FF" , self.utility.config.Read('family_filter', "boolean") + if self.utility.config.Read('family_filter', "boolean"): + self.familyfilter.SetLabel('Family Filter:ON') + else: + self.familyfilter.SetLabel('Family Filter:OFF') + + + + # binding events + self.searchField.Bind(wx.EVT_KEY_DOWN, self.OnSearchKeyDown) + self.go.Bind(wx.EVT_LEFT_UP, self.OnSearchKeyDown) + self.help.Bind(wx.EVT_LEFT_UP, self.helpClick) + self.familyfilter.Bind(wx.EVT_LEFT_UP,self.toggleFamilyFilter) + + if sys.platform == 'linux2' or sys.platform == 'darwin': # mouse over implementation on linux and mac + self.results.Bind(wx.EVT_MOUSE_EVENTS, self.OnResults) + self.settings.Bind(wx.EVT_MOUSE_EVENTS, self.OnSettings) + self.my_files.Bind(wx.EVT_MOUSE_EVENTS, self.OnLibrary) + self.Bind(wx.EVT_MOUSE_EVENTS, self.OnTopPanel) + self.sr_msg.Bind(wx.EVT_LEFT_UP, self.sr_msgClick) + else: + self.results.Bind(wx.EVT_LEFT_UP, self.viewResults) + 
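# Sketch of the "searching" spinner set up in custom_init and driven by
# OnSearchKeyDown/OnAGTimer above: a wx.animate.GIFAnimationCtrl is started
# when a search is issued and a wx.Timer stops it again after roughly ten
# seconds (100 ticks of 100 ms), since no further results arrive after that.
# The gif path is a placeholder; the class name SearchSpinner is illustrative.
import wx
import wx.animate

class SearchSpinner(object):
    def __init__(self, parent, gif_path):
        self.ag = wx.animate.GIFAnimationCtrl(parent, -1, gif_path)
        self.ag.Hide()
        self.count = 0
        self.timer = wx.Timer(parent)
        parent.Bind(wx.EVT_TIMER, self.OnTick, self.timer)

    def start(self):
        self.count = 0
        self.ag.Show()
        self.ag.Play()
        self.timer.Start(100)            # tick every 100 ms

    def OnTick(self, event):
        self.count += 1
        if self.count >= 100:            # about 10 seconds
            self.ag.Stop()
            self.ag.Hide()
            self.timer.Stop()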
self.settings.Bind(wx.EVT_LEFT_UP, self.viewSettings) + self.my_files.Bind(wx.EVT_LEFT_UP, self.viewLibrary) + self.sharing_reputation.Bind(wx.EVT_LEFT_UP, self.sr_msgClick) + + + def OnSearchKeyDown(self,event): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TopSearchPanel: OnSearchKeyDown" + + if event.GetEventObject().GetName() == 'text': + keycode = event.GetKeyCode() + else: + keycode = None + + if self.searchField.GetValue().strip() != '' and (keycode == wx.WXK_RETURN or event.GetEventObject().GetName() == 'Search_new' or event.GetEventObject().GetName() == 'Search_new_win'): + if self.first: + self.first=False + + self.tribler_logo2.Show() + self.sharing_reputation.Show() + self.help.Show() + self.frame.hsizer = self.sr_indicator.GetContainingSizer() + self.frame.Layout() + if sys.platform == 'win32': + self.createBackgroundImage() + ##else: + ## self.createBackgroundImage('top_search_grey.png') + self.srgradient.Show() + self.sr_indicator.Show() + self.frame.standardOverview.Show() + self.seperator.Show() + self.familyfilter.Show() + self.search_results.Show() + if sys.platform == 'win32': + self.results.setBlank(False) + self.results.setToggled(True) + else: + self.results.SetFont(wx.Font(FONT_SIZE_RESULTS+1, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + self.results.SetLabel('Search Results') + self.results.SetForegroundColour((0,105,156)) + + self.ag.Show() + self.ag.Play() + + # Timer to stop animation after 10 seconds. No results will come + # in after that + self.count = 0. + self.agtimer = wx.Timer(self) + self.Bind(wx.EVT_TIMER, self.OnAGTimer) + self.agtimer.Start(100) + + if sys.platform != 'darwin': + self.frame.videoframe.show_videoframe() + self.frame.videoparentpanel.Show() + + self.frame.pagerPanel.Show() + + + + if sys.platform == 'win32': + self.settings.setToggled(False) + self.my_files.setToggled(False) + self.results.setToggled(True) + else: + self.settings.SetForegroundColour((255,51,0)) + self.my_files.SetForegroundColour((255,51,0)) + self.settings.SetFont(wx.Font(FONT_SIZE_SETTINGS, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + self.my_files.SetFont(wx.Font(FONT_SIZE_MY_FILES, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + + + self.guiUtility.standardFilesOverview() + + if sys.platform == 'win32': + self.Refresh() + self.Layout() + + # Arno: delay actual search so the painting is faster. + wx.CallAfter(self.guiUtility.dosearch) + else: + if not keycode == wx.WXK_BACK: + try: + wx.CallAfter(self.autocomplete) + except: + pass # don't break the input field if something with autocomplete goes awkward + event.Skip() # Nicolas: not enough into wx to know if this should stay even though we're doing someething in here now + + def autocomplete(self): + """appends the most frequent completion according to + buddycast clicklog to the current input. 
+ sets the appended characters to "selected" such that they are + automatically deleted as the user continues typing""" + input = self.searchField.GetValue() + terms = input.split(" ") + # only autocomplete if the last term in the input contains more than one character + if len(terms[-1])>1: + completion = self.guiUtility.complete(terms[-1]) + if completion: + l = len(input) + self.searchField.SetValue(input + completion) + self.searchField.SetSelection(l,l+len(completion)) + + ##def OnSearchResultsPressed(self, event): + ## self.guiUtility.OnResultsClicked() + + def OnAGTimer(self,event): + self.count = self.count + 1 + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", self.count + if self.count == 100: + self.ag.Stop() + self.ag.Hide() + self.agtimer.Stop() + + + def sr_msgClick(self,event=None): + + if self.rep < 0.33: + title = self.utility.lang.get('sharing_reputation_information_title') + msg = self.utility.lang.get('sharing_reputation_poor') + + dlg = wx.MessageDialog(None, msg, title, wx.OK|wx.ICON_WARNING) + + + result = dlg.ShowModal() + dlg.Destroy() + + + + def helpClick(self,event=None): + title = self.utility.lang.get('sharing_reputation_information_title') + msg = self.utility.lang.get('sharing_reputation_information_message') + + dlg = wx.MessageDialog(None, msg, title, wx.OK|wx.ICON_INFORMATION) + result = dlg.ShowModal() + dlg.Destroy() + + def OnResults(self,event): + if sys.platform == 'darwin' and self.count < 100: + self.ag.Play() + self.ag.Show() + if event.LeftDown() and self.guiUtility.guiPage != None: + self.guiUtility.standardFilesOverview() + colour = wx.Colour(0,105,156) + self.results.SetForegroundColour(colour) + self.results.SetFont(wx.Font(FONT_SIZE_RESULTS+1, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + if self.guiUtility.guiPage != 'settings': + self.settings.SetFont(wx.Font(FONT_SIZE_SETTINGS, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + if self.guiUtility.guiPage != 'my_files': + self.my_files.SetFont(wx.Font(FONT_SIZE_MY_FILES, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + + + def OnSettings(self,event): + if event.LeftDown(): + self.guiUtility.settingsOverview() + colour = wx.Colour(0,105,156) + self.settings.SetForegroundColour(colour) + self.settings.SetFont(wx.Font(FONT_SIZE_SETTINGS+1, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + if self.guiUtility.guiPage != 'my_files': + self.my_files.SetFont(wx.Font(FONT_SIZE_MY_FILES, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + if self.guiUtility.guiPage != 'search_results': + self.results.SetFont(wx.Font(FONT_SIZE_RESULTS, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + + + + def OnLibrary(self,event): + if event.LeftDown(): + self.guiUtility.standardLibraryOverview() + colour = wx.Colour(0,105,156) + self.my_files.SetForegroundColour(colour) + self.my_files.SetFont(wx.Font(FONT_SIZE_MY_FILES+1, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + if self.guiUtility.guiPage != 'settings': + self.settings.SetFont(wx.Font(FONT_SIZE_SETTINGS, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + if self.guiUtility.guiPage != 'search_results': + self.results.SetFont(wx.Font(FONT_SIZE_RESULTS, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + + + + def OnTopPanel(self, event): + if self.guiUtility.guiPage != 'search_results': + self.results.SetForegroundColour(wx.Colour(255,51,0)) + self.results.SetFont(wx.Font(FONT_SIZE_RESULTS, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + if self.guiUtility.guiPage != 'settings': + self.settings.SetForegroundColour(wx.Colour(255,51,0)) + 
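# Sketch of the inline-completion trick used by autocomplete above: the
# suggested suffix is appended to the search field and then selected, so the
# user's next keystroke overwrites it automatically. complete_term stands in
# for guiUtility.complete (the BuddyCast clicklog lookup); the helper name
# apply_completion is illustrative.
def apply_completion(searchField, complete_term):
    text = searchField.GetValue()
    last_term = text.split(" ")[-1]
    if len(last_term) > 1:               # only complete reasonably long terms
        suffix = complete_term(last_term)
        if suffix:
            pos = len(text)
            searchField.SetValue(text + suffix)
            searchField.SetSelection(pos, pos + len(suffix))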
self.settings.SetFont(wx.Font(FONT_SIZE_SETTINGS, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + if self.guiUtility.guiPage != 'my_files': + self.my_files.SetForegroundColour(wx.Colour(255,51,0)) + self.my_files.SetFont(wx.Font(FONT_SIZE_MY_FILES, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + + + def viewResults(self,event): + self.results.setToggled(True) + self.settings.setToggled(False) + self.my_files.setToggled(False) + self.guiUtility.standardFilesOverview() + + def viewSettings(self,event): + self.results.setToggled(False) + self.settings.setToggled(True) + self.my_files.setToggled(False) + self.guiUtility.settingsOverview() + + def viewLibrary(self,event): + self.results.setToggled(False) + self.settings.setToggled(False) + self.my_files.setToggled(True) + self.guiUtility.standardLibraryOverview() + + def toggleFamilyFilter(self,event): + self.guiUtility.toggleFamilyFilter() + + + def setReputation(self, rep): + self.rep = rep + + + def updateReputation(self, rep): # used on windows only + self.setReputation(rep) + if rep < -0.33: + self.sharing_reputation.setState(0) + elif rep < 0.33: + self.sharing_reputation.setState(1) + else: + self.sharing_reputation.setState(2) + self.Refresh() + + + + + + + def Bitmap(self,path,type): + namelist = path.split("/") + path = os.path.join(self.installdir,LIBRARYNAME,"Main","vwxGUI",*namelist) + return wx.Bitmap(path,type) + + def _PostInit(self): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TopSearchPanel: OnCreate" + + bgPanel._PostInit(self) + +# MAINLY GENERATED BELOW, replace wxStaticBitmap, etc. with wx.StaticBitmap +# and replace wx.BitMap with self.Bitmap +# +# What makes this code (either as Python or as XRC fail is the last statement: +# self.SetSizer(object_1) +# should be +# self.SetSizerAndFit(object_1) +# ---------------------------------------------------------------------------------------- + + self.searchField = wx.TextCtrl(self, -1, "", style=wx.TE_PROCESS_ENTER) + self.newFile = wx.StaticBitmap(self, -1, self.Bitmap("images/5.0/iconSaved.png", wx.BITMAP_TYPE_ANY)) + self.total_down = wx.StaticText(self, -1, "0B Down") + self.total_up = wx.StaticText(self, -1, "0B Up") + + if sys.platform == 'win32': + self.search_results = MyText(self, "",wx.BLACK, wx.Font(FONT_SIZE_SEARCH_RESULTS, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, "")) + self.search_results.Hide() + self.files_friends = wx.StaticBitmap(self, -1, self.Bitmap("images/5.0/search_files.png", wx.BITMAP_TYPE_ANY)) + #self.files_friends = MyText(self, "Search Files",wx.BLACK, wx.Font(FONT_SIZE_FILES_FRIENDS, wx.SWISS, wx.NORMAL, wx.BOLD, 0, "Nimbus Sans L")) + self.go = tribler_topButton(self,-1,name = 'Search_new_win') + self.srgradient = wx.StaticBitmap(self, -1, self.Bitmap("images/5.0/SRgradient_new_win.png", wx.BITMAP_TYPE_ANY)) + self.familyfilter = SwitchButton(self, -1, name = 'familyfilter_win') + #self.familyfilter = NewStaticText(self, "Family Filter:", wx.BLACK, wx.Font(FONT_SIZE_FAMILY_FILTER, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + #self.sharing_reputation = NewStaticText(self, "Sharing Reputation: ", wx.BLACK, wx.Font(FONT_SIZE_SHARING_REPUTATION, wx.SWISS, wx.NORMAL, wx.BOLD, 0, "Nimbus Sans L")) + #self.sharing_reputation = wx.StaticBitmap(self, -1, self.Bitmap("images/5.0/sharing_reputation_win.png", wx.BITMAP_TYPE_ANY)) + self.sharing_reputation = SharingButton(self, -1, name = 'sr') + self.help = wx.StaticBitmap(self, -1, self.Bitmap("images/5.0/help_win.png", wx.BITMAP_TYPE_ANY)) + self.sr_indicator = 
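# The mapping behind updateReputation above, written out as a pure function:
# a sharing reputation in [-1, 1] falls into one of three indicator states
# with cut-offs at -0.33 and 0.33. The function name is illustrative.
def reputation_state(rep):
    if rep < -0.33:
        return 0        # poor
    elif rep < 0.33:
        return 1        # average
    else:
        return 2        # good

# reputation_state(-0.5) -> 0, reputation_state(0.0) -> 1, reputation_state(0.7) -> 2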
wx.StaticBitmap(self, -1, self.Bitmap("images/5.0/SRind2_win.png", wx.BITMAP_TYPE_ANY)) + self.settings = ClickButton(self, -1 , name = 'settings_win') + #self.settings = NewStaticText(self, "Settings", wx.Colour(255,51,0), wx.Font(FONT_SIZE_SETTINGS, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + self.my_files = ClickButton(self, -1, name = "my_files_win") + #self.my_files = NewStaticText(self, "My Files", wx.Colour(255,51,0), wx.Font(FONT_SIZE_MY_FILES, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + self.results = ClickButton(self, -1, name = "results_win") + self.seperator = wx.StaticBitmap(self, -1, self.Bitmap("images/5.0/seperator_win.png", wx.BITMAP_TYPE_ANY)) + self.seperator2 = wx.StaticBitmap(self, -1, self.Bitmap("images/5.0/seperator_win.png", wx.BITMAP_TYPE_ANY)) + self.tribler_logo2 = wx.StaticBitmap(self, -1, self.Bitmap("images/logo4video2_win.png", wx.BITMAP_TYPE_ANY)) + else: + self.files_friends = wx.StaticText(self, -1, "Search Files") + self.go = tribler_topButton(self,-1,name = 'Search_new') + self.srgradient = wx.StaticBitmap(self, -1, self.Bitmap("images/5.0/SRgradient_new.png", wx.BITMAP_TYPE_ANY)) + self.familyfilter = wx.StaticText(self, -1, "Family Filter:") + #if sys.platform == 'win32': + # self.search_results = NewStaticText(self, "", wx.BLACK, wx.Font(FONT_SIZE_SEARCH_RESULTS, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "")) + #else: + self.sharing_reputation = wx.StaticText(self, -1, "Sharing Reputation: ") + self.sr_msg = wx.StaticText(self, -1, "") + self.help = wx.StaticBitmap(self, -1, self.Bitmap("images/5.0/help.png", wx.BITMAP_TYPE_ANY)) + self.sr_indicator = wx.StaticBitmap(self, -1, self.Bitmap("images/5.0/SRind2.png", wx.BITMAP_TYPE_ANY)) + self.settings = wx.StaticText(self, -1, "Settings") + self.my_files = wx.StaticText(self, -1, "My Files") + self.results = wx.StaticText(self, -1, " ") + self.seperator = wx.StaticBitmap(self, -1, self.Bitmap("images/5.0/seperator.png", wx.BITMAP_TYPE_ANY)) + self.seperator2 = wx.StaticBitmap(self, -1, self.Bitmap("images/5.0/seperator.png", wx.BITMAP_TYPE_ANY)) + self.tribler_logo2 = wx.StaticBitmap(self, -1, self.Bitmap("images/logo4video2.png", wx.BITMAP_TYPE_ANY)) + self.search_results = wx.StaticText(self, -1, "") + if sys.platform == 'darwin': + ag_fname = os.path.join(self.utility.getPath(),'Tribler','Main','vwxGUI','images','5.0','search_new_windows.gif') + self.ag = wx.animate.GIFAnimationCtrl(self, -1, ag_fname) + + self.__set_properties() + + + self.__do_layout() + # end wx.Glade + + # OUR CODE + self.custom_init() + + + self.Layout() + self.frame.Layout() + + def __set_properties(self): + # begin wx.Glade: MyPanel.__set_properties + self.SetSize((1000,90)) + self.SetBackgroundColour(wx.Colour(255, 255, 255)) + self.searchField.SetMinSize((320,23)) + self.searchField.SetForegroundColour(wx.Colour(0, 0, 0)) + self.searchField.SetFont(wx.Font(FONT_SIZE_SEARCH, wx.MODERN, wx.NORMAL, wx.NORMAL, 0, "Verdana")) + self.searchField.SetFocus() + self.go.SetMinSize((50,24)) + self.go.SetBackgroundColour((230,230,230)) + self.go.Refresh() + self.total_down.SetFont(wx.Font(FONT_SIZE_TOTAL_DOWN, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + self.total_up.SetFont(wx.Font(FONT_SIZE_TOTAL_UP, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + self.results.SetFont(wx.Font(FONT_SIZE_RESULTS, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + self.total_down.SetToolTipString('Total Download') + self.total_up.SetToolTipString('Total Upload') + if sys.platform == 'win32': + self.results.setBlank(True) ## + 
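# The widget-creation block above instantiates different control classes per
# platform (owner-drawn buttons such as ClickButton on win32, plain
# wx.StaticText elsewhere), and custom_init binds events along the same split.
# A compact way to express that choice, sketched with the names used above
# (make_nav_label is illustrative):
import sys
import wx

def make_nav_label(parent, text, win32_cls):
    if sys.platform == 'win32':
        return win32_cls(parent, -1, name=text)      # owner-drawn, toggleable
    return wx.StaticText(parent, -1, text)           # simple text label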
self.settings.SetMinSize((50,14)) + self.my_files.SetMinSize((45,14)) + self.results.SetMinSize((91,11)) + self.total_down.SetBackgroundColour((235,235,235)) + self.total_down.SetMinSize((55,12)) + self.total_up.SetMinSize((50,12)) + self.total_up.SetBackgroundColour((235,235,235)) + self.search_results.SetBackgroundColour(wx.Colour(wx.TRANSPARENT)) + self.guiUtility.toggleFamilyFilter(True) + else: + self.familyfilter.SetMinSize((100,15)) + self.familyfilter.SetFont(wx.Font(FONT_SIZE_FAMILY_FILTER, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + self.settings.SetMinSize((60,15)) + self.settings.SetForegroundColour(wx.Colour(255, 51, 0)) + self.settings.SetFont(wx.Font(FONT_SIZE_SETTINGS, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + self.my_files.SetMinSize((60,15)) + self.my_files.SetForegroundColour(wx.Colour(255, 51, 0)) + self.my_files.SetFont(wx.Font(FONT_SIZE_MY_FILES, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + self.sharing_reputation.SetFont(wx.Font(FONT_SIZE_SHARING_REPUTATION, wx.SWISS, wx.NORMAL, wx.BOLD, 0, "Nimbus Sans L")) + self.results.SetMinSize((100,15)) + self.results.SetForegroundColour(wx.Colour(255, 51, 0)) + self.sr_msg.SetFont(wx.Font(FONT_SIZE_SR_MSG, wx.SWISS, wx.NORMAL, wx.BOLD, 0, "Nimbus Sans L")) + self.search_results.SetMinSize((100,15)) + self.search_results.SetFont(wx.Font(FONT_SIZE_SEARCH_RESULTS, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, "")) + self.files_friends.SetFont(wx.Font(FONT_SIZE_FILES_FRIENDS, wx.SWISS, wx.NORMAL, wx.BOLD, 0, "Nimbus Sans L")) + + # end wx.Glade + + + def __do_layout(self): + # begin wx.Glade: MyPanel.__do_layout + object_1 = wx.BoxSizer(wx.HORIZONTAL) + object_12 = wx.BoxSizer(wx.VERTICAL) + object_11 = wx.BoxSizer(wx.VERTICAL) + object_10 = wx.BoxSizer(wx.VERTICAL) + object_2 = wx.BoxSizer(wx.HORIZONTAL) + object_7 = wx.BoxSizer(wx.VERTICAL) + object_14 = wx.BoxSizer(wx.HORIZONTAL) + object_9 = wx.BoxSizer(wx.HORIZONTAL) + object_8 = wx.BoxSizer(wx.HORIZONTAL) + object_3 = wx.BoxSizer(wx.VERTICAL) + object_5 = wx.BoxSizer(wx.HORIZONTAL) + object_6 = wx.BoxSizer(wx.VERTICAL) + object_4 = wx.BoxSizer(wx.HORIZONTAL) + object_13 = wx.BoxSizer(wx.HORIZONTAL) + object_15 = wx.BoxSizer(wx.VERTICAL) + object_16 = wx.BoxSizer(wx.VERTICAL) + object_17 = wx.BoxSizer(wx.VERTICAL) + object_18 = wx.BoxSizer(wx.VERTICAL) + + object_1.Add((10, 0), 0, 0, 0) + object_3.Add((0, 20), 0, 0, 0) + object_3.Add(self.files_friends, 0, 0, 0) + if sys.platform == 'win32': + object_3.Add((0, 8), 0, 0, 0) + else: + object_3.Add((0, 5), 0, 0, 0) + object_4.Add(self.searchField, 0, wx.LEFT, -2) + + if sys.platform == 'darwin': + object_4.Add((6, 0), 0, 0, 0) + else: + object_4.Add((2, 0), 0, 0, 0) + + + object_4.Add(self.go, 0, 0, 0) + object_4.Add((2,0), 0, 0, 0) + if sys.platform == 'darwin': + object_4.Add(self.ag, 0, wx.TOP, 5) + object_3.Add(object_4, 0, 0, 0) + if sys.platform == 'win32': + object_6.Add((0, 2), 0, 0, 0) + else: + object_6.Add((0, 0), 0, 0, 0) + object_6.Add(self.familyfilter, 0, 0, 0) + object_5.Add(object_6, 0, 0, 0) + if sys.platform == 'win32': + object_5.Add((150, 0), 1, 0, 0) + else: + object_5.Add((120, 0), 1, 0, 0) + if sys.platform == 'darwin': + object_16.Add((0, 2), 0, 0, 0) + object_16.Add(self.search_results, 0, 0, 0) + object_5.Add(object_16, 0, wx.ALIGN_RIGHT, 0) + else: + object_5.Add(self.search_results, 0, wx.ALIGN_RIGHT, 0) + object_3.Add(object_5, 0, 0, 0) + object_2.Add(object_3, 0, wx.EXPAND, 0) + if sys.platform == 'win32': + object_2.Add((45, 0), 0, 0, 0) + else: + object_2.Add((40, 0), 0, 0, 0) + 
object_7.Add((0, 20), 0, 0, 0) + object_7.Add(object_14, 0, 0, 0) + object_14.Add(self.sharing_reputation, 0, 0, 0) + if sys.platform != 'win32': + object_14.Add(self.sr_msg, 0, wx.LEFT, 10) + object_7.Add((0, 5), 0, 0, 0) + object_8.Add(self.srgradient, 0, 0, 0) + object_8.Add((5, 0), 0, 0, 0) + object_8.Add(self.help, 0, 0, 0) + object_7.Add(object_8, 0, 0, 0) + object_7.Add((0, 5), 0, 0, 0) + object_9.Add((50, 0), 0, 0, 0) + object_9.Add(self.sr_indicator, 0, wx.TOP, -16) + object_7.Add(object_9, 0, 0, 0) + if sys.platform == 'win32': + object_7.Add(object_13, 0, wx.TOP, -3) + else: + object_7.Add(object_13, 0, 0, 0) + object_2.Add(object_7, 0, wx.EXPAND, 0) + object_1.Add(object_2, 1, wx.EXPAND, 0) + + if sys.platform == 'win32': + space = 123 + elif sys.platform == 'linux2': + space = 7 + else: + space = 130 + + object_1.Add((space, 0), 0, 0, 0) # Arno: set to a specific value to get right view on win32 + + # seperator + object_11.Add((0, 20), 0, 0, 0) + object_11.Add(self.seperator, 0, 0, 0) + + # seperator2 + object_18.Add((0, 20), 0, 0, 0) + object_18.Add(self.seperator2, 0, 0, 0) + + object_17.Add((0, 20), 0, 0, 0) + object_17.Add(self.results, 0, 0, 0) + object_17.Add((0, 0), 0, 0, 0) + + object_10.Add((0, 20), 0, 0, 0) + object_10.Add(self.settings, 0, 0, 0) + object_10.Add((0, 0), 0, 0, 0) + + object_12.Add((0, 20), 0, 0, 0) + object_12.Add(self.my_files, 0, 0, 0) + object_12.Add((0, 0), 0, 0, 0) + object_12.Add(self.newFile, 0, 0, 0) + + + object_1.Add(object_17, 0, 0, 0) + if sys.platform == 'win32': + object_1.Add((10, 0), 0, 0, 0) + else: + object_1.Add((7, 0), 0, 0, 0) + object_1.Add(object_11, 0, 0, 0) + object_1.Add((7, 0), 0, 0, 0) + object_1.Add(object_10, 0, 0, 0) + object_1.Add((7, 0), 0, 0, 0) + object_1.Add(object_18, 0, 0, 0) + object_1.Add((7, 0), 0, 0, 0) + object_1.Add(object_12, 0, 0, 0) + object_1.Add((7, 0), 0, 0, 0) + + object_15.Add((0,3), 0, 0, 0) + object_15.Add(self.tribler_logo2, 0, 0, 0) + object_1.Add(object_15, 0, 0, 0) + object_1.Add((10, 0), 0, 0, 0) + ##object_13.Add(self.left, 0, 0, 0) + object_13.Add((0, 0), 0, 0, 0) + object_13.Add(self.total_down, 0, 0, 0) + if sys.platform == 'darwin': + object_13.Add((14, 0), 0, 0, 0) + else: + object_13.Add((8, 0), 0, 0, 0) + object_13.Add(self.total_up, 0, 0, 0) + object_13.Add((0, 0), 0, 0, 0) + ##object_13.Add(self.right, 0, 0, 0) + + # OUR CODE ARNO50: Check diff in defs + if sys.platform != 'linux2': + self.SetSizerAndFit(object_1) + else: + self.SetSizer(object_1) + # end wx.Glade + +# end of class MyPanel + diff --git a/tribler-mod/Tribler/Main/vwxGUI/TopSearchPanel.py.bak b/tribler-mod/Tribler/Main/vwxGUI/TopSearchPanel.py.bak new file mode 100644 index 0000000..868f71e --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/TopSearchPanel.py.bak @@ -0,0 +1,652 @@ +# generated by wx.Glade 0.6.3 on Thu Feb 05 15:42:50 2009 +# +# Arno: please edit TopSearchPanel.xrc in some XRC editor, then generate +# code for it using wxGlade (single python file mode), and copy the +# relevant parts from it into this file, see "MAINLY GENERATED" line below. +# +# We need this procedure as there is a bug in wxPython 2.8.x on Win32 that +# cause the painting/fitting of the panel to fail. All elements wind up in +# the topleft corner. This is a wx bug as it also happens in XRCED when you +# display the panel twice. 
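The header comment above describes the wxPython 2.8.x win32 bug this panel works around: the plain SetSizer() call that wxGlade emits can leave every child widget stacked in the top-left corner, so the hand-copied code ends with SetSizerAndFit() on non-Linux platforms instead (see the "OUR CODE ARNO50" block at the end of __do_layout). A minimal sketch of that workaround, assuming wxPython 2.8 "classic"; SketchPanel and its single StaticText child are illustrative names, not taken from TopSearchPanel:

    import sys
    import wx

    class SketchPanel(wx.Panel):
        def __init__(self, parent):
            wx.Panel.__init__(self, parent, -1)
            sizer = wx.BoxSizer(wx.HORIZONTAL)
            sizer.Add(wx.StaticText(self, -1, "Search Files"), 0, wx.ALL, 5)
            # SetSizer() alone triggers the win32 fitting problem described above;
            # fitting the sizer forces a proper layout pass.
            if sys.platform != 'linux2':
                self.SetSizerAndFit(sizer)
            else:
                self.SetSizer(sizer)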
+# + +import sys +import wx +import os +import math +from traceback import print_exc + +# begin wx.Glade: extracode +# end wx.Glade + +from bgPanel import bgPanel +from tribler_topButton import * +from GuiUtility import GUIUtility +from Tribler.Main.Utility.utility import Utility +from Tribler.__init__ import LIBRARYNAME +from NewStaticText import NewStaticText +from MyText import MyText + + + +wx.SystemOptions_SetOption("msw.remap","1") + + + +# fonts +if sys.platform == 'darwin': # mac os x + FONT_SIZE_SR_MSG=11 + FONT_SIZE_TOTAL_DOWN=9 + FONT_SIZE_TOTAL_UP=9 + FONT_SIZE_RESULTS=10 + FONT_SIZE_SETTINGS=10 + FONT_SIZE_MY_FILES=10 + FONT_SIZE_FAMILY_FILTER=10 + FONT_SIZE_FILES_FRIENDS=11 + FONT_SIZE_SHARING_REPUTATION=11 + FONT_SIZE_SEARCH_RESULTS=12 + FONT_SIZE_SEARCH=14 + +elif sys.platform == 'win32': + + FONT_SIZE_SR_MSG=8 + FONT_SIZE_TOTAL_DOWN=7 + FONT_SIZE_TOTAL_UP=7 + FONT_SIZE_RESULTS=8 + FONT_SIZE_SETTINGS=8 + FONT_SIZE_MY_FILES=8 + FONT_SIZE_FAMILY_FILTER=8 + FONT_SIZE_FILES_FRIENDS=8 + FONT_SIZE_SHARING_REPUTATION=8 + FONT_SIZE_SEARCH_RESULTS=8 + FONT_SIZE_SEARCH=10 + +else: + + FONT_SIZE_SR_MSG=8 + FONT_SIZE_TOTAL_DOWN=7 + FONT_SIZE_TOTAL_UP=7 + FONT_SIZE_RESULTS=8 + FONT_SIZE_SETTINGS=8 + FONT_SIZE_MY_FILES=8 + FONT_SIZE_FAMILY_FILTER=8 + FONT_SIZE_FILES_FRIENDS=8 + FONT_SIZE_SHARING_REPUTATION=8 + FONT_SIZE_SEARCH_RESULTS=8 + FONT_SIZE_SEARCH=10 + + +DEBUG = False + +class TopSearchPanel(bgPanel): + def __init__(self, *args, **kwds): + if DEBUG: + print >> sys.stderr , "TopSearchPanel: __init__" + bgPanel.__init__(self,*args,**kwds) + self.guiUtility = GUIUtility.getInstance() + self.utility = self.guiUtility.utility + self.installdir = self.utility.getPath() + self.frame = None + self.first = True + self.rep = 0 + self.count=0 + self.sr_msg = None + + def set_frame(self,frame): + self.frame = frame + + def custom_init(self): + # animated gif for search results + if sys.platform != 'darwin': + if sys.platform == 'win32': + ag_fname = os.path.join(self.utility.getPath(),'Tribler','Main','vwxGUI','images','5.0','search_new_windows.gif') + else: + ag_fname = os.path.join(self.utility.getPath(),'Tribler','Main','vwxGUI','images','5.0','search_new.gif') + self.ag = wx.animate.GIFAnimationCtrl(self.go.GetParent(), -1, ag_fname) + vsizer = wx.BoxSizer(wx.VERTICAL) + vsizer.AddSpacer(wx.Size(0,5)) + vsizer.Add(self.ag,0, 0, 0) + hsizer = self.go.GetContainingSizer() + hsizer.Add(vsizer,0,0,0) + hsizer.Layout() + + hide_names = [self.ag,self.newFile,self.seperator] + for name in hide_names: + name.Hide() + + + # family filter + #print >> sys.stderr , "FF" , self.utility.config.Read('family_filter', "boolean") + if self.utility.config.Read('family_filter', "boolean"): + self.familyfilter.SetLabel('Family Filter:ON') + else: + self.familyfilter.SetLabel('Family Filter:OFF') + + + + # binding events + self.searchField.Bind(wx.EVT_KEY_DOWN, self.OnSearchKeyDown) + self.go.Bind(wx.EVT_LEFT_UP, self.OnSearchKeyDown) + self.help.Bind(wx.EVT_LEFT_UP, self.helpClick) + self.familyfilter.Bind(wx.EVT_LEFT_UP,self.toggleFamilyFilter) + + if sys.platform == 'linux2' or sys.platform == 'darwin': # mouse over implementation on linux and mac + self.results.Bind(wx.EVT_MOUSE_EVENTS, self.OnResults) + self.settings.Bind(wx.EVT_MOUSE_EVENTS, self.OnSettings) + self.my_files.Bind(wx.EVT_MOUSE_EVENTS, self.OnLibrary) + self.Bind(wx.EVT_MOUSE_EVENTS, self.OnTopPanel) + self.sr_msg.Bind(wx.EVT_LEFT_UP, self.sr_msgClick) + else: + self.results.Bind(wx.EVT_LEFT_UP, self.viewResults) + 
self.settings.Bind(wx.EVT_LEFT_UP, self.viewSettings) + self.my_files.Bind(wx.EVT_LEFT_UP, self.viewLibrary) + self.sharing_reputation.Bind(wx.EVT_LEFT_UP, self.sr_msgClick) + + + def OnSearchKeyDown(self,event): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TopSearchPanel: OnSearchKeyDown" + + if event.GetEventObject().GetName() == 'text': + keycode = event.GetKeyCode() + else: + keycode = None + + if self.searchField.GetValue().strip() != '' and (keycode == wx.WXK_RETURN or event.GetEventObject().GetName() == 'Search_new' or event.GetEventObject().GetName() == 'Search_new_win'): + if self.first: + self.first=False + + self.tribler_logo2.Show() + self.sharing_reputation.Show() + self.help.Show() + self.frame.hsizer = self.sr_indicator.GetContainingSizer() + self.frame.Layout() + if sys.platform == 'win32': + self.createBackgroundImage() + ##else: + ## self.createBackgroundImage('top_search_grey.png') + self.srgradient.Show() + self.sr_indicator.Show() + self.frame.standardOverview.Show() + self.seperator.Show() + self.familyfilter.Show() + self.search_results.Show() + if sys.platform == 'win32': + self.results.setBlank(False) + self.results.setToggled(True) + else: + self.results.SetFont(wx.Font(FONT_SIZE_RESULTS+1, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + self.results.SetLabel('Search Results') + self.results.SetForegroundColour((0,105,156)) + + self.ag.Show() + self.ag.Play() + + # Timer to stop animation after 10 seconds. No results will come + # in after that + self.count = 0. + self.agtimer = wx.Timer(self) + self.Bind(wx.EVT_TIMER, self.OnAGTimer) + self.agtimer.Start(100) + + if sys.platform != 'darwin': + self.frame.videoframe.show_videoframe() + self.frame.videoparentpanel.Show() + + self.frame.pagerPanel.Show() + + + + if sys.platform == 'win32': + self.settings.setToggled(False) + self.my_files.setToggled(False) + self.results.setToggled(True) + else: + self.settings.SetForegroundColour((255,51,0)) + self.my_files.SetForegroundColour((255,51,0)) + self.settings.SetFont(wx.Font(FONT_SIZE_SETTINGS, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + self.my_files.SetFont(wx.Font(FONT_SIZE_MY_FILES, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + + + self.guiUtility.standardFilesOverview() + + if sys.platform == 'win32': + self.Refresh() + self.Layout() + + # Arno: delay actual search so the painting is faster. + wx.CallAfter(self.guiUtility.dosearch) + else: + if not keycode == wx.WXK_BACK: + try: + wx.CallAfter(self.autocomplete) + except: + pass # don't break the input field if something with autocomplete goes awkward + event.Skip() # Nicolas: not enough into wx to know if this should stay even though we're doing someething in here now + + def autocomplete(self): + """appends the most frequent completion according to + buddycast clicklog to the current input. 
+ sets the appended characters to "selected" such that they are + automatically deleted as the user continues typing""" + input = self.searchField.GetValue() + terms = input.split(" ") + # only autocomplete if the last term in the input contains more than one character + if len(terms[-1])>1: + completion = self.guiUtility.complete(terms[-1]) + if completion: + l = len(input) + self.searchField.SetValue(input + completion) + self.searchField.SetSelection(l,l+len(completion)) + + ##def OnSearchResultsPressed(self, event): + ## self.guiUtility.OnResultsClicked() + + def OnAGTimer(self,event): + self.count = self.count + 1 + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", self.count + if self.count == 100: + self.ag.Stop() + self.ag.Hide() + self.agtimer.Stop() + + + def sr_msgClick(self,event=None): + + if self.rep < 0.33: + title = self.utility.lang.get('sharing_reputation_information_title') + msg = self.utility.lang.get('sharing_reputation_poor') + + dlg = wx.MessageDialog(None, msg, title, wx.OK|wx.ICON_WARNING) + + + result = dlg.ShowModal() + dlg.Destroy() + + + + def helpClick(self,event=None): + title = self.utility.lang.get('sharing_reputation_information_title') + msg = self.utility.lang.get('sharing_reputation_information_message') + + dlg = wx.MessageDialog(None, msg, title, wx.OK|wx.ICON_INFORMATION) + result = dlg.ShowModal() + dlg.Destroy() + + def OnResults(self,event): + if sys.platform == 'darwin' and self.count < 100: + self.ag.Play() + self.ag.Show() + if event.LeftDown() and self.guiUtility.guiPage != None: + self.guiUtility.standardFilesOverview() + colour = wx.Colour(0,105,156) + self.results.SetForegroundColour(colour) + self.results.SetFont(wx.Font(FONT_SIZE_RESULTS+1, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + if self.guiUtility.guiPage != 'settings': + self.settings.SetFont(wx.Font(FONT_SIZE_SETTINGS, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + if self.guiUtility.guiPage != 'my_files': + self.my_files.SetFont(wx.Font(FONT_SIZE_MY_FILES, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + + + def OnSettings(self,event): + if event.LeftDown(): + self.guiUtility.settingsOverview() + colour = wx.Colour(0,105,156) + self.settings.SetForegroundColour(colour) + self.settings.SetFont(wx.Font(FONT_SIZE_SETTINGS+1, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + if self.guiUtility.guiPage != 'my_files': + self.my_files.SetFont(wx.Font(FONT_SIZE_MY_FILES, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + if self.guiUtility.guiPage != 'search_results': + self.results.SetFont(wx.Font(FONT_SIZE_RESULTS, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + + + + def OnLibrary(self,event): + if event.LeftDown(): + self.guiUtility.standardLibraryOverview() + colour = wx.Colour(0,105,156) + self.my_files.SetForegroundColour(colour) + self.my_files.SetFont(wx.Font(FONT_SIZE_MY_FILES+1, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + if self.guiUtility.guiPage != 'settings': + self.settings.SetFont(wx.Font(FONT_SIZE_SETTINGS, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + if self.guiUtility.guiPage != 'search_results': + self.results.SetFont(wx.Font(FONT_SIZE_RESULTS, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + + + + def OnTopPanel(self, event): + if self.guiUtility.guiPage != 'search_results': + self.results.SetForegroundColour(wx.Colour(255,51,0)) + self.results.SetFont(wx.Font(FONT_SIZE_RESULTS, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + if self.guiUtility.guiPage != 'settings': + self.settings.SetForegroundColour(wx.Colour(255,51,0)) + 
self.settings.SetFont(wx.Font(FONT_SIZE_SETTINGS, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + if self.guiUtility.guiPage != 'my_files': + self.my_files.SetForegroundColour(wx.Colour(255,51,0)) + self.my_files.SetFont(wx.Font(FONT_SIZE_MY_FILES, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + + + def viewResults(self,event): + self.results.setToggled(True) + self.settings.setToggled(False) + self.my_files.setToggled(False) + self.guiUtility.standardFilesOverview() + + def viewSettings(self,event): + self.results.setToggled(False) + self.settings.setToggled(True) + self.my_files.setToggled(False) + self.guiUtility.settingsOverview() + + def viewLibrary(self,event): + self.results.setToggled(False) + self.settings.setToggled(False) + self.my_files.setToggled(True) + self.guiUtility.standardLibraryOverview() + + def toggleFamilyFilter(self,event): + self.guiUtility.toggleFamilyFilter() + + + def setReputation(self, rep): + self.rep = rep + + + def updateReputation(self, rep): # used on windows only + self.setReputation(rep) + if rep < -0.33: + self.sharing_reputation.setState(0) + elif rep < 0.33: + self.sharing_reputation.setState(1) + else: + self.sharing_reputation.setState(2) + self.Refresh() + + + + + + + def Bitmap(self,path,type): + namelist = path.split("/") + path = os.path.join(self.installdir,LIBRARYNAME,"Main","vwxGUI",*namelist) + return wx.Bitmap(path,type) + + def _PostInit(self): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TopSearchPanel: OnCreate" + + bgPanel._PostInit(self) + +# MAINLY GENERATED BELOW, replace wxStaticBitmap, etc. with wx.StaticBitmap +# and replace wx.BitMap with self.Bitmap +# +# What makes this code (either as Python or as XRC fail is the last statement: +# self.SetSizer(object_1) +# should be +# self.SetSizerAndFit(object_1) +# ---------------------------------------------------------------------------------------- + + self.searchField = wx.TextCtrl(self, -1, "", style=wx.TE_PROCESS_ENTER) + self.newFile = wx.StaticBitmap(self, -1, self.Bitmap("images/5.0/iconSaved.png", wx.BITMAP_TYPE_ANY)) + self.total_down = wx.StaticText(self, -1, "0B Down") + self.total_up = wx.StaticText(self, -1, "0B Up") + + if sys.platform == 'win32': + self.search_results = MyText(self, "",wx.BLACK, wx.Font(FONT_SIZE_SEARCH_RESULTS, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, "")) + self.search_results.Hide() + self.files_friends = wx.StaticBitmap(self, -1, self.Bitmap("images/5.0/search_files.png", wx.BITMAP_TYPE_ANY)) + #self.files_friends = MyText(self, "Search Files",wx.BLACK, wx.Font(FONT_SIZE_FILES_FRIENDS, wx.SWISS, wx.NORMAL, wx.BOLD, 0, "Nimbus Sans L")) + self.go = tribler_topButton(self,-1,name = 'Search_new_win') + self.srgradient = wx.StaticBitmap(self, -1, self.Bitmap("images/5.0/SRgradient_new_win.png", wx.BITMAP_TYPE_ANY)) + self.familyfilter = SwitchButton(self, -1, name = 'familyfilter_win') + #self.familyfilter = NewStaticText(self, "Family Filter:", wx.BLACK, wx.Font(FONT_SIZE_FAMILY_FILTER, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + #self.sharing_reputation = NewStaticText(self, "Sharing Reputation: ", wx.BLACK, wx.Font(FONT_SIZE_SHARING_REPUTATION, wx.SWISS, wx.NORMAL, wx.BOLD, 0, "Nimbus Sans L")) + #self.sharing_reputation = wx.StaticBitmap(self, -1, self.Bitmap("images/5.0/sharing_reputation_win.png", wx.BITMAP_TYPE_ANY)) + self.sharing_reputation = SharingButton(self, -1, name = 'sr') + self.help = wx.StaticBitmap(self, -1, self.Bitmap("images/5.0/help_win.png", wx.BITMAP_TYPE_ANY)) + self.sr_indicator = 
wx.StaticBitmap(self, -1, self.Bitmap("images/5.0/SRind2_win.png", wx.BITMAP_TYPE_ANY)) + self.settings = ClickButton(self, -1 , name = 'settings_win') + #self.settings = NewStaticText(self, "Settings", wx.Colour(255,51,0), wx.Font(FONT_SIZE_SETTINGS, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + self.my_files = ClickButton(self, -1, name = "my_files_win") + #self.my_files = NewStaticText(self, "My Files", wx.Colour(255,51,0), wx.Font(FONT_SIZE_MY_FILES, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + self.results = ClickButton(self, -1, name = "results_win") + self.seperator = wx.StaticBitmap(self, -1, self.Bitmap("images/5.0/seperator_win.png", wx.BITMAP_TYPE_ANY)) + self.seperator2 = wx.StaticBitmap(self, -1, self.Bitmap("images/5.0/seperator_win.png", wx.BITMAP_TYPE_ANY)) + self.tribler_logo2 = wx.StaticBitmap(self, -1, self.Bitmap("images/logo4video2_win.png", wx.BITMAP_TYPE_ANY)) + else: + self.files_friends = wx.StaticText(self, -1, "Search Files") + self.go = tribler_topButton(self,-1,name = 'Search_new') + self.srgradient = wx.StaticBitmap(self, -1, self.Bitmap("images/5.0/SRgradient_new.png", wx.BITMAP_TYPE_ANY)) + self.familyfilter = wx.StaticText(self, -1, "Family Filter:") + #if sys.platform == 'win32': + # self.search_results = NewStaticText(self, "", wx.BLACK, wx.Font(FONT_SIZE_SEARCH_RESULTS, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "")) + #else: + self.sharing_reputation = wx.StaticText(self, -1, "Sharing Reputation: ") + self.sr_msg = wx.StaticText(self, -1, "") + self.help = wx.StaticBitmap(self, -1, self.Bitmap("images/5.0/help.png", wx.BITMAP_TYPE_ANY)) + self.sr_indicator = wx.StaticBitmap(self, -1, self.Bitmap("images/5.0/SRind2.png", wx.BITMAP_TYPE_ANY)) + self.settings = wx.StaticText(self, -1, "Settings") + self.my_files = wx.StaticText(self, -1, "My Files") + self.results = wx.StaticText(self, -1, " ") + self.seperator = wx.StaticBitmap(self, -1, self.Bitmap("images/5.0/seperator.png", wx.BITMAP_TYPE_ANY)) + self.seperator2 = wx.StaticBitmap(self, -1, self.Bitmap("images/5.0/seperator.png", wx.BITMAP_TYPE_ANY)) + self.tribler_logo2 = wx.StaticBitmap(self, -1, self.Bitmap("images/logo4video2.png", wx.BITMAP_TYPE_ANY)) + self.search_results = wx.StaticText(self, -1, "") + if sys.platform == 'darwin': + ag_fname = os.path.join(self.utility.getPath(),'Tribler','Main','vwxGUI','images','5.0','search_new_windows.gif') + self.ag = wx.animate.GIFAnimationCtrl(self, -1, ag_fname) + + self.__set_properties() + + + self.__do_layout() + # end wx.Glade + + # OUR CODE + self.custom_init() + + + self.Layout() + self.frame.Layout() + + def __set_properties(self): + # begin wx.Glade: MyPanel.__set_properties + self.SetSize((1000,90)) + self.SetBackgroundColour(wx.Colour(255, 255, 255)) + self.searchField.SetMinSize((320,23)) + self.searchField.SetForegroundColour(wx.Colour(0, 0, 0)) + self.searchField.SetFont(wx.Font(FONT_SIZE_SEARCH, wx.MODERN, wx.NORMAL, wx.NORMAL, 0, "Verdana")) + self.searchField.SetFocus() + self.go.SetMinSize((50,24)) + self.go.SetBackgroundColour((230,230,230)) + self.go.Refresh() + self.total_down.SetFont(wx.Font(FONT_SIZE_TOTAL_DOWN, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + self.total_up.SetFont(wx.Font(FONT_SIZE_TOTAL_UP, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + self.results.SetFont(wx.Font(FONT_SIZE_RESULTS, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + self.total_down.SetToolTipString('Total Download') + self.total_up.SetToolTipString('Total Upload') + if sys.platform == 'win32': + self.results.setBlank(True) ## + 
self.settings.SetMinSize((50,14)) + self.my_files.SetMinSize((45,14)) + self.results.SetMinSize((91,11)) + self.total_down.SetBackgroundColour((235,235,235)) + self.total_down.SetMinSize((55,12)) + self.total_up.SetMinSize((50,12)) + self.total_up.SetBackgroundColour((235,235,235)) + self.search_results.SetBackgroundColour(wx.Colour(wx.TRANSPARENT)) + self.guiUtility.toggleFamilyFilter(True) + else: + self.familyfilter.SetMinSize((100,15)) + self.familyfilter.SetFont(wx.Font(FONT_SIZE_FAMILY_FILTER, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + self.settings.SetMinSize((60,15)) + self.settings.SetForegroundColour(wx.Colour(255, 51, 0)) + self.settings.SetFont(wx.Font(FONT_SIZE_SETTINGS, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + self.my_files.SetMinSize((60,15)) + self.my_files.SetForegroundColour(wx.Colour(255, 51, 0)) + self.my_files.SetFont(wx.Font(FONT_SIZE_MY_FILES, wx.SWISS, wx.NORMAL, wx.NORMAL, 0, "UTF-8")) + self.sharing_reputation.SetFont(wx.Font(FONT_SIZE_SHARING_REPUTATION, wx.SWISS, wx.NORMAL, wx.BOLD, 0, "Nimbus Sans L")) + self.results.SetMinSize((100,15)) + self.results.SetForegroundColour(wx.Colour(255, 51, 0)) + self.sr_msg.SetFont(wx.Font(FONT_SIZE_SR_MSG, wx.SWISS, wx.NORMAL, wx.BOLD, 0, "Nimbus Sans L")) + self.search_results.SetMinSize((100,15)) + self.search_results.SetFont(wx.Font(FONT_SIZE_SEARCH_RESULTS, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, "")) + self.files_friends.SetFont(wx.Font(FONT_SIZE_FILES_FRIENDS, wx.SWISS, wx.NORMAL, wx.BOLD, 0, "Nimbus Sans L")) + + # end wx.Glade + + + def __do_layout(self): + # begin wx.Glade: MyPanel.__do_layout + object_1 = wx.BoxSizer(wx.HORIZONTAL) + object_12 = wx.BoxSizer(wx.VERTICAL) + object_11 = wx.BoxSizer(wx.VERTICAL) + object_10 = wx.BoxSizer(wx.VERTICAL) + object_2 = wx.BoxSizer(wx.HORIZONTAL) + object_7 = wx.BoxSizer(wx.VERTICAL) + object_14 = wx.BoxSizer(wx.HORIZONTAL) + object_9 = wx.BoxSizer(wx.HORIZONTAL) + object_8 = wx.BoxSizer(wx.HORIZONTAL) + object_3 = wx.BoxSizer(wx.VERTICAL) + object_5 = wx.BoxSizer(wx.HORIZONTAL) + object_6 = wx.BoxSizer(wx.VERTICAL) + object_4 = wx.BoxSizer(wx.HORIZONTAL) + object_13 = wx.BoxSizer(wx.HORIZONTAL) + object_15 = wx.BoxSizer(wx.VERTICAL) + object_16 = wx.BoxSizer(wx.VERTICAL) + object_17 = wx.BoxSizer(wx.VERTICAL) + object_18 = wx.BoxSizer(wx.VERTICAL) + + object_1.Add((10, 0), 0, 0, 0) + object_3.Add((0, 20), 0, 0, 0) + object_3.Add(self.files_friends, 0, 0, 0) + if sys.platform == 'win32': + object_3.Add((0, 8), 0, 0, 0) + else: + object_3.Add((0, 5), 0, 0, 0) + object_4.Add(self.searchField, 0, wx.LEFT, -2) + + if sys.platform == 'darwin': + object_4.Add((6, 0), 0, 0, 0) + else: + object_4.Add((2, 0), 0, 0, 0) + + + object_4.Add(self.go, 0, 0, 0) + object_4.Add((2,0), 0, 0, 0) + if sys.platform == 'darwin': + object_4.Add(self.ag, 0, wx.TOP, 5) + object_3.Add(object_4, 0, 0, 0) + if sys.platform == 'win32': + object_6.Add((0, 2), 0, 0, 0) + else: + object_6.Add((0, 0), 0, 0, 0) + object_6.Add(self.familyfilter, 0, 0, 0) + object_5.Add(object_6, 0, 0, 0) + if sys.platform == 'win32': + object_5.Add((150, 0), 1, 0, 0) + else: + object_5.Add((120, 0), 1, 0, 0) + if sys.platform == 'darwin': + object_16.Add((0, 2), 0, 0, 0) + object_16.Add(self.search_results, 0, 0, 0) + object_5.Add(object_16, 0, wx.ALIGN_RIGHT, 0) + else: + object_5.Add(self.search_results, 0, wx.ALIGN_RIGHT, 0) + object_3.Add(object_5, 0, 0, 0) + object_2.Add(object_3, 0, wx.EXPAND, 0) + if sys.platform == 'win32': + object_2.Add((45, 0), 0, 0, 0) + else: + object_2.Add((40, 0), 0, 0, 0) + 
object_7.Add((0, 20), 0, 0, 0) + object_7.Add(object_14, 0, 0, 0) + object_14.Add(self.sharing_reputation, 0, 0, 0) + if sys.platform != 'win32': + object_14.Add(self.sr_msg, 0, wx.LEFT, 10) + object_7.Add((0, 5), 0, 0, 0) + object_8.Add(self.srgradient, 0, 0, 0) + object_8.Add((5, 0), 0, 0, 0) + object_8.Add(self.help, 0, 0, 0) + object_7.Add(object_8, 0, 0, 0) + object_7.Add((0, 5), 0, 0, 0) + object_9.Add((50, 0), 0, 0, 0) + object_9.Add(self.sr_indicator, 0, wx.TOP, -16) + object_7.Add(object_9, 0, 0, 0) + if sys.platform == 'win32': + object_7.Add(object_13, 0, wx.TOP, -3) + else: + object_7.Add(object_13, 0, 0, 0) + object_2.Add(object_7, 0, wx.EXPAND, 0) + object_1.Add(object_2, 1, wx.EXPAND, 0) + + if sys.platform == 'win32': + space = 123 + elif sys.platform == 'linux2': + space = 7 + else: + space = 130 + + object_1.Add((space, 0), 0, 0, 0) # Arno: set to a specific value to get right view on win32 + + # seperator + object_11.Add((0, 20), 0, 0, 0) + object_11.Add(self.seperator, 0, 0, 0) + + # seperator2 + object_18.Add((0, 20), 0, 0, 0) + object_18.Add(self.seperator2, 0, 0, 0) + + object_17.Add((0, 20), 0, 0, 0) + object_17.Add(self.results, 0, 0, 0) + object_17.Add((0, 0), 0, 0, 0) + + object_10.Add((0, 20), 0, 0, 0) + object_10.Add(self.settings, 0, 0, 0) + object_10.Add((0, 0), 0, 0, 0) + + object_12.Add((0, 20), 0, 0, 0) + object_12.Add(self.my_files, 0, 0, 0) + object_12.Add((0, 0), 0, 0, 0) + object_12.Add(self.newFile, 0, 0, 0) + + + object_1.Add(object_17, 0, 0, 0) + if sys.platform == 'win32': + object_1.Add((10, 0), 0, 0, 0) + else: + object_1.Add((7, 0), 0, 0, 0) + object_1.Add(object_11, 0, 0, 0) + object_1.Add((7, 0), 0, 0, 0) + object_1.Add(object_10, 0, 0, 0) + object_1.Add((7, 0), 0, 0, 0) + object_1.Add(object_18, 0, 0, 0) + object_1.Add((7, 0), 0, 0, 0) + object_1.Add(object_12, 0, 0, 0) + object_1.Add((7, 0), 0, 0, 0) + + object_15.Add((0,3), 0, 0, 0) + object_15.Add(self.tribler_logo2, 0, 0, 0) + object_1.Add(object_15, 0, 0, 0) + object_1.Add((10, 0), 0, 0, 0) + ##object_13.Add(self.left, 0, 0, 0) + object_13.Add((0, 0), 0, 0, 0) + object_13.Add(self.total_down, 0, 0, 0) + if sys.platform == 'darwin': + object_13.Add((14, 0), 0, 0, 0) + else: + object_13.Add((8, 0), 0, 0, 0) + object_13.Add(self.total_up, 0, 0, 0) + object_13.Add((0, 0), 0, 0, 0) + ##object_13.Add(self.right, 0, 0, 0) + + # OUR CODE ARNO50: Check diff in defs + if sys.platform != 'linux2': + self.SetSizerAndFit(object_1) + else: + self.SetSizer(object_1) + # end wx.Glade + +# end of class MyPanel + diff --git a/tribler-mod/Tribler/Main/vwxGUI/TopSearchPanel.xrc b/tribler-mod/Tribler/Main/vwxGUI/TopSearchPanel.xrc new file mode 100644 index 0000000..c99501e --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/TopSearchPanel.xrc @@ -0,0 +1,358 @@ + + + + + wxHORIZONTAL + + 10,0 + + + + + + + images/5.0/black_spacer.png + + + + + wxHORIZONTAL + + + wxVERTICAL + + 0,20 + + + + images/5.0/search_files.png + 155,255 + + + + + + 0,5 + + + + wxHORIZONTAL + + + + 177,270 + + 320,23 + #000000 + + 10 + modern + + normal + 0 + Verdana + + 1 + + wxLEFT + -2 + + + 2,0 + + + + 502,264 + 24,24 + #FFFFFF + + wxALIGN_LEFT|wxFIXED_MINSIZE + + + 5,0 + + + + + + wxHORIZONTAL + + + wxVERTICAL + + 0,0 + + + + + 100,15 + + 8 + swiss + + normal + 0 + Sans + UTF-8 + + + + + + + 120,0 + + + + + 100,10 + + + 10 + default + + normal + 0 + + + + + wxALIGN_RIGHT + + + + + wxEXPAND|wxFIXED_MINSIZE + + + 100,0 + + + + wxVERTICAL + + 0,20 + + + + + + 8 + swiss + + bold + 0 + Nimbus Sans L + UTF-8 + + + + + 0,5 + + + + 
wxHORIZONTAL + + + images/5.0/SRgradient.png + + + + 5,0 + + + + images/5.0/help.png + + + + wxFIXED_MINSIZE + + + 0,5 + + + + wxHORIZONTAL + + 50,0 + + + + images/5.0/SRindicator.png + + wxTOP + -17 + + + + + + wxHORIZONTAL + + + images/5.0/left.png + #FFFFFF + + + + 0,0 + + + + + + 6 + swiss + + normal + 0 + Sans + UTF-8 + + + + + 5,0 + + + + + + 6 + swiss + + normal + 0 + Sans + UTF-8 + + + + + 0,0 + + + + images/5.0/right.png + #FFFFFF + + + + + + wxEXPAND|wxFIXED_MINSIZE + + + + wxEXPAND + + + 7,0 + + + + wxVERTICAL + + 0,20 + + + + + 50,15 + #FF3300 + + 8 + swiss + + normal + 0 + Sans + UTF-8 + + + + + 0,0 + + + + + 7,0 + + + + wxVERTICAL + + 0,20 + + + + images/5.0/seperator.png + + + + + + 7,0 + + + + wxVERTICAL + + 0,20 + + + + + 50,15 + #FF3300 + + 8 + swiss + + normal + 0 + Sans + UTF-8 + + + + + 0,5 + + + + images/5.0/iconSaved.png + + + + + + 7,0 + + + + wxVERTICAL + + 0,3 + + + + images/logo4video2.png + 10,-7 + + + + + + + + 10,0 + + + 1000,90 + #FFFFFF + + \ No newline at end of file diff --git a/tribler-mod/Tribler/Main/vwxGUI/TriblerProgressbar.py b/tribler-mod/Tribler/Main/vwxGUI/TriblerProgressbar.py new file mode 100644 index 0000000..bcffd29 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/TriblerProgressbar.py @@ -0,0 +1,90 @@ +from time import localtime, strftime +# Written by Jelle Roozenburg, Maarten ten Brinke, Arno Bakker +# see LICENSE.txt for license information + +import wx + +class TriblerProgressbar(wx.Panel): + """ + Progressbar with percentage and ETA + """ + def __init__(self, *args, **kw): + self.backgroundColour = wx.WHITE + self.percentage = 0.0 + self.eta = '?' + self.enabled = True + if len(args) == 0: + pre = wx.PrePanel() + # the Create step is done by XRC. + self.PostCreate(pre) + self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate) + else: + wx.Panel.__init__(self, *args, **kw) + self._PostInit() + + + def OnCreate(self, event): + self.Unbind(wx.EVT_WINDOW_CREATE) + wx.CallAfter(self._PostInit) + event.Skip() + return True + + def _PostInit(self): + # Do all init here + self.createBackgroundImage() + self.Refresh(True) + self.Update() + + + def setPercentage(self, p): + self.percentage = float(p) + self.Refresh() + + def setETA(self, eta): + self.eta = eta + self.Refresh() + + def createBackgroundImage(self): + self.Bind(wx.EVT_PAINT, self.OnPaint) + self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnErase) + + + + def OnErase(self, event): + pass + #event.Skip() + + def setEnabled(self, e): + self.enabled = e + + def OnPaint(self, evt): + obj = evt.GetEventObject() + dc = wx.BufferedPaintDC(obj) + dc.SetBackground(wx.Brush(self.backgroundColour)) + dc.Clear() + if self.enabled: + size = self.GetSize() + fillwidth = int((size[0])*self.percentage/100.0) + + # draw around rect + dc.SetPen(wx.BLACK_PEN) + dc.SetBrush(wx.NullBrush) + dc.DrawRectangle(0,0, size[0], size[1]) + # draw progression rect + dc.SetPen(wx.NullPen) + dc.SetBrush(wx.Brush(wx.Colour(213,213,213))) + dc.DrawRectangle(0,0,fillwidth, size[1]) + dc.SetPen(wx.Pen(wx.Colour(102,102,102), 1)) + dc.DrawLine(fillwidth-1, 0, fillwidth-1, size[1]) + + # print text + dc.SetFont(wx.Font(7, wx.DEFAULT, wx.NORMAL, wx.NORMAL, False)) + percString = '%.1f%%' % self.percentage + textSize = dc.GetTextExtent(percString) + dc.DrawText(percString, 3, (size[1]-textSize[1])/2) + if self.eta.find('unknown') == -1 and not '?' 
in self.eta: + etaSize = dc.GetTextExtent(self.eta) + dc.DrawText(self.eta, size[0]-3-etaSize[0], (size[1]-etaSize[1])/2) + + + diff --git a/tribler-mod/Tribler/Main/vwxGUI/TriblerProgressbar.py.bak b/tribler-mod/Tribler/Main/vwxGUI/TriblerProgressbar.py.bak new file mode 100644 index 0000000..4b99f03 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/TriblerProgressbar.py.bak @@ -0,0 +1,89 @@ +# Written by Jelle Roozenburg, Maarten ten Brinke, Arno Bakker +# see LICENSE.txt for license information + +import wx + +class TriblerProgressbar(wx.Panel): + """ + Progressbar with percentage and ETA + """ + def __init__(self, *args, **kw): + self.backgroundColour = wx.WHITE + self.percentage = 0.0 + self.eta = '?' + self.enabled = True + if len(args) == 0: + pre = wx.PrePanel() + # the Create step is done by XRC. + self.PostCreate(pre) + self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate) + else: + wx.Panel.__init__(self, *args, **kw) + self._PostInit() + + + def OnCreate(self, event): + self.Unbind(wx.EVT_WINDOW_CREATE) + wx.CallAfter(self._PostInit) + event.Skip() + return True + + def _PostInit(self): + # Do all init here + self.createBackgroundImage() + self.Refresh(True) + self.Update() + + + def setPercentage(self, p): + self.percentage = float(p) + self.Refresh() + + def setETA(self, eta): + self.eta = eta + self.Refresh() + + def createBackgroundImage(self): + self.Bind(wx.EVT_PAINT, self.OnPaint) + self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnErase) + + + + def OnErase(self, event): + pass + #event.Skip() + + def setEnabled(self, e): + self.enabled = e + + def OnPaint(self, evt): + obj = evt.GetEventObject() + dc = wx.BufferedPaintDC(obj) + dc.SetBackground(wx.Brush(self.backgroundColour)) + dc.Clear() + if self.enabled: + size = self.GetSize() + fillwidth = int((size[0])*self.percentage/100.0) + + # draw around rect + dc.SetPen(wx.BLACK_PEN) + dc.SetBrush(wx.NullBrush) + dc.DrawRectangle(0,0, size[0], size[1]) + # draw progression rect + dc.SetPen(wx.NullPen) + dc.SetBrush(wx.Brush(wx.Colour(213,213,213))) + dc.DrawRectangle(0,0,fillwidth, size[1]) + dc.SetPen(wx.Pen(wx.Colour(102,102,102), 1)) + dc.DrawLine(fillwidth-1, 0, fillwidth-1, size[1]) + + # print text + dc.SetFont(wx.Font(7, wx.DEFAULT, wx.NORMAL, wx.NORMAL, False)) + percString = '%.1f%%' % self.percentage + textSize = dc.GetTextExtent(percString) + dc.DrawText(percString, 3, (size[1]-textSize[1])/2) + if self.eta.find('unknown') == -1 and not '?' 
in self.eta: + etaSize = dc.GetTextExtent(self.eta) + dc.DrawText(self.eta, size[0]-3-etaSize[0], (size[1]-etaSize[1])/2) + + + diff --git a/tribler-mod/Tribler/Main/vwxGUI/TriblerStyles.py b/tribler-mod/Tribler/Main/vwxGUI/TriblerStyles.py new file mode 100644 index 0000000..368c9a3 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/TriblerStyles.py @@ -0,0 +1,169 @@ +from time import localtime, strftime +import wx, math, time, os, sys, threading +import wx, os +from font import * + +# font sizes +if sys.platform == 'darwin': + FS_LEFTH1 = 12 + FS_HEADER = 11 + FS_FILETITLE = 10 + FS_SIMILARITY = 10 + FS_HEARTRANK = 8 +elif sys.platform == 'linux2': + FS_LEFTH1 = 11 + FS_HEADER = 10 + FS_FILETITLE = 8 + FS_SIMILARITY = 7 + FS_HEARTRANK = 7 +else: + FS_LEFTH1 = 11 + FS_HEADER = 10 + FS_FILETITLE = 9 + FS_SIMILARITY = 10 + FS_HEARTRANK = 7 + +class TriblerStyles: + __single = None + + + def __init__(self): + if TriblerStyles.__single: + raise RuntimeError, "TriblerStyles is singleton" + TriblerStyles.__single = self + + def getInstance(*args, **kw): + if TriblerStyles.__single is None: + TriblerStyles(*args, **kw) + return TriblerStyles.__single + getInstance = staticmethod(getInstance) + + def colours(self, cNumber): + if cNumber == 1: + return wx.Colour(102,102,102) + if cNumber == 2: + return wx.Colour(77,163,184) + + + + def textButtonLeftH1(self, style=''): +# item.SetForegroundColour(wx.Colour(50,50,50)) +## item.SetForegroundColour(wx.Colour(255,255,255)) +# item.SetBackgroundColour(wx.Colour(102,102,102)) +## wxFont(int pointSize, wxFontFamily family, int style, wxFontWeight weight, const bool underline = false, const wxString& faceName = "", wxFontEncoding encoding = wxFONTENCODING_DEFAULT) +# item.SetFont(wx.Font(FS_LEFTH1,FONTFAMILY,wx.NORMAL,wx.BOLD,False,FONTFACE)) +# +# if text != '': +# item.SetLabel(text) + + if style == 'bgColour': + return wx.Colour(102,102,102) + elif style == 'textColour': + return wx.Colour(166,166,166) + elif style == 'textColour2': + return wx.Colour(145,145,145) + elif style == 'font': + return wx.Font(FS_LEFTH1,FONTFAMILY,wx.NORMAL,wx.BOLD,False,FONTFACE) + + return None + + def textButtonLeft(self, style=''): +# item = wx.BufferedPaintDC(self) + if style == 'bgColour': + return wx.Colour(102,102,102) + elif style == 'bgColour2': + return wx.Colour(0,0,0) + elif style == 'textColour': + return wx.Colour(220,220,220) + elif style == 'textColourAdd': + return wx.Colour(160,160,160) + elif style == 'font': + return wx.Font(FS_FILETITLE,FONTFAMILY,wx.NORMAL, wx.NORMAL,False,FONTFACE) + elif style == 'fontAdd': + return wx.Font(FS_FILETITLE,FONTFAMILY,wx.FONTSTYLE_ITALIC, wx.NORMAL,False,FONTFACE) + + return None +# item.SetFont( +# item.Clear() +# item.DrawText(text, 2, 2) +# +### item.SetForegroundColour(wx.Colour(255,255,255)) +#### item.SetForegroundColour(wx.Colour(255,255,255)) +### item.SetBackgroundColour(wx.Colour(102,102,102)) +## wxFont(int pointSize, wxFontFamily family, int style, wxFontWeight weight, const bool underline = false, const wxString& faceName = "", wxFontEncoding encoding = wxFONTENCODING_DEFAULT) +# +# +# if text != '': +# item.SetLabel(text) + + + def titleBar(self, item, text=''): + item.SetForegroundColour(wx.Colour(180,180,180)) + item.SetBackgroundColour(wx.Colour(71,71,71)) + item.SetFont(wx.Font(FS_FILETITLE,FONTFAMILY,wx.NORMAL,FONTWEIGHT,False,FONTFACE)) + + + if text != '': + item.SetLabel(text) + + def setHeaderText(self, item, text=''): + item.SetForegroundColour(wx.Colour(180,180,180)) + 
item.SetBackgroundColour(wx.Colour(102,102,102)) + item.SetFont(wx.Font(FS_HEADER,FONTFAMILY,wx.NORMAL,wx.BOLD,False,FONTFACE)) + + if text != '': + item.SetLabel(text) + + + def setDarkText(self, item, text=''): + item.SetForegroundColour(wx.BLACK) ## color of 'name' 'creation date' etc 51,51,51 + item.SetBackgroundColour(wx.Colour(216,233,240)) ## 102,102,102 + item.SetFont(wx.Font(FS_FILETITLE,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + + if text != '': + item.SetLabel(text) + + def setLightText(self, item, text=''): + # - normal text & + # - left menu items + + item.SetFont(wx.Font(FS_FILETITLE,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + item.SetForegroundColour(wx.Colour(100,100,100)) + item.SetBackgroundColour(wx.Colour(216,233,240)) ## 102,102,102 + + if text != '': + item.SetLabel(text) + + def selected (self, state): + #possible states: + # 1. first row - not selected - not in Library + # 2. second row - not selected - not in Library + # 3. selected - not in Library + # 4. not selected - in Library + # 5. selected - in Library + if state == 1: + colour = wx.Colour(102,102,102) + if state == 2: + colour = wx.Colour(102,102,102) +# colour = wx.Colour(230,230,230) + if state == 3: + colour = wx.Colour(80,70,70) + if state == 4: + colour = wx.Colour(255,255,255) + if state == 5: + colour = wx.Colour(170,80,70) + + return colour + + def sortingColumns (self, state): + # 1. unselected + # 2: selected + BG colour Pictues in column + + if state == 1: + colour = wx.Colour(230,230,230) + if state == 2: + colour = wx.Colour(230,230,230) + + return colour + + diff --git a/tribler-mod/Tribler/Main/vwxGUI/TriblerStyles.py.bak b/tribler-mod/Tribler/Main/vwxGUI/TriblerStyles.py.bak new file mode 100644 index 0000000..eafdf34 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/TriblerStyles.py.bak @@ -0,0 +1,168 @@ +import wx, math, time, os, sys, threading +import wx, os +from font import * + +# font sizes +if sys.platform == 'darwin': + FS_LEFTH1 = 12 + FS_HEADER = 11 + FS_FILETITLE = 10 + FS_SIMILARITY = 10 + FS_HEARTRANK = 8 +elif sys.platform == 'linux2': + FS_LEFTH1 = 11 + FS_HEADER = 10 + FS_FILETITLE = 8 + FS_SIMILARITY = 7 + FS_HEARTRANK = 7 +else: + FS_LEFTH1 = 11 + FS_HEADER = 10 + FS_FILETITLE = 9 + FS_SIMILARITY = 10 + FS_HEARTRANK = 7 + +class TriblerStyles: + __single = None + + + def __init__(self): + if TriblerStyles.__single: + raise RuntimeError, "TriblerStyles is singleton" + TriblerStyles.__single = self + + def getInstance(*args, **kw): + if TriblerStyles.__single is None: + TriblerStyles(*args, **kw) + return TriblerStyles.__single + getInstance = staticmethod(getInstance) + + def colours(self, cNumber): + if cNumber == 1: + return wx.Colour(102,102,102) + if cNumber == 2: + return wx.Colour(77,163,184) + + + + def textButtonLeftH1(self, style=''): +# item.SetForegroundColour(wx.Colour(50,50,50)) +## item.SetForegroundColour(wx.Colour(255,255,255)) +# item.SetBackgroundColour(wx.Colour(102,102,102)) +## wxFont(int pointSize, wxFontFamily family, int style, wxFontWeight weight, const bool underline = false, const wxString& faceName = "", wxFontEncoding encoding = wxFONTENCODING_DEFAULT) +# item.SetFont(wx.Font(FS_LEFTH1,FONTFAMILY,wx.NORMAL,wx.BOLD,False,FONTFACE)) +# +# if text != '': +# item.SetLabel(text) + + if style == 'bgColour': + return wx.Colour(102,102,102) + elif style == 'textColour': + return wx.Colour(166,166,166) + elif style == 'textColour2': + return wx.Colour(145,145,145) + elif style == 'font': + return 
wx.Font(FS_LEFTH1,FONTFAMILY,wx.NORMAL,wx.BOLD,False,FONTFACE) + + return None + + def textButtonLeft(self, style=''): +# item = wx.BufferedPaintDC(self) + if style == 'bgColour': + return wx.Colour(102,102,102) + elif style == 'bgColour2': + return wx.Colour(0,0,0) + elif style == 'textColour': + return wx.Colour(220,220,220) + elif style == 'textColourAdd': + return wx.Colour(160,160,160) + elif style == 'font': + return wx.Font(FS_FILETITLE,FONTFAMILY,wx.NORMAL, wx.NORMAL,False,FONTFACE) + elif style == 'fontAdd': + return wx.Font(FS_FILETITLE,FONTFAMILY,wx.FONTSTYLE_ITALIC, wx.NORMAL,False,FONTFACE) + + return None +# item.SetFont( +# item.Clear() +# item.DrawText(text, 2, 2) +# +### item.SetForegroundColour(wx.Colour(255,255,255)) +#### item.SetForegroundColour(wx.Colour(255,255,255)) +### item.SetBackgroundColour(wx.Colour(102,102,102)) +## wxFont(int pointSize, wxFontFamily family, int style, wxFontWeight weight, const bool underline = false, const wxString& faceName = "", wxFontEncoding encoding = wxFONTENCODING_DEFAULT) +# +# +# if text != '': +# item.SetLabel(text) + + + def titleBar(self, item, text=''): + item.SetForegroundColour(wx.Colour(180,180,180)) + item.SetBackgroundColour(wx.Colour(71,71,71)) + item.SetFont(wx.Font(FS_FILETITLE,FONTFAMILY,wx.NORMAL,FONTWEIGHT,False,FONTFACE)) + + + if text != '': + item.SetLabel(text) + + def setHeaderText(self, item, text=''): + item.SetForegroundColour(wx.Colour(180,180,180)) + item.SetBackgroundColour(wx.Colour(102,102,102)) + item.SetFont(wx.Font(FS_HEADER,FONTFAMILY,wx.NORMAL,wx.BOLD,False,FONTFACE)) + + if text != '': + item.SetLabel(text) + + + def setDarkText(self, item, text=''): + item.SetForegroundColour(wx.BLACK) ## color of 'name' 'creation date' etc 51,51,51 + item.SetBackgroundColour(wx.Colour(216,233,240)) ## 102,102,102 + item.SetFont(wx.Font(FS_FILETITLE,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + + if text != '': + item.SetLabel(text) + + def setLightText(self, item, text=''): + # - normal text & + # - left menu items + + item.SetFont(wx.Font(FS_FILETITLE,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + item.SetForegroundColour(wx.Colour(100,100,100)) + item.SetBackgroundColour(wx.Colour(216,233,240)) ## 102,102,102 + + if text != '': + item.SetLabel(text) + + def selected (self, state): + #possible states: + # 1. first row - not selected - not in Library + # 2. second row - not selected - not in Library + # 3. selected - not in Library + # 4. not selected - in Library + # 5. selected - in Library + if state == 1: + colour = wx.Colour(102,102,102) + if state == 2: + colour = wx.Colour(102,102,102) +# colour = wx.Colour(230,230,230) + if state == 3: + colour = wx.Colour(80,70,70) + if state == 4: + colour = wx.Colour(255,255,255) + if state == 5: + colour = wx.Colour(170,80,70) + + return colour + + def sortingColumns (self, state): + # 1. 
unselected + # 2: selected + BG colour Pictues in column + + if state == 1: + colour = wx.Colour(230,230,230) + if state == 2: + colour = wx.Colour(230,230,230) + + return colour + + diff --git a/tribler-mod/Tribler/Main/vwxGUI/__init__.py b/tribler-mod/Tribler/Main/vwxGUI/__init__.py new file mode 100644 index 0000000..dada7f4 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/__init__.py @@ -0,0 +1,3 @@ +from time import localtime, strftime +# Written by Jelle Roozenburg, Maarten ten Brinke, Arno Bakker +# see LICENSE.txt for license information diff --git a/tribler-mod/Tribler/Main/vwxGUI/__init__.py.bak b/tribler-mod/Tribler/Main/vwxGUI/__init__.py.bak new file mode 100644 index 0000000..2ce5899 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/__init__.py.bak @@ -0,0 +1,2 @@ +# Written by Jelle Roozenburg, Maarten ten Brinke, Arno Bakker +# see LICENSE.txt for license information diff --git a/tribler-mod/Tribler/Main/vwxGUI/bgPanel.py b/tribler-mod/Tribler/Main/vwxGUI/bgPanel.py new file mode 100644 index 0000000..74ba91a --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/bgPanel.py @@ -0,0 +1,155 @@ +from time import localtime, strftime +# Written by Jelle Roozenburg, Maarten ten Brinke, Arno Bakker +# see LICENSE.txt for license information +import wx, os, sys + +DEBUG = True + +class ImagePanelBasic(wx.Panel): + """ + Panel with automatic backgroundimage control. + """ + + __bitmapCache = {} + + def __init__(self, tile, *args, **kw): + self.backgroundColour = wx.Colour(102,102,102) + from Tribler.Main.vwxGUI.GuiUtility import GUIUtility + + self.guiUtility = GUIUtility.getInstance() + self.xpos = self.ypos = 0 + self.tile = tile + self.bitmap = None + if len(args) == 0: + pre = wx.PrePanel() + # the Create step is done by XRC. + self.PostCreate(pre) + self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate) + else: + wx.Panel.__init__(self, *args, **kw) + self._PostInit() + + + def OnCreate(self, event): + self.Unbind(wx.EVT_WINDOW_CREATE) +# print self.Name +# print '>> size' +# print self.Size +# print self.Position + wx.CallAfter(self._PostInit) + event.Skip() + return True + + def _PostInit(self): + # Do all init here + ## if sys.platform == 'win32': + if self.bitmap is None: #mluc: search for bitmap only if not already set; it may happen as the setBitmap might be called before the _PostInit + self.searchBitmap() + ##self.createBackgroundImage() + # print self.Name +# print '> size' +# print self.Size +# print self.Position + + self.Refresh(True) + self.Update() + + + def setBackground(self, colour): + self.backgroundColour = colour + self.bitmap = None + wx.EVT_PAINT(self, self.OnPaint) + self.Refresh() + + + + def searchBitmap(self, name = None): + self.bitmap = None + + # get the image directory + self.imagedir = os.path.join(self.guiUtility.vwxGUI_path, 'images') + + # find a file with same name as this panel + if name is None: + self.bitmapPath = os.path.join(self.imagedir, self.GetName()+'.png') + else: + self.bitmapPath = os.path.join(self.imagedir, name) + + if os.path.isfile(self.bitmapPath): + self.setBitmap(wx.Bitmap(self.bitmapPath, wx.BITMAP_TYPE_ANY)) + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'bgPanel: Could not load image: %s' % self.bitmapPath + #print_stack() + +## try: + # These unnamed things popup on LibraryView +## if self.bitmapPath.endswith('panel.png'): +## return +## +## img = self.bitmapPath +## if img in ImagePanelBasic.__bitmapCache: +## bitmap = ImagePanelBasic.__bitmapCache[img] +## else: +## bitmap = 
wx.Bitmap(img, wx.BITMAP_TYPE_ANY) +## ImagePanelBasic.__bitmapCache[img] = bitmap + +## self.setBitmap(bitmap) +## except: +## print_exc() + + def createBackgroundImage(self, bitmap = None): + if bitmap: + self.searchBitmap(bitmap) + + wx.EVT_PAINT(self, self.OnPaint) + self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnErase) + + + def setBitmapFromFile(self, filename): + self.setBitmap(wx.Bitmap(os.path.join(self.imagedir, filename+'.png'))) + + def setBitmap(self, bitmap): + self.bitmap = bitmap + + w, h = self.GetSize() + iw, ih = self.bitmap.GetSize() + + self.xpos, self.ypos = (w-iw)/2, (h-ih)/2 +# self.SetMinSize((iw, ih)) + self.Refresh() + + def OnErase(self, event): + pass + #event.Skip() + + def OnPaint(self, evt): + obj = evt.GetEventObject() + dc = wx.BufferedPaintDC(obj) + dc.SetBackground(wx.Brush(self.backgroundColour)) + dc.Clear() + if self.bitmap: + # Tile bitmap + rec=wx.Rect() + rec=self.GetClientRect() + + if self.tile: + for y in range(0,rec.GetHeight(),self.bitmap.GetHeight()): + for x in range(0,rec.GetWidth(),self.bitmap.GetWidth()): + dc.DrawBitmap(self.bitmap,x,y,0) + else: + # Do not tile + + dc.DrawBitmap(self.bitmap, self.xpos,self.ypos, True) + + + +class bgPanel(ImagePanelBasic): + def __init__(self, *args, **kw): + tile = True + ImagePanelBasic.__init__(self, tile, *args, **kw) + +class ImagePanel(ImagePanelBasic): + def __init__(self, *args, **kw): + tile = False + ImagePanelBasic.__init__(self, tile, *args, **kw) + diff --git a/tribler-mod/Tribler/Main/vwxGUI/bgPanel.py.bak b/tribler-mod/Tribler/Main/vwxGUI/bgPanel.py.bak new file mode 100644 index 0000000..6752fd5 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/bgPanel.py.bak @@ -0,0 +1,154 @@ +# Written by Jelle Roozenburg, Maarten ten Brinke, Arno Bakker +# see LICENSE.txt for license information +import wx, os, sys + +DEBUG = True + +class ImagePanelBasic(wx.Panel): + """ + Panel with automatic backgroundimage control. + """ + + __bitmapCache = {} + + def __init__(self, tile, *args, **kw): + self.backgroundColour = wx.Colour(102,102,102) + from Tribler.Main.vwxGUI.GuiUtility import GUIUtility + + self.guiUtility = GUIUtility.getInstance() + self.xpos = self.ypos = 0 + self.tile = tile + self.bitmap = None + if len(args) == 0: + pre = wx.PrePanel() + # the Create step is done by XRC. 
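            # Two-phase creation note (wxPython 2.8 "classic"): when this panel is
            # loaded from XRC the underlying widget is built by the XRC handler, so
            # the empty wx.PrePanel() is adopted with PostCreate() and the real
            # initialisation is deferred to _PostInit() via the one-shot
            # EVT_WINDOW_CREATE binding below.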
+ self.PostCreate(pre) + self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate) + else: + wx.Panel.__init__(self, *args, **kw) + self._PostInit() + + + def OnCreate(self, event): + self.Unbind(wx.EVT_WINDOW_CREATE) +# print self.Name +# print '>> size' +# print self.Size +# print self.Position + wx.CallAfter(self._PostInit) + event.Skip() + return True + + def _PostInit(self): + # Do all init here + ## if sys.platform == 'win32': + if self.bitmap is None: #mluc: search for bitmap only if not already set; it may happen as the setBitmap might be called before the _PostInit + self.searchBitmap() + ##self.createBackgroundImage() + # print self.Name +# print '> size' +# print self.Size +# print self.Position + + self.Refresh(True) + self.Update() + + + def setBackground(self, colour): + self.backgroundColour = colour + self.bitmap = None + wx.EVT_PAINT(self, self.OnPaint) + self.Refresh() + + + + def searchBitmap(self, name = None): + self.bitmap = None + + # get the image directory + self.imagedir = os.path.join(self.guiUtility.vwxGUI_path, 'images') + + # find a file with same name as this panel + if name is None: + self.bitmapPath = os.path.join(self.imagedir, self.GetName()+'.png') + else: + self.bitmapPath = os.path.join(self.imagedir, name) + + if os.path.isfile(self.bitmapPath): + self.setBitmap(wx.Bitmap(self.bitmapPath, wx.BITMAP_TYPE_ANY)) + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'bgPanel: Could not load image: %s' % self.bitmapPath + #print_stack() + +## try: + # These unnamed things popup on LibraryView +## if self.bitmapPath.endswith('panel.png'): +## return +## +## img = self.bitmapPath +## if img in ImagePanelBasic.__bitmapCache: +## bitmap = ImagePanelBasic.__bitmapCache[img] +## else: +## bitmap = wx.Bitmap(img, wx.BITMAP_TYPE_ANY) +## ImagePanelBasic.__bitmapCache[img] = bitmap + +## self.setBitmap(bitmap) +## except: +## print_exc() + + def createBackgroundImage(self, bitmap = None): + if bitmap: + self.searchBitmap(bitmap) + + wx.EVT_PAINT(self, self.OnPaint) + self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnErase) + + + def setBitmapFromFile(self, filename): + self.setBitmap(wx.Bitmap(os.path.join(self.imagedir, filename+'.png'))) + + def setBitmap(self, bitmap): + self.bitmap = bitmap + + w, h = self.GetSize() + iw, ih = self.bitmap.GetSize() + + self.xpos, self.ypos = (w-iw)/2, (h-ih)/2 +# self.SetMinSize((iw, ih)) + self.Refresh() + + def OnErase(self, event): + pass + #event.Skip() + + def OnPaint(self, evt): + obj = evt.GetEventObject() + dc = wx.BufferedPaintDC(obj) + dc.SetBackground(wx.Brush(self.backgroundColour)) + dc.Clear() + if self.bitmap: + # Tile bitmap + rec=wx.Rect() + rec=self.GetClientRect() + + if self.tile: + for y in range(0,rec.GetHeight(),self.bitmap.GetHeight()): + for x in range(0,rec.GetWidth(),self.bitmap.GetWidth()): + dc.DrawBitmap(self.bitmap,x,y,0) + else: + # Do not tile + + dc.DrawBitmap(self.bitmap, self.xpos,self.ypos, True) + + + +class bgPanel(ImagePanelBasic): + def __init__(self, *args, **kw): + tile = True + ImagePanelBasic.__init__(self, tile, *args, **kw) + +class ImagePanel(ImagePanelBasic): + def __init__(self, *args, **kw): + tile = False + ImagePanelBasic.__init__(self, tile, *args, **kw) + diff --git a/tribler-mod/Tribler/Main/vwxGUI/btn_DetailsHeader.py b/tribler-mod/Tribler/Main/vwxGUI/btn_DetailsHeader.py new file mode 100644 index 0000000..9f40206 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/btn_DetailsHeader.py @@ -0,0 +1,204 @@ +from time import localtime, strftime +# Written by Jelle 
Roozenburg, Maarten ten Brinke +# see LICENSE.txt for license information + +import wx, os, sys +from traceback import print_exc +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility + +DEBUG = False + +class btn_DetailsHeader(wx.Panel): + """ + Button that changes the image shown if you move your mouse over it. + It redraws the background of the parent Panel, if this is an imagepanel with + a variable self.bitmap. + """ + + def __init__(self, *args, **kw): + if len(args) == 0: + pre = wx.PrePanel() + # the Create step is done by XRC. + self.PostCreate(pre) + self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate) + else: + wx.Panel.__init__(self, *args, **kw) + self._PostInit() + + def OnCreate(self, event): + self.Unbind(wx.EVT_WINDOW_CREATE) + wx.CallAfter(self._PostInit) + event.Skip() + return True + + def _PostInit(self): + # Do all init here + self.guiUtility = GUIUtility.getInstance() + self.Bind(wx.EVT_MOUSE_EVENTS, self.mouseAction) + self.Bind(wx.EVT_LEFT_UP, self.guiUtility.buttonClicked) + self.searchBitmaps() + self.createBackgroundImage() + if self.bitmaps[0]: + self.SetSize(self.bitmaps[0].GetSize()) + self.Refresh(True) + self.Update() + + + + def searchBitmaps(self): + self.bitmaps = [None, None] + self.parentBitmap = None + self.mouseOver = False + + # get the image directory + self.imagedir = os.path.join(self.guiUtility.vwxGUI_path, 'images') + + if not os.path.isdir(self.imagedir): + print 'Error: no image directory found in %s and %s' % (olddir, self.imagedir) + return + + # find a file with same name as this panel + self.bitmapPath = [os.path.join(self.imagedir, 'btn_DetailsHeader.gif'), + os.path.join(self.imagedir, 'btn_DetailsHeader.gif')] + + i = 0 + for img in self.bitmapPath: + if os.path.isfile(img): + self.bitmaps[i] = wx.Bitmap(img, wx.BITMAP_TYPE_ANY) + i+=1 + else: + print 'Could not find image: %s' % img + + + + def createBackgroundImage(self): + if self.bitmaps[0]: + wx.EVT_PAINT(self, self.OnPaint) + self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnErase) + + + def OnErase(self, event): + pass + #event.Skip() + + def mouseAction(self, event): + if event.Entering(): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'btn_DetailsHeader: enter' + self.mouseOver = True + self.Refresh() + elif event.Leaving(): + self.mouseOver = False + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'btn_DetailsHeader: leave' + self.Refresh() + elif event.ButtonUp(): + self.ClickedButton() + #event.Skip() + """ + def ClickedButton(self): + print 'Click' + """ + + def getParentBitmap(self): + try: + parent = self.GetParent() + bitmap = parent.bitmap + #print bitmap + except: + return None + + if bitmap: + location = self.GetPosition() + #location[0] -= parent.GetPosition()[0] + #location[1] -= parent.GetPosition()[1] + if DEBUG: + print 'Mypos: %s, Parentpos: %s' % (self.GetPosition(), parent.GetPosition()) + rect = [location[0], location[1], self.GetClientSize()[0], self.GetClientSize()[1]] + if DEBUG: + print 'Slicing rect(%d,%d) size(%s) from parent image size(%s)' % (location[0], location[1], str(self.GetClientSize()), str(bitmap.GetSize())) + bitmap = self.getBitmapSlice(bitmap, rect) + return bitmap + else: + return None + + def joinImage(self, im1,im2,offsetx=0,offsety=0): + "Draw im2 on im1" + stopx = im2.GetWidth() + if stopx > (im1.GetWidth()-offsetx): + stopx = im1.GetWidth()-offsetx + stopy = im2.GetHeight() + if stopy > (im1.GetHeight()-offsety): + stopy = im1.GetHeight()-offsety + if stopx>0 and stopy>0: + for x in 
range(0,stopx): + for y in range(0,stopy): + rgb2 = (im2.GetRed(x,y),im2.GetGreen(x,y),im2.GetBlue(x,y)) + if rgb2 !=(255,0,255): + im1.SetRGB(x+offsetx,y+offsety,rgb2[0],rgb2[1],rgb2[2]) + return im1 + + def getBitmapSlice(self, bitmap, rect): + try: + #print rect + bitmapSize = bitmap.GetSize() + rect[0] %= bitmapSize[0] + rect[1] %= bitmapSize[1] + rects = [rect] + if rect[0]+rect[2] >= bitmapSize[0]: + rect1 = (rect[0], rect[1], bitmapSize[0]-rect[0], rect[3]) + rect2 = (0, rect[1], rect[0]+rect[2] - bitmapSize[0], rect[3]) + rects = [rect1, rect2] + if rect[1]+ rect[3] >= bitmapSize[1]: + rects2 = [] + for r in rects: + r1 = (r[0], r[1], r[2], bitmapSize[1] - r[3]) + r2 = (r[0], 0, r[2], r[1]+r[3] - bitmapSize[1]) + rects2.append(r1) + rects2.append(r2) + rects = rects2 + images = [] + if len(rects) > 1: + #print "Result: %s" % rects + image = wx.EmptyImage(rect[2], rect[3]) + for r in rects: + subBitmap = bitmap.GetSubBitmap(wx.Rect(r[0], r[1], r[2], r[3])) + subImage = subBitmap.ConvertToImage() + if r == rects[0]: + place = (0,0) + elif r == rects[1]: + if len(rects) == 4: + place = (0, bitmapSize[1]-rect[1]) + elif len(rects) == 2: + place = (bitmapSize[0]-rect[0], 0) + elif r == rects[2]: + place = (bitmapSize[0] - rect[0], 0) + elif r == rects[3]: + place = (bitmapSize[0] - rect[0], bitmapSize[1] - rect[1]) + #print "Place: %s" % str(place) + self.joinImage(image, subImage, place[0], place[1]) + return image.ConvertToBitmap() + else: + return bitmap.GetSubBitmap(wx.Rect(rect[0], rect[1], rect[2], rect[3])) + except: + print_exc() + return None + + + def OnPaint(self, evt): + dc = wx.PaintDC(self) + dc.SetBackground(wx.Brush(wx.Colour(102,102,102))) + dc.Clear() + if self.parentBitmap: + dc.DrawBitmap(self.parentBitmap, 0,0, True) + else: + self.parentBitmap = self.getParentBitmap() + if self.parentBitmap: + dc.DrawBitmap(self.parentBitmap, 0,0, True) + + if self.bitmaps[0]: + dc.DrawBitmap(self.bitmaps[0], 0,0, True) + if self.mouseOver and self.bitmaps[1]: + dc.DrawBitmap(self.bitmaps[1], 0,0, True) + + diff --git a/tribler-mod/Tribler/Main/vwxGUI/btn_DetailsHeader.py.bak b/tribler-mod/Tribler/Main/vwxGUI/btn_DetailsHeader.py.bak new file mode 100644 index 0000000..b1746bb --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/btn_DetailsHeader.py.bak @@ -0,0 +1,203 @@ +# Written by Jelle Roozenburg, Maarten ten Brinke +# see LICENSE.txt for license information + +import wx, os, sys +from traceback import print_exc +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility + +DEBUG = False + +class btn_DetailsHeader(wx.Panel): + """ + Button that changes the image shown if you move your mouse over it. + It redraws the background of the parent Panel, if this is an imagepanel with + a variable self.bitmap. + """ + + def __init__(self, *args, **kw): + if len(args) == 0: + pre = wx.PrePanel() + # the Create step is done by XRC. 
+ self.PostCreate(pre) + self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate) + else: + wx.Panel.__init__(self, *args, **kw) + self._PostInit() + + def OnCreate(self, event): + self.Unbind(wx.EVT_WINDOW_CREATE) + wx.CallAfter(self._PostInit) + event.Skip() + return True + + def _PostInit(self): + # Do all init here + self.guiUtility = GUIUtility.getInstance() + self.Bind(wx.EVT_MOUSE_EVENTS, self.mouseAction) + self.Bind(wx.EVT_LEFT_UP, self.guiUtility.buttonClicked) + self.searchBitmaps() + self.createBackgroundImage() + if self.bitmaps[0]: + self.SetSize(self.bitmaps[0].GetSize()) + self.Refresh(True) + self.Update() + + + + def searchBitmaps(self): + self.bitmaps = [None, None] + self.parentBitmap = None + self.mouseOver = False + + # get the image directory + self.imagedir = os.path.join(self.guiUtility.vwxGUI_path, 'images') + + if not os.path.isdir(self.imagedir): + print 'Error: no image directory found in %s and %s' % (olddir, self.imagedir) + return + + # find a file with same name as this panel + self.bitmapPath = [os.path.join(self.imagedir, 'btn_DetailsHeader.gif'), + os.path.join(self.imagedir, 'btn_DetailsHeader.gif')] + + i = 0 + for img in self.bitmapPath: + if os.path.isfile(img): + self.bitmaps[i] = wx.Bitmap(img, wx.BITMAP_TYPE_ANY) + i+=1 + else: + print 'Could not find image: %s' % img + + + + def createBackgroundImage(self): + if self.bitmaps[0]: + wx.EVT_PAINT(self, self.OnPaint) + self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnErase) + + + def OnErase(self, event): + pass + #event.Skip() + + def mouseAction(self, event): + if event.Entering(): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'btn_DetailsHeader: enter' + self.mouseOver = True + self.Refresh() + elif event.Leaving(): + self.mouseOver = False + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'btn_DetailsHeader: leave' + self.Refresh() + elif event.ButtonUp(): + self.ClickedButton() + #event.Skip() + """ + def ClickedButton(self): + print 'Click' + """ + + def getParentBitmap(self): + try: + parent = self.GetParent() + bitmap = parent.bitmap + #print bitmap + except: + return None + + if bitmap: + location = self.GetPosition() + #location[0] -= parent.GetPosition()[0] + #location[1] -= parent.GetPosition()[1] + if DEBUG: + print 'Mypos: %s, Parentpos: %s' % (self.GetPosition(), parent.GetPosition()) + rect = [location[0], location[1], self.GetClientSize()[0], self.GetClientSize()[1]] + if DEBUG: + print 'Slicing rect(%d,%d) size(%s) from parent image size(%s)' % (location[0], location[1], str(self.GetClientSize()), str(bitmap.GetSize())) + bitmap = self.getBitmapSlice(bitmap, rect) + return bitmap + else: + return None + + def joinImage(self, im1,im2,offsetx=0,offsety=0): + "Draw im2 on im1" + stopx = im2.GetWidth() + if stopx > (im1.GetWidth()-offsetx): + stopx = im1.GetWidth()-offsetx + stopy = im2.GetHeight() + if stopy > (im1.GetHeight()-offsety): + stopy = im1.GetHeight()-offsety + if stopx>0 and stopy>0: + for x in range(0,stopx): + for y in range(0,stopy): + rgb2 = (im2.GetRed(x,y),im2.GetGreen(x,y),im2.GetBlue(x,y)) + if rgb2 !=(255,0,255): + im1.SetRGB(x+offsetx,y+offsety,rgb2[0],rgb2[1],rgb2[2]) + return im1 + + def getBitmapSlice(self, bitmap, rect): + try: + #print rect + bitmapSize = bitmap.GetSize() + rect[0] %= bitmapSize[0] + rect[1] %= bitmapSize[1] + rects = [rect] + if rect[0]+rect[2] >= bitmapSize[0]: + rect1 = (rect[0], rect[1], bitmapSize[0]-rect[0], rect[3]) + rect2 = (0, rect[1], rect[0]+rect[2] - bitmapSize[0], rect[3]) + rects 
= [rect1, rect2] + if rect[1]+ rect[3] >= bitmapSize[1]: + rects2 = [] + for r in rects: + r1 = (r[0], r[1], r[2], bitmapSize[1] - r[3]) + r2 = (r[0], 0, r[2], r[1]+r[3] - bitmapSize[1]) + rects2.append(r1) + rects2.append(r2) + rects = rects2 + images = [] + if len(rects) > 1: + #print "Result: %s" % rects + image = wx.EmptyImage(rect[2], rect[3]) + for r in rects: + subBitmap = bitmap.GetSubBitmap(wx.Rect(r[0], r[1], r[2], r[3])) + subImage = subBitmap.ConvertToImage() + if r == rects[0]: + place = (0,0) + elif r == rects[1]: + if len(rects) == 4: + place = (0, bitmapSize[1]-rect[1]) + elif len(rects) == 2: + place = (bitmapSize[0]-rect[0], 0) + elif r == rects[2]: + place = (bitmapSize[0] - rect[0], 0) + elif r == rects[3]: + place = (bitmapSize[0] - rect[0], bitmapSize[1] - rect[1]) + #print "Place: %s" % str(place) + self.joinImage(image, subImage, place[0], place[1]) + return image.ConvertToBitmap() + else: + return bitmap.GetSubBitmap(wx.Rect(rect[0], rect[1], rect[2], rect[3])) + except: + print_exc() + return None + + + def OnPaint(self, evt): + dc = wx.PaintDC(self) + dc.SetBackground(wx.Brush(wx.Colour(102,102,102))) + dc.Clear() + if self.parentBitmap: + dc.DrawBitmap(self.parentBitmap, 0,0, True) + else: + self.parentBitmap = self.getParentBitmap() + if self.parentBitmap: + dc.DrawBitmap(self.parentBitmap, 0,0, True) + + if self.bitmaps[0]: + dc.DrawBitmap(self.bitmaps[0], 0,0, True) + if self.mouseOver and self.bitmaps[1]: + dc.DrawBitmap(self.bitmaps[1], 0,0, True) + + diff --git a/tribler-mod/Tribler/Main/vwxGUI/deleteTorrent.xrc b/tribler-mod/Tribler/Main/vwxGUI/deleteTorrent.xrc new file mode 100644 index 0000000..b414f99 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/deleteTorrent.xrc @@ -0,0 +1,71 @@ + + + + Delete Torrent + 360,170 + + + 360,170 + + wxVERTICAL + + 0,20 + + + + wxHORIZONTAL + + + + 370,50 + + + + + + + wxHORIZONTAL + + + + + + + 20,0 + + + + + + + + 20,0 + + + + + + + + + + 0,20 + + + + wxHORIZONTAL + + 0,0 + + + + + + + + + + + + + \ No newline at end of file diff --git a/tribler-mod/Tribler/Main/vwxGUI/dummy.xrc b/tribler-mod/Tribler/Main/vwxGUI/dummy.xrc new file mode 100644 index 0000000..ea58d8a --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/dummy.xrc @@ -0,0 +1,23 @@ + + + + 0,0 + 300,300 + #ffffff + + wxVERTICAL + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + + + 0,4 + 194,17 + #ffffff + + + + + diff --git a/tribler-mod/Tribler/Main/vwxGUI/filesDetails.xrc b/tribler-mod/Tribler/Main/vwxGUI/filesDetails.xrc new file mode 100644 index 0000000..e334991 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/filesDetails.xrc @@ -0,0 +1,487 @@ + + + + 1 + 0,0 + 300,755 + #d8d8bf + + wxVERTICAL + + wxEXPAND|wxFIXED_MINSIZE + 0 + + + wxHORIZONTAL + + wxEXPAND|wxFIXED_MINSIZE + 0 + + + 0,0 + 10,21 + + + + wxFIXED_MINSIZE + 3 + + + images/triblerpanel_topcenter.png + 10,0 + 280,21 + # 0 + #ffffff + + wxHORIZONTAL + + wxTOP|wxEXPAND|wxFIXED_MINSIZE + 5 + + + + + 0,5 + 280,16 + # 0 + #ffffff + + + + + + + wxEXPAND|wxFIXED_MINSIZE + 0 + + + 288,0 + 10,21 + + + + + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + 0,21 + 300,170 + + wxHORIZONTAL + + wxFIXED_MINSIZE + 3 + + + 0,0 + 62,16 + #ffffff + # 0 + + + + 18,170 + wxEXPAND|wxFIXED_MINSIZE + 3 + + + + + + + wxEXPAND|wxFIXED_MINSIZE + 6 + + + 0,211 + 300,28 + # 0 + + wxVERTICAL + + 290,10 + wxEXPAND|wxFIXED_MINSIZE + 3 + + + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + wxHORIZONTAL + + wxLEFT|wxEXPAND|wxFIXED_MINSIZE + 10 + + + 10,10 + 75,18 + # 0 + + + + wxLEFT|wxEXPAND|wxFIXED_MINSIZE + 10 + + + 95,10 + 75,18 + # 0 + + + + + + 
+ + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + 0,219 + 300,348 + #ffffff + + wxVERTICAL + + wxTOP|wxLEFT|wxBOTTOM|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 3 + + + wxHORIZONTAL + + wxTOP|wxEXPAND|wxFIXED_MINSIZE + 4 + + + wxVERTICAL + + wxLEFT|wxEXPAND|wxFIXED_MINSIZE + 5 + + + wxHORIZONTAL + + wxEXPAND|wxFIXED_MINSIZE + 6 + + + + + 8,7 + 79,18 + # 0 + + + + wxEXPAND|wxFIXED_MINSIZE + 10 + + + + + 87,7 + 131,18 + # 0 + + + + + + wxLEFT|wxEXPAND|wxFIXED_MINSIZE + 5 + + + wxHORIZONTAL + + wxEXPAND|wxFIXED_MINSIZE + 6 + + + + + 8,25 + 79,18 + # 0 + + + + wxEXPAND|wxFIXED_MINSIZE + 10 + + + + 87,25 + 131,18 + # 0 + + + + + + wxLEFT|wxEXPAND|wxFIXED_MINSIZE + 5 + + + wxHORIZONTAL + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + + + 8,43 + 79,18 + # 0 + + + + wxRIGHT|wxFIXED_MINSIZE + 3 + + + 87,43 + 11,14 + #ffffff + + + + wxRIGHT|wxEXPAND + 4 + + + + 101,43 + 50,18 + # 0 + + + + wxRIGHT|wxFIXED_MINSIZE + 3 + + + 155,43 + 11,14 + #ffffff + + + + wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 4 + + + + 173,43 + 50,18 + # 0 + + + + wxLEFT|wxFIXED_MINSIZE + 2 + + + 248,43 + 11,12 + + + + + + wxLEFT|wxEXPAND|wxFIXED_MINSIZE + 5 + + + wxHORIZONTAL + + wxTOP|wxEXPAND|wxFIXED_MINSIZE + 3 + + + + + 8,64 + 79,15 + # 0 + + + + wxTOP|wxBOTTOM|wxRIGHT|wxFIXED_MINSIZE + 3 + + + 87,64 + 14,14 + + + + wxTOP|wxLEFT|wxEXPAND|wxFIXED_MINSIZE + 3 + + + + + 107,64 + 101,12 + # 0 + + + + + + + + wxEXPAND|wxFIXED_MINSIZE + 4 + + + wxHORIZONTAL + + 10,10 + wxTOP|wxLEFT|wxBOTTOM|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 3 + + + + wxFIXED_MINSIZE + 3 + + + 238,3 + 55,55 + + + + + + + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + 0,84 + 294,15 + #cbcbcb + + wxHORIZONTAL + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + + + 0,0 + 199,15 + #cbcbcb + # 0 + + + + + + + wxTOP|wxLEFT|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 6 + + + + + 6,105 + 265,73 + # 0 + + + + wxEXPAND|wxFIXED_MINSIZE + 10 + + + 0,178 + 20,15 + + wxHORIZONTAL + + wxLEFT|wxEXPAND|wxFIXED_MINSIZE + 10 + + + + + 10,0 + 204,15 + #cbcbcb + # 0 + + + + + + + wxTOP|wxLEFT|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 6 + + + 6,199 + 284,125 + + + + + wxEXPAND|wxFIXED_MINSIZE + 10 + + + 0,324 + 20,15 + + wxHORIZONTAL + + wxLEFT|wxEXPAND|wxFIXED_MINSIZE + 10 + + + + + 10,0 + 204,15 + #cbcbcb + # 0 + + + + + + + wxTOP|wxLEFT|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 6 + + + 6,345 + 284,125 + + + + + + + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + 0,568 + 300,5 + + + + + diff --git a/tribler-mod/Tribler/Main/vwxGUI/filesItemPanel.py b/tribler-mod/Tribler/Main/vwxGUI/filesItemPanel.py new file mode 100644 index 0000000..8fcb18a --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/filesItemPanel.py @@ -0,0 +1,1111 @@ +from time import localtime, strftime +# Written by Jelle Roozenburg, Maarten ten Brinke, Lucan Musat +# see LICENSE.txt for license information + +import wx, math, time, os, sys, threading +from traceback import print_exc,print_stack + +from Tribler.Core.Utilities.utilities import * +#from wx.lib.stattext import GenStaticText as StaticText +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility +from Tribler.Main.vwxGUI.tribler_topButton import tribler_topButton, SwitchButton +from Tribler.Main.vwxGUI.bgPanel import ImagePanel +from Tribler.Core.Utilities.unicode import * +from Tribler.Main.Utility.utility import getMetainfo, similarTorrent, copyTorrent +from Tribler.Main.vwxGUI.IconsManager import IconsManager, data2wxImage + +from Tribler.Core.Utilities.timeouturlopen import urlOpenTimeout +from Tribler.Core.BitTornado.bencode import bencode,bdecode +import urllib +import cStringIO +import string + +from copy import deepcopy +import cStringIO 
+import mimetypes +import tempfile +## import TasteHeart +from font import * + + +from Tribler.Main.vwxGUI.FilesItemDetailsSummary import FilesItemDetailsSummary +from Tribler.Main.vwxGUI.TriblerStyles import TriblerStyles + + +DEBUG = False + +AUTOMODERATION_SAVE_WEBSEARCH_IMAGE_TO_TORRENT = False + +# font sizes +if sys.platform == 'darwin': + FS_FILETITLE = 10 + FS_FILETITLE_SEL = 12 # size of title in expanded torrent + FS_SIMILARITY = 10 + FS_HEARTRANK = 8 +elif sys.platform == 'linux2': + FS_FILETITLE = 8 + FS_FILETITLE_SEL = 10 + FS_SIMILARITY = 7 + FS_HEARTRANK = 7 +else: + FS_FILETITLE = 8 + FS_FILETITLE_SEL = 10 + FS_SIMILARITY = 10 + FS_HEARTRANK = 7 + + +filesModeThumbSize = (125, 70) +#filesModeThumbSizeList = (32, 18) +libraryModeThumbSize = (32,18)#(43,24)#(66, 37) + + +class ItemPanel(wx.Panel): + pass + +class FilesItemPanel(wx.Panel): + """ + This Panel shows one content item inside the GridPanel + """ + def __init__(self, parent, keyfun, name='regular'): + + wx.Panel.__init__(self, parent, -1) + self.guiUtility = GUIUtility.getInstance() + self.utility = self.guiUtility.utility + self.parent = parent + self.data = None + self.datacopy = {} + self.titleLength = 64 # num characters + self.triblerGrey = wx.Colour(200,200,200) ## 128,128,128 + self.selected = False + self.warningMode = False + self.summary = None ## added for function toggleFilesItemDetailsSummary + self.oldCategoryLabel = None + self.guiserver = parent.guiserver + + self.guiImagePath = os.path.join(self.guiUtility.utility.getPath(), 'Tribler', 'Main', 'vwxGUI', 'images') ## + + if self.parent.GetName() == 'filesGrid': + self.listItem = (self.parent.viewmode == 'list') + self.guiserver = parent.guiserver + else: + self.listItem = True + self.guiserver = GUIServer.getInstance() + + self.addComponents() + self.iconsManager = IconsManager.getInstance() + self.Show() + self.Refresh() + self.Layout() + self.gridKeyTyped = keyfun + + + self.name = name + self.ThumbnailViewer = ThumbnailViewer + self.guiUtility.thumbnailViewer = ThumbnailViewer + self.vSizer2 = None + + + def addComponents(self): + + self.Show(False) + self.triblerStyles = TriblerStyles.getInstance() ## added + + self.selectedColour = wx.Colour(255,200,187) + self.unselectedColour = wx.WHITE + + self.SetBackgroundColour(self.unselectedColour) + + self.Bind(wx.EVT_MOUSE_EVENTS, self.mouseAction) ## added + + if not self.listItem: + + self.SetMinSize((138,110)) + + self.hSizer = wx.BoxSizer(wx.HORIZONTAL) + self.hSizer.Add([7,5],0,wx.EXPAND|wx.FIXED_MINSIZE,3) + + self.vSizer = wx.BoxSizer(wx.VERTICAL) + + # Add thumb + #self.thumb = ThumbnailViewer(self, 'filesMode') + #self.thumb.setBackground(wx.BLACK) + #self.thumb.SetSize((125,70)) + #self.vSizer.Add(self.thumb, 0, wx.ALL, 0) + + self.title =wx.StaticText(self,-1,"",wx.Point(0,0),wx.Size(125,22)) # + self.title.SetBackgroundColour(wx.WHITE) + self.title.SetFont(wx.Font(FS_FILETITLE,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + self.title.SetMinSize((125,40)) + self.vSizer.Add(self.title, 0, wx.BOTTOM, 3) + self.vSizer.Add([100,5],0,wx.EXPAND|wx.FIXED_MINSIZE,3) + # + self.hSizer.Add(self.vSizer,0,wx.ALL|wx.FIXED_MINSIZE,0) + self.hSizer.Add([2,5],0,wx.EXPAND|wx.FIXED_MINSIZE,3) + self.SetSizer(self.hSizer); + else: # listitem + self.SetMinSize((660,22)) + + self.vSizerOverall = wx.BoxSizer(wx.VERTICAL) ## + + + imgpath = os.path.join(self.utility.getPath(),"Tribler","Main","vwxGUI","images","5.0","line3.png") + self.line_file = wx.Image(imgpath, wx.BITMAP_TYPE_ANY) + + self.hLine = 
wx.StaticBitmap(self, -1, wx.BitmapFromImage(self.line_file)) + + + + #self.hLine = wx.StaticLine(self,-1,wx.DefaultPosition, wx.Size(220,2),wx.LI_HORIZONTAL) + #self.hLine.SetBackgroundColour((255,0,0)) + self.vSizerOverall.Add(self.hLine, 0, 0, 0) ## + + self.hSizer = wx.BoxSizer(wx.HORIZONTAL) + + + + self.hSizer.Add([10,5],0,wx.EXPAND|wx.FIXED_MINSIZE,0) + self.vSizerOverall.Add(self.hSizer, 0, wx.EXPAND, 0) ## + + self.thumb = ThumbnailViewer(self, 'filesMode') + self.thumb.setBackground(wx.BLACK) + self.thumb.SetSize((32,18)) + self.hSizer.Add(self.thumb, 0, wx.ALL, 2) + # Add title + self.title =wx.StaticText(self,-1,"",wx.Point(0,0),wx.Size(105,18)) + self.title.SetBackgroundColour(wx.WHITE) + self.title.SetForegroundColour(wx.BLACK) + self.title.SetFont(wx.Font(FS_FILETITLE,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + self.title.SetMinSize((400,18)) + self.hSizer.Add(self.title, 0,wx.TOP|wx.BOTTOM, 3) + #self.hSizer.Add([5,5],0,wx.EXPAND|wx.FIXED_MINSIZE,3) + # V Line + ##self.vLine1 = self.addLine() + # Add size + self.fileSize = wx.StaticText(self,-1,"size",wx.Point(0,0),wx.Size(100,18), wx.ALIGN_LEFT | wx.ST_NO_AUTORESIZE) + self.fileSize.SetBackgroundColour(wx.WHITE) + self.fileSize.SetFont(wx.Font(FS_FILETITLE,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + self.fileSize.SetForegroundColour(wx.BLACK) + self.fileSize.SetMinSize((100,18)) + self.hSizer.Add(self.fileSize, 0,wx.TOP|wx.BOTTOM, 2) + + self.popularity = None + + +# self.popularity = wx.StaticText(self,-1,"",wx.Point(0,0),wx.Size(200,18), wx.ALIGN_LEFT | wx.ST_NO_AUTORESIZE) +# self.popularity.SetBackgroundColour(wx.WHITE) +# self.popularity.SetFont(wx.Font(FS_FILETITLE,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) +# self.popularity.SetForegroundColour(self.triblerGrey) +# self.popularity.SetMinSize((100,18)) +# self.hSizer.Add(self.popularity, 0,wx.TOP|wx.BOTTOM, 2) + + + # V Line + ##self.vLine2 = self.addLine() + # Add creation date + ##self.creationDate = wx.StaticText(self,-1,"21-01-2007",wx.Point(0,0),wx.Size(120,18), wx.ALIGN_LEFT | wx.ST_NO_AUTORESIZE) + ##self.creationDate.SetBackgroundColour(wx.WHITE) + ##self.creationDate.SetFont(wx.Font(FS_FILETITLE,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + ##self.creationDate.SetForegroundColour(self.triblerGrey) + ##self.creationDate.SetMinSize((120,18)) + ##self.hSizer.Add(self.creationDate, 0,wx.TOP|wx.BOTTOM, 2) + # V Line + ##self.vLine3 = self.addLine() + # Add popularity +## self.seeders = ImagePanel(self, -1, wx.DefaultPosition, wx.Size(16,16),name='up') +## self.seeders.setBackground(wx.WHITE) +## self.seeders.SetToolTipString(self.utility.lang.get('rNumberOfSeeders')) + ##self.seedersNumber = wx.StaticText(self,-1,"",wx.Point(0,0),wx.Size(125,18), wx.ALIGN_LEFT | wx.ST_NO_AUTORESIZE) + ##self.seedersNumber.SetBackgroundColour(wx.WHITE) + ##self.seedersNumber.SetForegroundColour(self.triblerGrey) + ##self.seedersNumber.SetFont(wx.Font(FS_FILETITLE,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + ##self.seedersNumber.SetMinSize((100,18)) +## self.leechers = ImagePanel(self, -1, wx.DefaultPosition, wx.Size(16,16),name='down') +## self.leechers.setBackground(wx.WHITE) +## self.leechers.SetToolTipString(self.utility.lang.get('rNumberOfLeechers')) + ##self.leechersNumber = wx.StaticText(self,-1,"",wx.Point(0,0),wx.Size(125,18), wx.ALIGN_LEFT | wx.ST_NO_AUTORESIZE) + ##self.leechersNumber.SetBackgroundColour(wx.WHITE) + ##self.leechersNumber.SetForegroundColour(self.triblerGrey) + 
##self.leechersNumber.SetFont(wx.Font(FS_FILETITLE,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + ##self.leechersNumber.SetMinSize((45,18)) +## self.hSizer.Add(self.seeders, 0,wx.TOP|wx.BOTTOM|wx.RIGHT, 2) + ##self.hSizer.Add(self.seedersNumber, 0,wx.TOP|wx.BOTTOM|wx.RIGHT, 2) + ##self.vLine4 = self.addLine() +## self.hSizer.Add(self.leechers, 0,wx.TOP|wx.BOTTOM|wx.RIGHT, 2) + ##self.hSizer.Add(self.leechersNumber, 0,wx.TOP|wx.BOTTOM|wx.RIGHT, 2) + # V Line + ##self.vLine5 = self.addLine() + # Add Taste Heart + self.vSizer2 = wx.BoxSizer(wx.VERTICAL) + self.vSizer2.Add([30,2],0,wx.EXPAND|wx.FIXED_MINSIZE,3) + self.hSizer2 = wx.BoxSizer(wx.HORIZONTAL) + ##self.tasteHeart = TasteHeart.TasteHeart(self, -1, wx.DefaultPosition, wx.Size(14,14),name='TasteHeart') + ##self.hSizer2.Add(self.tasteHeart, 0, wx.TOP, 0) + # Add Taste similarity + ##self.taste =wx.StaticText(self,-1,"",wx.Point(0,0),wx.Size(40,15)) + ##self.taste.SetBackgroundColour(wx.WHITE) + ##self.taste.SetFont(wx.Font(FS_HEARTRANK,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + ##self.taste.SetMinSize((40,15)) + ##self.taste.SetLabel('2nd') + ##self.hSizer2.Add(self.taste, 0, wx.TOP|wx.RIGHT, 0) + self.vSizer2.Add(self.hSizer2,0, wx.EXPAND|wx.FIXED_MINSIZE, 0) + self.hSizer.Add(self.vSizer2,0,wx.EXPAND|wx.FIXED_MINSIZE, 0) + # V Line + ##self.vLine6 = self.addLine() + # Add Source Icon + ##self.sourceIcon = ImagePanel(self, -1, wx.DefaultPosition, wx.Size(16,16),name='bcicon') + ##self.sourceIcon.setBackground(wx.WHITE) + ##self.sourceIcon.SetToolTipString(self.utility.lang.get('---')) + ##self.hSizer.Add(self.sourceIcon, 0, wx.TOP, 2) + self.hSizer.Add([10,5],0,wx.FIXED_MINSIZE,0) + + self.hSizerSummary = wx.BoxSizer(wx.HORIZONTAL) ## + self.vSizerOverall.Add(self.hSizerSummary, 0, wx.FIXED_MINSIZE|wx.EXPAND, 0) ## + + if sys.platform != 'linux2': + self.title.Bind(wx.EVT_MOUSE_EVENTS, self.mouseAction) + self.fileSize.Bind(wx.EVT_MOUSE_EVENTS, self.mouseAction) + + + + self.SetSizer(self.vSizerOverall); ## self.hSizer + + self.SetAutoLayout(1); + self.Layout(); + self.Refresh() + + # 2.8.4.2 return value of GetChildren changed + wl = [] + for c in self.GetChildren(): + wl.append(c) + for window in wl: + window.Bind(wx.EVT_LEFT_UP, self.mouseAction) + window.Bind(wx.EVT_KEY_UP, self.keyTyped) + window.Bind(wx.EVT_LEFT_DCLICK, self.doubleClicked) + window.Bind(wx.EVT_RIGHT_DOWN, self.mouseAction) + #window.Bind(wx.EVT_RIGHT_DOWN, self.rightMouseButton) + + def getColumns(self): + return [{'sort':'name', 'reverse':True, 'title':'Name', 'width':395,'tip':self.utility.lang.get('C_filename')}, + {'sort':'length', 'title':'Size', 'width':132, 'tip':self.utility.lang.get('C_filesize')}, + {'sort':'popularity', 'title':'Popularity', 'width':120, 'tip':self.utility.lang.get('C_popularity')} + ] + + + def setData(self, torrent): + + if DEBUG: + if torrent is None: + stat = 'None' + else: + stat = torrent.keys() # torrent['myDownloadHistory']] + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","fip: setData:",stat + + self.data = torrent + + # Do not update if 'similar torrent' is set + if similarTorrent(self.datacopy, self.data): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","fip: Similar torrent" + return + + self.datacopy = copyTorrent(self.data) + + if not torrent: + torrent = {} + + self.thumb.Hide() ## should not be shown + + #self.thumb.setTorrent(torrent) + + if torrent.get('name'): + title = torrent['name'][:self.titleLength] + if sys.platform == 'win32': + title = 
string.replace(title,'&','&&') + #print >> sys.stderr , title + #print >> sys.stderr , title_new + self.title.Enable(True) + self.title.SetLabel(title) + if sys.platform != 'win32': # on windows causes a new line bug when title contains & symbol + self.title.Wrap(self.title.GetSize()[0]) + self.title.SetToolTipString(title) + ##self.setSourceIcon(torrent) + if self.listItem: + self.fileSize.Enable(True) + if torrent.get('web2'): + self.fileSize.SetLabel('%s s' % torrent['length']) + else: + self.fileSize.SetLabel(self.utility.size_format(torrent['length'])) + + # Show Popularity of torrent a sequence of bars + total = torrent['num_seeders']+torrent['num_leechers'] + + popularity_file = os.path.join(self.utility.getPath(),"Tribler","Main","vwxGUI","images","popularity") + + if total > 18000: + popularity_file+='10' + elif total > 16000: + popularity_file+='9' + elif total > 14000: + popularity_file+='8' + elif total > 12000: + popularity_file+='7' + elif total > 10000: + popularity_file+='6' + elif total > 8000: + popularity_file+='5' + elif total > 6000: + popularity_file+='4' + elif total > 4000: + popularity_file+='3' + elif total > 2000: + popularity_file+='2' + else: + popularity_file+='1' + + popularity_file+='.png' + + if self.popularity is not None: + self.popularity.Destroy() + + #self.popularity = tribler_topButton(self, -1, wx.DefaultPosition, wx.Size(49,12),name=popularity_file) + self.popularity_image = wx.Image(popularity_file, wx.BITMAP_TYPE_ANY) + + self.popularity = wx.StaticBitmap(self, -1, wx.BitmapFromImage(self.popularity_image)) + self.popularity.Bind(wx.EVT_MOUSE_EVENTS, self.popularityOver) + + if torrent['num_seeders'] > 0 and torrent['num_leechers'] > 0: + self.popularity.SetToolTipString('%s seeders\n%s leechers' % (torrent['num_seeders'], torrent['num_leechers'])) + else: + self.popularity.SetToolTipString('Poor') + self.hSizer.Add(self.popularity, 0, wx.TOP, 2) + + self.hLine.Show() + + + + self.hLine.Show() + else: + #self.thumb.Hide() + self.title.SetLabel('') + self.title.SetToolTipString('') + self.title.Enable(False) + if self.listItem: + # -- if list VIEW -- + self.fileSize.SetLabel('') + + if self.popularity: + self.popularity.Hide() + + + + + + self.Layout() + #self.Refresh() + #self.parent.Refresh() + + def addLine(self): + vLine = wx.StaticLine(self,-1,wx.DefaultPosition, wx.Size(2,22),wx.LI_VERTICAL) +# vLine.SetForegroundColour(wx.Colour(64,128,128)) +# vLine.SetBackgroundColour(wx.Colour(255,51,0)) + self.hSizer.Add(vLine, 0, wx.RIGHT|wx.LEFT|wx.EXPAND, 3) + return vLine + + def select(self, rowIndex, colIndex, pageIndex=-1, panelsPerRow=-1, rowsPerPage=-1): + # if pageIndex is given, we assume panelsPerRow and rowsPerPage are given as well, + # and set click_position, a 0-indexed value indicating the rank of the panel + if pageIndex>-1: + panelsPerPage = panelsPerRow * rowsPerPage + self.data["click_position"] = pageIndex * panelsPerPage + rowIndex * panelsPerRow + colIndex + + # allows to deselect a selected torrent + #if self.selected == True: + # self.deselect(rowIndex, colIndex) + # return + + self.selected = True + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'fip: item selected' + if self.data and self.data.get('myDownloadHistory'): + colour = self.guiUtility.selectedColour + elif self.data and self.data.get('query_torrent_was_requested',False): + colour = self.guiUtility.selectedColourPending + else: + colour = self.guiUtility.selectedColour + #self.thumb.setSelected(True) + 
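setData, earlier in this class, chooses one of ten popularity bar images (popularity1.png .. popularity10.png) by walking an if/elif ladder over the seeder plus leecher total in steps of 2000. The same bucketing can be written as a single clamped division; a small illustrative sketch with a hypothetical helper name:

    def popularity_bucket(num_seeders, num_leechers):
        # Hypothetical helper, equivalent to the if/elif ladder in setData:
        # one bucket per 2000 peers, clamped to the 1..10 image range.
        total = num_seeders + num_leechers
        return min(10, max(1, (total + 1999) // 2000))

    # e.g. 'popularity%d.png' % popularity_bucket(torrent['num_seeders'],
    #                                             torrent['num_leechers'])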
self.title.SetBackgroundColour(colour) + self.title.SetFont(wx.Font(FS_FILETITLE_SEL,FONTFAMILY,FONTWEIGHT,wx.BOLD,False,FONTFACE)) + + + if self.listItem: + self.SetBackgroundColour(colour) + self.fileSize.SetBackgroundColour(colour) + ##self.creationDate.SetBackgroundColour(colour) +## self.seeders.setBackground(colour) + ##self.seedersNumber.SetBackgroundColour(colour) +## self.leechers.setBackground(colour) + ##self.leechersNumber.SetBackgroundColour(colour) + ##self.tasteHeart.setBackground(colour) + ##self.sourceIcon.setBackground(colour) + ##self.taste.SetBackgroundColour(colour) + ##self.sourceIcon.SetBackgroundColour(colour) + self.toggleFilesItemDetailsSummary(True) ## + self.guiUtility.standardOverview.selectedTorrent = self.data['infohash'] + + self.Refresh() + self.guiUtility.standardOverview.SetFocus() + + def deselect(self, rowIndex, colIndex): + self.selected = False + #colour = self.guiUtility.unselectedColour + self.hLine.Show() + self.vSizerOverall.Layout() + downloading = self.data and self.data.get('myDownloadHistory') + if rowIndex % 2 == 0 or not self.listItem: + if downloading: + colour = self.guiUtility.unselectedColour + else: + colour = self.guiUtility.unselectedColour + else: + if downloading: + colour = self.guiUtility.unselectedColour2 + else: + colour = self.guiUtility.unselectedColour2 + + + #self.thumb.setSelected(False) + self.title.SetBackgroundColour(colour) + self.title.SetFont(wx.Font(FS_FILETITLE,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + + + if self.listItem: + self.SetBackgroundColour(colour) + self.fileSize.SetBackgroundColour(colour) + ##self.creationDate.SetBackgroundColour(colour) +## self.seeders.setBackground(colour) + ##self.seedersNumber.SetBackgroundColour(colour) +## self.leechers.setBackground(colour) + ##self.leechersNumber.SetBackgroundColour(colour) + ##self.tasteHeart.setBackground(colour) + ##self.sourceIcon.setBackground(colour) + ##self.taste.SetBackgroundColour(colour) + ##self.sourceIcon.SetBackgroundColour(colour) + self.toggleFilesItemDetailsSummary(False) ## + self.Refresh() + + def keyTyped(self, event): + if self.selected: + key = event.GetKeyCode() + if (key == wx.WXK_DELETE): + if self.data: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'fip: deleting' + #self.guiUtility.deleteTorrent(self.data) + event.Skip() + try: + self.gridKeyTyped(event) + except: + print 'Exception in keytyped' + + def popularityOver(self, event): + + event.Skip() + colour = wx.Colour(216,233,240) + + if self.data is None: + colour = self.guiUtility.unselectedColour + + elif event.Entering() and self.data is not None: + colour = self.guiUtility.selectedColour + + + + self.title.SetBackgroundColour(colour) + self.fileSize.SetBackgroundColour(colour) + self.SetBackgroundColour(colour) + wx.CallAfter(self.Refresh) + + + #if event.Entering(): + # self.title.SetFont(wx.Font(FS_FILETITLE,FONTFAMILY,FONTWEIGHT,wx.BOLD,False,FONTFACE)) + #elif event.Leaving() and self.selected == False: + # self.title.SetFont(wx.Font(FS_FILETITLE,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + + + + + + #self.SetFocus() + if self.data and (event.LeftUp() or event.RightDown()): + # torrent data is sent to guiUtility > standardDetails.setData + self.guiUtility.selectTorrent(self.data) + + ##if event.RightDown(): + ## self.rightMouseButton(event) + + + + + def mouseAction(self, event): + + event.Skip() + colour = wx.Colour(216,233,240) + + if self.data is None: + colour = self.guiUtility.unselectedColour + + elif event.Entering() 
and self.data is not None: + colour = self.guiUtility.selectedColour + + elif event.Leaving() and self.selected == False: + #if sys.platform == 'win32': + # position = event.GetPosition() + # for i in xrange(2): + # position[i]+=event.GetEventObject().GetPosition()[i] + # position[i]-=self.GetPosition()[i] + # size = self.GetSize() + + # if position[0]<0 or position[0]>=size[0] or position[1]<0 or position[1]>=size[1]: + # colour = self.guiUtility.unselectedColour + #else: + colour = self.guiUtility.unselectedColour + + + self.title.SetBackgroundColour(colour) + self.fileSize.SetBackgroundColour(colour) + self.SetBackgroundColour(colour) + wx.CallAfter(self.Refresh) + + + #if event.Entering(): + # self.title.SetFont(wx.Font(FS_FILETITLE,FONTFAMILY,FONTWEIGHT,wx.BOLD,False,FONTFACE)) + #elif event.Leaving() and self.selected == False: + # self.title.SetFont(wx.Font(FS_FILETITLE,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + + + + + + #self.SetFocus() + if self.data and (event.LeftUp() or event.RightDown()): + # torrent data is sent to guiUtility > standardDetails.setData + self.guiUtility.selectTorrent(self.data) + + ##if event.RightDown(): + ## self.rightMouseButton(event) + + + + def rightMouseButton(self, event): + menu = self.guiUtility.OnRightMouseAction(event) + if menu is not None: + self.PopupMenu(menu, (-1,-1)) + + def doubleClicked(self, event): + self.guiUtility.standardDetails.download(self.data) + + def getIdentifier(self): + return self.data['infohash'] + + def toggleFilesItemDetailsSummary(self, visible): + if visible and not self.summary: + if not self.data.get('web2'): + self.guiUtility.moderatedinfohash = self.data['infohash'] + self.summary = FilesItemDetailsSummary(self, torrentHash = self.data['infohash'], torrent = self.data) + else: + self.summary = FilesItemDetailsSummary(self, torrentHash = None, torrent = self.data, web2data = self.data) + ##self.triblerStyles.setLightText(self.summary) + self.hSizerSummary.Add(self.summary, 1, wx.ALL|wx.EXPAND, 0) + if sys.platform == 'win32': + self.SetMinSize((-1,97)) + elif sys.platform == 'darwin': + self.SetMinSize((-1,101)) + else: + self.SetMinSize((-1,100)) + elif visible and self.summary: + pass + ## self.guiUtility.standardDetails.setDownloadbutton(torrent=self.data, item = self.summary.download) + + elif self.summary and not visible: + #beg = time() + self.summary.Hide() + #self.summary.reset_video() + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'hide took: %f s' % (time() -beg) + + # the Thumb should be destoryed seperately because it has a different parent. 
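The collapse branch just below tears the summary panel down with wx.CallAfter rather than calling Destroy() directly, so the window is only destroyed once the current event handler has returned. Stripped of the surrounding details, the pattern looks like this (a sketch, assuming the same self.summary attribute):

    def collapse_summary(self):
        # Hide immediately, then defer actual destruction until the running
        # event handler has finished, instead of destroying the window from
        # inside a handler that may still be delivering events to it.
        if self.summary is not None:
            self.summary.Hide()
            wx.CallAfter(self.summary.DestroyChildren)
            wx.CallAfter(self.summary.Destroy)
            self.summary = None
            self.SetMinSize((-1, 22))  # back to the collapsed row height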
+ ##if not self.summary.downloading: + ##wx.CallAfter(self.summary.thumbSummary.Destroy) + #self.hLine.Show() + wx.CallAfter(self.summary.DestroyChildren) + wx.CallAfter(self.summary.Destroy) + self.summary = None + self.SetMinSize((-1,22)) + + + +class ThumbnailViewer(wx.Panel): + """ + Show thumbnail and mast with info on mouseOver + """ + + def __init__(self, parent, mode, **kw): + self.parent = parent + wx.Panel.__init__(self, parent, **kw) + self.mode = mode + self._PostInit() + + def OnCreate(self, event): + self.Unbind(wx.EVT_WINDOW_CREATE) + wx.CallAfter(self._PostInit) + event.Skip() + return True + + def _PostInit(self): + # Do all init here + self.backgroundColor = wx.WHITE + self.torrentBitmap = None + self.torrent = None + self.mouseOver = False + self.triblerGrey = wx.Colour(128,128,128) + self.triblerLightGrey = wx.Colour(203,203,203) + self.sourceIcon = None + self.guiUtility = GUIUtility.getInstance() + self.Bind(wx.EVT_MOUSE_EVENTS, self.mouseAction) + self.Bind(wx.EVT_LEFT_UP, self.guiUtility.buttonClicked) + self.Bind(wx.EVT_PAINT, self.OnPaint) + self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnErase) + self.selected = False + self.border = None + self.downloading = False + self.categoryIcon = None + self.iconsManager = IconsManager.getInstance() + + + def setTorrent(self, torrent): + if not torrent: + self.Hide() + self.Refresh() + return + + if not self.IsShown(): + self.Hide() ## self.Show() + + + self.torrent = torrent + ##self.setThumbnail(torrent) + ##self.setCategoryIcon(torrent) + # items in library should not show downloading color + self.downloading = torrent.get('myDownloadHistory', False) and self.mode != 'libraryMode' + + + def setCategoryIcon(self, torrent): + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","fip: ",`torrent['name']`,"has cat",torrent.get('category') + self.categoryIcon = self.iconsManager.getCategoryIcon(self.mode, torrent.get('category'), thumbtype='icon', web2 = torrent.get('web2')) + + def setSourceIcon(self, si): + self.sourceIcon = si + + def setThumbnail(self, torrent): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","fip: setThumb:",torrent['name'] + + thumbtype = (self.parent.listItem) and 'small' or 'normal' + bmp = None + readable = torrent.get('metadata',{}).get('ThumbReadable') + if readable == False: + bmp = self.iconsManager.getCategoryIcon(self.mode,torrent.get('category'), thumbtype=thumbtype, web2 = torrent.get('web2')) + + else: + # Check if we have already read the thumbnail and metadata information from this torrent file + if 'preview' in torrent: + self.GetParent().guiserver.add_task(lambda:self.loadMetadata(torrent,None),0) + + elif torrent.get('metadata',{}).get('ThumbnailBitmap'): + if self.mode == 'libraryMode' or self.parent.listItem: + # Make a resized thumb for lib view + bmp = torrent['metadata'].get('ThumbnailBitmap') + if bmp: + img = bmp.ConvertToImage() + bmp = getResizedBitmapFromImage(img, libraryModeThumbSize) + + elif self.mode == 'filesMode': + bmp = torrent['metadata'].get('ThumbnailBitmap') + elif 'torrent_file_name' in torrent and torrent['torrent_file_name'] != '': + torrent_dir = self.guiUtility.utility.session.get_torrent_collecting_dir() + torrent_filename = os.path.join(torrent_dir, torrent['torrent_file_name']) + + if DEBUG: + print "fip: Scheduling read of thumbnail for",`torrent['name']`,"from",torrent_filename + + def loadMetaDataNow(): + try: + self.loadMetadata(torrent,torrent_filename) + except wx.PyDeadObjectError: + pass + + try: + 
self.GetParent().guiserver.add_task(loadMetaDataNow,0) + except wx.PyDeadObjectError: + pass + + # ARNO: TODO: The FileItemPanels that use this ThumbnailViewer now get deleted, and thus + # also the ThumbnailViewer objects. Or at least the C++ part of them. As a result we + # can no longer schedule these loadMetadata callbacks on the GUITaskQueue thread. + # + # At the moment, the wx code protects us, and throws an exception that the C++ part + # of the ThumbnailViewer object is gone. But we should clean this up. + + if not bmp: + bmp = self.iconsManager.getCategoryIcon(self.mode, torrent.get('category'), thumbtype=thumbtype, web2 = torrent.get('web2')) + + assert bmp, 'No bitmap found for %s' % `torrent['name']` + self.setBitmap(bmp) + width, height = self.GetSize() + d = 1 + self.border = [wx.Point(0,d), wx.Point(width-d, d), wx.Point(width-d, height-d), wx.Point(d,height-d), wx.Point(d,0)] + self.Refresh() + #wx.Yield() + + + + def setBitmap(self, bmp): + # Recalculate image placement + if not bmp: + self.torrentBitmap = None + self.xpos, self.ypos = 0,0 + raise Exception('Warning: Thumbnail set to None for %s' % `self.torrent`) + else: + w, h = self.GetSize() + iw, ih = bmp.GetSize() + + self.torrentBitmap = bmp + self.xpos, self.ypos = (w-iw)/2, (h-ih)/2 + + + def loadMetadata(self, torrent,torrent_filename): + """ Called by separate non-GUI thread """ + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","fip: ThumbnailViewer: loadMetadata",torrent_filename + if not torrent.get('preview'): + if not os.path.exists(torrent_filename): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","fip: ThumbnailViewer: loadMetadata: %s does not exist" % torrent_filename + return None + + # We can't do any wx stuff here apparently, so the only thing we can do is to + # read the data from the torrent file and create the wxBitmap in the GUI callback. 
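That comment reflects the standard wxPython threading rule: only the GUI thread may create or touch wx objects, so the worker thread limits itself to disk and network I/O and hands its result back with wx.CallAfter, which is what loadMetadata does below via metadata_thread_gui_callback. A minimal sketch of the pattern (function and callback names here are illustrative):

    import threading
    import wx

    def load_in_background(panel, torrent_filename):
        # Blocking work runs off the GUI thread; the wx.Bitmap is only built
        # inside the callback that wx.CallAfter queues onto the GUI thread.
        def worker():
            raw = open(torrent_filename, 'rb').read()    # no wx calls here
            wx.CallAfter(panel.on_metadata_loaded, raw)  # runs on the GUI thread
        threading.Thread(target=worker).start()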
+ + newmetadata = loadAzureusMetadataFromTorrent(torrent_filename) + + + if newmetadata.get('Thumbnail') is None and AUTOMODERATION_SAVE_WEBSEARCH_IMAGE_TO_TORRENT: + # Use Google Image search to find a thumb + (mimetype,thumbdata) = google_image_search(torrent['name']) + + if DEBUG: + if thumbdata is None: + t = None + else: + t = 'data' + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","fip: automod: Google Image Search Got:",mimetype,t + + if mimetype is not None and thumbdata is not None: + # Scale image + scaledthumbdata = scale_image_convert_jpeg(mimetype,thumbdata,171) + newmetadata = { 'Thumbnail' : scaledthumbdata} + + # Save thumb data in torrent, auto-moderation ;o) + saveAzureusMetadataToTorrent(torrent_filename,scaledthumbdata) + else: + # Web2 items have preview fields + newmetadata = { 'Thumbnail' : torrent['preview'] } + + + wx.CallAfter(self.metadata_thread_gui_callback,torrent,newmetadata) + + + def metadata_thread_gui_callback(self,torrent,metadata): + """ Called by GUI thread """ + + #print 'Azureus_thumb: %s' % thumbnailString + thumbnailString = metadata.get('Thumbnail') + + if thumbnailString: + #print 'Found thumbnail: %s' % thumbnailString + + img = createThumbImage(thumbnailString) + if img is None: + return + + bmp = getResizedBitmapFromImage(img, filesModeThumbSize) + + if bmp: + metadata['ThumbnailBitmap'] = bmp + metadata['ThumbnailReadable'] = True + ## We now scale live + #bmplib = getResizedBitmapFromImage(img, libraryModeThumbSize) + #if bmplib: + # metadata['ThumbnailBitmapLibrary'] = bmplib + + # Dump the raw data + #del metadata['Thumbnail'] + else: + metadata['ThumbnailReadable'] = False + + torrent['metadata'] = metadata + + # This item may be displaying another torrent right now, only show the icon + # when it's still the same torrent + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","fip: meta_gui_cb: old torrent",`torrent['name']`,"new torrent",`self.torrent['name']` + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","fip: meta_gui_cb: old torrent",`torrent['infohash']`,"new torrent",`self.torrent['infohash']` + try: + if torrent['infohash'] == self.torrent['infohash']: + bmp = metadata.get('ThumbnailBitmap') + if bmp: + if self.parent.listItem: + bmp = getResizedBitmapFromImage(img, libraryModeThumbSize) + self.setBitmap(bmp) + self.Refresh() + except wx.PyDeadObjectError: + pass + + + + def OnErase(self, event): + pass + #event.Skip() + + def setSelected(self, sel): + self.selected = sel + self.Refresh() + + def isSelected(self): + return self.selected + + def mouseAction(self, event): + if event.Entering(): + #print 'enter' + self.mouseOver = True + self.Refresh() + elif event.Leaving(): + self.mouseOver = False + #print 'leave' + self.Refresh() + + """ + def ClickedButton(self): + print 'Click' + """ + + def setBackground(self, wxColor): + self.backgroundColor = wxColor + + def OnPaint(self, evt): + dc = wx.BufferedPaintDC(self) + dc.SetBackground(wx.Brush(self.backgroundColor)) + dc.Clear() + + + if self.torrent and self.mode == 'filesMode': + rank = self.torrent.get('simRank', -1) + else: + rank = -1 + + heartBitmap = TasteHeart.getHeartBitmap(rank) + + + if self.torrentBitmap: + dc.DrawBitmap(self.torrentBitmap, self.xpos,self.ypos, True) +# dc.SetFont(wx.Font(6, wx.SWISS, wx.NORMAL, wx.BOLD, True)) +# dc.DrawBitmap(MASK_BITMAP,0 ,52, True) +# dc.SetTextForeground(wx.BLACK) + #dc.DrawText('rating', 8, 50) + + if self.categoryIcon: + dc.DrawBitmap(self.categoryIcon, 99, 7, True) + if 
self.sourceIcon: + dc.DrawBitmap(self.sourceIcon, 101, 27, True) + + if self.mouseOver: + dc.SetFont(wx.Font(6, FONTFAMILY,FONTWEIGHT, wx.BOLD, True, FONTFACE)) + mask = self.iconsManager.get_default('filesMode','MASK_BITMAP') + dc.DrawBitmap(mask,0 ,0, True) + + if heartBitmap: + mask = self.iconsManager.get_default('filesMode','MASK_BITMAP_BOTTOM') + margin = 52 + dc.DrawBitmap(mask,0 ,margin, True) + dc.DrawBitmap(heartBitmap,5 ,margin+2, True) + dc.SetFont(wx.Font(FS_HEARTRANK, FONTFAMILY, FONTWEIGHT, wx.BOLD, False, FONTFACE)) + text = repr(rank) + dc.DrawText(text, 22, margin+4) + + + if self.border: + if self.selected: + if self.downloading: + colour = self.guiUtility.selectedColourDownload + else: + colour = self.guiUtility.triblerRed + else: + if self.downloading: + colour = self.guiUtility.unselectedColourDownload + else: + colour = self.triblerLightGrey + dc.SetPen(wx.Pen(colour, 2)) + dc.DrawLines(self.border) + + +def loadAzureusMetadataFromTorrent(torrent_filename): + metadata = getMetainfo(torrent_filename) + if not metadata: + return None + + newmetadata = metadata.get('azureus_properties', {}).get('Content',{}) + for key in ['encoding','comment','comment-utf8']: # 'created by' + if key in metadata: + newmetadata[key] = metadata[key] + return newmetadata + + +def createThumbImage(imgdata): + try: + # Simple protection against bad parsing of websites, if the + # image data is HTML, ignore it. + + low = imgdata[:5].lower() + if low == ' (w/float(h)): + nw = w + nh = int(ih * w/float(iw)) + else: + nh = h + nw = int(iw * h/float(ih)) + if nw != iw or nh != ih: + #print 'Rescale from (%d, %d) to (%d, %d)' % (iw, ih, nw, nh) + img.Rescale(nw, nh) + bmp = wx.BitmapFromImage(img) + return bmp + +def google_image_search(name): + try: + rname = name.replace('.',' ') + rname = rname.replace('-',' ') + rname = rname.replace('_',' ') + rname = rname.replace('[',' ') + rname = rname.replace(']',' ') + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","fip: automod: Name becomes keywords",rname + + qname = urllib.quote(rname) + + # 1. Query Google Image search + url = 'http://www.searchmash.com/results/images:'+qname+'' + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","fip: automod: Query URL",url + f = urlOpenTimeout(url,timeout=2) + resp = f.read() + f.close() + + start = 0 + while True: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","fip: automod: Searching from idx",start + i = resp.find("imageUrl",start) + if i == -1: + break + else: + i += len("imageUrl\":\"") + j = resp.find("\"",i) + if j == -1: + break + else: + # 2. Found an Image, see if we can guess MIME type + imgurl = resp[i:j] + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","fip: automod: Found image",imgurl + + iconmime = mimetypes.guess_type(imgurl)[0] + if iconmime is None: + start = j + continue + + # 3. 
Load the image + try: + f = urlOpenTimeout(imgurl,timeout=2) + imgresp = f.read() + f.close() + + if imgresp == '': + start = j + continue + + return (iconmime,imgresp) + except: + print_exc() + start = j + continue + except: + print_exc() + return (None,None) + + +def scale_image_convert_jpeg(mimetype,data,dim): + icondata = None + try: + cio = cStringIO.StringIO(data) + if wx.Image.CanReadStream(cio): + sim = data2wxImage(mimetype,data,dim=dim) + [thumbhandle,thumbfilename] = tempfile.mkstemp("torrent-thumb") + os.close(thumbhandle) + sim.SaveFile(thumbfilename,wx.BITMAP_TYPE_JPEG) + + f = open(thumbfilename,"rb") + icondata = f.read() + f.close() + + os.remove(thumbfilename) + except: + print_exc() + + return icondata + + +def saveAzureusMetadataToTorrent(torrentfilename,scaledthumbdata): + try: + f = open(torrentfilename,"rb") + data = f.read() + f.close() + d = bdecode(data) + + d['azureus_properties'] = {} + d['azureus_properties']['Content'] = {} + d['azureus_properties']['Content']['Thumbnail'] = scaledthumbdata + + newdata = bencode(d) + f = open(torrentfilename,"wb") + f.write(newdata) + f.close() + except: + print_exc() + diff --git a/tribler-mod/Tribler/Main/vwxGUI/filesItemPanel.py.bak b/tribler-mod/Tribler/Main/vwxGUI/filesItemPanel.py.bak new file mode 100644 index 0000000..e4210b5 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/filesItemPanel.py.bak @@ -0,0 +1,1110 @@ +# Written by Jelle Roozenburg, Maarten ten Brinke, Lucan Musat +# see LICENSE.txt for license information + +import wx, math, time, os, sys, threading +from traceback import print_exc,print_stack + +from Tribler.Core.Utilities.utilities import * +#from wx.lib.stattext import GenStaticText as StaticText +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility +from Tribler.Main.vwxGUI.tribler_topButton import tribler_topButton, SwitchButton +from Tribler.Main.vwxGUI.bgPanel import ImagePanel +from Tribler.Core.Utilities.unicode import * +from Tribler.Main.Utility.utility import getMetainfo, similarTorrent, copyTorrent +from Tribler.Main.vwxGUI.IconsManager import IconsManager, data2wxImage + +from Tribler.Core.Utilities.timeouturlopen import urlOpenTimeout +from Tribler.Core.BitTornado.bencode import bencode,bdecode +import urllib +import cStringIO +import string + +from copy import deepcopy +import cStringIO +import mimetypes +import tempfile +## import TasteHeart +from font import * + + +from Tribler.Main.vwxGUI.FilesItemDetailsSummary import FilesItemDetailsSummary +from Tribler.Main.vwxGUI.TriblerStyles import TriblerStyles + + +DEBUG = False + +AUTOMODERATION_SAVE_WEBSEARCH_IMAGE_TO_TORRENT = False + +# font sizes +if sys.platform == 'darwin': + FS_FILETITLE = 10 + FS_FILETITLE_SEL = 12 # size of title in expanded torrent + FS_SIMILARITY = 10 + FS_HEARTRANK = 8 +elif sys.platform == 'linux2': + FS_FILETITLE = 8 + FS_FILETITLE_SEL = 10 + FS_SIMILARITY = 7 + FS_HEARTRANK = 7 +else: + FS_FILETITLE = 8 + FS_FILETITLE_SEL = 10 + FS_SIMILARITY = 10 + FS_HEARTRANK = 7 + + +filesModeThumbSize = (125, 70) +#filesModeThumbSizeList = (32, 18) +libraryModeThumbSize = (32,18)#(43,24)#(66, 37) + + +class ItemPanel(wx.Panel): + pass + +class FilesItemPanel(wx.Panel): + """ + This Panel shows one content item inside the GridPanel + """ + def __init__(self, parent, keyfun, name='regular'): + + wx.Panel.__init__(self, parent, -1) + self.guiUtility = GUIUtility.getInstance() + self.utility = self.guiUtility.utility + self.parent = parent + self.data = None + self.datacopy = {} + self.titleLength = 64 # num characters + 
self.triblerGrey = wx.Colour(200,200,200) ## 128,128,128 + self.selected = False + self.warningMode = False + self.summary = None ## added for function toggleFilesItemDetailsSummary + self.oldCategoryLabel = None + self.guiserver = parent.guiserver + + self.guiImagePath = os.path.join(self.guiUtility.utility.getPath(), 'Tribler', 'Main', 'vwxGUI', 'images') ## + + if self.parent.GetName() == 'filesGrid': + self.listItem = (self.parent.viewmode == 'list') + self.guiserver = parent.guiserver + else: + self.listItem = True + self.guiserver = GUIServer.getInstance() + + self.addComponents() + self.iconsManager = IconsManager.getInstance() + self.Show() + self.Refresh() + self.Layout() + self.gridKeyTyped = keyfun + + + self.name = name + self.ThumbnailViewer = ThumbnailViewer + self.guiUtility.thumbnailViewer = ThumbnailViewer + self.vSizer2 = None + + + def addComponents(self): + + self.Show(False) + self.triblerStyles = TriblerStyles.getInstance() ## added + + self.selectedColour = wx.Colour(255,200,187) + self.unselectedColour = wx.WHITE + + self.SetBackgroundColour(self.unselectedColour) + + self.Bind(wx.EVT_MOUSE_EVENTS, self.mouseAction) ## added + + if not self.listItem: + + self.SetMinSize((138,110)) + + self.hSizer = wx.BoxSizer(wx.HORIZONTAL) + self.hSizer.Add([7,5],0,wx.EXPAND|wx.FIXED_MINSIZE,3) + + self.vSizer = wx.BoxSizer(wx.VERTICAL) + + # Add thumb + #self.thumb = ThumbnailViewer(self, 'filesMode') + #self.thumb.setBackground(wx.BLACK) + #self.thumb.SetSize((125,70)) + #self.vSizer.Add(self.thumb, 0, wx.ALL, 0) + + self.title =wx.StaticText(self,-1,"",wx.Point(0,0),wx.Size(125,22)) # + self.title.SetBackgroundColour(wx.WHITE) + self.title.SetFont(wx.Font(FS_FILETITLE,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + self.title.SetMinSize((125,40)) + self.vSizer.Add(self.title, 0, wx.BOTTOM, 3) + self.vSizer.Add([100,5],0,wx.EXPAND|wx.FIXED_MINSIZE,3) + # + self.hSizer.Add(self.vSizer,0,wx.ALL|wx.FIXED_MINSIZE,0) + self.hSizer.Add([2,5],0,wx.EXPAND|wx.FIXED_MINSIZE,3) + self.SetSizer(self.hSizer); + else: # listitem + self.SetMinSize((660,22)) + + self.vSizerOverall = wx.BoxSizer(wx.VERTICAL) ## + + + imgpath = os.path.join(self.utility.getPath(),"Tribler","Main","vwxGUI","images","5.0","line3.png") + self.line_file = wx.Image(imgpath, wx.BITMAP_TYPE_ANY) + + self.hLine = wx.StaticBitmap(self, -1, wx.BitmapFromImage(self.line_file)) + + + + #self.hLine = wx.StaticLine(self,-1,wx.DefaultPosition, wx.Size(220,2),wx.LI_HORIZONTAL) + #self.hLine.SetBackgroundColour((255,0,0)) + self.vSizerOverall.Add(self.hLine, 0, 0, 0) ## + + self.hSizer = wx.BoxSizer(wx.HORIZONTAL) + + + + self.hSizer.Add([10,5],0,wx.EXPAND|wx.FIXED_MINSIZE,0) + self.vSizerOverall.Add(self.hSizer, 0, wx.EXPAND, 0) ## + + self.thumb = ThumbnailViewer(self, 'filesMode') + self.thumb.setBackground(wx.BLACK) + self.thumb.SetSize((32,18)) + self.hSizer.Add(self.thumb, 0, wx.ALL, 2) + # Add title + self.title =wx.StaticText(self,-1,"",wx.Point(0,0),wx.Size(105,18)) + self.title.SetBackgroundColour(wx.WHITE) + self.title.SetForegroundColour(wx.BLACK) + self.title.SetFont(wx.Font(FS_FILETITLE,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + self.title.SetMinSize((400,18)) + self.hSizer.Add(self.title, 0,wx.TOP|wx.BOTTOM, 3) + #self.hSizer.Add([5,5],0,wx.EXPAND|wx.FIXED_MINSIZE,3) + # V Line + ##self.vLine1 = self.addLine() + # Add size + self.fileSize = wx.StaticText(self,-1,"size",wx.Point(0,0),wx.Size(100,18), wx.ALIGN_LEFT | wx.ST_NO_AUTORESIZE) + self.fileSize.SetBackgroundColour(wx.WHITE) + 
self.fileSize.SetFont(wx.Font(FS_FILETITLE,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + self.fileSize.SetForegroundColour(wx.BLACK) + self.fileSize.SetMinSize((100,18)) + self.hSizer.Add(self.fileSize, 0,wx.TOP|wx.BOTTOM, 2) + + self.popularity = None + + +# self.popularity = wx.StaticText(self,-1,"",wx.Point(0,0),wx.Size(200,18), wx.ALIGN_LEFT | wx.ST_NO_AUTORESIZE) +# self.popularity.SetBackgroundColour(wx.WHITE) +# self.popularity.SetFont(wx.Font(FS_FILETITLE,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) +# self.popularity.SetForegroundColour(self.triblerGrey) +# self.popularity.SetMinSize((100,18)) +# self.hSizer.Add(self.popularity, 0,wx.TOP|wx.BOTTOM, 2) + + + # V Line + ##self.vLine2 = self.addLine() + # Add creation date + ##self.creationDate = wx.StaticText(self,-1,"21-01-2007",wx.Point(0,0),wx.Size(120,18), wx.ALIGN_LEFT | wx.ST_NO_AUTORESIZE) + ##self.creationDate.SetBackgroundColour(wx.WHITE) + ##self.creationDate.SetFont(wx.Font(FS_FILETITLE,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + ##self.creationDate.SetForegroundColour(self.triblerGrey) + ##self.creationDate.SetMinSize((120,18)) + ##self.hSizer.Add(self.creationDate, 0,wx.TOP|wx.BOTTOM, 2) + # V Line + ##self.vLine3 = self.addLine() + # Add popularity +## self.seeders = ImagePanel(self, -1, wx.DefaultPosition, wx.Size(16,16),name='up') +## self.seeders.setBackground(wx.WHITE) +## self.seeders.SetToolTipString(self.utility.lang.get('rNumberOfSeeders')) + ##self.seedersNumber = wx.StaticText(self,-1,"",wx.Point(0,0),wx.Size(125,18), wx.ALIGN_LEFT | wx.ST_NO_AUTORESIZE) + ##self.seedersNumber.SetBackgroundColour(wx.WHITE) + ##self.seedersNumber.SetForegroundColour(self.triblerGrey) + ##self.seedersNumber.SetFont(wx.Font(FS_FILETITLE,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + ##self.seedersNumber.SetMinSize((100,18)) +## self.leechers = ImagePanel(self, -1, wx.DefaultPosition, wx.Size(16,16),name='down') +## self.leechers.setBackground(wx.WHITE) +## self.leechers.SetToolTipString(self.utility.lang.get('rNumberOfLeechers')) + ##self.leechersNumber = wx.StaticText(self,-1,"",wx.Point(0,0),wx.Size(125,18), wx.ALIGN_LEFT | wx.ST_NO_AUTORESIZE) + ##self.leechersNumber.SetBackgroundColour(wx.WHITE) + ##self.leechersNumber.SetForegroundColour(self.triblerGrey) + ##self.leechersNumber.SetFont(wx.Font(FS_FILETITLE,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + ##self.leechersNumber.SetMinSize((45,18)) +## self.hSizer.Add(self.seeders, 0,wx.TOP|wx.BOTTOM|wx.RIGHT, 2) + ##self.hSizer.Add(self.seedersNumber, 0,wx.TOP|wx.BOTTOM|wx.RIGHT, 2) + ##self.vLine4 = self.addLine() +## self.hSizer.Add(self.leechers, 0,wx.TOP|wx.BOTTOM|wx.RIGHT, 2) + ##self.hSizer.Add(self.leechersNumber, 0,wx.TOP|wx.BOTTOM|wx.RIGHT, 2) + # V Line + ##self.vLine5 = self.addLine() + # Add Taste Heart + self.vSizer2 = wx.BoxSizer(wx.VERTICAL) + self.vSizer2.Add([30,2],0,wx.EXPAND|wx.FIXED_MINSIZE,3) + self.hSizer2 = wx.BoxSizer(wx.HORIZONTAL) + ##self.tasteHeart = TasteHeart.TasteHeart(self, -1, wx.DefaultPosition, wx.Size(14,14),name='TasteHeart') + ##self.hSizer2.Add(self.tasteHeart, 0, wx.TOP, 0) + # Add Taste similarity + ##self.taste =wx.StaticText(self,-1,"",wx.Point(0,0),wx.Size(40,15)) + ##self.taste.SetBackgroundColour(wx.WHITE) + ##self.taste.SetFont(wx.Font(FS_HEARTRANK,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + ##self.taste.SetMinSize((40,15)) + ##self.taste.SetLabel('2nd') + ##self.hSizer2.Add(self.taste, 0, wx.TOP|wx.RIGHT, 0) + self.vSizer2.Add(self.hSizer2,0, wx.EXPAND|wx.FIXED_MINSIZE, 0) + 
self.hSizer.Add(self.vSizer2,0,wx.EXPAND|wx.FIXED_MINSIZE, 0) + # V Line + ##self.vLine6 = self.addLine() + # Add Source Icon + ##self.sourceIcon = ImagePanel(self, -1, wx.DefaultPosition, wx.Size(16,16),name='bcicon') + ##self.sourceIcon.setBackground(wx.WHITE) + ##self.sourceIcon.SetToolTipString(self.utility.lang.get('---')) + ##self.hSizer.Add(self.sourceIcon, 0, wx.TOP, 2) + self.hSizer.Add([10,5],0,wx.FIXED_MINSIZE,0) + + self.hSizerSummary = wx.BoxSizer(wx.HORIZONTAL) ## + self.vSizerOverall.Add(self.hSizerSummary, 0, wx.FIXED_MINSIZE|wx.EXPAND, 0) ## + + if sys.platform != 'linux2': + self.title.Bind(wx.EVT_MOUSE_EVENTS, self.mouseAction) + self.fileSize.Bind(wx.EVT_MOUSE_EVENTS, self.mouseAction) + + + + self.SetSizer(self.vSizerOverall); ## self.hSizer + + self.SetAutoLayout(1); + self.Layout(); + self.Refresh() + + # 2.8.4.2 return value of GetChildren changed + wl = [] + for c in self.GetChildren(): + wl.append(c) + for window in wl: + window.Bind(wx.EVT_LEFT_UP, self.mouseAction) + window.Bind(wx.EVT_KEY_UP, self.keyTyped) + window.Bind(wx.EVT_LEFT_DCLICK, self.doubleClicked) + window.Bind(wx.EVT_RIGHT_DOWN, self.mouseAction) + #window.Bind(wx.EVT_RIGHT_DOWN, self.rightMouseButton) + + def getColumns(self): + return [{'sort':'name', 'reverse':True, 'title':'Name', 'width':395,'tip':self.utility.lang.get('C_filename')}, + {'sort':'length', 'title':'Size', 'width':132, 'tip':self.utility.lang.get('C_filesize')}, + {'sort':'popularity', 'title':'Popularity', 'width':120, 'tip':self.utility.lang.get('C_popularity')} + ] + + + def setData(self, torrent): + + if DEBUG: + if torrent is None: + stat = 'None' + else: + stat = torrent.keys() # torrent['myDownloadHistory']] + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","fip: setData:",stat + + self.data = torrent + + # Do not update if 'similar torrent' is set + if similarTorrent(self.datacopy, self.data): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","fip: Similar torrent" + return + + self.datacopy = copyTorrent(self.data) + + if not torrent: + torrent = {} + + self.thumb.Hide() ## should not be shown + + #self.thumb.setTorrent(torrent) + + if torrent.get('name'): + title = torrent['name'][:self.titleLength] + if sys.platform == 'win32': + title = string.replace(title,'&','&&') + #print >> sys.stderr , title + #print >> sys.stderr , title_new + self.title.Enable(True) + self.title.SetLabel(title) + if sys.platform != 'win32': # on windows causes a new line bug when title contains & symbol + self.title.Wrap(self.title.GetSize()[0]) + self.title.SetToolTipString(title) + ##self.setSourceIcon(torrent) + if self.listItem: + self.fileSize.Enable(True) + if torrent.get('web2'): + self.fileSize.SetLabel('%s s' % torrent['length']) + else: + self.fileSize.SetLabel(self.utility.size_format(torrent['length'])) + + # Show Popularity of torrent a sequence of bars + total = torrent['num_seeders']+torrent['num_leechers'] + + popularity_file = os.path.join(self.utility.getPath(),"Tribler","Main","vwxGUI","images","popularity") + + if total > 18000: + popularity_file+='10' + elif total > 16000: + popularity_file+='9' + elif total > 14000: + popularity_file+='8' + elif total > 12000: + popularity_file+='7' + elif total > 10000: + popularity_file+='6' + elif total > 8000: + popularity_file+='5' + elif total > 6000: + popularity_file+='4' + elif total > 4000: + popularity_file+='3' + elif total > 2000: + popularity_file+='2' + else: + popularity_file+='1' + + popularity_file+='.png' + + if self.popularity is 
not None: + self.popularity.Destroy() + + #self.popularity = tribler_topButton(self, -1, wx.DefaultPosition, wx.Size(49,12),name=popularity_file) + self.popularity_image = wx.Image(popularity_file, wx.BITMAP_TYPE_ANY) + + self.popularity = wx.StaticBitmap(self, -1, wx.BitmapFromImage(self.popularity_image)) + self.popularity.Bind(wx.EVT_MOUSE_EVENTS, self.popularityOver) + + if torrent['num_seeders'] > 0 and torrent['num_leechers'] > 0: + self.popularity.SetToolTipString('%s seeders\n%s leechers' % (torrent['num_seeders'], torrent['num_leechers'])) + else: + self.popularity.SetToolTipString('Poor') + self.hSizer.Add(self.popularity, 0, wx.TOP, 2) + + self.hLine.Show() + + + + self.hLine.Show() + else: + #self.thumb.Hide() + self.title.SetLabel('') + self.title.SetToolTipString('') + self.title.Enable(False) + if self.listItem: + # -- if list VIEW -- + self.fileSize.SetLabel('') + + if self.popularity: + self.popularity.Hide() + + + + + + self.Layout() + #self.Refresh() + #self.parent.Refresh() + + def addLine(self): + vLine = wx.StaticLine(self,-1,wx.DefaultPosition, wx.Size(2,22),wx.LI_VERTICAL) +# vLine.SetForegroundColour(wx.Colour(64,128,128)) +# vLine.SetBackgroundColour(wx.Colour(255,51,0)) + self.hSizer.Add(vLine, 0, wx.RIGHT|wx.LEFT|wx.EXPAND, 3) + return vLine + + def select(self, rowIndex, colIndex, pageIndex=-1, panelsPerRow=-1, rowsPerPage=-1): + # if pageIndex is given, we assume panelsPerRow and rowsPerPage are given as well, + # and set click_position, a 0-indexed value indicating the rank of the panel + if pageIndex>-1: + panelsPerPage = panelsPerRow * rowsPerPage + self.data["click_position"] = pageIndex * panelsPerPage + rowIndex * panelsPerRow + colIndex + + # allows to deselect a selected torrent + #if self.selected == True: + # self.deselect(rowIndex, colIndex) + # return + + self.selected = True + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'fip: item selected' + if self.data and self.data.get('myDownloadHistory'): + colour = self.guiUtility.selectedColour + elif self.data and self.data.get('query_torrent_was_requested',False): + colour = self.guiUtility.selectedColourPending + else: + colour = self.guiUtility.selectedColour + #self.thumb.setSelected(True) + self.title.SetBackgroundColour(colour) + self.title.SetFont(wx.Font(FS_FILETITLE_SEL,FONTFAMILY,FONTWEIGHT,wx.BOLD,False,FONTFACE)) + + + if self.listItem: + self.SetBackgroundColour(colour) + self.fileSize.SetBackgroundColour(colour) + ##self.creationDate.SetBackgroundColour(colour) +## self.seeders.setBackground(colour) + ##self.seedersNumber.SetBackgroundColour(colour) +## self.leechers.setBackground(colour) + ##self.leechersNumber.SetBackgroundColour(colour) + ##self.tasteHeart.setBackground(colour) + ##self.sourceIcon.setBackground(colour) + ##self.taste.SetBackgroundColour(colour) + ##self.sourceIcon.SetBackgroundColour(colour) + self.toggleFilesItemDetailsSummary(True) ## + self.guiUtility.standardOverview.selectedTorrent = self.data['infohash'] + + self.Refresh() + self.guiUtility.standardOverview.SetFocus() + + def deselect(self, rowIndex, colIndex): + self.selected = False + #colour = self.guiUtility.unselectedColour + self.hLine.Show() + self.vSizerOverall.Layout() + downloading = self.data and self.data.get('myDownloadHistory') + if rowIndex % 2 == 0 or not self.listItem: + if downloading: + colour = self.guiUtility.unselectedColour + else: + colour = self.guiUtility.unselectedColour + else: + if downloading: + colour = self.guiUtility.unselectedColour2 + else: + 
colour = self.guiUtility.unselectedColour2 + + + #self.thumb.setSelected(False) + self.title.SetBackgroundColour(colour) + self.title.SetFont(wx.Font(FS_FILETITLE,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + + + if self.listItem: + self.SetBackgroundColour(colour) + self.fileSize.SetBackgroundColour(colour) + ##self.creationDate.SetBackgroundColour(colour) +## self.seeders.setBackground(colour) + ##self.seedersNumber.SetBackgroundColour(colour) +## self.leechers.setBackground(colour) + ##self.leechersNumber.SetBackgroundColour(colour) + ##self.tasteHeart.setBackground(colour) + ##self.sourceIcon.setBackground(colour) + ##self.taste.SetBackgroundColour(colour) + ##self.sourceIcon.SetBackgroundColour(colour) + self.toggleFilesItemDetailsSummary(False) ## + self.Refresh() + + def keyTyped(self, event): + if self.selected: + key = event.GetKeyCode() + if (key == wx.WXK_DELETE): + if self.data: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'fip: deleting' + #self.guiUtility.deleteTorrent(self.data) + event.Skip() + try: + self.gridKeyTyped(event) + except: + print 'Exception in keytyped' + + def popularityOver(self, event): + + event.Skip() + colour = wx.Colour(216,233,240) + + if self.data is None: + colour = self.guiUtility.unselectedColour + + elif event.Entering() and self.data is not None: + colour = self.guiUtility.selectedColour + + + + self.title.SetBackgroundColour(colour) + self.fileSize.SetBackgroundColour(colour) + self.SetBackgroundColour(colour) + wx.CallAfter(self.Refresh) + + + #if event.Entering(): + # self.title.SetFont(wx.Font(FS_FILETITLE,FONTFAMILY,FONTWEIGHT,wx.BOLD,False,FONTFACE)) + #elif event.Leaving() and self.selected == False: + # self.title.SetFont(wx.Font(FS_FILETITLE,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + + + + + + #self.SetFocus() + if self.data and (event.LeftUp() or event.RightDown()): + # torrent data is sent to guiUtility > standardDetails.setData + self.guiUtility.selectTorrent(self.data) + + ##if event.RightDown(): + ## self.rightMouseButton(event) + + + + + def mouseAction(self, event): + + event.Skip() + colour = wx.Colour(216,233,240) + + if self.data is None: + colour = self.guiUtility.unselectedColour + + elif event.Entering() and self.data is not None: + colour = self.guiUtility.selectedColour + + elif event.Leaving() and self.selected == False: + #if sys.platform == 'win32': + # position = event.GetPosition() + # for i in xrange(2): + # position[i]+=event.GetEventObject().GetPosition()[i] + # position[i]-=self.GetPosition()[i] + # size = self.GetSize() + + # if position[0]<0 or position[0]>=size[0] or position[1]<0 or position[1]>=size[1]: + # colour = self.guiUtility.unselectedColour + #else: + colour = self.guiUtility.unselectedColour + + + self.title.SetBackgroundColour(colour) + self.fileSize.SetBackgroundColour(colour) + self.SetBackgroundColour(colour) + wx.CallAfter(self.Refresh) + + + #if event.Entering(): + # self.title.SetFont(wx.Font(FS_FILETITLE,FONTFAMILY,FONTWEIGHT,wx.BOLD,False,FONTFACE)) + #elif event.Leaving() and self.selected == False: + # self.title.SetFont(wx.Font(FS_FILETITLE,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + + + + + + #self.SetFocus() + if self.data and (event.LeftUp() or event.RightDown()): + # torrent data is sent to guiUtility > standardDetails.setData + self.guiUtility.selectTorrent(self.data) + + ##if event.RightDown(): + ## self.rightMouseButton(event) + + + + def rightMouseButton(self, event): + menu = self.guiUtility.OnRightMouseAction(event) + if 
menu is not None: + self.PopupMenu(menu, (-1,-1)) + + def doubleClicked(self, event): + self.guiUtility.standardDetails.download(self.data) + + def getIdentifier(self): + return self.data['infohash'] + + def toggleFilesItemDetailsSummary(self, visible): + if visible and not self.summary: + if not self.data.get('web2'): + self.guiUtility.moderatedinfohash = self.data['infohash'] + self.summary = FilesItemDetailsSummary(self, torrentHash = self.data['infohash'], torrent = self.data) + else: + self.summary = FilesItemDetailsSummary(self, torrentHash = None, torrent = self.data, web2data = self.data) + ##self.triblerStyles.setLightText(self.summary) + self.hSizerSummary.Add(self.summary, 1, wx.ALL|wx.EXPAND, 0) + if sys.platform == 'win32': + self.SetMinSize((-1,97)) + elif sys.platform == 'darwin': + self.SetMinSize((-1,101)) + else: + self.SetMinSize((-1,100)) + elif visible and self.summary: + pass + ## self.guiUtility.standardDetails.setDownloadbutton(torrent=self.data, item = self.summary.download) + + elif self.summary and not visible: + #beg = time() + self.summary.Hide() + #self.summary.reset_video() + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'hide took: %f s' % (time() -beg) + + # the Thumb should be destoryed seperately because it has a different parent. + ##if not self.summary.downloading: + ##wx.CallAfter(self.summary.thumbSummary.Destroy) + #self.hLine.Show() + wx.CallAfter(self.summary.DestroyChildren) + wx.CallAfter(self.summary.Destroy) + self.summary = None + self.SetMinSize((-1,22)) + + + +class ThumbnailViewer(wx.Panel): + """ + Show thumbnail and mast with info on mouseOver + """ + + def __init__(self, parent, mode, **kw): + self.parent = parent + wx.Panel.__init__(self, parent, **kw) + self.mode = mode + self._PostInit() + + def OnCreate(self, event): + self.Unbind(wx.EVT_WINDOW_CREATE) + wx.CallAfter(self._PostInit) + event.Skip() + return True + + def _PostInit(self): + # Do all init here + self.backgroundColor = wx.WHITE + self.torrentBitmap = None + self.torrent = None + self.mouseOver = False + self.triblerGrey = wx.Colour(128,128,128) + self.triblerLightGrey = wx.Colour(203,203,203) + self.sourceIcon = None + self.guiUtility = GUIUtility.getInstance() + self.Bind(wx.EVT_MOUSE_EVENTS, self.mouseAction) + self.Bind(wx.EVT_LEFT_UP, self.guiUtility.buttonClicked) + self.Bind(wx.EVT_PAINT, self.OnPaint) + self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnErase) + self.selected = False + self.border = None + self.downloading = False + self.categoryIcon = None + self.iconsManager = IconsManager.getInstance() + + + def setTorrent(self, torrent): + if not torrent: + self.Hide() + self.Refresh() + return + + if not self.IsShown(): + self.Hide() ## self.Show() + + + self.torrent = torrent + ##self.setThumbnail(torrent) + ##self.setCategoryIcon(torrent) + # items in library should not show downloading color + self.downloading = torrent.get('myDownloadHistory', False) and self.mode != 'libraryMode' + + + def setCategoryIcon(self, torrent): + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","fip: ",`torrent['name']`,"has cat",torrent.get('category') + self.categoryIcon = self.iconsManager.getCategoryIcon(self.mode, torrent.get('category'), thumbtype='icon', web2 = torrent.get('web2')) + + def setSourceIcon(self, si): + self.sourceIcon = si + + def setThumbnail(self, torrent): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","fip: setThumb:",torrent['name'] + + thumbtype = (self.parent.listItem) and 'small' or 'normal' + 
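The expression "(self.parent.listItem) and 'small' or 'normal'" just above is the old pre-Python-2.5 conditional idiom (it only works because 'small' is truthy), presumably kept for compatibility with older interpreters. A purely illustrative equivalent (the helper name is ours, not part of the patch):

    def choose_thumbtype(list_item):
        # Equivalent of: (list_item) and 'small' or 'normal'
        # 'small' thumbnails for list rows, 'normal' ones for the grid view.
        return 'small' if list_item else 'normal'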
bmp = None + readable = torrent.get('metadata',{}).get('ThumbReadable') + if readable == False: + bmp = self.iconsManager.getCategoryIcon(self.mode,torrent.get('category'), thumbtype=thumbtype, web2 = torrent.get('web2')) + + else: + # Check if we have already read the thumbnail and metadata information from this torrent file + if 'preview' in torrent: + self.GetParent().guiserver.add_task(lambda:self.loadMetadata(torrent,None),0) + + elif torrent.get('metadata',{}).get('ThumbnailBitmap'): + if self.mode == 'libraryMode' or self.parent.listItem: + # Make a resized thumb for lib view + bmp = torrent['metadata'].get('ThumbnailBitmap') + if bmp: + img = bmp.ConvertToImage() + bmp = getResizedBitmapFromImage(img, libraryModeThumbSize) + + elif self.mode == 'filesMode': + bmp = torrent['metadata'].get('ThumbnailBitmap') + elif 'torrent_file_name' in torrent and torrent['torrent_file_name'] != '': + torrent_dir = self.guiUtility.utility.session.get_torrent_collecting_dir() + torrent_filename = os.path.join(torrent_dir, torrent['torrent_file_name']) + + if DEBUG: + print "fip: Scheduling read of thumbnail for",`torrent['name']`,"from",torrent_filename + + def loadMetaDataNow(): + try: + self.loadMetadata(torrent,torrent_filename) + except wx.PyDeadObjectError: + pass + + try: + self.GetParent().guiserver.add_task(loadMetaDataNow,0) + except wx.PyDeadObjectError: + pass + + # ARNO: TODO: The FileItemPanels that use this ThumbnailViewer now get deleted, and thus + # also the ThumbnailViewer objects. Or at least the C++ part of them. As a result we + # can no longer schedule these loadMetadata callbacks on the GUITaskQueue thread. + # + # At the moment, the wx code protects us, and throws an exception that the C++ part + # of the ThumbnailViewer object is gone. But we should clean this up. + + if not bmp: + bmp = self.iconsManager.getCategoryIcon(self.mode, torrent.get('category'), thumbtype=thumbtype, web2 = torrent.get('web2')) + + assert bmp, 'No bitmap found for %s' % `torrent['name']` + self.setBitmap(bmp) + width, height = self.GetSize() + d = 1 + self.border = [wx.Point(0,d), wx.Point(width-d, d), wx.Point(width-d, height-d), wx.Point(d,height-d), wx.Point(d,0)] + self.Refresh() + #wx.Yield() + + + + def setBitmap(self, bmp): + # Recalculate image placement + if not bmp: + self.torrentBitmap = None + self.xpos, self.ypos = 0,0 + raise Exception('Warning: Thumbnail set to None for %s' % `self.torrent`) + else: + w, h = self.GetSize() + iw, ih = bmp.GetSize() + + self.torrentBitmap = bmp + self.xpos, self.ypos = (w-iw)/2, (h-ih)/2 + + + def loadMetadata(self, torrent,torrent_filename): + """ Called by separate non-GUI thread """ + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","fip: ThumbnailViewer: loadMetadata",torrent_filename + if not torrent.get('preview'): + if not os.path.exists(torrent_filename): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","fip: ThumbnailViewer: loadMetadata: %s does not exist" % torrent_filename + return None + + # We can't do any wx stuff here apparently, so the only thing we can do is to + # read the data from the torrent file and create the wxBitmap in the GUI callback. 
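loadMetadata() runs on the separate GUITaskQueue worker thread, so it restricts itself to file and network I/O and hands its result back to the GUI thread through wx.CallAfter (see metadata_thread_gui_callback below). A minimal sketch of that hand-off, with hypothetical names (load_on_worker_thread, on_gui_thread) that are not part of the patch:

    import wx

    def load_on_worker_thread(path, on_gui_thread):
        # Worker thread: plain blocking I/O only, no wx object creation here.
        f = open(path, 'rb')
        data = f.read()
        f.close()
        # wx.CallAfter queues the call onto the main event loop, where it is
        # safe to build wx.Image/wx.Bitmap objects from the raw bytes.
        wx.CallAfter(on_gui_thread, data)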
+ + newmetadata = loadAzureusMetadataFromTorrent(torrent_filename) + + + if newmetadata.get('Thumbnail') is None and AUTOMODERATION_SAVE_WEBSEARCH_IMAGE_TO_TORRENT: + # Use Google Image search to find a thumb + (mimetype,thumbdata) = google_image_search(torrent['name']) + + if DEBUG: + if thumbdata is None: + t = None + else: + t = 'data' + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","fip: automod: Google Image Search Got:",mimetype,t + + if mimetype is not None and thumbdata is not None: + # Scale image + scaledthumbdata = scale_image_convert_jpeg(mimetype,thumbdata,171) + newmetadata = { 'Thumbnail' : scaledthumbdata} + + # Save thumb data in torrent, auto-moderation ;o) + saveAzureusMetadataToTorrent(torrent_filename,scaledthumbdata) + else: + # Web2 items have preview fields + newmetadata = { 'Thumbnail' : torrent['preview'] } + + + wx.CallAfter(self.metadata_thread_gui_callback,torrent,newmetadata) + + + def metadata_thread_gui_callback(self,torrent,metadata): + """ Called by GUI thread """ + + #print 'Azureus_thumb: %s' % thumbnailString + thumbnailString = metadata.get('Thumbnail') + + if thumbnailString: + #print 'Found thumbnail: %s' % thumbnailString + + img = createThumbImage(thumbnailString) + if img is None: + return + + bmp = getResizedBitmapFromImage(img, filesModeThumbSize) + + if bmp: + metadata['ThumbnailBitmap'] = bmp + metadata['ThumbnailReadable'] = True + ## We now scale live + #bmplib = getResizedBitmapFromImage(img, libraryModeThumbSize) + #if bmplib: + # metadata['ThumbnailBitmapLibrary'] = bmplib + + # Dump the raw data + #del metadata['Thumbnail'] + else: + metadata['ThumbnailReadable'] = False + + torrent['metadata'] = metadata + + # This item may be displaying another torrent right now, only show the icon + # when it's still the same torrent + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","fip: meta_gui_cb: old torrent",`torrent['name']`,"new torrent",`self.torrent['name']` + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","fip: meta_gui_cb: old torrent",`torrent['infohash']`,"new torrent",`self.torrent['infohash']` + try: + if torrent['infohash'] == self.torrent['infohash']: + bmp = metadata.get('ThumbnailBitmap') + if bmp: + if self.parent.listItem: + bmp = getResizedBitmapFromImage(img, libraryModeThumbSize) + self.setBitmap(bmp) + self.Refresh() + except wx.PyDeadObjectError: + pass + + + + def OnErase(self, event): + pass + #event.Skip() + + def setSelected(self, sel): + self.selected = sel + self.Refresh() + + def isSelected(self): + return self.selected + + def mouseAction(self, event): + if event.Entering(): + #print 'enter' + self.mouseOver = True + self.Refresh() + elif event.Leaving(): + self.mouseOver = False + #print 'leave' + self.Refresh() + + """ + def ClickedButton(self): + print 'Click' + """ + + def setBackground(self, wxColor): + self.backgroundColor = wxColor + + def OnPaint(self, evt): + dc = wx.BufferedPaintDC(self) + dc.SetBackground(wx.Brush(self.backgroundColor)) + dc.Clear() + + + if self.torrent and self.mode == 'filesMode': + rank = self.torrent.get('simRank', -1) + else: + rank = -1 + + heartBitmap = TasteHeart.getHeartBitmap(rank) + + + if self.torrentBitmap: + dc.DrawBitmap(self.torrentBitmap, self.xpos,self.ypos, True) +# dc.SetFont(wx.Font(6, wx.SWISS, wx.NORMAL, wx.BOLD, True)) +# dc.DrawBitmap(MASK_BITMAP,0 ,52, True) +# dc.SetTextForeground(wx.BLACK) + #dc.DrawText('rating', 8, 50) + + if self.categoryIcon: + dc.DrawBitmap(self.categoryIcon, 99, 7, True) + if 
self.sourceIcon: + dc.DrawBitmap(self.sourceIcon, 101, 27, True) + + if self.mouseOver: + dc.SetFont(wx.Font(6, FONTFAMILY,FONTWEIGHT, wx.BOLD, True, FONTFACE)) + mask = self.iconsManager.get_default('filesMode','MASK_BITMAP') + dc.DrawBitmap(mask,0 ,0, True) + + if heartBitmap: + mask = self.iconsManager.get_default('filesMode','MASK_BITMAP_BOTTOM') + margin = 52 + dc.DrawBitmap(mask,0 ,margin, True) + dc.DrawBitmap(heartBitmap,5 ,margin+2, True) + dc.SetFont(wx.Font(FS_HEARTRANK, FONTFAMILY, FONTWEIGHT, wx.BOLD, False, FONTFACE)) + text = repr(rank) + dc.DrawText(text, 22, margin+4) + + + if self.border: + if self.selected: + if self.downloading: + colour = self.guiUtility.selectedColourDownload + else: + colour = self.guiUtility.triblerRed + else: + if self.downloading: + colour = self.guiUtility.unselectedColourDownload + else: + colour = self.triblerLightGrey + dc.SetPen(wx.Pen(colour, 2)) + dc.DrawLines(self.border) + + +def loadAzureusMetadataFromTorrent(torrent_filename): + metadata = getMetainfo(torrent_filename) + if not metadata: + return None + + newmetadata = metadata.get('azureus_properties', {}).get('Content',{}) + for key in ['encoding','comment','comment-utf8']: # 'created by' + if key in metadata: + newmetadata[key] = metadata[key] + return newmetadata + + +def createThumbImage(imgdata): + try: + # Simple protection against bad parsing of websites, if the + # image data is HTML, ignore it. + + low = imgdata[:5].lower() + if low == ' (w/float(h)): + nw = w + nh = int(ih * w/float(iw)) + else: + nh = h + nw = int(iw * h/float(ih)) + if nw != iw or nh != ih: + #print 'Rescale from (%d, %d) to (%d, %d)' % (iw, ih, nw, nh) + img.Rescale(nw, nh) + bmp = wx.BitmapFromImage(img) + return bmp + +def google_image_search(name): + try: + rname = name.replace('.',' ') + rname = rname.replace('-',' ') + rname = rname.replace('_',' ') + rname = rname.replace('[',' ') + rname = rname.replace(']',' ') + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","fip: automod: Name becomes keywords",rname + + qname = urllib.quote(rname) + + # 1. Query Google Image search + url = 'http://www.searchmash.com/results/images:'+qname+'' + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","fip: automod: Query URL",url + f = urlOpenTimeout(url,timeout=2) + resp = f.read() + f.close() + + start = 0 + while True: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","fip: automod: Searching from idx",start + i = resp.find("imageUrl",start) + if i == -1: + break + else: + i += len("imageUrl\":\"") + j = resp.find("\"",i) + if j == -1: + break + else: + # 2. Found an Image, see if we can guess MIME type + imgurl = resp[i:j] + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","fip: automod: Found image",imgurl + + iconmime = mimetypes.guess_type(imgurl)[0] + if iconmime is None: + start = j + continue + + # 3. 
Load the image + try: + f = urlOpenTimeout(imgurl,timeout=2) + imgresp = f.read() + f.close() + + if imgresp == '': + start = j + continue + + return (iconmime,imgresp) + except: + print_exc() + start = j + continue + except: + print_exc() + return (None,None) + + +def scale_image_convert_jpeg(mimetype,data,dim): + icondata = None + try: + cio = cStringIO.StringIO(data) + if wx.Image.CanReadStream(cio): + sim = data2wxImage(mimetype,data,dim=dim) + [thumbhandle,thumbfilename] = tempfile.mkstemp("torrent-thumb") + os.close(thumbhandle) + sim.SaveFile(thumbfilename,wx.BITMAP_TYPE_JPEG) + + f = open(thumbfilename,"rb") + icondata = f.read() + f.close() + + os.remove(thumbfilename) + except: + print_exc() + + return icondata + + +def saveAzureusMetadataToTorrent(torrentfilename,scaledthumbdata): + try: + f = open(torrentfilename,"rb") + data = f.read() + f.close() + d = bdecode(data) + + d['azureus_properties'] = {} + d['azureus_properties']['Content'] = {} + d['azureus_properties']['Content']['Thumbnail'] = scaledthumbdata + + newdata = bencode(d) + f = open(torrentfilename,"wb") + f.write(newdata) + f.close() + except: + print_exc() + diff --git a/tribler-mod/Tribler/Main/vwxGUI/filesTab_files.py b/tribler-mod/Tribler/Main/vwxGUI/filesTab_files.py new file mode 100644 index 0000000..8dc5a19 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/filesTab_files.py @@ -0,0 +1,88 @@ +from time import localtime, strftime +# -*- coding: iso-8859-1 -*- +# Don't modify comment + +import wx +from triblerList import * +from tribler_topButton import * +#[inc]add your include files here + +#[inc]end your include + +class filesTab_files(wx.Panel): + def __init__(self,parent,id = -1,pos = wx.Point(0,0),size = wx.Size(300,348),style = wx.TAB_TRAVERSAL,name = 'panel'): + pre=wx.PrePanel() + self.OnPreCreate() + pre.Create(parent,id,pos,size,style,name) + self.PostCreate(pre) + self.initBefore() + self.VwXinit() + self.initAfter() + + def __del__(self): + self.Ddel() + return + + + def VwXinit(self): + self.Show(True) + self.SetBackgroundColour(wx.Colour(255,255,255)) + self.st209cCC = wx.StaticText(self,-1,"",wx.Point(8,8),wx.Size(129,18),wx.ST_NO_AUTORESIZE) + self.st209cCC.SetLabel("number of included files:") + self.filesField = wx.StaticText(self,-1,"",wx.Point(137,8),wx.Size(86,18),wx.ST_NO_AUTORESIZE) + self.filesField.SetLabel("unknown") + self.st209cCCC = wx.StaticText(self,-1,"",wx.Point(8,31),wx.Size(44,18),wx.ST_NO_AUTORESIZE) + self.st209cCCC.SetLabel("tracker:") + self.trackerField = wx.StaticText(self,-1,"",wx.Point(52,31),wx.Size(181,18),wx.ST_NO_AUTORESIZE) + self.trackerField.SetLabel("unknown") + self.includedFiles = FilesList(self,-1,wxDefaultPosition,wxDefaultSize) + self.includedFiles.SetDimensions(3,64,292,155) + self.download = tribler_topButton(self, -1, wx.Point(240,3), wx.Size(55,55)) + self.sz3s = wx.BoxSizer(wx.VERTICAL) + self.sz237s = wx.BoxSizer(wx.HORIZONTAL) + self.downloadSizer = wx.BoxSizer(wx.HORIZONTAL) + self.vert = wx.BoxSizer(wx.VERTICAL) + self.hor_tracker = wx.BoxSizer(wx.HORIZONTAL) + self.hor_numfiles = wx.BoxSizer(wx.HORIZONTAL) + self.sz3s.Add(self.sz237s,0,wx.TOP|wx.LEFT|wx.BOTTOM|wx.RIGHT|wx.EXPAND|wx.FIXED_MINSIZE,3) + self.sz3s.Add(self.includedFiles,1,wx.TOP|wx.LEFT|wx.BOTTOM|wx.RIGHT|wx.EXPAND|wx.FIXED_MINSIZE,3) + self.sz237s.Add(self.downloadSizer,1,wx.EXPAND|wx.FIXED_MINSIZE,4) + self.downloadSizer.Add(self.vert,0,wx.EXPAND|wx.FIXED_MINSIZE,3) + self.downloadSizer.Add([7,55],1,wx.EXPAND|wx.FIXED_MINSIZE,3) + 
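Returning to saveAzureusMetadataToTorrent() above: it rewrites the .torrent file by bdecode-ing it, attaching the thumbnail under azureus_properties/Content (the key that loadAzureusMetadataFromTorrent() reads back), and bencode-ing the result. A short sketch of that round-trip, assuming the BitTornado bencode module shipped in this tree; unlike the hunk above, this variant preserves any pre-existing azureus_properties keys:

    from Tribler.Core.BitTornado.bencode import bencode, bdecode

    def set_torrent_thumbnail(torrentfilename, thumbdata):
        f = open(torrentfilename, 'rb')
        d = bdecode(f.read())
        f.close()
        # Nest the thumbnail where loadAzureusMetadataFromTorrent() looks for it.
        content = d.setdefault('azureus_properties', {}).setdefault('Content', {})
        content['Thumbnail'] = thumbdata
        f = open(torrentfilename, 'wb')
        f.write(bencode(d))
        f.close()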
self.downloadSizer.Add(self.download,0,wx.FIXED_MINSIZE,3) + self.vert.Add(self.hor_numfiles,0,wx.EXPAND|wx.FIXED_MINSIZE,3) + self.vert.Add(self.hor_tracker,0,wx.EXPAND|wx.FIXED_MINSIZE,3) + self.hor_tracker.Add(self.st209cCCC,0,wx.LEFT|wx.FIXED_MINSIZE,5) + self.hor_tracker.Add(self.trackerField,0,wx.FIXED_MINSIZE,5) + self.hor_numfiles.Add(self.st209cCC,0,wx.TOP|wx.LEFT|wx.FIXED_MINSIZE,5) + self.hor_numfiles.Add(self.filesField,0,wx.TOP|wx.FIXED_MINSIZE,5) + self.SetSizer(self.sz3s);self.SetAutoLayout(1);self.Layout(); + self.Refresh() + return + def VwXDelComp(self): + return + +#[win]add your code here + + def OnPreCreate(self): + #add your code here + + return + + def initBefore(self): + #add your code here + + return + + def initAfter(self): + #add your code here + + return + + def Ddel(self): #init function + #[2a8]Code VwX...Don't modify[2a8]# + #add your code here + + return #end function + +#[win]end your code diff --git a/tribler-mod/Tribler/Main/vwxGUI/filesTab_files.py.bak b/tribler-mod/Tribler/Main/vwxGUI/filesTab_files.py.bak new file mode 100644 index 0000000..124493e --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/filesTab_files.py.bak @@ -0,0 +1,87 @@ +# -*- coding: iso-8859-1 -*- +# Don't modify comment + +import wx +from triblerList import * +from tribler_topButton import * +#[inc]add your include files here + +#[inc]end your include + +class filesTab_files(wx.Panel): + def __init__(self,parent,id = -1,pos = wx.Point(0,0),size = wx.Size(300,348),style = wx.TAB_TRAVERSAL,name = 'panel'): + pre=wx.PrePanel() + self.OnPreCreate() + pre.Create(parent,id,pos,size,style,name) + self.PostCreate(pre) + self.initBefore() + self.VwXinit() + self.initAfter() + + def __del__(self): + self.Ddel() + return + + + def VwXinit(self): + self.Show(True) + self.SetBackgroundColour(wx.Colour(255,255,255)) + self.st209cCC = wx.StaticText(self,-1,"",wx.Point(8,8),wx.Size(129,18),wx.ST_NO_AUTORESIZE) + self.st209cCC.SetLabel("number of included files:") + self.filesField = wx.StaticText(self,-1,"",wx.Point(137,8),wx.Size(86,18),wx.ST_NO_AUTORESIZE) + self.filesField.SetLabel("unknown") + self.st209cCCC = wx.StaticText(self,-1,"",wx.Point(8,31),wx.Size(44,18),wx.ST_NO_AUTORESIZE) + self.st209cCCC.SetLabel("tracker:") + self.trackerField = wx.StaticText(self,-1,"",wx.Point(52,31),wx.Size(181,18),wx.ST_NO_AUTORESIZE) + self.trackerField.SetLabel("unknown") + self.includedFiles = FilesList(self,-1,wxDefaultPosition,wxDefaultSize) + self.includedFiles.SetDimensions(3,64,292,155) + self.download = tribler_topButton(self, -1, wx.Point(240,3), wx.Size(55,55)) + self.sz3s = wx.BoxSizer(wx.VERTICAL) + self.sz237s = wx.BoxSizer(wx.HORIZONTAL) + self.downloadSizer = wx.BoxSizer(wx.HORIZONTAL) + self.vert = wx.BoxSizer(wx.VERTICAL) + self.hor_tracker = wx.BoxSizer(wx.HORIZONTAL) + self.hor_numfiles = wx.BoxSizer(wx.HORIZONTAL) + self.sz3s.Add(self.sz237s,0,wx.TOP|wx.LEFT|wx.BOTTOM|wx.RIGHT|wx.EXPAND|wx.FIXED_MINSIZE,3) + self.sz3s.Add(self.includedFiles,1,wx.TOP|wx.LEFT|wx.BOTTOM|wx.RIGHT|wx.EXPAND|wx.FIXED_MINSIZE,3) + self.sz237s.Add(self.downloadSizer,1,wx.EXPAND|wx.FIXED_MINSIZE,4) + self.downloadSizer.Add(self.vert,0,wx.EXPAND|wx.FIXED_MINSIZE,3) + self.downloadSizer.Add([7,55],1,wx.EXPAND|wx.FIXED_MINSIZE,3) + self.downloadSizer.Add(self.download,0,wx.FIXED_MINSIZE,3) + self.vert.Add(self.hor_numfiles,0,wx.EXPAND|wx.FIXED_MINSIZE,3) + self.vert.Add(self.hor_tracker,0,wx.EXPAND|wx.FIXED_MINSIZE,3) + self.hor_tracker.Add(self.st209cCCC,0,wx.LEFT|wx.FIXED_MINSIZE,5) + 
self.hor_tracker.Add(self.trackerField,0,wx.FIXED_MINSIZE,5) + self.hor_numfiles.Add(self.st209cCC,0,wx.TOP|wx.LEFT|wx.FIXED_MINSIZE,5) + self.hor_numfiles.Add(self.filesField,0,wx.TOP|wx.FIXED_MINSIZE,5) + self.SetSizer(self.sz3s);self.SetAutoLayout(1);self.Layout(); + self.Refresh() + return + def VwXDelComp(self): + return + +#[win]add your code here + + def OnPreCreate(self): + #add your code here + + return + + def initBefore(self): + #add your code here + + return + + def initAfter(self): + #add your code here + + return + + def Ddel(self): #init function + #[2a8]Code VwX...Don't modify[2a8]# + #add your code here + + return #end function + +#[win]end your code diff --git a/tribler-mod/Tribler/Main/vwxGUI/filesTab_files.xrc b/tribler-mod/Tribler/Main/vwxGUI/filesTab_files.xrc new file mode 100644 index 0000000..70d3ff9 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/filesTab_files.xrc @@ -0,0 +1,120 @@ + + + + 0,0 + 300,348 + #ffffff + + wxVERTICAL + + wxTOP|wxLEFT|wxBOTTOM|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 3 + + + wxHORIZONTAL + + wxEXPAND|wxFIXED_MINSIZE + 4 + + + wxHORIZONTAL + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + wxVERTICAL + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + wxHORIZONTAL + + wxTOP|wxLEFT|wxFIXED_MINSIZE + 5 + + + + + 8,8 + 129,18 + + + + wxTOP|wxFIXED_MINSIZE + 5 + + + + + 137,8 + 86,18 + + + + + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + wxHORIZONTAL + + wxLEFT|wxFIXED_MINSIZE + 5 + + + + + 8,31 + 44,18 + + + + wxFIXED_MINSIZE + 5 + + + + + 52,31 + 181,18 + + + + + + + + 9,55 + wxEXPAND|wxFIXED_MINSIZE + 3 + + + + wxFIXED_MINSIZE + 3 + + + 240,3 + 55,55 + + + + + + + + wxTOP|wxLEFT|wxBOTTOM|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 3 + + + 3,64 + 292,155 + + + + + + diff --git a/tribler-mod/Tribler/Main/vwxGUI/filterStandard.py b/tribler-mod/Tribler/Main/vwxGUI/filterStandard.py new file mode 100644 index 0000000..c5085d7 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/filterStandard.py @@ -0,0 +1,284 @@ +from time import localtime, strftime +import wx, sys + +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility +from traceback import print_exc +from Tribler.Category.Category import Category +from Tribler.Main.vwxGUI.TextButton import * +from Tribler.Main.vwxGUI.TriblerStyles import TriblerStyles +from font import * + +DEBUG = False + +class filterStandard(wx.Panel): + """ + Panel with automatic backgroundimage control. + """ + def __init__(self, *args, **kw): + + self.initDone = False + self.enabled = True +# self.filterData =[[('all', 'All'), ('Video', 'Video Files'), ('VideoClips', 'Video Clips'), ('Audio', 'Audio'), ('Compressed', 'Compressed'), ('Document', 'Documents'), ('Picture', 'Pictures'), +# ('other', 'Other')]] + self.filterData = [] + self.filterState = None + self.filters = [] + self.visible = False + + + if len(args) == 0: + pre = wx.PrePanel() + # the Create step is done by XRC. 
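The constructor here uses the same two-phase XRC creation pattern as the other vwxGUI panels: when instantiated by XRC there is no C++ widget yet, so PostCreate() wraps a wx.PrePanel and the real initialisation is deferred until EVT_WINDOW_CREATE fires. Condensed into a standalone sketch (wx "classic" 2.8-era API, mirroring the code in this file):

    import wx

    class TwoPhasePanel(wx.Panel):
        def __init__(self, *args, **kw):
            if len(args) == 0:
                # Created via XRC: wrap the not-yet-created widget and wait.
                pre = wx.PrePanel()
                self.PostCreate(pre)
                self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate)
            else:
                # Created directly from code: initialise immediately.
                wx.Panel.__init__(self, *args, **kw)
                self._PostInit()

        def OnCreate(self, event):
            self.Unbind(wx.EVT_WINDOW_CREATE)
            wx.CallAfter(self._PostInit)   # run once the window really exists
            event.Skip()

        def _PostInit(self):
            pass                           # real widget/sizer setup goes here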
+ self.PostCreate(pre) + self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate) + else: + wx.Panel.__init__(self, *args, **kw) + self._PostInit() + + + + def OnCreate(self, event): +# print " tribler_topButton in OnCreate" + self.Unbind(wx.EVT_WINDOW_CREATE) + wx.CallAfter(self._PostInit) + event.Skip() + + + return True + + def _PostInit(self): + # Do all init here + self.guiUtility = GUIUtility.getInstance() + self.utility = self.guiUtility.utility + + self.guiUtility.initFilterStandard(self) + self.triblerStyles = TriblerStyles.getInstance() +# self.Bind(wx.EVT_MOUSE_EVENTS, self.mouseAction) +# self.Bind(wx.EVT_LEFT_UP, self.ClickedButton) + self.SetMinSize((500, 160)) ## + + self.initDone = True +# self.addComponents() + self.Show() + self.Refresh() + self.Layout() + self.Update() + + + + def addComponents(self): +# self.SetBackgroundColour(wx.BLUE) + self.DestroyChildren() + self.vSizer = wx.BoxSizer(wx.VERTICAL) + self.hSizer = wx.BoxSizer(wx.HORIZONTAL) + self.vSizer.Add(self.hSizer, 0, wx.EXPAND, 0) + + + i = 0 + for list in self.filterData: + vSizer = wx.BoxSizer(wx.VERTICAL) + vSizer.Add([120,8],0,wx.FIXED_MINSIZE,0) + + titleText = wx.StaticText(self, -1, self.filterDataTitle[i]) + self.triblerStyles.setDarkText(titleText) + vSizer.Add(titleText, 0, wx.EXPAND|wx.TOP, 1) + + for title in list: + text = TextButtonFilter(self, name=title[1]) + vSizer.Add(text, 0, wx.EXPAND|wx.TOP, 1) + self.hSizer.Add(vSizer, 0, wx.EXPAND|wx.LEFT, 10) + + i = i + 1 + +# for title in list: +# titles.append(title) +## titles = [item[1] for item in pullDownData] +# print 'tb > titles2 = %s' % titles + +# try: +# if self.filterState is None: +# self.filterState = [] +# self.filterState.append(pullDownData[0][0]) +# except: +# if DEBUG: +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardFilter: Error getting default filterState, data: %s' % pullDownData +# pass + +## self.vSizer1 = wx.BoxSizer(wx.VERTICAL) +## self.vSizer1.Add([120,1],0,wx.FIXED_MINSIZE,0) +## for title in titles: +## text = TextButtonFilter(self, name=title) +### text = wx.StaticText(self, -1, title) +## self.vSizer1.Add(text, 0, wx.EXPAND|wx.TOP, 1) +## self.hSizer.Add(self.vSizer1, 0, wx.EXPAND|wx.LEFT, 10) + +# filter = wx.ComboBox(self,-1,titles[0], wx.DefaultPosition,wx.Size(160,10),titles, wx.FIXED_MINSIZE|wx.CB_DROPDOWN|wx.CB_READONLY) + #filter = wx.Choice(self,-1, wx.Point(8,3),wx.Size(180,21),titles) +# filter.SetFont(wx.Font(10,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) +# filter.SetBackgroundColour(wx.WHITE) +# filter.Bind(wx.EVT_COMBOBOX, self.mouseAction) + self.filters.append(filter) +# self.hSizer.Add(filter, 0, wx.FIXED_MINSIZE,0) + +# self.hSizer.Add([8,10],0,wx.EXPAND|wx.FIXED_MINSIZE,2) + + + self.SetSizer(self.vSizer); + self.SetAutoLayout(1); + self.Layout() + self.Refresh() +# self.mouseAction() +# wx.CallAfter(self.mouseAction,[None]) + + def SetData(self, mode): + if self.initDone == True: + print 'tb > mode = %s' % mode + if mode == 'libraryMode' or mode == 'filesMode' or mode == 'personsMode' or mode == 'friendsMode': + if self.visible: + self.Show() + self.guiUtility.advancedFiltering.Show() + else: + self.Hide() + self.guiUtility.advancedFiltering.Hide() + + self.getFilterLists(mode) + self.addComponents() + + def getFilterLists(self, mode): + self.filterDataTitle = [] + self.filterData = [] + if mode == 'filesMode': + self.filterDataTitle.append(' filter on:') + self.filterData.append([('all', 'All'), ('Video', 'Video Files'), ('VideoClips', 'Video Clips'), ('Audio', 'Audio'), 
('Compressed', 'Compressed'), ('Document', 'Documents'), ('Picture', 'Pictures'), + ('other', 'Other')]) + self.filterDataTitle.append(' sort on:') + self.filterData.append([('name', 'Name'), ('size', 'Size'), ('popularity', 'Popularity'), ('new', 'Age'), ('source', 'Source')]) + + elif mode == 'libraryMode': + self.filterDataTitle.append(' filter on:') + self.filterData.append([('all', 'All'), ('Video', 'Video Files'), ('VideoClips', 'Video Clips'), ('Audio', 'Audio'), ('Compressed', 'Compressed'), ('Document', 'Documents'), ('Picture', 'Pictures'), + ('other', 'Other')]) + self.filterDataTitle.append(' sort on:') + self.filterData.append([('name', 'Name'), ('progress', 'Progress'), ('eta', 'ETA')]) + + elif mode == 'personsMode': + self.filterDataTitle.append(' sort on:') + self.filterData.append([('name', 'Name'), ('status', 'Status'), ('reputation', 'Reputation'), ('nfiles', 'Files discovered'), ('npeers', 'Persons discovered'), ('nprefs', 'Number of downloads')]) + + elif mode == 'friendsMode': + self.filterDataTitle.append(' type:') + self.filterData.append([('name', 'Name'), ('status', 'Status'), ('reputation', 'Reputation'), ('nfiles', 'Files discovered'), ('npeers', 'Persons discovered'), ('nprefs', 'Number of downloads')]) + + + def mouseAction(self, event=''): + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardFilter: mouseAction: event is",event + filterIndex = [] + for filter in self.filters: + idx = filter.GetSelection() + if idx == -1: + idx = 0 + filterIndex.append(idx) + filterState = [] + for filterNum in range(len(self.filters)): + filterState.append(self.filterData[filterNum][filterIndex[filterNum]][0]) + + filterState.append(None) #replacement for old ordering filter + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardFilter: filterState is",filterState,"old",self.filterState + if filterState != self.filterState: + self.filterChanged(filterState) + self.filterState = filterState + + def filterChanged(self, state): + try: + mode = self.__class__.__name__[:-len('Filter')] + if self.guiUtility.standardOverview.mode.startswith(mode): + self.guiUtility.standardOverview.filterChanged(state, mode) + elif DEBUG: + print 'Warning: StandardOverview was in mode %s and we changed combo of %s' % \ + (self.guiUtility.standardOverview.mode, mode) + except: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardFilter: Error could not call standardOverview.filterChanged()' + print_exc() + + def setSelectionToFilter(self,filterState): + try: + for j in range(len(filterState)): + for i in range(len(self.filterData[j])): + if filterState[j] == self.filterData[j][i][0]: + self.filters[j].SetSelection(i) + break + except: + pass + self.filterState = filterState + + def getState(self): + if self.filterState is None: + state = [] + for i in xrange(len(self.filters)): + state.append(self.filterData[i][0][0]) + return state + return self.filterState + + +class filterFiles(filterStandard): + + def __init__(self, parent): + nametuples = [('all', 'All')] + nametuples += Category.getInstance().getCategoryNames() + nametuples.append(('other', 'Other')) + #nametuples.append(('search', 'Search Results')) + +# parent = None + filterData = [ + nametuples +# [(('content_name', 'increase'), 'Name'), +# ('swarmsize', 'Popular'), +# ('relevance','Recommended'), +# ('date','Creation date'), +# ('length', 'Size'), +# #('tracker', 'Tracker'), +# #('num_owners', 'Often received') +# ] + ] + 
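mouseAction() above reduces the per-column combo selections to a filterState list of keys, with a trailing None standing in for the retired ordering filter; a standalone sketch of that reduction follows (filters and filterData as defined in the class). Note also that the constructor call on the next line still references standardFilter, which is not defined in this module and appears to be a leftover from the rename to filterStandard.

    def current_filter_state(filters, filterData):
        state = []
        for num, combo in enumerate(filters):
            idx = combo.GetSelection()
            if idx == -1:
                idx = 0                              # nothing picked yet: use the first entry
            state.append(filterData[num][idx][0])    # keep the key, drop the display label
        state.append(None)                           # placeholder for the old ordering filter
        return state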
standardFilter.__init__(self, parent,filterData = filterData) + + def refresh(self): + nametuples = [('all', 'All')] + nametuples += Category.getInstance().getCategoryNames() + nametuples.append(('other', 'Other')) + self.filterData = [nametuples] + #self._PostInit() + self.addComponents() + +#class personsFilter(standardFilter): +# def __init__(self): +# filterData = [ +# [('all', 'All'), +# ('search', 'Search Results') +# ], +# [(('content_name','increase'), 'Name'), +# ('similarity', 'Similar taste'), +# ('last_connected', 'Recently connected'), +# ] +# ] +# standardFilter.__init__(self, filterData = filterData) +# +class filterLibrary(filterStandard): + pass +#class friendsFilter(standardFilter): +# def __init__(self): +# filterData = [ +# [('friends', 'All'), +# ('search_friends', 'Search Results') +# ], +# [(('content_name','increase'), 'Name'), +# ('similarity', 'Similar taste'), +# ('last_connected', 'Recently connected'), +# ] +# ] +# standardFilter.__init__(self, filterData = filterData) + + diff --git a/tribler-mod/Tribler/Main/vwxGUI/filterStandard.py.bak b/tribler-mod/Tribler/Main/vwxGUI/filterStandard.py.bak new file mode 100644 index 0000000..94c2ca0 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/filterStandard.py.bak @@ -0,0 +1,283 @@ +import wx, sys + +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility +from traceback import print_exc +from Tribler.Category.Category import Category +from Tribler.Main.vwxGUI.TextButton import * +from Tribler.Main.vwxGUI.TriblerStyles import TriblerStyles +from font import * + +DEBUG = False + +class filterStandard(wx.Panel): + """ + Panel with automatic backgroundimage control. + """ + def __init__(self, *args, **kw): + + self.initDone = False + self.enabled = True +# self.filterData =[[('all', 'All'), ('Video', 'Video Files'), ('VideoClips', 'Video Clips'), ('Audio', 'Audio'), ('Compressed', 'Compressed'), ('Document', 'Documents'), ('Picture', 'Pictures'), +# ('other', 'Other')]] + self.filterData = [] + self.filterState = None + self.filters = [] + self.visible = False + + + if len(args) == 0: + pre = wx.PrePanel() + # the Create step is done by XRC. 
+ self.PostCreate(pre) + self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate) + else: + wx.Panel.__init__(self, *args, **kw) + self._PostInit() + + + + def OnCreate(self, event): +# print " tribler_topButton in OnCreate" + self.Unbind(wx.EVT_WINDOW_CREATE) + wx.CallAfter(self._PostInit) + event.Skip() + + + return True + + def _PostInit(self): + # Do all init here + self.guiUtility = GUIUtility.getInstance() + self.utility = self.guiUtility.utility + + self.guiUtility.initFilterStandard(self) + self.triblerStyles = TriblerStyles.getInstance() +# self.Bind(wx.EVT_MOUSE_EVENTS, self.mouseAction) +# self.Bind(wx.EVT_LEFT_UP, self.ClickedButton) + self.SetMinSize((500, 160)) ## + + self.initDone = True +# self.addComponents() + self.Show() + self.Refresh() + self.Layout() + self.Update() + + + + def addComponents(self): +# self.SetBackgroundColour(wx.BLUE) + self.DestroyChildren() + self.vSizer = wx.BoxSizer(wx.VERTICAL) + self.hSizer = wx.BoxSizer(wx.HORIZONTAL) + self.vSizer.Add(self.hSizer, 0, wx.EXPAND, 0) + + + i = 0 + for list in self.filterData: + vSizer = wx.BoxSizer(wx.VERTICAL) + vSizer.Add([120,8],0,wx.FIXED_MINSIZE,0) + + titleText = wx.StaticText(self, -1, self.filterDataTitle[i]) + self.triblerStyles.setDarkText(titleText) + vSizer.Add(titleText, 0, wx.EXPAND|wx.TOP, 1) + + for title in list: + text = TextButtonFilter(self, name=title[1]) + vSizer.Add(text, 0, wx.EXPAND|wx.TOP, 1) + self.hSizer.Add(vSizer, 0, wx.EXPAND|wx.LEFT, 10) + + i = i + 1 + +# for title in list: +# titles.append(title) +## titles = [item[1] for item in pullDownData] +# print 'tb > titles2 = %s' % titles + +# try: +# if self.filterState is None: +# self.filterState = [] +# self.filterState.append(pullDownData[0][0]) +# except: +# if DEBUG: +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardFilter: Error getting default filterState, data: %s' % pullDownData +# pass + +## self.vSizer1 = wx.BoxSizer(wx.VERTICAL) +## self.vSizer1.Add([120,1],0,wx.FIXED_MINSIZE,0) +## for title in titles: +## text = TextButtonFilter(self, name=title) +### text = wx.StaticText(self, -1, title) +## self.vSizer1.Add(text, 0, wx.EXPAND|wx.TOP, 1) +## self.hSizer.Add(self.vSizer1, 0, wx.EXPAND|wx.LEFT, 10) + +# filter = wx.ComboBox(self,-1,titles[0], wx.DefaultPosition,wx.Size(160,10),titles, wx.FIXED_MINSIZE|wx.CB_DROPDOWN|wx.CB_READONLY) + #filter = wx.Choice(self,-1, wx.Point(8,3),wx.Size(180,21),titles) +# filter.SetFont(wx.Font(10,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) +# filter.SetBackgroundColour(wx.WHITE) +# filter.Bind(wx.EVT_COMBOBOX, self.mouseAction) + self.filters.append(filter) +# self.hSizer.Add(filter, 0, wx.FIXED_MINSIZE,0) + +# self.hSizer.Add([8,10],0,wx.EXPAND|wx.FIXED_MINSIZE,2) + + + self.SetSizer(self.vSizer); + self.SetAutoLayout(1); + self.Layout() + self.Refresh() +# self.mouseAction() +# wx.CallAfter(self.mouseAction,[None]) + + def SetData(self, mode): + if self.initDone == True: + print 'tb > mode = %s' % mode + if mode == 'libraryMode' or mode == 'filesMode' or mode == 'personsMode' or mode == 'friendsMode': + if self.visible: + self.Show() + self.guiUtility.advancedFiltering.Show() + else: + self.Hide() + self.guiUtility.advancedFiltering.Hide() + + self.getFilterLists(mode) + self.addComponents() + + def getFilterLists(self, mode): + self.filterDataTitle = [] + self.filterData = [] + if mode == 'filesMode': + self.filterDataTitle.append(' filter on:') + self.filterData.append([('all', 'All'), ('Video', 'Video Files'), ('VideoClips', 'Video Clips'), ('Audio', 'Audio'), 
('Compressed', 'Compressed'), ('Document', 'Documents'), ('Picture', 'Pictures'), + ('other', 'Other')]) + self.filterDataTitle.append(' sort on:') + self.filterData.append([('name', 'Name'), ('size', 'Size'), ('popularity', 'Popularity'), ('new', 'Age'), ('source', 'Source')]) + + elif mode == 'libraryMode': + self.filterDataTitle.append(' filter on:') + self.filterData.append([('all', 'All'), ('Video', 'Video Files'), ('VideoClips', 'Video Clips'), ('Audio', 'Audio'), ('Compressed', 'Compressed'), ('Document', 'Documents'), ('Picture', 'Pictures'), + ('other', 'Other')]) + self.filterDataTitle.append(' sort on:') + self.filterData.append([('name', 'Name'), ('progress', 'Progress'), ('eta', 'ETA')]) + + elif mode == 'personsMode': + self.filterDataTitle.append(' sort on:') + self.filterData.append([('name', 'Name'), ('status', 'Status'), ('reputation', 'Reputation'), ('nfiles', 'Files discovered'), ('npeers', 'Persons discovered'), ('nprefs', 'Number of downloads')]) + + elif mode == 'friendsMode': + self.filterDataTitle.append(' type:') + self.filterData.append([('name', 'Name'), ('status', 'Status'), ('reputation', 'Reputation'), ('nfiles', 'Files discovered'), ('npeers', 'Persons discovered'), ('nprefs', 'Number of downloads')]) + + + def mouseAction(self, event=''): + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardFilter: mouseAction: event is",event + filterIndex = [] + for filter in self.filters: + idx = filter.GetSelection() + if idx == -1: + idx = 0 + filterIndex.append(idx) + filterState = [] + for filterNum in range(len(self.filters)): + filterState.append(self.filterData[filterNum][filterIndex[filterNum]][0]) + + filterState.append(None) #replacement for old ordering filter + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardFilter: filterState is",filterState,"old",self.filterState + if filterState != self.filterState: + self.filterChanged(filterState) + self.filterState = filterState + + def filterChanged(self, state): + try: + mode = self.__class__.__name__[:-len('Filter')] + if self.guiUtility.standardOverview.mode.startswith(mode): + self.guiUtility.standardOverview.filterChanged(state, mode) + elif DEBUG: + print 'Warning: StandardOverview was in mode %s and we changed combo of %s' % \ + (self.guiUtility.standardOverview.mode, mode) + except: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardFilter: Error could not call standardOverview.filterChanged()' + print_exc() + + def setSelectionToFilter(self,filterState): + try: + for j in range(len(filterState)): + for i in range(len(self.filterData[j])): + if filterState[j] == self.filterData[j][i][0]: + self.filters[j].SetSelection(i) + break + except: + pass + self.filterState = filterState + + def getState(self): + if self.filterState is None: + state = [] + for i in xrange(len(self.filters)): + state.append(self.filterData[i][0][0]) + return state + return self.filterState + + +class filterFiles(filterStandard): + + def __init__(self, parent): + nametuples = [('all', 'All')] + nametuples += Category.getInstance().getCategoryNames() + nametuples.append(('other', 'Other')) + #nametuples.append(('search', 'Search Results')) + +# parent = None + filterData = [ + nametuples +# [(('content_name', 'increase'), 'Name'), +# ('swarmsize', 'Popular'), +# ('relevance','Recommended'), +# ('date','Creation date'), +# ('length', 'Size'), +# #('tracker', 'Tracker'), +# #('num_owners', 'Often received') +# ] + ] + 
standardFilter.__init__(self, parent,filterData = filterData) + + def refresh(self): + nametuples = [('all', 'All')] + nametuples += Category.getInstance().getCategoryNames() + nametuples.append(('other', 'Other')) + self.filterData = [nametuples] + #self._PostInit() + self.addComponents() + +#class personsFilter(standardFilter): +# def __init__(self): +# filterData = [ +# [('all', 'All'), +# ('search', 'Search Results') +# ], +# [(('content_name','increase'), 'Name'), +# ('similarity', 'Similar taste'), +# ('last_connected', 'Recently connected'), +# ] +# ] +# standardFilter.__init__(self, filterData = filterData) +# +class filterLibrary(filterStandard): + pass +#class friendsFilter(standardFilter): +# def __init__(self): +# filterData = [ +# [('friends', 'All'), +# ('search_friends', 'Search Results') +# ], +# [(('content_name','increase'), 'Name'), +# ('similarity', 'Similar taste'), +# ('last_connected', 'Recently connected'), +# ] +# ] +# standardFilter.__init__(self, filterData = filterData) + + diff --git a/tribler-mod/Tribler/Main/vwxGUI/font.py b/tribler-mod/Tribler/Main/vwxGUI/font.py new file mode 100644 index 0000000..7375644 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/font.py @@ -0,0 +1,27 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information + +import wx + +DEBUG = False + +# Default font properties +FONTFAMILY = wx.SWISS +FONTWEIGHT = wx.NORMAL +FONTFACE_CANDIDATES = ["Verdana","Arial",""] # "" means default font +FONTFACE = "" + +def init(): + """ Initialise the font subsystem. Has to be called after wx.App(). """ + + # FONTFACE := first existing font in FONTFACE_CANDIDATES array + global FONTFACE + fontnames = wx.FontEnumerator.GetFacenames() + + for f in FONTFACE_CANDIDATES: + if f in fontnames: + FONTFACE = f + if DEBUG: + print "Found font %s" % f + break diff --git a/tribler-mod/Tribler/Main/vwxGUI/font.py.bak b/tribler-mod/Tribler/Main/vwxGUI/font.py.bak new file mode 100644 index 0000000..e04a00a --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/font.py.bak @@ -0,0 +1,26 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information + +import wx + +DEBUG = False + +# Default font properties +FONTFAMILY = wx.SWISS +FONTWEIGHT = wx.NORMAL +FONTFACE_CANDIDATES = ["Verdana","Arial",""] # "" means default font +FONTFACE = "" + +def init(): + """ Initialise the font subsystem. Has to be called after wx.App(). 
""" + + # FONTFACE := first existing font in FONTFACE_CANDIDATES array + global FONTFACE + fontnames = wx.FontEnumerator.GetFacenames() + + for f in FONTFACE_CANDIDATES: + if f in fontnames: + FONTFACE = f + if DEBUG: + print "Found font %s" % f + break diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/5.0/None.png b/tribler-mod/Tribler/Main/vwxGUI/images/5.0/None.png new file mode 100644 index 0000000000000000000000000000000000000000..c597f3835b4346d8923dd31251cc99e21f89e305 GIT binary patch literal 165 zcmeAS@N?(olHy`uVBq!ia0vp^0wB!61|;P_|4#%`jKx9jP7LeL$-D$|I14-?iy0WW zg+Z8+Vb&Z8pdfpRr>`sfO;%n$VQWcI(=ecrWQl7;iF1B#Zfaf$gL6@8Vo7R>LV0FM zhJw4NZ$Nk>pEyvFkf)1dh{fsT1PRu~2_ijA91ILB0t}3;_N$73vJ9TCelF{r5}E+p CRV2Lt literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/5.0/SRgradient.png b/tribler-mod/Tribler/Main/vwxGUI/images/5.0/SRgradient.png new file mode 100644 index 0000000000000000000000000000000000000000..fe2f1fc2682ad1b73f5ee3360bf8d36cbf3e2472 GIT binary patch literal 1754 zcmeAS@N?(olHy`uVBq!ia0vp^`9RFi!3HE>8BVkUQY^(zo*^7SP{WbZ0pxQQctjR6 zFmMZjFyp1Wb$@_@VkNE-CC){ui6xo&c?uz!xv2~WmimU~`UV!Nn${p~5+D`9`DrEP ziAAXljw$&`sS0kHMXBZaMcKvvo8|q0HVJ~%MELqxCFkerC4!XbRpb^h*w|MTBqnF4 zmMA2prf25aDk&%^C@Ey7*eZpa`WpBaIHzW0dQ=sq23ProBv)l8Tc#-4+i}@cSOGQX zrj{fsROII56=85jTwBP(NbAOboD43zA+fV53fN}5%WiyPEVAkS7Q zqokz3N?*Ucyj-u`STDaQUEk2s(o)~RNZ-gv7pOwFxH7LKu|hYmSQ%mn%p8~0;^d;t z0&tju%uP(nFDH z-~5!!v`U0_LqlBy(-1>bD^mk2Lzs3XMNsWF`k;V73K(#xgZzXhu0SBbQj+1}RZv=# z1CExIWc}2f)ZEm(l44+(8KQ<+O0rd2eo<~>iCt!HVtT56L0-CzK0*+$`yj3cCJSJ) z)Q6`_nA71=NfQPl+-YJw#86sVVid!Fk%56p($mE;B%<~0bl+^@K#{ij)zAITL`Y8z zyC}vSRB|!o)usnd7g`@&+M4~^Nn-mnt&h!R!3GnRto9$+YJa$QhtS(?Au%GeLWDg} z%ZQgx@|hgceaocr_W^_F=jIkad%Ev)p<5?M-@_veE`ctEPoI8j{XF^MkItNg8u?_N zsF%&*LG7v^Pv}o8IAQ+t{VDC%Fs}2TSG)^~kNzLV_5My=fYPBOOAlSTlk@Q5F^_rt z(HAg{7w*|4Kt?mIJE3EoMClsMRvo-i;oXJ&stvC=lC>O^xV@; zjcgx!AEy6VF5=?YZ42f1i0*?S-i5&sLn>t39V=iOJL|*T^wCRTj?s@bYz8@a4wZCvzMRw|kwr zd~Q#pO3k;l`k6se^51TVd9a7=b71YsqMdf-mOrzag-`bPVg@z?h6 zo57nEW1SmYs(FkiiD%Q1<=edn&2Yu{e_^3ToI_->82IqF3-&jD*022WQ%mvv4FO#nN!N-h8Z literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/5.0/SRgradient_new.png b/tribler-mod/Tribler/Main/vwxGUI/images/5.0/SRgradient_new.png new file mode 100644 index 0000000000000000000000000000000000000000..a316e1f9f0ac0d8926ca9d8c1badc58b6af8cf54 GIT binary patch literal 582 zcmV-M0=fN(P)Px#32;bRa{vGf5&!@T5&_cPe*6Fc00(qQO+^RU1RD)3IpvYz>;M1(*hxe|R9M69 zn9VK(K@i9PRoyeTv5TyTkJvrg{w0&{o}Q|%)PFvDih>}(`qs|o(BQywcYDTY&x6()OpH%M_(gto zu>|=$_e5YKD5X%UB0N32*j`>*T%*3x;>OJ6*y?#(0f#q0C9)DkR5yo5B?CYN zGkIXRr4n*?w$+@J;7AF_C7>^tG3;rLhsY%)EtIQ(@B3h_mCHtohy?Jo2d1y=9v%Mz zW`V>pZI1Ln0J9S=g}M<~DFx&9C#j)L14=RY_7UND&Lt!zZ%9ma02SqjiaL8* zr~9*W*^Gy)3Q+xY$%~p)PUwTm3H?Cjgg&U8(EF8>H%OClGn?<)H}ewK>z>%07*qoM6N<$f(hygK>z>% literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/5.0/SRgradient_new_win.png b/tribler-mod/Tribler/Main/vwxGUI/images/5.0/SRgradient_new_win.png new file mode 100644 index 0000000000000000000000000000000000000000..446b3a31f06c6c9848935b9fa22406d7ccd73155 GIT binary patch literal 562 zcmV-20?qx2P)Px#32;bRa{vGf6951U69E94oEQKA00(qQO+^RU1se|oD5Kc~tpET4#7RU!R9M69 zmqBY3F&KuQ?@Kavy0}GbJ&Nukh!kf}dMtSCwI{Fs3{M4rf;T~bfCvAEdJsW`ws)Cc z1f_+w6`ik#ncYomyX#`99moqqGD*HSAMcxFGE$bMJ%90%x9{H5%@iL3-g{J(zlLyu zxX{TO*#Dy!K}B%R@k0VdQSjvP2J&?CnQeXkMA^B?$M5j7iUblF2iJ9JbQ2gE8~4TU zW8)=i8wsy%Utg4l>rjIJB+wszxal|oI#AOX1 zr^MOBp;64KIh@GH3>D+5GwyYb)qKe9yZ3Q!p|iQW)aS!D_-!Xv_Eh6!hH)~*I5`L7 
zWQuWewsA9;yUn>p{xyCUU)^$8!IvRqAt3u-(e4$`&K?OZ!PXuuFM2e1&q0+0X$$s$ zL6&8x_b#c?B2)&!2UO;{JtkZVs4B^|wwgD&{Ux*p0;(W z5?JjfL0WP`GFXBt%hAE0pvZ@;t<-L&gY$CYT7XA4VExW@`n?{qTbB0f^&7UgcUV~r zEaZ9pgO|R2=L(B<&D=Xp74JP&$m#cPvG(u*N1o%0vbnst82|tP07*qoM6N<$f-UO) A&;S4c literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/5.0/SRind.png b/tribler-mod/Tribler/Main/vwxGUI/images/5.0/SRind.png new file mode 100644 index 0000000000000000000000000000000000000000..127ecae90909220bd46833d3e2f8e1d86060e57f GIT binary patch literal 271 zcmeAS@N?(olHy`uVBq!ia0vp^+(695!3HFgJ}hYlQjEnx?oJHr&dIz4aySb-B8!2F zgg}__(%rg0Ktc8rPhVH|o6M5@Iy@`-pIQNh7J9lkhDcoQy=cqV=qPaP(2R>IUfC!)f+8ow4i?$g$b|6uQ3$#m3K^~cwpf~SFw OW$<+Mb6Mw<&;$T)8fBdT literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/5.0/SRind2.png b/tribler-mod/Tribler/Main/vwxGUI/images/5.0/SRind2.png new file mode 100644 index 0000000000000000000000000000000000000000..1d12f6144e494bc643f14a9c44fab9a56e049780 GIT binary patch literal 217 zcmeAS@N?(olHy`uVBq!ia0vp^>>$j+1|*LJgM4JM&$ z-mqR~zoX3D!Xtcw@1=25Lj5|07hFzJvzZ+~o%(<9vHr3zHVd4ar8vuV1evR*+<4d{ zrnGLwpBs-`%%0q_SDn>xy)EV=Uq#Ld{-7f>+PV9FzUBQaKKY`6#Zia22B5VJp00i_ I>zopr09t=b5&!@I literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/5.0/SRind2_win.png b/tribler-mod/Tribler/Main/vwxGUI/images/5.0/SRind2_win.png new file mode 100644 index 0000000000000000000000000000000000000000..9c741029de3734f66c81aed120462cb5770f1f08 GIT binary patch literal 235 zcmeAS@N?(olHy`uVBq!ia0vp^>>$j+1|*LJglFz;kcfAMv?0f1B1iG?Y_-MYt*_U zbk}hweUfAI<&Ha`@a=2p>{+_nYNu@)6<&z4x#>13{Qi27U*~hV+5B}A7PJdJH_`8N z47+zdLcxt&H1CFqf#db{d~6c!_wUvlP7>&ss(C2+D5_cX)C&<_{>_2)?P|AnB+P$y b^c!>W1F7@do*DcHI)uT~)z4*}Q$iB}JK#|p literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/5.0/SRindicator.png b/tribler-mod/Tribler/Main/vwxGUI/images/5.0/SRindicator.png new file mode 100644 index 0000000000000000000000000000000000000000..5ce59d318ce833f27d57c9d87041ff0c4d01d854 GIT binary patch literal 1468 zcmeAS@N?(olHy`uVBq!ia0vp^+(695!3HFgJ}hYlQY^(zo*^7SP{WbZ0pxQQctjQh z)d_(x7yt<)D`RsY0y+i^l`9GBGM3-k^34D{h912x(J1+Bobm0uK+ zpPyrg799%C!NxWqohXVBwj%VWx+IpQ+5yAELf61j*U&t~(7?*b!pguX3Q0di5t?@2 z{FKbJN`!VpLtO*Y5JOWdQv)kQn06#ZQ0+GQpnyOM7;vbA{Dda1Kp?AwLJ+U}Ag%@`3t+O; zho?)J)8SD`69yvOX<|IYP+D4I6vKZJSb+TYba4!km>N6L(3{y&q-Fo!XhD~trJtt% z(0}uXJyTbQ)l*tpaqHCHUa_QfuA?GdZOttTaxxVxLK7=Aj?6fnTP8evyFgw-MB4Yg zcjs1e-C!|mkqS7#6~QDHaU#n(;q@2G%3F7DulL#X@x^Cb$5~975tUjCZuMN!XW4&0 zviidNdvj#33lq9y>=~b`L`(L(5Yq{_GyTPcx_G zx9*CWz{@^&`|7yt<)D`RsY0y+i^l`9GBGM3-k^34D{h912x(J1+Bobm0uK+ zpPyrg799%C!NxWqohXVBwj%VWx+IpQ+5yAELf61j*U&t~(7?*b!pguX3Q0di5t?@2 z{FKbJN`!VpLtO*Y5JOWdQv)kQn06#ZQ0+GQpnyOM7;vbA{Dda1Kp?AwLJ+U}Ag%@`3t+O; zho?)J)8SD`69yvOX<|IYP+D4I6vKZJSb+TYba4!kn3{XRSGy%qD|;P1-c4b#4Dc_cvBzC5^9QRq-)ZyN&XX_S&W&BF zdOyoWY=ZrcUGt>p&AOYf|MkrhMUg+Da$98%d2bcEm$mf+e@J+jilhFmK$pVrUy@RD z5-Yb}u{ViV?@w!I|2SoVH0u`EVqK0}<;7>K)2Dm?T`VlE-u*bc!cXZ-s=+Ky!)@X= z4<)Ax6pKvXI>WE4e@4EkTbP0l+XkK D5747L literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/5.0/SRindicator_right.png b/tribler-mod/Tribler/Main/vwxGUI/images/5.0/SRindicator_right.png new file mode 100644 index 0000000000000000000000000000000000000000..5ce59d318ce833f27d57c9d87041ff0c4d01d854 GIT binary patch literal 1468 zcmeAS@N?(olHy`uVBq!ia0vp^+(695!3HFgJ}hYlQY^(zo*^7SP{WbZ0pxQQctjQh z)d_(x7yt<)D`RsY0y+i^l`9GBGM3-k^34D{h912x(J1+Bobm0uK+ zpPyrg799%C!NxWqohXVBwj%VWx+IpQ+5yAELf61j*U&t~(7?*b!pguX3Q0di5t?@2 
z{FKbJN`!VpLtO*Y5JOWdQv)kQn06#ZQ0+GQpnyOM7;vbA{Dda1Kp?AwLJ+U}Ag%@`3t+O; zho?)J)8SD`69yvOX<|IYP+D4I6vKZJSb+TYba4!km>N6L(3{y&q-Fo!XhD~trJtt% z(0}uXJyTbQ)l*tpaqHCHUa_QfuA?GdZOttTaxxVxLK7=Aj?6fnTP8evyFgw-MB4Yg zcjs1e-C!|mkqS7#6~QDHaU#n(;q@2G%3F7DulL#X@x^Cb$5~975tUjCZuMN!XW4&0 zviidNdvj#33lq9y>=~b`L`(L(5Yq{_GyTPcx_G zx9*CWz{@^&`|{XE)7O>#2B#di80-5JH`;(gH$7b(Lp+YZ4SUObM1f-|Z}6So zhyRpI`lJv4zRZ6}QR~9$OfRQdS(SDU^H~{A*v<>S|NVc7{&vBl*^Lo?1<|KpUf(*$ zcI$^DN~|-dJ+5@0{K-l=oL{ZV%5U~@Y0qS@$3m5w>smw$S1si|;qTwee*erKy|q8z zCC6C5tbRRb*?uvb)89?`u2g?s`EkPokK&4Je+4_mTc1urP%5 zZIyYLmyy@8WMK$<=Edrc=vfCId%e!DS^GM|Z1yqbq~6+`tLu-m{WLwkoaefE;^yzq aS>rDTd;6{W;;|p-Nd`|>KbLh*2~7ag1dQ|m literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/5.0/average_win.png b/tribler-mod/Tribler/Main/vwxGUI/images/5.0/average_win.png new file mode 100644 index 0000000000000000000000000000000000000000..ffe7f0e23709f2d071a2f84d00fd7d67a3fe5b53 GIT binary patch literal 1063 zcmV+?1laqDP)Px#32;bRa{vGf6951U69E94oEQKA00(qQO+^RU1rrT4H+be%?EnA+xk*GpR5;7U zlubxuR}{zpFKKLE{3HqyB7%Yo6;TxG#}ubB0Y#|Mg&<7DAR;IV(h(;Jq6^7lx^dl^ zFe)xY)7j`S;*7YOf>G451%ThVqj#JuFaSE17T@8Z2ghj!NU;KI4*-1EEV-SZ(` zU0o3Xj*pK~QBi@ctSp?IoIsXke6mjhzz+mK0xTp!6057L002$Xu(`R3l>U<}k&?v+ zI_SDiUDsJzS;_YHcDh_Hc6WC(rT2QhEG{mlrfH1DVzk@s9335H^dicho*p_J4mzDq z`u%=tnnqpM34mo~W$f$gV@*vBo0^&^%Q9^?8>_0SxUsRp_+^~^{rz+}9Q1fREGa1= z0P4EVX!QTt&@`2MdwT@H?;k(rz`y`AGc$Q{enCxBxv;Q60Nmf-=kw?51i-DWE&Ba_ zI-O3gt*tQ_3=#kr7Z(jEK&#bCkHC?X;y7*EoWwC2!KAHj}IPvn?9#$+DF+? zRh1JH6SUcEJUu<-`nnO&($W%DRpp=W-&2-l1_A+&kB`&sc2h|t*xcNlz96**2M4Jp zRRW;PUvv(vp7ly1F_7;Mv(3lZhk?3JM5-YEu0u`!gZ_K`0c$ z_4PH1ii*-T{=>6p_`0J5nGOe9TU+t+?c8^#{lH!;y@8JogZZL=cHY zu(Pv+hK5Fjcf%l={$Y|hJUj#dl$Ms_)z+)D?e1510RU4|Q<$Bd#rgTU=_BB|=P}s7 zrq~S#?}mYqkr7%f7XEg8%(xP#q9_c7LIl94^G~TL3ja7eqb$oTEG*<@?2?M2@Zus$ zpU+2GmYJWQ&*|xDUdH28;z~NzilT6Nd6{mvn|Ax>93CE~EX&N!&gR>n-_q;#G9x2{ zjg5_TyWIp}u%rKF$K#ij;_)~MKq5g2kRX91BuT=pCjum+cqDV^kwEH*;eTr^O1LEu zgk;!p<1{xnhm(`vasU1|$j^7;&YdrjmzRfkzq~WNtU=qlsM<- z=BDPAFgO>bCYGe8D3oWGWGJ|M`UZqI@`(c#X?wajhD02Gdy$csfq~<&!GZnH7c(%b stZi94S){&)r7cmSO%h0?NwgJ4I|VZf#_o4n4b;Zq>FVdQ&MBb@0BS5b2BR0pdfpRr>`sf4Nf_3Nx4U+yi0*XCp}#pLp+XO4L!}rqR271^Z)oJMND+MN+wYx5HJ#;($Le0lFMPV(=%_kMJ*&li{Z-W-I z|HqJ*cVmME=UV;@6^;5VDY~ps@cFB-7o~6N*!-U#C~~XWVle;vlRp8inTq)jdl-58 z+q*4vZPM;~ZC`%wU#{MN+u8r?tTpN%r(f?lyM6oV!$$m}Z|%RH(wKJc z`>(^!wi328&0n26G~>U2T4>6wjECn8O^zpp+~1+0{^{c3KMY*@_hlv@%}fJ&iow&> K&t;ucLK6Ttc!i<> literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/5.0/go.png b/tribler-mod/Tribler/Main/vwxGUI/images/5.0/go.png new file mode 100644 index 0000000000000000000000000000000000000000..ade918f3b8b6ea366e852d6c55dc98b5032fd97d GIT binary patch literal 569 zcmV-90>=G`P)9N%mCT0~Lg*u|BVb6juKiRTqX zu)Xu0d#~Q|_|3aLPDD}Uj@N2Dx^#S)S{ye z6(I%#)B@T2_ZAnN9>Nv6^q1Vpi@eL(MWU#SyGZghD4|38R!g& z(U6r@B6CCr&wq}`D<=-}BW|LB{mUJ^)Ns}xkF#Vl|7Sc_B8SB0kekU1_Vc6oF#bT_ z0FRP!c++VJc}d(`tJq>MT=!nwDFaF$=-G+8@>rQL)Bve04<~N?tZc4 zbMwqnpNeEFfrm+m_hhHfy7ut}SO-o_QMI`cY75Zo@U*A+AcEna9Qca+i*4$*%aWY# z@auhO_7r!_5(sZ-Fq%9s`>a-bEPJMqcuOlDq#++gu*dxdu}Zl7RN{p{00000NkvXX Hu0mjf|4jdk literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/5.0/good_win.png b/tribler-mod/Tribler/Main/vwxGUI/images/5.0/good_win.png new file mode 100644 index 0000000000000000000000000000000000000000..9a6548461413b30911694bc6a742074ab5e5bb99 GIT binary patch literal 906 zcmV;519kj~P)Px#32;bRa{vGf6951U69E94oEQKA00(qQO+^RU1rrT87wE{5{sZxB-mo{qPBR^kg5oJ zp&(*6ZD?yKp(+ir_>-7m6ss}WL=3hUF@>6F0-G4uO*WfvdU4L4JsV4{;K1Pw=bdlP 
z`^`Hu5{t#O=U#+eKb@tqtA~eU;{*!w_cRqq8rvOgY&=1z@jB(jerDz7@Xd)9hpfX>)g`t3ccle9i3tP1 zGa{hM-jl5yJ$DU&&r06p!cFTkhcaGzqfTXwLr-lono?bOwE6}z7QLJYz%N&BqN?mKe~-V9e2zD3s&zIgn<&Pl zWHhQNNd;65YH+X#W1K&#Dj8x^Gm23KV|}+OZM}`Zr5lXK6QIhf#dB%z>7ycanvKCis+&3hSY0#^MQyBsjlqRfH-#N6Iu@tnc0aq0 zpC`|o&E4S#N$Q}ABB-X?K@mifxS%p=#uPmHFL*igWf{AFzCcY&7*%EMTMJkbm_x!C zDn5Lj(fBwuEuA!6{}Z1#i!au^&9?W802qxYsA&$TV;>YTo-q71I81Hp4d(b};PYnD zKNNKhn4;oVS}kR4!eZ1`3~W)5NF-vlxqVm=R230%?;ATwGO?s3Nw!@2tfW3DV&S+B zksjkPLnga+D+wNB-_#*&N7fB*mh07*qoM6N<$f~WqP8vpDSr1-Hzi)bjkI>|+1T@_s;@1VL&de0{8v^K?;Zqle1Gx z6p~WYGxKbf6ciMc6f#q6mBLMZ4SWlnQ!_F>s)|yBtNcQetFn_VQA4UA0`Ow9C* z%#AF~&2$uk6H64l67v*{%oU6b41k1@m9aSx0UZMdN_Jd8+NLNa%__*n4QeNlXRDM^ zQc_^0uU}qXu2*iXmtT~wZ)j<0sc&GUZ)BtkRH0j3nOBlnp_^B%3^4>|j!SBBa#3ah zI7~t2CMM;Vme?vOfh>XpsOJ(3;1=Z-Ljw;OjQSOc1^R}12KsQ5ff{Xqf>z+z$}bAZ z&(E<#iw*_nU}GDQP83B5TM_zGT@p)D?SSE7p=)5NYiJ%~XkcYzVP#+xg`^*%2u-_h zeoAIqB|^KQp{{{xh@q*KsezRtOgoYysCFBDP(UCB3^>$5enJyhAP`_F$?)(hC@snX zM@vewerir?ZfaghF)+*wQNt`H*(xo+C^xahE;BbVJypLTFWp8TA&A#~5LW|}1u$9a z!_y_q>F}te2?G)CG%+4xC@n29is1*ALyUVpT^vI+CiYIU&k#MRmGc<{BD zfYY=M zKbzGGZf{!ieQT8Nrrc`23x^GSs`uV|Y0-B(=v4ZFHi1X%UYar2_v$b8X;%1f)n0(5 zQGaPq?Df6Nrz93e=*wUb%&wl@l+J8U) z{FT>VuP@7#3tN3vm4VIf&JLE3ihFLit$lt|N&LD2&tWMZw)yUhFS7DRl*y#q-T(5& z^2dsV+w3<_?zn4SS-B?mYNPPaj>{Flm407e|NKhC?_hSXdehjs%?|}T*cIgb!cBQQ z4jJ6hmuPn{NZXhomECW;;BjSKh|Fhgn_STsdE2EKtEDdZOiEdK;p?rRZ+uI=FPBJG zm~b8t)=d2*&uCE)`+ZrER{x#0pjXwaukOm8m3F>x&d-xe4;$S1Zlmut>4{N+H^)bz z1ty%94O5C*b)Kv~py|Ne*BJX?qrjunF2AL3OMN_4dgN6huqI&eboFyt=akR{04;?C A*Z=?k literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/5.0/help_win.png b/tribler-mod/Tribler/Main/vwxGUI/images/5.0/help_win.png new file mode 100644 index 0000000000000000000000000000000000000000..1e0a3e66b2f63bbf0049033d06ea45a1ff56c9a3 GIT binary patch literal 566 zcmV-60?GY}P)Px#32;bRa{vGf6951U69E94oEQKA00(qQO+^RU1rrH31nUydM(Vm^0?qKG6(01!e@ zsZ=lwgL~yNNC^rkDF~Cv1kdxR*X!7}jjrnegki{dJjVBZ8jS{~X_Cw35CGHZlpqM` zcDsAvuXJ5U*LA`$q~Gr&gupNiL>$M=W-~gS&JFbKD-6%m`}1R{AEE0y?RJ}fzfZMV zCD&@TPBl%#w(azxcLA@S!mpojeg+@krv8&>6bc1as}-x&3U#yD+&}>JBN$3JyMXs! 
zQg85KpSEqAD2h;%B+2x(*Qa;s`QrFi&)e-b9nPI<-G{3fPF};wX(p}a^LfHB%z{@j zJbaXa!Z1V#fm*3la2)53Z+?e2pELGoG~(dk0JYg{vREv#RbM_%5`kf1Ct zFC|g}3UM6ax-RSWn%!=PloBBXnxP)Px#24YJ`L;(K){{a7>y{D4^000SaNLh0L01FZT01FZU(%pXi00007bV*G`2iXP- z2sj@#Aazgx00k3CL_t(Y$8D8OY+P3r$A9PCd*6I{#vaEhv7b`QP*LKmU8~k-xr?pJn1RKn1HiQX@!I!2kqEh72P*C{0zM8Km@z zN#!SbM0N{vR9JL^4NKEuM4e#GAW;N%3v*7fnv*1>_>AHYN>hNkm!$hJGh`6a-NIZl zWPi;*H(C=sLBhq`x>Ly+LVi{ch?MiC~xrlG+)AM`m>= z8nXUgY!6EOLoL}o(vkjlZvO49)G^@UY3cO}{AMpT*cV@rxX_Uvz^(hKHJN*^DZS;c z>;xr7+gJ4O{*|AbR78NyzP0c4>=Q-gRGFx&R2_Fh9-M4=XV+t*QAGJvOx87UD&$V( zW^i^z1vf0@Y%)6D@pk5IBPD4MOU`0AQLX{)m~1&FYATq4c$K~iZyJ!2t(J-A9;QMb zlsBgv{BxnLTOUzm5M}8jBm~8J zVRvMtkUam&u(v%lYu7~X?V4Zh%F;8BXRVsY{q)tme|0mJUpwX-UwYKHpL-%Z_QSvD zI~X;9BHDC5G#wTeL@Lr{vTiamfURBQ2Y(y(oC;GJ^6m%L{QRQJ08&DJ^pCvv!NAhF zGokglMR#bbCXfB*YSB4)$ls7kEj+L^G3YkikoMKPsn%^u`s6`X6xuY{807p7f6Noim#T*p4eqqgiZhOk%lq?wzpcUZj$3qAX`Sq2e zxZ5|cf(Iy)+4@$p&0gZ(z$ZZGn#>GyJ8IS77$6>Fd6G!CE_(TP3ODbU<-J9hCE&?X zhi04Ny%Gn}#)1b&$Q-ze2SvoZC%kea+E-S+ocx%&uO9c=!c=vEF2A2_Yhd2HN*x0+ zs2Lz4s%i!guB!?)SGnq_NO3CN(Z6Ac2j(XSeHdYBMr1cP{+cfKwz3gx_B)Q!5i&TXMR3AX9)_+r}?1mv+04YlMn< zzkolyQJM-&*Tp~764K~J3MxE0tKneN=+>)7{ilz18qySK?mf=l;4VTv*rER5|?i^^!loRdVW(8xxsrh#c++%}^yO6SOmBX-4&>8<9N0Q;#?{`^HZ$ z7A(xDYGAwwx((s`U#yK4K7XmCNaLC1+0SOz zZcmXxZtM(=erXgDL?9Vt4(eVADnv5|Wz~*`7n}%rGKQt$M3|}xt-6pG#`bVzS79o9 zVBZ-Uy}~F{RWPF*!Btx@k47FV%nZCjBUCXOwf+0h&U=Wz!u&VN3FMcLCBs z@Y8vBJ&@uo@Q5r1Di#7^#!GkW{s0BpOFVsD*>5t5@N3JM7Ek&D6zcYLaSV~Tytdz( z?~s9jOZws&Yg9M8u4}N?dLn#)(}zoDg~fDL@qKAAGuXTXRhEeOm>#`c(=d0@RC^{1 zhJ@_5FXmKBBrltCNs^(8!I^o__j19RGk@;#edEj_zV>Uh=2sP^pP_fB2JMQ{etXk` zDPrH%$sH_d`%D#%dE7Q;Ylxa#Y4~|gv7Kqc0sF6v-`9&L?tQs?DbQIAp00i_>zopr E0IxV&EdT%j literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/5.0/line3.png b/tribler-mod/Tribler/Main/vwxGUI/images/5.0/line3.png new file mode 100644 index 0000000000000000000000000000000000000000..ad86798317f1dca12edce79df9c571087d5b10a5 GIT binary patch literal 187 zcmeAS@N?(olHy`uVBq!ia0y~yVBQL3GjXs1$@aS6sX&UcILO_JVcj{Imp~3@fk$L9 z0|U1(2s1Lwnj--eWH0gbb!ETFD9tC&(bcr|JWxoo#5JPCIX^cyHLrxhxhOTUBsE2$ zJhLQ2!QIn0AiR-J9H>ac)5S3);_%z+jJymC9Lx%D?|(MU<1^1Vs0Om?fb;FJEJg-~ bikA#GAF^+-{MzdU6lL&q^>bP0l+XkKBmy%6 literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/5.0/poor_win.png b/tribler-mod/Tribler/Main/vwxGUI/images/5.0/poor_win.png new file mode 100644 index 0000000000000000000000000000000000000000..968d1ced4fdb4e3dcdaf4349f397e271b05d1a47 GIT binary patch literal 733 zcmV<30wVp1P)Px#32;bRa{vGf6951U69E94oEQKA00(qQO+^RU1rrT9BA}JeO8@`@Z%IT!R5;7c zls!yTK@^3*yX-E6MTl-xNJQcf35`;0XAD}|K$JpFXoOf;L1JZ$g)x!D_*2-ag$uzL z8)HP&#!E1gU$!!4eHL_hnFLH` zVpjiwc?`C?o&}U?W$of5jT7S}KD=e+@f>Q)V#{C24DP1Vlf#h5p3I@vEVjHzW^fmk zo*b*UU(#}T6WX$!m4}}ok=Q_}Zl?P741hMqNdVWrlhvz_`~};(12s00&R_QH41`0j zy3vKa0)z;lTu@P&R%GTCj4Nwk^6xnNz*$x0*Qp!YAk|2@b1z`QqH^Cc8a}?G>Bbnv z{$bM769AMB4PtU0yFNy7V3_oDK430XMZNtJ7O@c)Bjx92)xxb-h<-)3nhbDJg{vn= zp}0os`4r8sW^mbd)bRC+2kvm=rlybz0t>h_E?d ziG(jOvc$eGEy<$IhD8d*Jc=Z!CAPXGs&R?mp724K$BCc3zFDny;p#)5Ltgo$r076J zwi$|^&|417EK*&;2~Wfq8h+$4g^*wGEXt~C&RV-gl|=hh$Gyr6^UD7Oxj11DQ@%A- P00000NkvXXu0mjfmm^Dn literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/5.0/right.png b/tribler-mod/Tribler/Main/vwxGUI/images/5.0/right.png new file mode 100644 index 0000000000000000000000000000000000000000..4d694dc63fa7a4e5322b3fe81b201ce459d15573 GIT binary patch literal 253 zcmeAS@N?(olHy`uVBq!ia0vp^AT}ol8<6B)wpSQPF%}28J29*~C-V{{Q{WL<43rWA zVa7{$>;3=**-JcqUDEak7ad~Q=A>Sbb0Ty$`7V{5`^=BsC 
zcV%73xSt{J!K8DKSWg_~)GXimC?jc=N6X5vTNBF~r{-jRxMx47xLUAsYS1=@DhAmG zPKH@pZ31i3()I~o6iD8~l)!L#QG}s!(ACQpeeK__n;U0`Y<)Y;D{Y_PMayFqGnfBf tuc73hCKu#&d`|I9R@M*4Px#24YJ`L;(K){{a7>y{D4^000SaNLh0L01FcU01FcV0GgZ_00007bV*G`2iXM^ z2^b*74HH-Z000?uMObu0Z*6U5Zgc=ca%Ew3Wn>_CX>@2HM@dakSAh-}0006FNklT0GbI(2ZfB*OV4@|-Zd_*6<;2Nf- zl#G-ip)ka_VtY}XW2pIxtpbgx#Th)rBJ9LWEcSnV?KqH|cf`0Ao&TErd9XJ<$NF~c z$5>p%_c-VrDzN}9=)x~N!_wSKwBjt9Z~~vugS)88#l6FeSl=|o{wg9 zhdsg)sE^IR@dTH!B@!LNH>}25{J>#6h&{WpA^ayfAiZ(fwwO0!9X{hqF0K!|@Dm-w zkU(8236#Oy5;5!Je>-0PJ!Cev;dCTPxP$ux9NvsEaq*+LHvo_i7!`yjjE}XssLsW` z4*wNW83Nv7PR=HjA)plI>*2TIGB#sUMV4iIP=&oIrJgLyDk5lkykaYAa+2we605~Q z9Kp&WS#_qA`m-z>9rIK|etj|a4&Iy0~rw!5v8T2&CSjKaR9~t+<3(IxHo4*!^-dIf>mD*4zV|)I6xU~dNdxJtmB-3s7)n9`moBdL?#WbjFyi)+ctuN|(+Mmuw?=3iJ*va1KH zCryDyof9<9&Mp0)5s&rcYeiCD&0Yh4gi}KOsV535W z8F4sDvSq=HsM(*l9WN}ssw$!ADf?ofpOeMDOOa<*8S=2nitKJW-}d=U*Ib~1A2eqk zUYx(G3zwsSk@$ma;>&w;zZ51iOnX!~VQ#{Tr-^%>Zrj#+Fzu*;h(zQ0r^X3vEQcH> z+zy;@>edAztd0VD50V^qFK@cgs_5ROCdF}h!Mfug`B)4CHu_xL79i2oe6WCFYt33G Rg>Q=_CM}CM7GPws1^^C)^sE2? literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/5.0/search_new_windows.gif b/tribler-mod/Tribler/Main/vwxGUI/images/5.0/search_new_windows.gif new file mode 100644 index 0000000000000000000000000000000000000000..1a3f33e16254eb365856d4b237fc430f418ea59c GIT binary patch literal 673 zcmZ?wbhEHb6krfw_{6~Q?AbF028Q+P*IQd#0~rw!5v8T2EiEmI|D>FYQWHy3QxwWG zOEMG^vl1(E@)J|^GV{{%85DoAFmM5tEB@#9a}5c0b_{Se(lcOY1PbW@G0D8JzOW7Eb1{;*gxf?l&Wi({-cap7;r7l!E}X+)Eqz!)jo2f+oR?VMR6u=p2PZ6D-9amptEdYo3ARahV5- zdQRKso(f;H$dHGPTckK~_XgANQyCi#HgI|}DR^^EXzVP; zcee@)rY3aUO`LPrWLm4E+ff6N2??{eG4iq;auAqp*l_6Dxrs1i*fER&#{BA3rz>+b vugh_899HN%UdYE{Xt46shIIiFhk(%)c55k| zgW!U_%O?XxI14-?i-B5%L70(Y)*K0-AbW|YuPgga7Df&|mTe1mTU=e1PMvk` z{fm63g%#og3IA5l7t)*dyzBHI&Zv{K6AXXGZFE?;`sfO;%wpW|pA3CNH2+il>WXh{WaAen-B;1_Dg&TL0^vKKaX+ z6&2)K7F{Wd5@0f0Yx+~^fP!bhPmL8T=3F;@zsl+9g``%7a{+wfObJ&PXPIlP{*?9L ztih9@Puh)Z7ySzju(e<9XdvP4`mE!h>;1%&yynLerf*%lGX`iMgQu&X%Q~loCIE%b BM5q7& literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/5.0/sharing_reputation.png b/tribler-mod/Tribler/Main/vwxGUI/images/5.0/sharing_reputation.png new file mode 100644 index 0000000000000000000000000000000000000000..2164a7f04eacb724b7b0823ced5c29520e6e6d50 GIT binary patch literal 863 zcmV-l1EBngP)Px#32;bRa{vGf6951U69E94oEQKA00(qQO+^RU1rrG#12HhgtN;K28FWQhbVF}# zZDnqB07G(RVRU6=Aa`kWXdp*PO;A^X4i^9b0?bK7K~zY`<B1CB{qZ|WE zCWa}}%o%Ts&Wr2o_kM3`WKcSAx%b`k=brC5_nmWL61L+S^Nug*eW@G=fG;jGJr?g&l=mbHQNO3Z6(XI~QoSP_gXIEGzVQd1hm zbNC%YvHw}{p2z%HA1ic5a42p`3A(bkEbI-&{;J@;gAEnPE{Uk@|A1dG7xN;KFBftd zFJVWl*S93%?%;igX-zoma-~hY80-Dv*Z%lk(G-IU*u8iLZ&XgA^i%j^32g0<6Ile9>~q{V|`xBgRjk#QYXs#ndtC9mFg= z5I!Bp!h$~)>*8f7>b~eF(^E=IBL;Q4Bc(KR3^*&flJ-?eOexKcat|N0fq5-z?I?z@ z7k9SQ(nq7=T*9WfF~hj6>6dM>S2=n)>heHKX(~&{>qWEP(xF-Rg`YbL-c$HFO2fHY zY1$b*v$qB3-*UCmB>K|1=w$Px#32;bRa{vGf6951U69E94oEQKA00(qQO+^RU1rrG+GF)6Lt^fcBDM>^@R9M5c zS8GfZSsFbBlPEustI&&u#@O5Nr-dNfL5(Ir;)+g>L=sk+9b=5JI`~+6hSmV%i)cHb zXf&8OCTudI3ueizyV;3Od}J|V(C|YAqhW$^NAS5cveYB~FxJ^0Rb5?$;!I{|E2*Sz z-S2VE`M!H^mGu1ib4DT&T)cP@qobopNl8ILK>_ye-3wWkA}@?a5xN)=U1q!t9`GRY3D&*UEPoKeiw-M z0Fx>zDtPYPIjmp59t#&P#O>R+(c0RIyu3V|KYtz>85zjT%*3^8*M7FJ9UUD=PEJNi zNy#r#sEmvZcsw4|*49E%6htBsWMpKZx3?GnkL#yuFwyRRPhKx=tYk=gphP1q&9?=ku|xt&O^_v$waG#l^*(IddjG9uF5UUd$_3 zuGl_-K!BS!Z)R0h6;o4FnV+9;&-wj+1_A;4d_JB&eVV$ib8Kvk+qP|^EXyn{EoE+Q zE`<;lCe6m?jIZlDHC<8z}*WHcImD`p@Nuye0mxspN%3L&_5?b=CsPqfo@-R7+*3L}vS{eC~w($eVn`x%Ku zs3^*NHPLmQ_wV0le}6yG+}und1k=*eSXfxdO`A5cwY8Na(I_=dqY#2KX3XHxqeq#Z 
zp3XUQ=1|i#78e&&2*D#qj7gvkT(@o=HBDnfLj%2D zFTcC+ot^va*|WBv5Q6*m?PFC{6@?JovuBSz7sD7Ul%{I@e&BmEUv>^3Ja{mHce~Ad zAZC|i+Bst$Ja~{o2$q+Z^V@I#!SeEQ3L)6s+)PbV->r$Nxv)S{RrdAu@#M*q4244W z#Hv-RsHzHu5S%}MK2=p^US1xB5L8u_qobp2Z*OOPeLYJ`N^DT7Y9`Lg%HsI=xIH%K zCXZKCR74>JpFVv`MN#PU`6z^-qN-G5x1!qdoOf1M7O!8wPE}PI3FXeiv`Ioik=(VIhSOeDvrMRaNB=Km3bA2o@C; z*>tO@86G0moLNP@t~)t2eb~h#Kc6UP7yy82)22a^B#`mbG4`C8mo#@21`;7zyPyLiX6iwzCKn4pP6V~eX{#rj}qXbA1??RfU=8CI-V zjz>eoh(@Ex&(C)!fctrHa1a16ckWzVy?WJk0J;HA0@%gOz>+0PaPQtd95`?Q)2C0z z$jC@Mui^Y6iu)NPl9G}jguugx4`GlfDk{X6Up8WJa1i6;l9QA1u z-_HFVYu2p6r=M;DN$lRe8%aq?`1bcL%4NduQIUBWlt{1wvJ*cjjX z6tqSJAX&4NtbYqimLaXbjaakO@|Wzarl}Nf}j0emsoTzgvwN)2Cy_ zhaVy(B?UKb+;HP@_(~Gt0AM1L;<+eEfNdwi`)emQ-AsP*_|AB5 z`vK+_DmnPMc*hb-?go`?ZQaF^q!=F4<%8G~lRy{_Sph;KVc4h)d)J9$H>0?BCIc2z zkO0j}TQZwx&cq))f(QhDg9{ffpz|-CNJvP4-|xrHFLv51CC1k*zSS177PA1*PH)5o zvTEsEd4_8{r3J(47#eoV#a5x+1m?*BP>PETclQ`pb5n{pIi$FsN%o9@r$lGE~+deW<6YuuAhntsskLyP~*JCPFey#L4#FOVHoFl`PHS67ryT2T9 zXts_C!V2b@qNRDOqj0RZm#P6PoSm^0<^8PPM{kY0V=*p0Hd~OmA0iJxun5R7)Qm6l z-)xL)j_z8sulFCEKB!F)4+@=p5uEr$vdpQ}u0%3bq7npDR}p?CrB2>^XAiT{!jsPU z>sR6c^Ex}N5s5kyh&frzr>wIa9K_V&P>x*LZS={G_xT8TP*7>b>8tfHo0#oV`#Ud< zP2$tD{v3fe78PN;C}RzqsB?-yUca`B67%g?Ll#<7*8o>zV5Cq&R^rZGnV-p}-cQeM z8hG42Df0(%swfwa$cmi5{IPw{6FTg%vDn%0T>r?$y11JegKM2lA-)kc9VU2icLx-r zhoJ@Ooy3d*vMJ#H_8WH$gL%!DDSgzw7!6XSB%uYj9c5O(9%X}xgR;%|x6cZ~->ZPb z{e6MeN^l_W`kau9n@^#`4(rX_oV!V=0pLK)lZp^6x4;E2axWOQ6MLVUsy6KChv5@- zJoXUjI1lA{bhGu)nm>>v(G)ee`%S4lm%Wt2stPjBx1TW;Qi?r=ZFdm`UVLY5mju4A zp-0Jd>bnT75y2>jH;lJePxJGZ_KUB+Kt>Hiu_#{{keEJwZSdV0Q;%s3hVx=pmhPrA z*uVCN+-b%Hzkn2EXJo-0UVNerU|KirI1(4UrZ$yQYgi`H{GietSEL$~0EVtN@K#F< zNxdFqx03vrIi&?B2^|(QE4y0^sn9V+iXrP!5IW3eOL@c!PLRNVz@)MNYatz$oG_7b zs~@L)6N}iLGuz4@1)8<5M$8}g-?gLIB&u6zUt0~Peqj$3@j^`u@vSsa)G^iu&4)^D zkBpMbeZHTei?fS0&^(3{Ne42rK!?X@zsO6TXU@uQV@imU=q1MESy{OYcHWGAQF`bz zeWqZdS+R;uNoB6*ctDW}^QsXRp-D?j*-*r1eVGR~px)`AT{U!>q8uiRRiKDD>ryJS zqxkiqp5(tgm8>IGV*5X=g7V^-b>Hnd*!7w0ts;PyI~LAg$NvU|eYi|ZlvPb3%ze$D z=E~NlcB)1{!nJQ8kk@4TBiJcHFgN8i)OTcQG>-l%FmS&U6vfnYgh)00gbIPNQoirO&h# z;T`%Qc00;Oyj}Vc*QDB{J05j>sdv*M}Oj;GImfFCQ|q8mGw{P&Vh$t`;sC zXED`1-2T^%aV=ocpSN;jyj5#g1Y+X9p;SQ_RsQLE&MUiz%eas;%D+8Q{37L;GsUBD z#b71+K-QrHJHOjTO5!Exp;tZ&Mc|^Tu`ogX%d_YVP~&>_tMO18jgDMd=2BUa&t}{S z-xw)2Q!3<=O(I3hW#NiMwd2+EGke5lFX@POL5pjju?E-+7@iolk>v;D&Rp5l%iJsV zD>(iR-@ozv1N)4=$iF@5L-UeSd*Y`J;2pYf^ij0rR}wzc2VvZRec1yEI15|2H zk}ik{ees(VH&PcC^&5{b1D^U>#@_t92+fjCB3w~J{TG-TAhD2+Huonvx?m*Xk-8pH z0kfh|fnObC^eG9tlY(oQh;@v)^}3XzM8)+?Xy)*q1~Y|%nO&V_wzza!hkyrzWfB%b zdY5Mw-d2St4TuL9A2P3m7m4fIBU zP;BG0gt&5RUM-bNskS)Bs>p>95^_FmvA2D?M)JN%wBN{Je_>!SG;LN)QD?YMZp@OkSjDMu1g*JR*BBQ9gf@toX)%#Goq9U=l2 zQIL0Ec&#BYz+y3f`O?X$OBY1Z)7E|EPNdBXhrtrTIV{eZr16%F)rFQp53AV zb~HOiOG-omV$ykiX*Sz4 z6YrOUW88~(%$@|Yt?`cC433|nz1XbgTeM%UPS%?;PHo&x>iw>dTkDf6ER_$t#MV6Q zsd-aIiM&y2Ct^`qhRfKZnPu^KcRMm6?Q>xQl%{{Er%fnu?y6K4ksbD)D`eF?>Mk&V zSxhfPQj@gAX2Z4U5ipYPh-@RaoC;1h0P{Rt)*4t3GdokRM2E?-I0*K;1Vq8(rszIf z1pxACY0Og%i?Gx;x;i=(Z4n)M5tgiN!M`f0`@J6a%x{dw-aX~uOb6LX)cvp^ch%Aa z09&NG`xLb6jE9)S-cvLL)1@ zAfcplrT_m>FF$8jFN&Yfu1Z(wKUn;q`uw@h|KaZ+oPgKZs-IQ*gXm}Ms=~Ivq%xO> X++fB}POE^wY!Iu{*5<`0-S7MxvpEZ9 literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/5.0/video.gif b/tribler-mod/Tribler/Main/vwxGUI/images/5.0/video.gif new file mode 100644 index 0000000000000000000000000000000000000000..56c99c27b6b02b863a7cfcb14c121d6af57224b9 GIT binary patch literal 8238 zcmb7}c{o)46v>=tFI>x>>*^b?p6gtn>-_P$-rL90+Jdv*-3#G`_=G?V zzM5TFSeW|o*pKzRpj7{QBy{m$~nME`Isu z)7Ou4-~RmZ1Mz#0(oZ?lKyUXTZ!gBNeL+49cVGA5{fFGWwDuj`a|od{43@q=D^0*u|Y+ee`V 
zbIg!(BJ!v@iEM}2qD7)43Ra>bPe58AfyV~$ytBznTv~{|1qGeGGnYv%pcR^vveBec z)rf*QSX{5Xa1sYD0=Zt~nS^rs7YZ$Bu-)N0xpEq?(OR;eNEFD@s2h+S@}l_rRk(eWMX zsLa1}hG7~fZ+)$xGliwPfMfGvF|d1;En_|RBMfFJDjDuv3#$n-efg~Qm2GC*Xm&Ql zC6z(1VhUD48Zn|+R9L2-LXrQsPe??WX>-VuXGcmOmuWKiiInEyf*j)m2aVz%?th-Y zc_!~ZLGRrk^(!Bi-41R+IAro-fiIhC*1BDGS11u0q)Fl+$3xvOz8wpgXvlu0Pr6A* zLABeQ>f4r8RZl;z@Vu98+fXy}tnP3*&-NJ{K9YV1C(&&EdPpKOAPXNJA3Ss+bD6AT z)1U1h$H_4p{n%LseoMP6Pajib0H3ilNzaJ8Z0CU%odhMFih#zq-{(F~k36`wKt>;w zspyxc5Jg!`BGW(Nr~sr@B9N56J6%AB24^#sLg}GJ%tq3h1q9=@hzO>B$6WyLkdYxp z;?}uGC+mtN0!*fuo2W+Tj+xbVnJh_C@jnMgvGaPk;FrYyJ|S)==RyL+w&tE)tEA)& zSm%9Wq`c`=vrkZt8^nr;n4Rl9Dhm3kZ*h zBx#Uh?FqrFsb_NIlbP1(>1Zu0N`@va_YAf`6@NJ$Z(fmG#YERc)S1;MU29ZMy_Ic* zYRzv4(<1&bKT<)y+fKqm_(A`!9jM{fhxlCp?ERX7}YVvUy7K%6&uy`zk*T;wyIe&A_SXe;{X^tu^IDSpD) z-o3VDSSHjQtG07)*1fZ$=bL@5^?M$|xpw=H)#=(V@58IMJ_`Ld^Rhz~YRh_-7cdaM z_j%^7t0&ftR-bjfEVj9PCG0XkMSXX7c~KPET1JeJ0B_9Qy^y%E9APzh-krmLVjflk0T(il+^f zsLhD=w?(lhf4tWb9g#meH5$!`Vkzqd@FL7NE$a@XA#oJZRvC&Jp9{UYuZ@W3jQx?4 zpku+$WumXM3UrKh`Qil2pV*E>6B^g~tYLT2#PS8)vgRmdwon;Q?3nU2*i2%J(^YjR zlmnggU{xcuj3h6mf7z?N3#iRkQjHGL{9gpEtH(_Sy=S7EGgV}Lr*dNibm0F&3;YJO z?-s<%@?owFy}2MMfF&zsbt%=t-vHOYzsq+Z#AWB?I+mO6u2}yT)G7*I_}+%HevlxL zjZlxR!qdXSz`#{UutzbDA+}iZDRoegB8*fa#l~XyY5^BDp%eisU3GKjQ8X8q7v^{A z-1*pT#-&%O6-=}91qfIv zEP(?;WODJSGC-F4_z|NIk}+?{_;(1@Cm=6h>A;Ex!%-$wFe!>^4ki1XbfH&R@J?bz zwq1-CMTJHG7)BOwgi&DvrI7_3pWdTLj{ONGq*9Q%w=QrF|1W4|00x(HG{`*sNME*| z(OJ=JU5x(LuKxOJ$f$g`br7oONNG&mYC4N8{se*h9IjkppZJ1wYEY@k9Eh(yxU0$I zdl{cQ==!v-uQ*q9qW1kr#i_4fF3|@A;;Q|4w&52BAW!jW&2{PWavyw}SIU*nB#gb< zb0zFmrm@2E&x6-$mW^BC!#+P85r-32Y-y}}Ki=^RTKS7Gl0ckOG}c0fLBWA!Hez{{ z!So}$rtmU1-m#^t8?!NrQWP@nh5)a9~UVp^l9{u#E*W$URUQdEr|WvxL&@7%}( zpWVdqU!3xjmaqTGfRXspVB2MSh312B8)+LCvSjTQC!=I~R07sn?DGf=(s1ezBtec7 z;cii@US{B;{?<f7m#(VKr#uvQpz~u4u4{^65;jp(63VY7Ry)sAaNWK zXYMX)=W5rx*RS+n=I7gF*;E(XTN37asXb!3hiI~5%t@grFUAIXef&s>+Z#tfJ+M$! zs2w>d?P)uqI$G1_N-9sbE;C0_OI93-OZ9= zDBgPmg}3i}d@6fr{^M}R)pdu|hz;7YsQlY<8>qPft}-<)o`uAxRd1fQdR|})zauC3 zHwVf@3d^p~j+MLn)n+|6b7}Mc?5|g{Lwu~#yS%9)-~Q(Jue0O#a<<-T`ti%gH#Q$Z zKp$ptZU`jgk+%a}7&T=SxI%9}ZQ>DurF1j*e_-*QZ zjxN<~)-qvr{rU0S7`5W5Jgtbo!G(ScT&{Dgn-6rxFx>Mvn~|fBKK#m!TfSCxptmi= zC93sd?p&r__)OreFBjVDU`lmzbS1nE9HmjnFx5X`B?%Wz#vRiH3Do^sr{S=Om4uk+ zP%begD?Ckwii^$1@!YduiimkU1I4>|1!GaFOw7QQ@Nie48YU5iLpAWCQlK319tt|X z>#zLzAN@22(0K5kF3>F;LVy_TG^R%<4+q^HA}#v=7@P9`&n?gJUxCb&weX5CDDyfM z8(dCIL}FA79-@Uxf-r4mvx~J?!)}A~Zde~L#zZv|`D`I;x{%8|o5uFMd(pb8@cuvg zxn?5ahgT(6&>NqiEHfKjRkST%ik%5Fwq`%1Nhx0JC|=^{u&AFAD9X`NY?-UAZtz{% zW<2TxzQe`+OQ}QE?)SsCoAfmwH+jrw!{YR-d}+dj!zFck#qcea4<8ib9o9&sLHtIe zodVy1k}z8<@_{GUo;Rg@&7+8%1kaX$sZpLgmj-$b)cSp&DQ*Z*zb}eT@_%hmH%kTR z{>J|@{rb<-C-Gis43;IB^2qUzqlhLHbcveo(!Xn-*K*8;5H#}0czSvn3E%N}_a>Jg z^=r*2BcV^N_#;StU7_RAkjaWao}3sLGsVa>B~BIsVFV*029Ny5^v7bJ(YPeodxpBC58*=`AKW%nc_U)y`YiJLp8w#NnNYCn@!r`^~NYHokr0kp4ODFz6v z3OISAt@VqvM?9|gnFsFHW54pJTFgKB6ZM-YhBq&w*l6K#o~@jthweYiz={UxUe>l8 z`V)V@O`4`{Q>mh9+nuhYjty67T;hkNPfi@f5f6ZegH!>6J&x+81%QGzXmWBg7_rg? 
zib>;cCu5Q^4(Y1s^MwQ+F{R{WzA{*7kG_(VowB72L4*qPc=!@>)eRLQnt%`ANJ3Y4 zwnb1V@jV0oW91tGQ{b-77^O}EpfEGk7i%jT(X&rT(9HZhJpe*ALPbYBd&s;d@Ri5V z_~%0@>rfbyBBhAJD0g_ykncLjjtV;s$v$j-B(^h#P!%Sa!s|n(0-HP4Yn2`Y4Y~V zD}b(UBb3tl5V^4E!ORcZgP9sWI;|>+xTVFeSCvcQJvUC2eL**9r zl`%FF-yoX)AG=4oZ#H~=Cuxmev9+o3_(m|D5*(4woAsM!cAU%UYtOp%u7s5FV0Qxn zq-ww`IJnt$yASCEtGX3HRL5h9Ao}h(m;|z8HlJ#fp}Rx+x_l+YH8V?dPp@%vv<>}p zJ(R6LWtxl~XSe^-P;@ofiS}@*qHm6o1dinoj$(z{i{9;2#08sy&Bar#XT6m8_t$n1 zK`6D$sRfZjXbcXA_%=?YO(FQN@~hF65r zj?3scWUcf`{7XaQ|1p2`Z-FZo`O~E}Xc3WcN2w;Zqb<)m5|_tsxpvoH?^>_Lv4qty zVIoJQbV?83VbXBh(M;y!^?lr_YTANZ*k_A^)h~Q;2ia)BRwz;LX?RNW}j?TM_9p1#H8-dPg z&?ga4)PZ|=GKM^SQJ?zcNlzyrc^&m=0`=}pJ8@VtLS8rVcCzEuv}7Ie!vVWTiP_;B zIdX5t(OSqBA0wD|(W)}wDMPDE`X1s;(fzFPs7z(d54kNWi(&gl_ zcWVyQKZ$~er}(0kvWiBHQH!pX%fA{m$wyO?K!7lj2=3wSgho8Di zGSgdX-EPJO6KbbpD{Wo#oN~oUwo4o*+n4M==jYmDMLMKO4bu=@Ud<@-zQCCtt8p1A zQ{6duEiQeHXOfw_S$}75kzL8I`}Kc!_~qsx3cux+A}l&)7pu#l!=k`j7!=I6N{eE@$ zTc`Jcp@E9NC@VKn*a?&hA{6>G!xo28lGe2!sME_Z1t^LcA{J3n2uE<-GIAjt%O?vd zo9U)-RExsSgK}9N+_-%C*>q6XWL7z*bI|HBQd0&y3g07gA8#>BLhvBmX766Wa;(k9 zmv6CN)t6{C4~OKd`g_G*#c{Du=CnTLQA*4XKv~)ybQk*e5>vE3B5`~I10r)XcnMj35D&PWm0{TX^_}@V8y>*wk zm@llw-R2q!^}74xuzbJaos|Tf zUbgG)ZyJQuw?k!~(_=SJ=oj09>zA^`SMo}CR$0y{Kgx8hbf<0I_tX$hTisP}Hm=|8 z%X#PCz1qt{b-y<1ZnMUwe*@W2b3i%5p+(~fqd9B@I(j;cf5Y+JNCyGCQOUA-Ap91U zVadOU0w?jS6?GU!-ww^JIl8`FD*^f8g8VD1 zo|Hh-Hzwl4Vypx{zl%13X+>boSgxx>HAIhLt}Y`t0>i;Vz*6JFaA3I&md;X&%K&3l zl_g{rV;3uo@R%|Q9Mjw$92L3rWffpZgH|=WY*gfXM2qD`OOw?#7zIM0Qlu0usmz6w zwQ88+l&vj6&I;S|awML!H=jOX z3cBZHT_`0`-AqU)AQZIylnOO0(G-l1qQo;Ds8qZuCF(?68sLD>;Sx@u!qYq~4;Gh| zS5#iBx?WvVTX&*eurnoUl!V)k$&nnt2VBwBr#8UlChl^%7H#N%(ashc=tFGn&o%6|XxHb+Y*Im93SVw_#&gJ3P)@*{~_$ z`TWO%&U*Vzh6$X?TxTqi{!L+XUBzf?jI{c$uDVK3nk~~ygE5!>c$vDXF_(Wl9l%iR z?mLQ(2bMh`lVMo}koT&w(m{Ye$>1eEu5e##Wwbmf5iD1gB;Q}Or@l+0qb5ch6o?Wf zg`jkAxcnoXuv5j6{iE_P-?t3@d?ESBxgRn@arVtX28kxm^@q8~G$~J~Z)KbQ3|qi7 zUd}!>^W+N%B7R!t9CmPLav3Mk-$YyiqHw7NBxNH>CImG0AmC3w?nq?kjeKnJQcB#B z*i~~FH>ixr6~QbU(O9BJHiL^qZ2y9Xs7wkkX(iKtJdt{>hwJu3{`iFZmf>JJa@#>i zy1-JAJ;o!+h&+%kNxvr0xMhD&Nx0wVkS+I%L%xP`+#-kJW@S|Dk!J$K9Op8+YOsZ@ zpz>)O%%!jO3d!=I(+}rXQ_l8nA`-T^^GMXt&{L0)({XYLY!rT{ADWSR;< zK~v(m#EVq_98bWa~Fthj_w++ zrnL7#V{3D#u6y?jZ*Is1NdvQA{+mOJFn=@34r?_l8Vv`hL#k=~atIksmYPj5V<6?0 z5ZNA~$MW2*Q&i;_3U^oos)$rAbGIFi44x%G*ukQ_;`8D%v{y9B7_@k8&NmB}HGitT zFYAK7bdiBOytnlt3no9WPrcNaZ+%?iw6zY6X$g_GSIF3*8>J z0ow}NbB;t0pIk0ev?(LJI$h}1pAr{IS1^;eC|AA^j??iQ?z&Mm`9NeEuV`!V9L~fE zKW937rbIk0Cpadda$f?Ev>D>z#97%02qzESA?1q?4&wY@EYS)rPk`?1-XOls;P^|Z zzGg96O7mA6A1TZg5`5}KO6}LKPakcsgRYE zJsX2O=)w9)G&>$l2vsGW#vkF~gF~XCeJ!_zI1A5ZoX@;)F)KSql$)2o@vjv#y?}#} zDlaFOh^n*+6?Le53Kd0&*Cy9h(DHzr9r~?xc_h>W6jjW=d{3&qx0kA4d#^r^_<(Aw z2ej1Ds!>aO3cXUa&rV=* z*1!@crKuYnA`EH@9$weXKR8hT zwQN{=zlx!KE`Mw&T9>5!OF+l(2vs&^Z@zKOdl_kn#hDW}BOwu$Sa@VFVsZ&!9F6|5 zEd>GK+zyrG`s+=V=gNqOjpU+$XChpTGo>N;-O}m<<7jUY=S?kp`!nMn=l-`)@%~`) zNlD#hBwA*qjx+h`#mA}slHI${^Mdb|ONDOX8(|^<93HCQ!EIWAOt?ZZOlDTBSV8KD zhC@$d<)^??C^d=L-IFUBi9CT&QVPR$wMQcet64eWMwF$a5$DYON0BWz*j$JZd<+0V z8!PTfd-?Gd$d||p806+n0bnp|Ob+5CvAIGFVwH*s9$GdLn6wPxmBQUKM+dj-TYGq- z-Cn`zXT2>jCwvMQ^QZPdKob7HfDCKy;Tm|`M7FqvIy(VZu7`Zql$UA~-1FGH?D@!O z^PUYIB^El*U9P{Q<{~_}{adle4$w>}cmmtAwzU4GCuHQTP@HKw?qTbE`Plp*-F%qIN>9eKhTcar| zHvcL_@39dc3a)GhjNk#VlKldk!@pJlr7l6~^5I1DuyjoYx>rP}+47wIHk`5b$}B{_ z`RPJ8KEnhlU@;_ZH*{9@}e#&u?XCC&9OE^&AocXPVZyUo8*#w zxP8nQsjH0REdHku{kqiTO|aZ53{=A*j9@yb4&(Ap2EanQvn6Y_kOZvNO3THC>c!w|j>ONNYg0No=pl~8xD{CnCAVB8MErrzKpxEBt(gb)KPM!@2P<;dqlTn2#GNeCb8m^SgUp3 zQYX!%0<;)w>}$Fj$M@Q^N6A1iMJd`^#%Q!2kCopw_NA`&+sK|?eC2+vGfV5+)i^02 
zj^7BSA&!)swYtedF(o7_d)7IR?AE#V#`krVZ|W-ZjA}&^8fj0T?N;K8cRKt^YT8ZW zEJri*`JjPt)y*N@?5uxuEci5uP{Wk8tt;YqX~Wq=_$YcDflo8&56d`jt6iI~th3l| c_`LJw4}YxmKT>;=!~g&Q07*qoM6N<$f*1g*6951J literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/5.0/wrapCorBL.png b/tribler-mod/Tribler/Main/vwxGUI/images/5.0/wrapCorBL.png new file mode 100644 index 0000000000000000000000000000000000000000..c07b5dd2c3d89909e615e7c9827fe8d4be59d076 GIT binary patch literal 237 zcmeAS@N?(olHy`uVBq!ia0vp^A|TAc1|)ksWqE-VORIQ+eo=O@f^)D5+osD4fa=6R z>YR&G6H7Al^Atidb5j`%EcFe|^$jdkHLXF$yL!4fhFJ8zJ!2?(KtaIep#PK$Yju~n zaCscyd^*)aQR&9?S8fNUbXZ8(p3^zZV|?br{eqo`uSIQ+eo=O@f^)D5+osD4fa=6R z>YR&G6H7Al^Atidb5j`%EcFe|^$jdkHLXF$J9xS{hFJ6_?H%XQWhn=dmM6^rB`GoH0|2;%gu|o=D>ji8yXrL9UWt1YZaM+fT2~2(^}*^>lL6` N44$rjF6*2UngG&GL5ctX literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/5.0/wrapCorTL.png b/tribler-mod/Tribler/Main/vwxGUI/images/5.0/wrapCorTL.png new file mode 100644 index 0000000000000000000000000000000000000000..78cd65681885f683fd855d3fbe315296a16c9dca GIT binary patch literal 236 zcmeAS@N?(olHy`uVBq!ia0vp^tUxTn!3HE;3=*WlCHlN`mv#O3D+9QW+dm@{>{(+%k(&%kzt}ixr%MP1rVFUI0`l z22$r-l$uzQnV+W+l9`*zU|^|lXs&Nyp{i*OGTz10#W6%;>e~~Bf(-@&3>&tz3#dF` z;PyDcY2nCJ7G-*WDKk3{heC3YOGDJ_4%yEal8-L*UOI(!d3VoQ-(z2<@z>X#Wczu7 VFJPkU7dN1J44$rjF6*2UngAc>M|uDN literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/5.0/wrapCorTR.png b/tribler-mod/Tribler/Main/vwxGUI/images/5.0/wrapCorTR.png new file mode 100644 index 0000000000000000000000000000000000000000..0a68b63393f072466455601727a3323f74f0a9bd GIT binary patch literal 234 zcmeAS@N?(olHy`uVBq!ia0vp^tUxTn!3HE;3=*WlCHlN`mv#O3D+9QW+dm@{>{(+%k(&%kzt}ixr%MP1rVFUI0`l z22$r-l$uzQnV+W+l9`*zU|^|lXs&Nyp{i*OGTzD4#W6%;>e~~Bq74cn3CYqc&}?o$^`Z?@!hIL-Y8) UTs&V^4m6Fy)78&qol`;+0FEF_!T7IZ~ebVG7wVRUJ4ZXi@?ZDjy3IWI9cFEBVIDLaS&00S^d zL_t(oh2@#cPg6k@hrgLltL0&4i?d%}~o&!A;1fmV}75553ydHhA#NxumluRtrx|A0l=&&gZug zfOOVC3PE#FRY!H17`Bbp5DG{DGRVa3$@hI{u0I|Fr|_wlhj?@|o_sSEQBu+ps?B1( zFl~(xkp5|;h9=(i4BUS)maFi^<6#n05oMdSCpD(+ny zVCH>idO1UDNP%HgTjyUv0!T@l5}glDb@wY0)BSvWjkGK<^?LC&jTVCKAxR`41{6!l zF3c>=0x+#=Yx}E+l`(7l>{6uWNq_%AYcNn%QU4UvLXnt8S~iIKKJ6kb1BGeO40Ye; zMOY>ZqrNU(8^SbmC+>?IlkWCY2mA~AS6F@vAAX%1`}VK3-fSN2g^W}b*EMu+qnA5R&`E-YKw!$~Z@papy5}3BnR)4l~vvbtrHrL7dJHOxUTgkR| z!vckE`%mB%TWWXa&$YYH5s_azB5vT+#{@1%%A$X72}q}Z_&W$ zQaOd!Fr!N=e@rwY62NGjn(h8Bb7@08`yT8cr)p;E+T>AwFTW-Px#24YJ`L;(K){{a7>y{D4^000SaNLh0L01FZT01FZU(%pXi00007bV*G`2iXJ^ z3_A`14!RTo00SdQL_t(o!{u2`XcIvc{$_SJY17tJ1D*su=(#<45utkP$wUfTkP843j zL9_W_*q!&@yxDKww>t|%7-L8x6s*ShyJoBhoe09Pa{L#>Z(d06clK|$clIkpw6>iVFy9=z`*{b2EMeEJwK4SoYReYa+L&(B z?4@aWRKH489jA@g%TY+%Q^jwbOWogj4%~jG?^FG`)RL>O&7Vst`APjR?B6X7%JuHc zR3%iXwrnvi4T?>>IBKlk9RGAX>vAjC>yE6yggN7Onpe*!OMjI(1p{{7l9I_`&+MxU zkvZ0-Ss0fi_gr0Wr4{n*Oi4!AH9IS4)$Y0V?JmdO;#`jW=5jd-F8J7LWDmrP7-Opu zP?RVr0K^$9J>rsJ1}nvi6H5gILQ0A>V_2iK(mGf&ayOX(0D;Z8hla|kdP+v_l!GrE z!>~WxL#wXb@@#PYh(Jj4#KDj#D+e~B_!^834lS&EC;+UVbl9VeOmc((lzFE}q^6h&ixL|N#I`0r1jK>vx1T&(-yh9-@-98~6hi<2002ovPDHLk FV1k^ir!@co literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/Search_new.png b/tribler-mod/Tribler/Main/vwxGUI/images/Search_new.png new file mode 100644 index 0000000000000000000000000000000000000000..a548690438690bdb96d32322579b781acf4b245e GIT binary patch literal 675 zcmV;U0$lxxP)Px#32;bRa{vGf5&!@T5&_cPe*6Fc00(qQO+^RU1Of{-7%)k1&Hw-cHAzH4R9M5^ zm_2V3K@f(YJ)i9a+cHX#0u+gYP*Fq*5~7MJQc)mEB#IO?RLL&@M3plCATA;b8lV6o z(ZmfAB79i5F-Rl^C5j?p*~U1zFAs&YiEZ844wcWZw3^+Sxt)1$XQaDTA_4$~_Zf;` 
zmiWDYn_)-OD_KG|{@}@Dtx_Ta8!uKUeqLmEo3Ox(#%VOkm#$o&xqA0LN^&n3$iG{y z{P>1xD3URA=t(%uCL$1v7?dTFn^=CDQL?&R;p=9)?zkkQCSignLe+9u-#NHxvbUZ7 zQ?HYXnP9u2B4Q}Tgdtpr8w)0-B8KY$9$|wzmPbd{B@^2s0iipM4!8~`jx~IVt$l3C zd$VUCVq3%erU%Wf?}1}1r4^KG&mNgew`8u`k~%r9 z@1ARD|GD((d=Mgo^hl7OPx#24YJ`L;(K){{a7>y{D4^000SaNLh0L01FZT01FZU(%pXi00007bV*G`2iXJ^ z4Id02Dp08a00K%$L_t(o!`)cTYZE~f|IK7JHO(OxDc{Z@L)ar5pz_L8b69Dm_j3JDkN#sZrq)D9yVik*WEU~W%`51 zym{|8^XBcm8J1y01OPzoLl)n@6w%r|k5Pw`>wgHPPd< zX;X4ZjsdAOVDF;Z+AS|PIeKL@>@ wIh#!jUIZ}m&p};Rt$*y;r7KS>|DJ>X0&#zHf%#EXEdT%j07*qoM6N<$f_oT2!~g&Q literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/Search_new_win.png b/tribler-mod/Tribler/Main/vwxGUI/images/Search_new_win.png new file mode 100644 index 0000000000000000000000000000000000000000..1f4d4058f97dcad72bc498e9103cd5c77fdf31da GIT binary patch literal 682 zcmV;b0#*HqP)Px#32;bRa{vGf6951U69E94oEQKA00(qQO+^RU1rrP?EEP7ZYXATOJV``BR9M5^ zmd|SwQ4q&J?{%9WO=?1gC}=$hRtnzQLsUvn715K57xf<~h@Sl$ynFMeARhD-1dE44 zK}B1n1gt6ATG`?+Xp**Rvf0h^uz5BKyGiu4+b=xky_wlJ^JQipE6hz(fgoD1lK8w# zZ1n>{hZ7s3eEc-k@ktbb0%4k_O8wd(@#Y1XYlH>PV3h6jQ7YGF0T5d&%PRfyF~u(h z3@J!PjKGtqHyS7fkpao?V`ErXXE6@Fn}hWg496xZ4X~WR6B*Q1u@&;;BE>=;Lv9q< z-q~R|Vt{26CP*O|k&5-VuQ2%f328fkL)ajivhsZs1FJ>?LRXD8L~RV}wcsW8Xwj9o zCi;BBY_}qTMZdW@y(;p2wf%hM)W6%b-p}95SL12k-_}ONL1I<$TspwzQFrN}{n)(p z8rs%*PdDFIo7ZyceY95Yn=5BB{lIryBmb@R4UU&4Yh~KbJ3V5&f18Cj< z*ji(>?mK<;9bUd(XN`u9P!sN-aG0@h@*I)_03kRSM4(Bet_}bZJDv>hki>@E*Z0T? zq?H^YBESwKn*oFT9>g!*K-zcjV@2ZbD;5?4@dON~VYkfM_<6>%Q%IG)K;`-!YRK?R z{@yN2KZ-m%e1@rqIfM|1QmLd+P+D4G>&XMk^SQwDsAWcRF5V=2``%yO9}gomisakI Q_y7O^07*qoM6N<$g8srg^Z)<= literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/Search_new_win_clicked.png b/tribler-mod/Tribler/Main/vwxGUI/images/Search_new_win_clicked.png new file mode 100644 index 0000000000000000000000000000000000000000..57bf2f836144e8ddb635dd4c9c4252435318a70e GIT binary patch literal 676 zcmV;V0$crwP)Px#32;bRa{vGf6951U69E94oEQKA00(qQO+^RU1rrP^Az87tQ~&@1Hc3Q5R9M5^ zl)q~fK@`V7GdFuNmx!cDEO{sU?u2tv@pSlMYM zrm>11Hc?S=!>wq@U2bzf+?lbsTeG+BCYSCeUwF)$_r5c4-n@Au{8itAV6HgRHNyhus?po|8a0qTG5fGH&Ry&CJJzUyep3CO zO`9Dma_4bser5N**q+DF;>NiBDBqWa)kw4^Q{n)(E_Wmj+K(^G9wYXp_t}n{^ks?Z zr$_ES>Au^kd|%?NGCKux3;)}*^z^zc`*!o*9?5aeOm@ckXWhnI=(VTy7%_ila!XIf z$cmSDhO|A87a0^Lr=jjW?~FIk_0E(0SWk2FcxgRp+i8!ywcUzHv>{WbC$vL>2=T z2!k*q!>law;BuiW)N}Tg^b5rw57@Uhz6H8K46v{J8 zG8EiBeFMT9`NV;W$~;{hLo80e4RPc;pupq3(Eio``449A3SKC(di9BC35*{ngiQ`% ziEH875_EoJrxeF>rCqCc)!Ib%&v>KpwI{eSi20`7YF*z@)~syP((^|*Z`$`N#L|1I zR)N_<*#{c$-YpdkzJ27@?N^=MbDmevL>2=T z2!k*q!>law;BuiW)N}Tg^b5rw57@Uhz6H8K46v{J8 zG8EiBeFMT9`NV;W$~;{hLo80e4RPc;pupq3(Eio``449A3SKC(di9BC35*{ngiQ`% ziEH875_EoJrxeF>rCqCc)!Ib%&v>KpwI{eSi20`7YF*z@)~syP((^|*Z`$`N#L|1I zR)N_<*#{c$-YpdkzJ27@?N^=MbDmevL>2=T z2!k*q!>lbCYGe8D3oWG zWGJ|M`UZqI@`(c#m3q23hFF|VPLS}JVENo$^HijPiLZZRio_O%yX7jsru|dw=DFZ3 z7Qf>0q|WZII5vvWSn_nqV|O|dN5T<}uaFf6j5jcX;JiimRvgN{U> z^0lBK8&}ighc1bk9bb4_@pSU!!_OZo20!X)a^z#UVjb$ZK{sdt&;bmdu6{1-oD!M< DpPN@H literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/basic_clicked.png b/tribler-mod/Tribler/Main/vwxGUI/images/basic_clicked.png new file mode 100644 index 0000000000000000000000000000000000000000..d30c28dddb7e9e62e80fef472a6475832b85a2d0 GIT binary patch literal 264 zcmeAS@N?(olHy`uVBq!ia0vp^+Ca?B!3HFseJuF~q!^2X+?^QKos)S9vL>2=T z2!k*q!>lbCYGe8D3oWG zWGJ|M`UZqI@`(c#m3q23hFF|VPLS}JVENo$^HijPiLZZRio_O%yX7jsru|dw=DFZ3 z7Qf>0q|WZII5vvWSn_nqV|O|dN5T<}uaFf6j5jcX;JiimRvgN{U> z^0lBK8&}ighc1bk9bb4_@pSU!!_OZo20!X)a^z#UVjb$ZK{sdt&;bmdu6{1-oD!M< DpPN@H 
literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/bcicon.png b/tribler-mod/Tribler/Main/vwxGUI/images/bcicon.png new file mode 100644 index 0000000000000000000000000000000000000000..d4d75c01d1da581f8eff0f9bbc94ccc4afb3e397 GIT binary patch literal 421 zcmV;W0b2fvP)%e|#?Al$0WwKM zK~#9!m6E+n0&y6}KfmxgM_tYPdSli}M=w#GlbKJ>Tcy!}BVTXS*?2}Hl1hg=n-8EV7H(#rw`lKHD3yAdrcpk-DfW#08 zIS^2om;v~BC9?z2J3?1X4ooIV3|tz)s=m#|I)z_l@irzi}(HoQL0-6O8<6~|#v2QySkfJR9T^zbpD<_bdI{u9mbgZg z1m~xflqVLYGB~E>C#5QQ<|d}62BjvZR2H60wE-%M@^obf%j&&2}Hz@5m^p{1XQS_GZ!WZ_!6Fd?Wgzl&xb8*W_ uY~eK!%znG#(7{y`j^4XBPotHAk&VHz+8<6~|#v2QySkfJR9T^zbpD<_bdI{u9mbgZg z1m~xflqVLYGB~E>C#5QQ<|d}62BjvZR2H60wE-%M^mK6yskoJtmXOfFp2Vo|kKu^D z%8sokq<(Y;avXZ!7%^X>RiQ@y=9B*{A8IBjxoNZu1SBLW@QDayKVnT1 sH4@1Fy5f-I>Ip~h-J7S;%D~9RV6$A4|9NQgbf6^+p00i_>zopr02gjRqW}N^ literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/blue_long.png b/tribler-mod/Tribler/Main/vwxGUI/images/blue_long.png new file mode 100644 index 0000000000000000000000000000000000000000..8c827a4f6f26e2c5d1b606288b81a058cced327e GIT binary patch literal 1209 zcmeAS@N?(olHy`uVBq!ia0y~yU;(k$bFcx)5-Xb|AjMc5U&QhjC?r|p8d2h$pPQSSSHj?2l$uzQnxasi zS(2gP?&%v4-pD5ov`Wp>#WAGf*4rC~j0Y414jj<`$9m0ayH!KyiXD?D7@q(Aw;E^? zLt2_Ah>CGz1d;-aLF|q|5P3ucL?(%VNJEgb8zfkPq(cuAkW?5|J{mHkiC`FH56diP WR=(p~qcVWGn!(f6&t;ucLK6VUHJ~m4 literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/browse.png b/tribler-mod/Tribler/Main/vwxGUI/images/browse.png new file mode 100644 index 0000000000000000000000000000000000000000..b6552c0a1ffdf172ed60e75961d19d77421fd527 GIT binary patch literal 629 zcmV-*0*d{KP)Px#32;bRa{vGf6951U69E94oEQKA00(qQO+^RU0|E*JJIuWN>Hq)$8FWQhbVF}# zZDnqB07G(RVRU6=Aa`kWXdp*PO;A^X4i^9b0pdwSK~zY`)t0?)(?A%;f9G6MwGAq2 z#ZuG_Mwlu^VzDRGwG*uTCGmg4$cR)ibU~#E5QQ+b5u#E6u`~-G{YV@;uI*#6-Cl!j z5~<6*lKtLu_rA~F<2(DcAT!l{@9^c5%kfVegDp&;VIhF@rgpbnKJT?T`i|)qU@;i^ z+*+6uLUmp_yx!V5`F4o31kJiMR#=oMgn&U95kxVPk1yNw4v^)y{(T`<;;ThzAFAA8;_9$Bk)UhXk#_?+x?U{$N5A$-tPZDXL6n*T*R}nIvxL& zoeQ&LSthP(G4|5zIn#*KAC|rJ?>xE4GLDO#i)Vc2bbK8-MYxrfqP+-qPt$fwD^IN{ zOI4_lBniZEalmB1j;|x92zL~$i}dQKa*lj#U6iZ?gnE2m(L1<0kSFSvlT?vT=IJp- zoYTZsag+%I-VQ;iNB4I4+I7wq)CK0-{37@sZ-!u0WbcEP)Px#24YJ`L;(K){{a7>y{D4^000SaNLh0L01FZT01FZU(%pXi00007bV*G`2iXJ^ z3^FNo#6&1R; zsr{!$;$c*b-a_^Wqk3&mSovz6w_1LQ+Q(3BR%u?x37U1yHC>*6Su1&X@-15VYQ6{D z34X+5yZq6dBD6i*9g_(*sDZ==nC*^1`84CPKgF_-0N@MY$00004XF*Lt006JZ zHwB960000PbVXQnQ*UN;cVTj606}DLVr3vnZDD6+Qe|Oed2z{QJOBU&SxH1eRCr$P zn&)=(G!TZ({a*yBr1wHP@B!()b2mKX<;+oLy^6iDt<6UIk)u(W@@OQ>-u!fQbX4Th zJ%QhHNkO3K(&*vg;jeF{RiiCg=~b&(XdfV)f^#O9?g@07?*rt;s7~pWdrK#PTbq`w zGx6QxYjw5pQ>C}J_~U&sx76%J^FzJH-6@f1Xsp+bcVgVM>vhUQ$9laAd*fEL zq1jc?4|iv2u#QG4gno_nxa&;E^>9^aY>+#NR=VT^b2ogP8@-tnPps<8SqnOY>v87_ zfh#<4c<58~-ny0nR)3%%tObZQ0WJqkvyZ!b@neUJm_=*6#^&Hf?eTe&2-N8^F+5j+_N(c5rv^z zTX<7KZ#+OW2t9uZP2M`SmSkaX?R@`{O9}!-3ZvQPkIX$y5@ha~dl~|1tQLQ^U$_`> zIpJ0yQ%;Q7=f=%MO6eFB(uKD6BWT`27u@IO=Eldz4WAPuF+rAzl+rOMqzi4$Q)ypg z7u>nXLy+WEIGH;TJa@J&zgH;BludR%EG;b|+PFH^HAK+C&UDU&g zj(fzSsTi@yPdtK%LX=1?73neV5;d4H`YfS|ySuwCz#$PmcXNv$ z&n#>zw9}$s^t+nXT_g-<0YX4=?h%XTKuA^&^m11ouzNIE=t=(}0qxxI+zH9Hs>o8r zoub&-^^`F8>9V81W?5p!xkoHI?pO)^artg`!;1MZZ1g>MzV+ZY3Cq7NxHu8{$(UY! z52Avoc-YR`<5W15M1*f)>ZO8c$Bq*t7OQYKE5gn4Gs}vi7{cPY^Vyc7W05xsi9E{W zy0xesQSJQ81|kF{uC1*hhTIVuW6KbV5sTPWuO@D7ZBge-VQAE>@b!^i{u*VbTV@O! 
z`r_~Rg)kHw6+ZJCcN@j!M2%Fe(Ew>HFkRd=Ksw0{_?9Jep9HJqlBkMDxvq>;h05%3%nR$fT$qSsPRsWSTqNif@X3V zE8${?R%k~RdqQqdsE3{>XDtVgkB_get~~c4ac=b70QcqP<^BErKW`;-Z+D3uZbAQ# zI~}<+PT=h9?E3op>FLQpyGrKX81s{plZ%UsySuyR=jVR7bG|o*`~3X;=H}+{@$o~} z;eed)gDk*ctTXr8tT{tI&-xvAH0DxD0MJ~tIj{x)(qKbMRb{=AMQ?8ml?qGxszE(pb&;p1G$Xkj84x_sl&FfizZgzGv=f2&A!^ d^F0mse*prFAq^!nXbJ!T002ovPDHLkV1lQgUjqOD literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/defaultThumbFriends.png b/tribler-mod/Tribler/Main/vwxGUI/images/defaultThumbFriends.png new file mode 100644 index 0000000000000000000000000000000000000000..7780514ce191321fc70be38408ba9c956bfa4296 GIT binary patch literal 615 zcmV-t0+{`YP)$N?z_Rc23cszc2pt&Fj z9?2KxIFlsF`Fwud{ot!qDt7RAJnr}VcDtR4&%DiM6UXuCbn5r}I84JZ?Dcw#T0I;N ztaiIy&`YHf5s2&cI(#fCk7l#ESS&`P(R@B9!jx^7%cWMUH5!fUdj{{ugscJLtw02q zAOyBrtyVb$IzAEh42MG^P&(&2osQx*wpuMA01KvYZr2Zp<#I`009lYr>~=d?H6`Wg zbV`I20bQ@x<#IWSqED~q9IK>g^$sSJNtSmP5Gmi=4;lB}&-B(0L1se^tJMlA&1N${ z9;GIqB{uv3@^L@vkCu3Gl#Rv)g8_%pCq9Lg9Q>A`z?^4;&O(F4tV1ZLv zhFbzw@7~r6ZLWqDABX81d>;1t4yo~C#2G=@m(N}W7yxk$4`;i>e}n)4002ovPDHLkV1gdb B9O?i7 literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/defaultThumbL.png b/tribler-mod/Tribler/Main/vwxGUI/images/defaultThumbL.png new file mode 100644 index 0000000000000000000000000000000000000000..3c0055fe0a50d23bdbe513a25766033ce560141e GIT binary patch literal 4404 zcmeHL=T{TTw@o4?B$Pl1RfJGAA{_+bY7%;p8agOJkPd>Bh*WP7E`f-2q=uq2A#@a^ z7eT5NgLDMxMHCQ0{Bhr>_vO9y{($#o&dk|!&N{QstUY_pKgNdIED$~j003ao)zL5o z0DzL`^HMPAd`ypAF*`SOE|}XG0HE^yb^6%#^EI!(rj@^$&wc+Oyq_}wQG0AM}38WgA{M zx|N5{uMXRFhnZ@j9mFB%Yis4+&A&92Ml=3NJI0r}ALj1Nx3(^7(icZ0fx?fcjrh^; zNf2Y8H2vApDhu)lM{Zb2+jaUD%+CmJL>?#%0>j?gUeADpSVq9AVZ{+Opzzb*m3HJC zs}Vb@mJ8F{OgrQf0AkNP0`_aeXcxC|Gp!)I@+mDeoE}UGXXU}HLSP+3(1_Wi^8~+E zBiQ{b0LZOoz<+JPt1$NV_WmJPbar;`KMthQ|3U=8-_5-?ELNaM*F!hAM39n}pGQYW zZ{=?;EaaW$2zW+@bdtsZjP zl_9qN2+qD0ewI5LusQh}DYo~jQEaX;?ZvU1bf(;F0x9XKDS{!6vtqUBZ=>@e;G2D)Ps4Ogl4g9*x43tQ4y^j^G;sZ6k@5x&$AKlZ25x_T^Ki8bT;AR>+Z2=}yKq2L1{n{&R75k& z@H@7#^~((iql)bd9#x~YoxQ^5o_q6P!pvpXKzA@=%O2vqXvqL=SWkC+F2(aQ=Yd4A zk_IbE5?wZj&}n(}{k|9>!$35C6-m5X;u)~m?WEKF#vl7Pg~zFm`M|ikuVM;`zEK9@ zRF4Z5v+B%EtR5adNzkqn;Nc552PnM{LmH%F39>5nm=&aVN*yJ!zs)*notb#N6QOP^ z4CgWXb`^TeBXii>ss##OQ~*)KrO!K&o^S^c99IHu*p=NcPv5~ z{zJrDiTSk=dH97|eX~qM)s0?fFNr?4-jsu@aDFz(WzI-@uFw1KKs@aU7Is>$5M7E< zAT;nVEJ;Rwlgjz{8cJ#(XciGw?grq~I0%{nNEI@y0|Pd1F{yi0g=@KXFEy3|fBs8z zJ7QoL6ILbxb?SP z67S`pVwfZpl3cK)1XLwg9AfojNKmX#!(*%}s^qUHv*{~iHUmwcK>&H?-Ze|VaD6b; zmbw&?>?4C`%vEs~hz-DiNn$Ng47L%*gPMWm85uz;<19zGebs??Pxd}Bq$LgqzrRj} z*+5IbCL~tqxf_xz^Y*FVx^K|^(r=3ncBp~OBa(yh9TdQ?f ze|4V_u{fnFYZpi?h0=T-YT}D-YQY{!Vz!xjS$7mK+=gC^Rr&;`%)&o=7uU!YmeR~8 z^1_TI4MjX$sr29!D6D#6p)4@YPhP&>2;6H<@HJ2H$urdU1?0_qd0=PVw!YQ~yJ~T_ zePJmMNifHMx37IVa;c0K>0>xYI`6(O;-&=eUUIV$kFd_1#6|A;qseaHPz+dB>+;&l?sx>wM>ExstLi57dVp*2e6BkV-%)1*z7@RRm zWg8-S$7XAeHY669*1p*bfJo&D~xcQx;;}wO$j;}~*6q?=+Aw;a$A5%dPekQ=G zA^(yraYO~^Z$uA9dPG2!ucE=(aP$%GoPinsN6_JW%)f+PaZcEl$Lm=TwO|c!I1ZTO zclN0dT3IaI4j=G^pT6}(l<@3j58vBvAV2U_^SpbW^eh3KOZ$1e9A%mOgwS)#`^jAr z$20dlN5Ede9J%MU$VGm(Y3}n9iBv05&k+kCRtG$7!SniGm*zi)3a;;HU;O;-ZXV_b zJOrDVFIKN!ysb!VqMZo0XklXhY+hoF7%t_$_Z}V7KvGN}r12ssw12(~@Jsg$Gr&1r zL6nOl**8ONvLC*$WMpLA7m!w(`H&{fJ$%X6fv2JUmSoI!o|g2TX2RjtRG~*RD>pCC z_hZBIn9^Ri-B9828^^0(=!tn6&jk*gbhhZ3r?sXkk)tO!x<3sdKBE@0% z^<;1yusUaAxG+Z=A8I%djkpd`@Gj7M5JN;V<4)XQtSaXAoqt5C4iQ9S2OR^jE1M6V z%7JCRbF3V>lxiu}?Ku01A#?Bx%JxD-2{;A>d~ zduR0PCK=~$mc7c|CBC`<$(5*$_FQy%<5Ttnty;_n;ad-dx=MNMS!bhFoA$~gVTFB1 zpYpXQL3Yd0$m!eNDqEd?@1K9x5H;Trcu{mRPlx6F{qFHmA_c|jMgBI&BcJT&shtv& 
zJ!ov;AiZwG@+YQ{od^(78FaKT`8F-9!~O($C5N@Xd zhIxSSrk0#Al)<157gmAa=6PwIYhpenxgS?Pq{N3wj{{l>Gbj#oBWpu(e{%*axQpYU zVTGMqWlxr4YY&u1Vc5752WDple+!jbaa`V0#QMmlg-9}ALZ@WuhFd1g@qL?|^h3Qk z2XVA(!(hKon0Rfbdw`JVg?dcARR7%8^+?!H=QfhAj<*VqLPWSVr|ReE3O(Yfzxpid z^EGW*cjnm~lcZBR@Mv%Ib2Va$wpa583$!F)UY@I|o?-i}V^Xsi>0^gEmqbxDry$=ZMXvrsd3WXY9XMRQ|kJ~7y zsFf+j&?T8FiS}B-nBCij*doPSbf#XmtF^UTk2R{ z$gx3V;)l_Y1fJ1-%vX_ec03MP^(};+>nSz;iynAXHsn)UY5m?N?CtS}WDmnDd8Fi_ z_f6t{e8yr&Wx-RGQ+4i%-houMXIwTX-#y4&$)LKD4;!>fDrm}ID!P~IRdI;!#6 zM>#*rKV2rtSj6{??zwctCCa4Ov2(g$Gu9ICIexU? zy};>eGUi87_^_cwBWRbF|XfT(SNlWderI6sRCO6J{Jyx#f|t}1^X z>oVRWZLqLUzfeV5^5e`GbBoNTC_XCch=E^AGILaOcQE-ZFB<>r?8o|8gU@`}@&3xu zt|c7oTuvWb-HZ|L{1NpLvl8V|oQPfh((_Yg4$sO&mzf^lt@yQwx`>(To=kk1th$*m z7sf2OR@Vg(0C_&(YocC=g_!;@ncVYusf-ed)4nkRrij|~rNvpt>rriMjB?Z-9c)aj z4(CR_dru+Qy~b~vIuJQwRzZMBGn7z`p%YqV@iZL z%f+NRrxD4t#oi&FkvBTWnNXAZLeb^E$y;E1b1?wHQ`A=DGCbG(===KEtEI5Ymq6r2 zv77usuiHr6(htIi=k66Fserq9KA#zQpo!O(lQBB=#U|Zu*Q?9v@+hhig!tE--g)J4 z@w9>yTx9kk{iea2lvP5b>4oV9?F_2LS55jreV{?m&t%wS#&m1845JdZ(bl=gwGK3# zd#l$!TDvI7HQQQu>*>@>&?XMhFeE#>sMQf>5cKARxM=_=*f0i4%B@Is?Ck8IUfrCC zlgCF-wB$=g%cuOdb(*}O3%0~P|BEO*j`wKj$ozUhlblY39U33!go*}eIx2%W@K6x9w?5RIQoGzz@?nRvhENUH-Z&KtK)46tM z9e%fwAWobBKV`l3#V~tlo4Y3m7DT`T-)NFE17^l2_G)zau{2d8ZM1JSbk6~jmPSG= zPJ)qJtXKp8pSc-!cgUJW_K&fRtFWbun&LOqpgm(B0)+;v48TVa?9?bezmx2}oBKG& z?5HeQ8aaapU+}B0UnYB-yG}5wgM%2bZ!p~T4>c-U`Yq=YVHKR%J+4DPJFAMU{ES6* zY&h6A3GF@DSDM%>A@32qI%f!OfFK=2d{h~AHzTgoX1?3lMN-p~`&e?bPktSEY}$Q1 zBcsq@7hsH6=GF{&nhc^or841pJJJFvwQe@AR% z+N+=zKtb@z=ox{ZD>dfSh#)yO_pgcoa2-1A^LkxE9a`9K`4oZ7gqWvgtc4kR2ty`U zZe*H7T18^OU&hE0yn-hV2gZ@z2pTcoTFA>+P(g>j7_Ak_GC=|1+AM+CwX$39%p&KR pkfi^s_WECi+5dz8O}hSJ%zG&2sG?d-Isf(m=xQ2jRA3#R{0EB8<;MU3 literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/defaultThumbL_audio.png b/tribler-mod/Tribler/Main/vwxGUI/images/defaultThumbL_audio.png new file mode 100644 index 0000000000000000000000000000000000000000..e58128ba1482d490707344d066832b907a487bd5 GIT binary patch literal 2130 zcmb_edpHvc8(#{`3DdB#+}1@$OfuWaWFKa8nN>J-4v7hqEfVE2Bg`?UD74~Glv8qR zCYRi%3$e8k+% z0CiUvoF@RV&0K-Yfl3OQmptjEP&+6N9u5Eirwgt8>x{zIi+27g+AAU?I);2H7~l{T z5g2UjdVx#{_6#Nm#YOUi?EnDP30IthcdTe$>`&6C?n#gQ9qD4{09#K-B1KGOYbYF3 z?dQ;(+O*?NK+NcFk4;aV@CxBnJ6#|WbF9= zx+QaS$zb9D$2zx>E)Pc)v_?&3iTlIqNF>tG5gcnwcKhvQ-;7i$jh@Jvy-1;yio>7w zvOrFY7CE{Iwp%9tqkM6MMug+c*AmVO_D^>YFq*id{pteWgW5>LnhO~q#=(|A?qelO zLBUj_>kz3YGF*C5O`%w}MB9P$(HUfxYPm3Xk8jn_2Zzel(OKV602a7HtQG zLUTuu$vS=G3%7Y;4IzNBK&K5<#OBeLFRzN5WB9_S&z?bl%T57mx!Z1jgP8{EKC7)o zqS2?zyypSe!>4`4SR0!T**?czee?5`hb#sZ0h~(5p7>PN7_dO-(-i_+EEbEw^k3x* zjU9Il&dij>QyV_qB@z5}?$NpM1sN!1ys_h%IZI48 zr$Et=u?rvL3}MN@$h#Vvn!`XGYYvIU_9$F-5mR0jWa3j7@J#5R5f$<=4-)pe<<`B6 zEt4y2NeM(k2B-vOZN0KI_UcQKz=A@d(9rPqfCCRT4eeMPhp$O)Fl(qMp$MQ86G}{e z==pstJ1@SKS6?q%{z%Ar0W!%sZeHgFnd)+D79(`8J$as*n#yLgHI3}6)iXdlJ8S83 zNq2X5W5ulO;R&%7v}ffkFM;0}3&(x#J$}w-W8?aR%jb-hcbnwAomw$F9D74HSL;QA zKwCzW!=Y#V{4#$Y{rtH`JRNOIi-Y5E$!f=){bX+_RwK7J*H@{KvE^ke*tcIKdDT1d z>q%!1fXz>&wZyNQy{Jy0uA{FKlhue<%WGn`x^_>aWZq=mGtiA^LU@ z2!w8d!eZNayeCRxc0qkZ!%^3+XziqhfwG%<+XI$pUpfA5H1%?KYtJeiJ*TNR6gmI- z{DXR@!Om031OkD!0i%|ms^IG2KMg;nghQ``@As7t&Y_Cye5w{l+OYA`gIj6-ws-%K zND#_LcEe3NPF0-}-|EvSl+4eE2I>_GWtU=OA1n3r^bF_BgU31d`Qeq7m1kNe`ucJ{ z(}^DX}_UZ zQLaxF5|Rh5pgV-~;8J6uIJDt|H-R~gUUn%hvS;Wgln}*~2Ah?6dwH?dQs)h$0cR#O zkB0I!V~@9xJ)+#Z?JN^D)eitvL+~ECuOmiyI%;VN_9KU!yRkPCc3s7>?w{~!=H=(- zkNT_lB$nR-&*4~FN0+$<;a{_rP}}~OJCU5xX|$Sktoa9Gjiw9y%)J ze9}8SG`OFJ()mT{{c|>LC{lTL?4c)re4wHH;HRkxD!04agC6fj+FPxjccqOt`Gc7j zi6zl1Zk@&^ZB^bJAl6tB^n8=qZXE{Uy}#)M)%j#TRe@QrwmrWFvHbIH{1Ja%0(ZIk 
zs}B;YPC9vgqOFPe`c3a>ZtQJpnki;9Zr*)uO6r8Ip(71dOq&AsDUx!py;H|KyDZvm zQIvU(FAVS!c9lH3QYz$3@xNDx7v9`JE`OI58=o=P!C-5PMZ1e)tCN}rUs=3tFF~@F zR8F23bzGuT*Jm|e#op^=t2i*725Bg-V~A(;4nH?^@qWy+up6^B!r{$Aix$M5JiUE% z8~@p#k_wNOfB4be?zYJ5*TSeK%u#0tCfJJeB`WZ6bkYQu;O;I@f5|#Cq`@C@fr*qe zSK6f!!y05cpV8b&qJ4%1+1ko(y}1_=71jTTY7=-oyCnC4z~!;HD=ke~%nKkdkVRUj z7kp>yb$J8%@o`%ljfNHJ)70r8hS{w20PCXpOh?tARA%&o+0=;8rFx8H0lnw2#eF44mGe3L-xPq--k4L*2V;sDlKY7E%G&-D_n~cgBU6h zNuvBpD48y=*Ol0B7JtazTOce>Us912g#3HE0UGZVRQFG_;D6&(sOC4d!+`BVK_xjK SxkvF709>6(xF?PQDSrbgA<1_D literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/defaultThumbL_compressed.png b/tribler-mod/Tribler/Main/vwxGUI/images/defaultThumbL_compressed.png new file mode 100644 index 0000000000000000000000000000000000000000..3ed4067a68ebdc61fdb857cea8cc4092b888e4c5 GIT binary patch literal 2349 zcmcImX)qgV7mm`gwYJ!bR<)F(Vymq#5h6l0w$S!kYKbjKDoSmk71c^5ZHZV)ucp*e zl-^R2sIAplYNv#__)xJIpKGg|x!>IX-=914yzhC=%sb~i@0l}m&LgL5u#+MRA^-s3 zB;3x%1pwgF=J9f&W4t@(-kdAX2n1OEn_Znxj(R^Us0Tc*0s{)WZ#skm(+w}RBaYHNDi4V8^N1!Lt+LSHveM5Fa z8Srl_(c)ra>rb%K3bO;n4iyy@RFF@}MR{RgC6G5G$N4Lw(0p+_?#GjjbwpqY219la zCa~)I%C=TA$2d*c%jiYSf<3tL`ZCZwgbG@rr$Jjj(r3m7da`WNPJ`#(HknYh#`_iZ)<9-w9^TF^7>9V2&qz%1o7v|=w4Np z;6Fbd>vvP|!;$s({U38(G#afjW{tZ$+dtYX2@FtDD;*Q*&@5%S5_FkOp$v1xSOT6tn36d1)D@bV$#w5@H zc0xQP697E?Lv>?J^fm>Ze`fhU)53D5+pqd(sRt_D0~+w26KH*gy)P_Z^RgAOuv_By zuvu8?WUTajSAV}`2Egtifmfwuf+64k*?`?=@I#_Kx=La@J3HI2Mwe7n!2AaXyEt9> zKC!>m{T&O}n!9Sdfxq~sQ6T;!qF)@=1F8}GR{Iz~S>kvQnLxZi&)OVX9D_hbl4&z^->*JZ; z<0-@Fit+(qF?IOH|| z>NV7$GEOOvG1pPW0k7O@Lz><;R9(#9#!-YINL?&oC>1IQX3@ zC@9E9mPx5RsM4x|DXQV($QbABbgc2P{|barRbiqQUDOHFxzP}2?q^YfOu+3 z%PTE(_oDQ_c6aZzj4*5phMP;oCJv<9TA?E1%62qwgg#Yt&(%m;C=sv7+ox+P&-%)j zYIP2dBK>0JtXq$_Gc6UjlrEc^iO?pXtCNj#vkzz>rdI)?7FCle$ox=hp>93Ij)JBj zxa6w4IgvwV_%P=pZnf&7{2ycEqhmD*lF&a}XawBP}-5h6C{%u-d7 z1qNS@UJ>Eob@dGj4C2YE&r-wk5(3eT#Vx5P*ojhoyb|##jMb?a9hTJ-yH$wYdYB%| zih6d?rUc4}Tf0VHYJE$c5U2)e+vHTpOW)q#we&mML^X>rD^00XSF2AR zA+?c%p2CJYW$(UT`kr1)VmWMumK$q!ek`f5AG7sYc`|pY#X!&-B-UXIHO+ysKmR~@ zvLcM%{xMzJrnz>uA}An#yQ!nzc4?o}IK|<7HCmeN3X&taBV{^TRFg3R&NaenTaiS~ zH8t5$BQe$XsD5_Lwb`O>*f$3Alpc3=wTkNN*EWd>Q&ZHe!Oxb_6f3uTH^4q4iVC^@=&?$#)1~G*2GGu%sQmFv8%h-;K3bMq2Wwy0 z-m^E-FcnnC?Tw=lyO~C*cU*9kjBLl?Z5?)@J7|?G|BWsb1RJ~()o$l%+U&)Z?s(Jj zQSB><^D)U%GrQo}3)KZ_*t11eSW>rk1`g+0ZeDQV4oB4JTmj#R5f#|jcfBQYrgqXV zfaGs>Tg0!V-c*&~DhPJ?GKLD?h=%}F1;7wMur#UiaxxVhA_#7sS4jLl-R=tDpV{-T f5BZ>VZr%S28lMVA literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/defaultThumbL_hidden.png b/tribler-mod/Tribler/Main/vwxGUI/images/defaultThumbL_hidden.png new file mode 100644 index 0000000000000000000000000000000000000000..956c75842149696a89d3996b7424afbd2a9ebc1e GIT binary patch literal 3415 zcmd6q_ct4i_s3&YOR1-X3Sz5GTS8k(YYT}Q5qs3Gpe3ytwOgxJZK>6w_G61#YQ%U@ zJ0XfDq^O5ljnS{~Kkz-@^ZDs>&imZ^xxK?+@^b||a`AoQDrDg8f{pi9Pg?I0I*UGv>%xV61K2*M&?xTYxhf6MTGbs#ZC0yERA6^fI%l@Agles;u{b` zJH!f>2g`lqoeslAB8eYx_*@dOgS*2;co<+iY*B}>>}KP8j5 za&iLazg} z>eroZC~JpZPf%aL%ZDz%F$>3*_|wEAn_t!7Kb7UdON+RPX&Q8=1!nEl~xYjNk>l+U9Gp|TYv)P~iOL6olv#IiaBlzQd*YPM1 zr^_P9ymE$WplTwI*E3%4MlIET75tg#tNxFjr109!bY;5>=dG~Qpt{kk0@CWir-|N1wo z0*&T}J+{LJ`F!G{{9kw`YN!m4d7-8H8c75fQ3D6s$=J#E{ixtRg2q`|? 
z-#>2l5A~3`OA63p_+~uNhhuum$gFaMfq|h6j?K-l^IuTtzF%hyoYco>!@nh_bzV=1XbdaoKF zpC<$AK?51C_I~^3IDill5@zDg^iYy!e;pfJPG4HWbJuoJ2^|pNfJ4XO9K{wBSy@?u zOAm{b_%*`R9Or;3&lobOwjH#eDfXgqn-Lfc#!Jje8R8oECYrBcc+%zo8KOa3N?lsc z|8pX!Ze!+21SeR8O`09@v!^~QF)W`;`<(+mirsdotwWUr(2g2T-w3guzgt0Mf!#afM|yK^RdJsMZ9>Lyeom7P zXGSvzL+(v&7uN8|{pmU(YHXjOPA1uXugz0Zn6$*Sd&gRVjlx3{8~c0T+gxp$kwI*> z9#5rm@~k4#X{)v;CPzG>I~~Z)hgcH|@-2z-yhOhh6l6AccbQNmo{`d95LJb?GUOJ>D-@1Qwwf~&DjtnDSjMMXxOngtQO zpDwJ9+JTtM^JTY^$8xCPd)Rzl0WT7Txr>EbjW#QDkZe(n%5q5fP5*ZT{Q;an50I^FnJ(XZ7RE|PXE%9q| zq>iAHSQ|4l&aLcTe)Z71g;aO*2hQ0uzgGLNf-J4At*xv+g&Oqk?d?rK9oWp>Q9eG` zG}gvB85tj+{Uo+1IFGp*Ro^KGNZYuKltnKTDtF57)y7IPu=C$7ocniNo_XdWh9fqL z|JQq!HdXyOP<2+WPVW$@Gh|NZd?vN>9(RAws%d(U>TLso^tl&fKHSWZtSSBCmsPs9 z4h}oj0&-mFMb!cm{Y@XP*5+nqA>~eF_;+&7TgmW9PakJ@^OEQ1^B^LDus_gSKJMe` z$-L3#ecRd4c5_YcD(T%u;XkXex{=&^XNj*ZU$Nps(g}m&eT0?-y-6k)vAp7;J27-i z>8mEAeh}^iYDtE%-xzLxUOD1Uxg%WzEmYi|OsN=6TJMdS3&`;EdNI2r>7A66MEW-q zf;_2lhGt9m)T`GDgiTQHD)k`krFRLEYlog-upd#m=GcYMz`s=e8ck~ z0CXUHy=1+C(?CY|4EAVK>P7@K8Bz1qEvc)s^8q&EH#F4mPUxmF@h?-)P3=rDx!=IU z+f*=v;|zg+94OwWW|V_5>>x?PXJWAMR3J=~3l|Ia;vSCUO@eBWLv=4XF(U$1iyZ@g zF7C$uJy4PB-#gC~j&$y^f?{y@3;(d;+VpRX8Q#72sTvU_D)-Xg4K6`4@rO1CARVp(E&cKy7T z;VFvVUv|O9D}9W4@$~fk?BxDN-p7yzUxvavn@v6k?H)4S@C`fY!p|o-XuTD*C%A9V z%?dVGSj`;FVEO2n3E@k%^rWC?KGRHmq5A`|q7GJuatYl^pPv>(4RDDr4nUAuHruSx z_9mU8k#8U5$dq#X`9I@O6sKR7H4g7rT~ZTTHdxM^^MF+!?^QzX8AwVm4=A-r*v9r z+p7nSGL{8Gt{J|{voB1dGXq=NMaf&m<#G=eCvyFS|8g;0eVW-X)#*J43z{T2RU3K) zH7|Ym2KbEivX4FI+Fw1qJ!g?NSRQW*l({LCydptj-Z}J*aVyG9+hc4~rVKDD=j)c3 zBs_zxIq@95wT=sMC#o-}F&y_=I!U#3wHIs&WOYc)1n9lTj5aN~2qNGi>_uGt@y~Ba zvtWQ*CF7)J~2q+CQhd9}|gPZSg}jm)-m` zpT&zvC<)DH*4kC=)pW6B1!8;U=|AmneSH%!qOFJKZhzyBv#wO+I;cp#-}Lbdz2(vM zTY624ZC9h3`=2Jg1RLpP+$LL)T?HYpEntCt<=22cOJnnki)z>&o7I=Xq>#aqYn0A~ zg}2Q!Dd1gi|MlC_Bc)@hRkQ{?+(RdI3OJ1@xBk%1jhOgCga{yml_k(;?IKEsOD@`a z9Tk<08LYLqSzo4cpHZy1&!eVd}bd>#l8U;oWFH`|O)m{_?7-U;m$P+eBXaJNNeOTG@96A_9sX z0s@LTESne4e){QCQSR#vif8)rTCA*hv8B1o_A&9foA*bbbm`zIwBS-=ap53@Z=d*Y z`1$0S=bxWHe_k*4hF4j!{>basucx<8?O%4l=F6u~N$#B!>{Vy_ioM|yWKwF8O-M>x zTcr2>!-s%#-+%r6ckrO2@%|wB-rc)*@A{LF`1642>*wip_wV1gx3fEE7QQdwrY@dDp`S)+_Wj8NrzIM~j?@B(>cR3~>FR`kv zsafOwX^l;}iIi!n>hd)==bkt@t5RC?+mk0tq!TN5>g?aSb7gegKjTIY`_(l^y&He} z$;--qt=ju!qUz6Y{%VuEySsHxF6LC8sB-WuL2^IF9=k@MO% z7nx1Ee>v-CaX{OVXSIFDAFr95Yi6i?=)7+HYol{ZHu9L>b$s^gmzAtqXOYZ|^>tzE zwD)X3xTdVEEX?}ktdnZW5teN_w$nFm7l|EUvqM?rO)4GJPl_(rmB{HS?xX}bJIST@XR-<3oBpz`4iLVc1SnG?{ipR zPHN(@)n7i`+j6SBcEa9!r^L^GdbjiGCzrs_i%#EmY>6(i*i{pEea@S|NiEvUug~pJ zaI$mji`u<;!Rh{6u1a@$_wKPc7F~HbJ@&`#e&*9`#vJvjP3^wG2$S_<(|&()U3%XB z+5j#6sAcIjEJ}AuQ(tY0>B=}K`dal<8cVhQEqN;eUD;{eqNkUyJSQsK_ijSnetQX? 
zPfPXxOKsR05x@TK)rw7Z)6$>XtX?y7zJ1|Sh4kjhCsq9A`;R|8Rd@1z;ldcFFE2X^ z!k@gJr(e6OI^xAQOBue|!iSC+&-FVkoBs8sy}3l6+v1|zxBfbPUViZTCeCkZn`iFb zV0M2`HpGfZFWUY9&_YGZ2Tj!$*(iYg-W{Ilgw?U>Gc?e&k!yyBhvcQ#539^x}J mWL9DUCPgY^`v>uiYz#S%dGBno%`pNNS`419elF{r5}E)MgC4X1 literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/defaultThumbL_video.png b/tribler-mod/Tribler/Main/vwxGUI/images/defaultThumbL_video.png new file mode 100644 index 0000000000000000000000000000000000000000..c333e474c6fe65a8ff1f5840131df3bd52bff680 GIT binary patch literal 2103 zcmbuAeLNFt8^y2U6h}Lq7ycNpJ8I57GY?YNn^D<_pAuoB4L#s?!PHn_NwNN(J zPGzkOiA6$;uq8TbEL$C~(J7vt&+}i;`R}>@xUc*E-Ph;-e6H)hzt>GWg7w8r#!i8ZII~cpNNZ%NK6imCj#8U zV+q9V-qE2XqAxKtJTlLK{82L6G8aP|4#8#n+Ghy(&*K-hNbcIvO79MGEoTWICky$64T_f#Sfwzlz$ zijl_ZzQ~N#mG6_imDoa1e936X_;_1eTl?iLz2)$sq?X0SMb(x`K@S7epMLaf{V9Pj zC$r9apC@>%Ig4w)4kiQ-#zsZeK8iSfn(fD~`YjL4y2pzrkxV5+ zv13h6ozGiaB$CqmuPy~=y1U!i*$LJcl(?kLM>apzZ66#QY;SL$j}E8A#>S?kJeq?U z7N8z*pFDZe(*Wv6ozjxo$CJtO&a0+)htzLNarqQ{up)$Y4zu5m#$ifMthjyBU#!_6 z(>Rc^bPv7enK1p!EuvNZ$nbFPtfP%h+$4f$beeF^5X7+WKj1i3!_k{vwGM7FB71vM zl9Cpuhq#*@^0n=?EDZ-D3DTIY%G2TS;%>;?%!F4Be&>6BQBl!@zvkuQk83xmaw)*{ z>?6h9+WUzg&Dq23?g$?Us=))7t$OcXlXRKypdLj?gN&GMC6Mb(+&fR;?k?d6XtbVF z{%`(5%Ef-62lYzxZ>;0z-e>Bx!Kg+aF}CTBdNAjz2oi}NqEEf@2Z~j+S!FAUQdLp@ zR4(<@(mjBOi@)8xeQ;oS*neRoDI#KXS*@wPl|rF#R3bZKgPSrs8Bs)H)z^z`HhXtK zktmOb=~_q|C9jeMm(u-rnVP~IAM^Re6>poeRx|7T*h`AYs9=-FMIw0er$h`T#>+*p zi;5g}u^L2p7Fq8yxmpelvJEs`AFXhMbMdZ{}Czm_;G%}gI8$C7p zB>B*E7>2{0J-BfdsB;F&RMoQP!fTQ^z|?cDM7xqRcK8zAb`yO;*1Z)0R%Hn@fTTm-kths-E3vooqV!dE}_l?1r%m18)Wl%}P0#f@j#jP~1rZzbf2Tr&KD{DQDlcUO2GL80LJ` z-ob(1MEl;wVj2I$xRP~oH50W{g9>`UG7=_#e(SUrc@XiT8Jd>_1|dO6SD+hE4WinbCn4Vh2AY~O5g~qLHj(;a z3-d?%D!h3#wEqhq4sFeG%k$A6?u|wo#^z!AuF?$EEr{RWz_C2pAK>EiUhVpjGW5T7QG6vF&YKjiugSmbiB%?8( zm3+(XYWyy)Q!e z>K4ZaI%=wO(U0xko_&^~-%J<_hr=`EvTc3`v36Eck31sAj zp7L#Y&3n-MJq9NF>g-;Yv^CwVJ4Y6{Wz%YEq!;~aU-kSlD`V^y8?yOsE~PPJ*|y$P ztkIikbBGU`s7m~Py^?YHyof)uf9xrJfoGJCg`zD-Idmcn+^QeA!DQRA9*+QpBM`yt z!5v%4(nfYJp_E|tIBo(((L0Hz-(&_^^(3KyV$8A*rrjnXA;B8>cT7lhboa?AL49w+ z=P!U{$=(a*E<)pAG48J1p!9VoW6C)!qiK_Fm}J+O`Wqy$q(p_szrgXY(hmUr4^ck| eE0R?W2tf08V{*h9^%2$25AgQHdekFBa{dE1me0Nb literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/defaultThumbL_xxx.png b/tribler-mod/Tribler/Main/vwxGUI/images/defaultThumbL_xxx.png new file mode 100644 index 0000000000000000000000000000000000000000..5963698ba19e3e625a38eff03d2a974d9a123a2d GIT binary patch literal 2686 zcmd5;`7;}e7EYhmy zictI7sMOlpsm8UGzEDdLv5R<_H}n32H*e<6d~?osemLicZ_fGVB;JJDNS;tU0RR9b zZNXMB0N~gKK7S`Fz>hCun9lrk9Bb}i4ggei8ww>Y^6S&V*6zX1gxkTP==&IexgWs? 
zqhcF~#$sR?v|rf09*hY9aN?A$l{x(3#0uH$?k^}%{5Uc8c}0P=_f_zq%6ZuFXSdMV zUHwqj%BFb2zT}2vL6J*MwKGJ!79tH6kk?5lu>xDMPnIh9v>*6JRT#!!c|AqU*JqfJ zP)}F-b91g^=RF_fN8Jl`DIJpm1f0R(j{#%^RDJ{SAtUhrPBZM}@E=`g*mlhCA7dGv z&(D};eP&w5OVyfiN8UPuK%m&z*yhkRXnjRR1=hpAc4BGCUyqrs>mAAC_Qc+3=P}o3 z>DFSH?`>^uyu$+zcKYaV1H{l?W|1_Atp0ZPWiaZ_T6Qgr;eiLL|QNR zE!6n@(L3i!s)Xjcj6_Sv*f6UiVSNEbtDxKfjn(IhT z+rTMBxxRkj=cgt$Iu0_#TiFGU<3>>1FS=r$94ZJiqa7N;IK_r`U|ZFv0iTN?*{WV) z2a6+be@HmynY@4hhqJ}I&;=HU!!cSID|PctrY*((`Zd0z!ZI2o zLq>5~|LLCtkieG%%r16nx8G5-xDgc}_IcVzxgfsaJYzwDWz%!gl#Jfyt#lZS4Zc<9Qp#>UA%t;ojc$iHk(0ik<}m+df=Zr+j^qG+bix4(bOi9XFjK=B}A@lUq(u?2$qfb>!E-PC;$e7JB zrPJxkZ=5uZLsvcJYkex=Y=<6UVPU(i80T7Wb=@+T+rol9^6>DuJKRg=YyRTd**D?O zOhi@DwCUCRdq0N?4F*Rf5CSK&i~x38pr5!npEW0>Q4v&uv$5EQQ#6<&!FzqXQqO}- z#MM@uGp@mw)C^pGwEx{= zBlYXGQ>B&|h*5+__VAl*(#~oJU~zO5{w&0+A;?_BA0p8TZoK3gKlvfTSn2zaj*<+% z*;*?@5)-Fkkzcu10@u=T%)2n=*7GSTC-~KGEY zcfHx{D5e7kjI*z)*OvPfgn2Xos#7DucOE9m{RIa@pT)$aY1;Ip6+nulr}1#nq1K!~3Hameuqn`k7Vrm6**(9eKCgVS<8!?%V;NF}Qp5Xm|aiY1`3P z+{@n=HVOA`H*-gF*}j{SPKpaQ8j1NE0by{YCi5mXNIg>oQ}qo4Q%8lo#Xa1pQK>~b zcyAFXJi!V67UK2|IAyILsDxU=t&q~@NGG+GExi5y`ZNDZgVfa2CB*HsnoE%l7N1_& z)29=b8QtC8w`KfE55x+4iy3z#;hso@olRnfEZGWk8U*6V%gyd_7QX5_6lzgnK=A?= z1OjQghzn=Edihec{_mdZ%f%eOdL*fT zGV2NqNQM3c6~Ib6mo=vfUb^CPu$V6-Vx-}jVCBF~(CiAMI%=)}+#smwdd+Qy!9ck6 ziN?K?HMkTZtNnh7&G+$Uo>zjfhh7#wPP;@Kf84Y_rxv2GUl1>8@?hS+#b?rs_!lg( zYWq#d;u9Pt^9HY>WBF4bXGLYc8K^NXwnfC_`vXVIrq~0zy=`Bhkt5t!!FHek>Zd>I zurpPw6fF)V#z~cuDW`U&Z>k(V4*yX$IwT7PE%aFi)03M#-1qgzC#I5R#6o58u>GN> znYo%~?30o3!Po|6!vS;#6+JoFsUU>g%Ue<*;9>IB8LewgL?^XLy?u&g)>nYFU~BXh zxwGcFp*!TB`RmUt^zG+USi6IW=_Eb*eU!yXF{|tmn}Qm#$db!QB+^kf$E@p%$G-`lP!yO z;OK3m1c-5thE`R}-hgx!<+0Q%R&PN0^_G>j<=$(WaTBTVPU`llcd2>ZuI0o&2+&2Y zG)=^547F*GOwS1?Gl<9t=s3+?a4>SZW!*ZAbJ_&Q@Oy04_c<+1n!&_)GyGb|A2s^U zHy$)LHU?BpcdRsApHLgfi`YU6IEppt{aQNgBTP#MTDL1jD`(F1sc)4lNVueQc6OQ| za&@k{kaeh@$wt^?s_fm}hWN+GIpzhgdO7SJghT6go3>VUz2zpr)OQtg7tv4bm|J?f zjmyJMnWB)S_VPC{oSdA-c2J_c~pVVNoVKhEGkkN{h2s1?=X*5iKx5je(q literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/defaultThumbLibrary.png b/tribler-mod/Tribler/Main/vwxGUI/images/defaultThumbLibrary.png new file mode 100644 index 0000000000000000000000000000000000000000..db3c2c350813df60baed8fd4d1fc6653ebbd87a8 GIT binary patch literal 895 zcmV-_1AzRAP)<-00004XF*Lt006JZ zHwB960000PbVXQnQ*UN;cVTj606}DLVr3vnZDD6+Qe|Oed2z{QJOBU#5lKWrRCwC# znCVXPP!NV~DWV8&Q9;mz`@aYOka#5q;tsMXi^BKVH%!h~Kyiz$>HIi3XFB`LnQ7~t zot>>E9|F(w{(u|)+Mj^WWMeQGtgo;8zTfNhMx)XG{{Dy9VY$D*FPF5yqZQLU74kMRsI00y_plfb}NrootN9 z7^lINsdcR4SF>a0p|uSoCJ_{@p<6_VzYXl!v3E zBOK^-I!1s?tYj8z*q|O3kjvr(C8qNa3Q<52m+(aWHO4hYB8J5A_$4(y;Lf>5=j^NfxxPk zmHp=CCb8$*C z9-DZ|UAOK5nRvON!gJSY{4iFlRU^B-y@e4uWb&iqf2C~XaydG;@{Yx*p&_D@wHN}O zers!Mey$5>Ydocj@Lw2Mm}PRnAy zG#W7ChKmPy{-gWWP+jqa1TPrZ7f6nMH zWkbff8->}9zd1?Vt7h*G3yqHTK;)}H4kY`t>AQMG`VpJDk-Cw(k-Cw(ksSCZzyOdT V<{k-N#$5ma002ovPDHLkV1h#7ut@*_ literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/defaultThumbPeer.png b/tribler-mod/Tribler/Main/vwxGUI/images/defaultThumbPeer.png new file mode 100644 index 0000000000000000000000000000000000000000..b4fd77ffba3160a384c3c4c5af3848325ec458f1 GIT binary patch literal 1080 zcmeAS@N?(olHy`uVBq!ia0vp^0U*r51SA=YQ-1*|mUKs7M+S!VC(K#9UIO`&C9V-A z!TD(=<%vb942~)JNvR5+xryniL8*x;m4zo$Z5SAsPkFjHhE&{oGb7ep+fcw^tH{J4 z7a;ic|G)SA-*xgXUV?&xfh&&|e?K9xOFr*4yO%n@_bgwvVzteaXTCe1ws~)CR`Xuj zY|c3cNJ8}+c;n;a%ejC2{An3$)hJxC``h{~S>d?2xOwyDg{{77UU%4@v!bN&(DvoxjcTy{{8heHA{@8dXGIW?D-xOXLe7i z>vPJvix&lDuUYY#UE>VQ3HdhnFh^~g(TYYV^SW)4+a-Uzd)IgF;6cV(l}q-vwoi9P 
z=uF_qnse;t&B)_L5~rT_tv+@3EU#BG$1=s@2-CTe$EU>vX6P4uIjkYDiKVeeTi~$e z8Z=vuksx-TAk6w*vJ0QtakiUlRLi#u1gZ z{)JV2^5yxji)9{$1bQ#E=DDt(v3r`jUht~#kqI(x>fuWEx3{pcC4XM>K76}cm*XD3 zcJrVQll=oTYi8~_6nj|j`t|F%4B1RKMbbaMp6#Qyw;@uddU~x?e7=j*J^>|{_RTx9 zVz&O;%ECWOa_P$-Z+~88o&B*%miH@LO1$Fa(4{+_6)xY{tLydNfBEJ0dtcn}Hx}Mf zrgYhe>EVj~TcUmybuRdKZu8kk4_8aCj9|%XHdDSk)%o_E%B%l^J_=YqD=W5(`K_5? z{w-xs^;zCKSHGM{x!`l7#(I*vzC=DBce5MAQpJUJGk=@CYB`o;zp8JWr8I-HQ`S4&PDNqi!5Y;m-Zw7Z)$M$hZD^up$jCWs zw>9VMP`N6L?8gm;d95iEC05F-z2C3>DrleSp%+gAy-iMC`SoPU(nEJO|FR#*NdD{l ztRsK=6NATlZ|pcT!PHW{hjZ8egPptPA4~C@(dSx}eDK1L?TvyhZy&vN^E|h}+B0VJ z#tCYx1?PB8>V4$?XLkVq%s9WVx=!rhlZ@CO%*?MpG)Gs-U=OEN*3@lZ&bJ4N@^SsO zNVi$k<1e*uO@*xCGs!gBi;8m(3kT*2iq>dev+vRAskwT8{{E#EC-!FwTQp*#`q*mW a1O}TKyz@fO{x}HC`V5|~elF{r5}E)$T=BR7 literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/defaultThumbPeerS.png b/tribler-mod/Tribler/Main/vwxGUI/images/defaultThumbPeerS.png new file mode 100644 index 0000000000000000000000000000000000000000..f2a240e2ffff1a8bf0903b0f67dc38a21383d3da GIT binary patch literal 294 zcmeAS@N?(olHy`uVBq!ia0vp^LLkh+1SD^+kpz+qEa{HEjtmUzPnffIy#(?lOI#yL zg7ec#$`gxH85~pclTsBta}(23gHjVyDhp4h+5i>p@N{tuskrrK(nYRj10L5{&7~fi z|Nhti6kjSbF=)*Rzmo;$Y;!rPBM(fPQJt6k!K^R&d*zCEtA8YiC+@mdCAlp(dbZ2T z(%9J^uZ@ohC%Y(qHQ-TXG00xaV3~d{sOx*(y!7WW_rLQlt}DDWOU~`|CWhcgO;;qG z^~5zUzIEhmHqVuK6KJfZuB#~TeCe=+_dO<$*(tNsTr^dG3VfcBuUcx7hr*;M oJC>`L#Z1Y0Wc&YRe35tp!<`M@kKb!B?FaeX)78&qol`;+03i=@BLDyZ literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/defaultThumbS_audio.png b/tribler-mod/Tribler/Main/vwxGUI/images/defaultThumbS_audio.png new file mode 100644 index 0000000000000000000000000000000000000000..c1d4947bf773eb55e0341bd679b8c94ec0f9f8a5 GIT binary patch literal 474 zcmV<00VV#4P)=600004XF*Lt006JZ zHwB960000PbVXQnQ*UN;cVTj606}DLVr3vnZDD6+Qe|Oed2z{QJOBUzY)M2xRCwC7 zRZEV7KoA|FiD-hjAu)zTkKhgD2rgW)f}3c#fJY$V0&)Y6z?KzbbmM{mvoIn2=EX|L zES!-+tVO4*)4W$*Rj(KXLGbZTV{I;tMk9{nL{Vgn34$;j4tbu(d77rz>-8mq$z(E{ z&1mm=UXmof?|}8&y@?f5o-GO6dPJ?$s`2?7akiz5um-m(Uslm@LZ>8eHlk=&mS=js%3* zxWI+z&gZk^IOtkZ!_&&M-=GymL9jm$fevWXG|64B*I28n>QaOGd|uZz1U#Ki_xn8^ zV*WtYv$>i`F1b)L*4yo7+cr4Bia{hv(&cjLUgXVYgVBZ!4YfFq zqbR~?+HSXHSt9je7(UGpZ1C`Z03g(WW-{^sbRz4mO~3AjQn!imZ~GQt0ObLi5!oOT QTL1t607*qoM6N<$f{*{w!~g&Q literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/defaultThumbS_compressed.png b/tribler-mod/Tribler/Main/vwxGUI/images/defaultThumbS_compressed.png new file mode 100644 index 0000000000000000000000000000000000000000..70f52e8086c94d30154802784c755c10cc510e50 GIT binary patch literal 332 zcmV-S0ki&zP)=600004XF*Lt006JZ zHwB960000PbVXQnQ*UN;cVTj606}DLVr3vnZDD6+Qe|Oed2z{QJOBUy-bqA3RCwC7 zld%njAP|PdSlhaRi5(;825e$)gDW_K8_-c|Z6mF2U}yTb0uic}Rj^hXn_(mA-E3hv^YF$^K6&M?gs;YW3rnpRJPAR31<4Du= zF`#YROLX!2zW)k{eQoSCO+HP&>?7w~O4&3`Q4~P{$P~p=600004XF*Lt006JZ zHwB960000PbVXQnQ*UN;cVTj606}DLVr3vnZDD6+Qe|Oed2z{QJOBUzB1uF+RCwBA zU>F4h4H)sr@iQ|Ra6;JzTnqxtQ1*py3MN#1X)9*Z*UrY|Ni~|NC3!< zkBra@ocD)4hB}-f* zN`mv#O3D+9QW+dm@{>{(JaZG%Q-e|yQz{EjrrH1%xq7-dhE&{2+Ee*ijaipbn7Q}h z3f9G9Jy8b^9B7J<^7ZXqAh=600004XF*Lt006JZ zHwB960000PbVXQnQ*UN;cVTj606}DLVr3vnZDD6+Qe|Oed2z{QJOBUzGf6~2RCwC7 zRWZ&)Kos7QRS}IwqtK|`fFn49BX9#p&^Uk-a0G6krIP3$|4NT(lm{t z2=Xk;utFo?Saj_B{;dK8Tb}3n(TK;;%d&v0j^pf$APz{hEkUI#ChNMsM}+U*WF&yu z(Srj5s>wirD5%DM=600004XF*Lt006JZ zHwB960000PbVXQnQ*UN;cVTj606}DLVr3vnZDD6+Qe|Oed2z{QJOBUy@kvBMRCwC# zQ!xsJFc387SskcgW9bu8`UydQAou|F2Yx^h#9DlTg`e;Xc9xbJxJrZ@auAM#99qaV zP7!7|klEQGWEckJOx%={aS8li;4Zt+b=|gYkZs#kRTcfua$Q$V(?F(aVvHSgV45bS 
z)N!1$ED0f5mZ9VOeiTI@DABB{igKVMdxyyW!Gi+=%d*6AoacGhbwLo|s%=}(^Pto; zjbRw$I6gTJXE4w67ui$p&A_rO*LC3_48t@{L5iY)6+D+D38u#QbzNgu*jwNCG6&KX xiI7E-?}TxRkVW7>LY6vU9M?y?1a{yhzyMI1b5w76%ZC5}002ovPDHLkV1lHjj=cZ? literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/defaultThumb_audio.png b/tribler-mod/Tribler/Main/vwxGUI/images/defaultThumb_audio.png new file mode 100644 index 0000000000000000000000000000000000000000..782c492c75ff702c442c6481abf1a2b63a28491c GIT binary patch literal 612 zcmeAS@N?(olHy`uVBq!ia0vp^wLt8~!2~4p;(26%6id3JuOkD)`V;1?T`z%r$r9Iy zlHmNblJdl&R0hYC{G?O`&)mfH)S%SFl*+=BsWuD@jNd$6978H@y_vnSk16=FDn@RAJp13`n}`~DY4QL;uHmfyYt@o<{jzFHJf#x6D=bf9)^<`c8#ohN@-&cw6eH{zoeATPEzWYV&`$gq@yYGQHNb*19@0IVq z`?Tod@6D3EZpR-_JeXn>sW-i<-fU@*rst&lMO)uX?(Dmn z75em~#_sy-(qrw16Yb`2mMp7H-F&m)&DW~6VXNnyt*bxrvcxH1^UX6)i(K40a_+qS z{yX4+!V!+CA|hO@UHQ^BV#>8Pa_o&4%E`TbXZ-UM|u9;4q~Gdt%v3UdlC*_?K)QZ3o#NaZEvefsyr+_Sbu=|rpzD@@&X z|FSP{yR*yvbDIq|wM-IvcGjIV=HgA8yq?6v6yYBLOe>!2AWb}UD z&!1=ae)pZdg@5(cSM8<6e@&Q*&C5^UIeXWrym}JllZ{OOMM(8~IT&ZJXA;T9@ z;TpLz#BaH9sn@O*XOlKQ5cP9^oMIHYHcVb#{`>F0MLTt-dQFiMI9+0uYI5s>`Q(!q z%D3N6OxcoCu_GGPTFIPTk<9adkp|4#VjRn>6V~mQuYb zck;GhN%ZoTYd@^`dpWPWtE}52Kk2z#dp0C5VG4Lv@KEr@-#YuLUKiUY-Ta;&n5ZE+ z)0=zokwZpX*u0KC41Bzr(M@ftq1nH4i*6n`eY#tw{c1DA_x!CAdiYe(+(+o>j7^G&2?H96N-Rb8?uNo0KeRmwh9>5ap63$JTd z;jezMs0s@@ue!YS@=Fb`Cnm3sK1x0?ckf0|D{et96g)@7MmLKY5==?XPdM=kt0MoqNjh$iU^)`{?wjL8-)B^)mO1iNl_P*Y|@nbcP?!H{P~{O zZs{Ic`$1D@m*RsBDgWQ|ujrm&bY{71qeoDfSK^-wJ(8B{vF#~=b_Y9JxA@H!I=(#r zMBSQc8y4iRzaCuiH~by{^}p4*HXVC6zF1NHDy{P6hWHEv2?0He1wr5B)2}QywR+6X zt-j=T;umh0lov&k*OpzhP^=9}I{Y_E=)3ft=kFdqU#8t9({%an$MdWC|Lqs}SG#0q z^s~Ho({|3@o%-%s-pt)#E}VfRK6`g*@?Wt8hL0y?I8v*x>;h&x22WQ%mvv4FO#mPt B#!mnM literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/defaultThumb_hidden.png b/tribler-mod/Tribler/Main/vwxGUI/images/defaultThumb_hidden.png new file mode 100644 index 0000000000000000000000000000000000000000..0ac575b7e5cc47d386aba65251bb6b8b872c7e23 GIT binary patch literal 1546 zcmZvcdo&XY9LI;Ik;nB~UQ^7Q5X$D!wwk##3sdG1hP9e9%JsN4S7jda$SUPEB#b2H zQHogPk!MONS+(S5m9Z;ttGj#7J@=e@?)~HU`+k1s_vi2YJ{ev(H${1Ec>n;Q=>9X> zN0Ky2VL;N79u=aXD~T;+lqU)RsPA_GG6a%YE7p||i>DrojVGQa0ZjCn$G1^|G??r4;6!sOS&QHy48^}dk#T2ogu5AO)Z(17JtNTq2H;{}0Zpz~XL zWu8ALjNHRu@rD?M=$_@MN@fcKVmEUyBP1DOBpE4x{SCjrH}dIpKb+Bg>;w zGbJ0r^zsBwx4Wt{ke#))nIvR~^_CJgoBjQJ(q7$8MNp;vaC}w$fR|+C?_pNLwQry z#BiU?`oI8VQ1(}V6LQ=#U30tLwx-tBStYsm_~zc~WKi%CdPv0SDCn-D;pOG!KjhDc z?$Td?@ey{N8|11|%!fX$2&9}+83Fu2 zueq*N@%cp6T1s3)^o>3k{4xE$9!xdyh$HyNRz}Q>u3ud_kkeV26Z<%+Z%!9DH=Iz5Od#qt)WB4CGLDOWDA%qf@3iSq9!!5tgm9s@JWSwjmOUmX`K# z1(z@5*m;M)>0fw?PeWhKmzICf7cLwdV`iN>BS(oU!kVeeUm-D8v^ZEdkm@TU*025G zgivTjm7)*MO-%*8x0`G^@jk*TQtc*7qw8i}U0vt_Xj%+UklV!Jh}csjA0G^s;p+q` z+srjh_Hh-9NO}z00<83u`%P@{(W6EIoa8_w40L?dj$!8eLDM^TaOVBTg5u;ik&%%j z7Fc!p8sA&X^UAM=mwfcq>=T+^Tc*03<*a!t&lcF80Q&{G9`Kn<;CEc>$J^-tM3>{x zDNCZBdiwb*bpMc$?Uy2x47reos|E{9UEgT=pyZR}-`n*- zS)~Q-;i6<>ps=V`4PM1}yikc7@T8MFJMDQ^-&SIa9 zR$dot>qJF%hWe4f@8PDKW0VTL%UDY$u_OHU%IYd?vgHXuqud0RAmcOh*?DPD-bgEj z%e`nr)HO|~J;T|9{bgw&{PSjm*{7Yu!^4zXi7yD|dHpdw9&g-Ga^Y6A`}X3c$6gtQ$k}`P@yju7n>T@at>j!1jMs- syVN#R=)XMokMRD2|C1x|TS&4LU|u}dA1|h6NMsIhcg3Oa><_*0Cu45sn*aa+ literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/defaultThumb_other.png b/tribler-mod/Tribler/Main/vwxGUI/images/defaultThumb_other.png new file mode 100644 index 0000000000000000000000000000000000000000..d4f51066125688550214e2b84cce5f0a3cde30ab GIT binary patch literal 616 zcmeAS@N?(olHy`uVBq!ia0vp^wLt8~!2~4p;(26%6id3JuOkD)`V;1?T`z%r$r9Iy 
zlHmNblJdl&R0hYC{G?O`&)mfH)S%SFl*+=BsWuD@jK4fx978H@y}50l?-(d?{A1QE z149tlcr(Xm57X@HDVuNRsMWHXNq4q%0>Rq*UsTL5z1qL9=;U|vJ2C#}GH0Ey{B`HH zW75tTyP30}M>5?lHQ)6*ujKmO7wdLkj4fZh``s>SG_kwAa51^dwVQ9w@l(J1-}duQ znb$8KXPDf2|9#i}L$$l_+P#`n*?Cz1Aj3?*=F`R6%i-iA}XWck{UCIzky+o?AB= zdl%3Bpp{1s7I>__+QoJKV};Mey?o zmCd;qg)`qzXMVU|zx#`6@BfFrm%q=PAFMY$^R|J!L;U*d7bkx>Zk(X+Y#&u7{;2R} z`MjlP(~2!*+E&Kuh&5ZE_n+4rQlb8`#47x|(Va+R37#1tXSB8U#=Y0OBW(C{pP8-n zJ!Q+b6(_oRXUTWX7oJvVV3!ik|9k(#i3|JL^`~buFf-g`dRDuWX(tabsW5oD L`njxgN@xNAd6_Iu literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/defaultThumb_video.png b/tribler-mod/Tribler/Main/vwxGUI/images/defaultThumb_video.png new file mode 100644 index 0000000000000000000000000000000000000000..b5e55ca41f652b72c9af064cc486a64713c09150 GIT binary patch literal 623 zcmeAS@N?(olHy`uVBq!ia0vp^wLt8~!2~4p;(26%6id3JuOkD)`V;1?T`z%r$r9Iy zlHmNblJdl&R0hYC{G?O`&)mfH)S%SFl*+=BsWuD@OpKl`jv*Dd-pt+4YX0&?i!Q$`v6Ag<=>md0|FTJ)XLiY-y|?4r9H(csXS9o+&Ae|t@kEZ< zZ23>;f39TO`#Sc^y7G&=@43FO5{Gd1cHjG|S9N{&i`e&z%0V1t`F#_=zy7-G{`;^0 zT6>N^e*AHUi2w4-DMlB6iwEgV-+c4U=_hAyZ?0YVwyb*h-B(|$cEqd=d$BHT^+}mm zKH4H&8!PPQi+3M=T6FSh(b=@k5jt%Oi%x9Yp?~W=|AA*UcKy?PRvK{lHGjF8bIfZ? z)Y|Hoi!W--KOg*L>W76+a~aE3UH956lvur$5xM+k<9B8q206`1Hihy!(@%RXb(*WR zzT|t}_Q%c>?!Er%v@t}haJ}VcZw{88WU<1|FCTRF_D_$nk>Pv&^;g|}6-Sp7Hs>Ec zm)v_j%T#LeWp3NsdE5VgTIaU-;>(gra|MmG_*!Q2zdFf6TwMuhC|Lm!L^WEg>^2;v|r>U>3bmD7wKK3!?WX&HwHfE1|o%y`U7vhz-v_&ig6!pXexZ{Clq1 z!bOV~1qWVySrTH`*mr*M;zf%tuUzq>Bt%6>3kY`oJ?g1ue!aN-`_8i5PyfY@%HLV0 zr_~idyK~a}-tnrv|J44L)`~L~ikExdIT|ZeUO4?o>>X7o7tTNuM^R7}{BHT>mjxCq z5+B5*3;82gU(K?b%Ld{|?Y^70GiG1L`}EB>uh$gv80y~Jc<#;j-{OV#^Ut3zIBvUU z<{u7ify+w%nMaZ~IxU>wqqer*ZhpSmY!8)3zE+=hMd(-^&oPtkKYlm*k%hy#Go9Oy z?1@|7>7q3Kw4nfNmt4$?gLYEAQ8z-gj{dti-*35YU+!|(ZH{{{p1JO3p1jOg-Fb%c z{wFMnoImt_^63{Cdq1<1;L)4TJ$LfSmYrgGNxAdY?#e$EjJA;BGoIOF*u?bMNQUL5 zB44ifv_q|`&X%^6p+Oi$Nd zOJ-M!5Kx)DVbA%J#hmV!=4?~Fn_;5Fcy`9Tu+>NXW;?D7F_Pw)!7se{e)W}xzFtY= z+hx0RWTEZ)irkKqh`$5Eo-~Df5X4{*=rbCENV`x zt$*8=e%<4c;9tNw|-oyb_3Yi=+8`J%@BQ=e|tik>k~Fk*cCuln7je|l=~Chh3n wt@7@X9-Ps+dxtla3uk~7fW*sxX&zvxp7p8b>L>j+U@l_tboFyt=akR{08tmO82|tP literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/download.png b/tribler-mod/Tribler/Main/vwxGUI/images/download.png new file mode 100644 index 0000000000000000000000000000000000000000..45dc2c266a6d3affef7f3d6d9ae5badd6b1d8cd4 GIT binary patch literal 1672 zcmeAS@N?(olHy`uVBq!ia0vp^f=85jTwBP(NbAOboD43zA+fV53fN}5%WiyPEVAkS7Q zqokz3N?*Ucyj-u`STDaQUEk2s(o)~RNZ-gv7pOwFxH7LKu|hYmSQ%mn%p8~0;^d;t z0&tju%uP(nFDH z-~5!!v`U0_Ljzp{ix5K-D^mk2Q%k6JBt=l|Hu|7|KnfUesDu23Cayprz*3Uo;Z;yt zlmm{Ilw|$XoYdUZypm#Im>HsmSxT~1T7FS(Vu@X5Zen_>enDQkjXpvUulpdb1||z& zvebvCOPJH)QAra9BHU?WJj761TJmLQwKM|*<8enLvGu>zYGV9`Qlh6%KTw=3jClH))Li~zKeFo^e?IrV_MT*$;5kc%ioZLK+$!%s_P*I7T)|gsGl$OK zO^Y_YfArLq;o+R-^zU+;f(R$M85fKT{=v4N)g-=| zt4gYS*|I0*NOAn}+f&V1Y;4U~Y<%)wS^k6O-hdXzqN#6J#WXEGt~lNQn)F$f>#vzN z2u?c7z;svkb+&=+MMFP-m7Q_%3l#eTj)>)K*?joy>Vi)`>GPI-yEb|Gsv5i6{BSWQ z-w*yCSHC_jicncB^Js5l8qcIRCv+Ta7HsprDR@EGuGQq-y4~TgZr@mW^3iPZrHOAc z!k5=w(!KXJ%=P)Px#24YJ`L;(K){{a7>y{D4^000SaNLh0L01FZT01FZU(%pXi00007bV*G`2iXJ^ z3^NEIw3R6U00F;AL_t(I%Z-ygXj4%X$A9;}*U|)`PYTZICYGzb1%IOosr+{3*` zIOhOhwJ^)>&ya)PQ#cpUFFd~te5f1o;M|SdWdPxvgXLFww!endw;48?#x~w;?qsh& zdRp{ZElyLdga_ZY@T4R?;++aG){&ATxG-^M9+>pm*$Vky#j}RACqU~{AxRraFd{S0 zZe=g}?C%9=MOpyoFTu5&g9$I6z^4_Q5KQ=jxTVG=MuS#~*55;|(jQQ(Bp$60ss&oH z_ctbi7}ZfB-yc%QcYBQjXOS2KN_F(*Gpvh;-~|Nqa=6S zrxh66V;rL(+K19&2U1#0a>so}0Tzk1VBJYA<=|I6(NeCr$2xEhf8cZG9`>(|jzX)& 
zk?94wpC9~B?qriwdTbsZai|TFvGj8Q(_X}3Vk`x5lUmej+=85jTwBP(NbAOboD43zA+fV53fN}5%WiyPEVAkS7Q zqokz3N?*Ucyj-u`STDaQUEk2s(o)~RNZ-gv7pOwFxH7LKu|hYmSQ%mn%p8~0;^d;t z0&tju%uP(nFDH z-~5!!v`U0_LqlBy(-1>bD^mk2Lzs3XMNsWF`k;V73K(#xgZzXhu0SBbQj+1}RZv=# z1CExIWc}2f)ZEm(l44+(8KQ<+O0rd2eo<~>iCt!HVtT56L0-CzK0*+$`yj3cCJSJ) z)Q6`_nA71=NfQPl+-YJw#86sVVid!Fk%58nn5T&PuKHk3<=qy zQ{uuicUMVL=kC(CHzscV@3O9*C236v+p<+mAqRsV8iXub>MRhdt;ppqsIf?~>4<&O zb1RoXnd{4*FZ|@b>C3(E``+K%d~Tc0mTL?JTk=2uUA^e)njoF8X&tkwLyH+&u1S5{ zCUXB<_McyoA99#&bo|!eS}(soPjex^%;U3m&1&}ug@;uzx63-TYhDxjoiAW zSC=I4GT+~-7)k9{J8cfB4)_jkZ9!`}$|oy#x#_k}fl@Hhx*P$!=cyQMDg}Kj$6Pi4W?3 z9=VaT(e2WEXOWqrDL*Gw*_`j{{dM$ShBDum7fcme$G+-3+Be5zOBuuNn-igR8eOEz}TdB3Us^EdOX#YS->P)Px#24YJ`L;(K){{a7>y{D4^000SaNLh0L01FZT01FZU(%pXi00007bV*G`2iXJ^ z3^N|oU?6w^00F*9L_t(Y$IX^8OB_)Yg}*m5n+>~&A-z?8h?Bw}U?qVFHYs9?6gCP= z_5}nHvC}j*Dbk0uA<-RDg%|}z3}_IEiP?Y~;)u92Z|0^5W@m8*-d4K@2JSs`?%eb4 zE#@f^0f2)YpRaqG^VTT+MbJvncON`y04NcG&w=J}x6ZE<1_5mpC%?nz_nxh2CHtE` zTOT&UZ%4R}L#5>Q)m)MaD7cIa>&MOe9{byMejMSZ2^AMGXX^sRfed|`)1g1!s;CzdnFW*$^e)v-v-y^OL)cGpP_b!9_!$;T-6Z@XAlP1 zzGCO}jP>S)UVQg4P}kS3U~+79xa8pyjZXo| zgW!U_%O?XxI14-?iy0WWg+Z8+Vb&Z8pdfpRr>`sfO(p?OD>Jiyldl7X+B{txLp+Wr z`*?e=PM1h&Xl-n4T=?SIGsnjZ>`LQ17GK$KBOju8^oRk+L5_keGyktJv8r}(KGibu z(;fF9jRnzNx+>@Xw6AdFw^Win?X;!&f-KW!k@?IUS1ye7xVZSk5_hYG`xZ~Q%)Uv& ztgxnP<*a2NBs+X%VtbAR$mw46^a_4?EVKDkAVZ|Imj99I`cHr^V(@hJb6Mw<&;$Sq Cqg{#s literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/edit_old.png b/tribler-mod/Tribler/Main/vwxGUI/images/edit_old.png new file mode 100644 index 0000000000000000000000000000000000000000..7fa89b62a43688f203742056944ee94184ba5026 GIT binary patch literal 650 zcmV;50(Jd~P)Px#32;bRa{vGf6951U69E94oEQKA00(qQO+^RU0|E&vDKB3OmH+?%8FWQhbVF}# zZDnqB07G(RVRU6=Aa`kWXdp*PO;A^X4i^9b0ryEnK~zY`&6hoE6j2n0pEG-R_ESw` zAYd>SHXADK<9#j{O}r{sgVlSXsz63L+|M42Z@Eiy>>Xis-subLZX}i=F*| zndm5ScHidAymQX*^4@a~_X-ihR&$NlkL$EsHLP7D16aTY;5x{d6M%5{=@!o(-lf%C zB4Z43==A$6mx}@*;rmyYX>9L&Z|w+eh~mwYJAB$hSwq!P zplxP?6kx-UGGMLYTSteA=DVvuyIoGW3be_3#mE5xuq3emqle<)1CFs#i_Hg46g#~> z3hhyWxwsSCb=pVadn^o$9yxSu*nSJQE>7btmrBt8aLD)I=J`06s#c@W!*t0H4yVtX ztI0fo9*(%p)#WexOV0VNLwrgkt<;4rK(&7C&%spR$OrW^=ozQmLNJPWiBSC*!j}os}C!A zL;yb5F4RsP?%k_Mkoh71MGra_uAdxTI3|TfBH+%-J*QY^(zo*^7SP{WbZ0pxQQctjR6 zFmMZjFyp1Wb$@_@VkNE-CC){ui6xo&c?uz!xv2~WmimU~`UV!Nn${p~5+D`9`DrEP ziAAXljw$&`sS0kHMXBZaMcKvvo8|q0HVJ~%MELqxCFkerC4!XbRpb^h*w|MTBqnF4 zmMA2prf25aDk&%^C@Ey7*eZpa`WpBaIHzW0dQ=sq23ProBv)l8Tc#-4+i}@cSOGQX zrj{fsROII56=85jTwBP(NbAOboD43zA+fV53fN}5%WiyPEVAkS7Q zqokz3N?*Ucyj-u`STDaQUEk2s(o)~RNZ-gv7pOwFxH7LKu|hYmSQ%mn%p8~0;^d;t z0&tju%uP(nFDH z-~5!!v`U0_Ljzp{ix5K-D^mk2Q%k6JBt=l|Hu|7|KnfUesDu23Cayprz*3Uo;Z;yt zlmm{Ilw|$XoYdUZypm#Im>HsmSxT~1T7FS(Vu@X5Zen_>enDQkjXpvUulpdb1||z& zvebvCOPJH)QAra9BHU?WJj761TJmLQwKM|*;~h^I#}JRsz0>yQUk(sCT7SOQk&8w2 zvs=*)^NZ5Q#DyNsFvYyuohRvd705uJKvQKx;< zb3+S%!*%hVs!ulmm{)z?a^E!jl!aT_8v0|WpMNiM*m3r+2mC=-mj0IIcgR?qSRMRv z}WTp;0H1VTVIx_{C%(YutUtjD%iHJ_DbdZWwuve^k0s6Fhi*? z##6@0WL4O`y>**XqR%OLIH`EDT)ZI^cQO9;+1J+ESF08qeqOZv6uZE2k41(p)3mkP z?(E}<{`-fca__a-#V3ll{VLvjacbpF_6aIawx8VQQ~gupd-MKk*Y%c#H^b+I{$zO6 z^LYNuqW#v@hw@KU#ykr7l7GOj_V(5ZE0tFVdQ&MBb@06`KPlK=n! 
literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/fake_clicked.png b/tribler-mod/Tribler/Main/vwxGUI/images/fake_clicked.png new file mode 100644 index 0000000000000000000000000000000000000000..174d859574cb85e56d587d8bb989a92ffea2f321 GIT binary patch literal 1685 zcmeAS@N?(olHy`uVBq!ia0vp^%0Mi@!3HGfm>-J*QY^(zo*^7SP{WbZ0pxQQctjR6 zFmMZjFyp1Wb$@_@VkNE-CC){ui6xo&c?uz!xv2~WmimU~`UV!Nn${p~5+D`9`DrEP ziAAXljw$&`sS0kHMXBZaMcKvvo8|q0HVJ~%MELqxCFkerC4!XbRpb^h*w|MTBqnF4 zmMA2prf25aDk&%^C@Ey7*eZpa`WpBaIHzW0dQ=sq23ProBv)l8Tc#-4+i}@cSOGQX zrj{fsROII56=85jTwBP(NbAOboD43zA+fV53fN}5%WiyPEVAkS7Q zqokz3N?*Ucyj-u`STDaQUEk2s(o)~RNZ-gv7pOwFxH7LKu|hYmSQ%mn%p8~0;^d;t z0&tju%uP(nFDH z-~5!!v`U0_LqlBy(-1>bD^mk2Lzs3XMNsWF`k;V73K(#xgZzXhu0SBbQj+1}RZv=# z1CExIWc}2f)ZEm(l44+(8KQ<+O0rd2eo<~>iCt!HVtT56L0-CzK0*+$`yj3cCJSJ) z)Q6`_nA71=NfQPl+-YJw#86sVVid!Fk%58nil>WXh)3t%Df_dpB*+}I&;K>A=$k`e zjEIVY?3@^`rtX_1Z(l51UGMVOSVpv5a~sEYj!gE4Tll(;9tu2L)Lyjpx#smhJD%+0 z+c~}3cXE|p<+Pf5oAZ;^pZHGcU^uY6@AUKUa)%qeYdP37|2=i@XJ$}U+OYZQ#_Jzn z)UGsi@8f7+Z}0Z6S3P>)q}cnZ>7NgtZ?4_4IK#9g*YVQI1tNd^g3S2$*oBu6p4?D_iTtmo6ix38{itCw2N)TjCA#ML8; zE{}_z{hj^)dCb$z9Fqimg&a#_661{Szc}}L>(rus4`=LMt9hC6h~cak5{J6fH~GGc zZ7j3j?-Z9`^4#``-P?b2@+*$+Tfyuh^yzwt=(BS_C;$ES;{D9ErsjID-o(!2Q#fbw z_l(H%xi>lYFMVD*v#febagNAuB_T)SxlYrj3X9FW{dHAStmgGu_uhnuCg#^havzkC zIP7Xs7B8B@*6YErL%6Z+VaNpS@}oERS4p2umdKI;Vst01rGL00000 literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/familyfilter.png b/tribler-mod/Tribler/Main/vwxGUI/images/familyfilter.png new file mode 100644 index 0000000000000000000000000000000000000000..27937f00337447bdee4f9d3fe32bf9fb90a2e870 GIT binary patch literal 332 zcmeAS@N?(olHy`uVBq!ia0vp^F+j}C!3HEhrMwacQjEnx?oJHr&dIz4a@dl*-CY>| zgW!U_%O?XxI14-?i-B5%K$!8;-MT+OLG}_)Usv{8*m)p0TZihaM-zr>V X{7zD4_2lh9e=&Hv`njxgN@xNAD@}y+ literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/familyfilterEnabled.png b/tribler-mod/Tribler/Main/vwxGUI/images/familyfilterEnabled.png new file mode 100644 index 0000000000000000000000000000000000000000..e376e5f3c52e88338e06560fd1a5f38c1d80bf87 GIT binary patch literal 294 zcmeAS@N?(olHy`uVBq!ia0vp^;XusI!3HF`S_B^eDaPU;cPEB*=VV?2Ic!PZ?k)`f zL2$v|<&%LToCO|{#Xv1WAk28_ZrvZCAbW|YuPggaMs_ZC9wDQZB|xENo-U3d9>?E? zI`TCr@I-h1|6gP)&2=Usq-XEWo^=kog5Mjv3vO-TZ8@}|Qi7jP+){RW#}T>JQt{7X z=Dm67yX)%qH=62fTRbeKlI%pvvPAB@`O^1HJKSka;8tgyFK_yf8HASq+vejorK@}1 zGRccUnKvd${=Ra!(KC?canG`5?;r)W&KZ9#6c^6DdDv^$zL$62F#Oh?{Zrvb;-$_A ihMxY#NBZTplh_$lomjH&GdKfX%;4$j=d#Wzp$PzPHEs6* literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/familyfilterEnabled_clicked.png b/tribler-mod/Tribler/Main/vwxGUI/images/familyfilterEnabled_clicked.png new file mode 100644 index 0000000000000000000000000000000000000000..e27e27a05a5326184d17df2bbd2bbeb9fffe49fa GIT binary patch literal 294 zcmeAS@N?(olHy`uVBq!ia0vp^;XusI!3HF`S_B^eDaPU;cPEB*=VV?2Ic!PZ?k)`f zL2$v|<&%LToCO|{#Xv1WAk28_ZrvZCAbW|YuPggaMs_X^$sgr`0YIT;o-U3d9>?E? 
zI`TCr@I-h1|6gP)&2=Usq-XEWo^=kog5Mjv3vO-TZ8@}|Qi7jP+){RW#}T>JQt{7X z=Dm67yX)%qH=62fTRbeKlI%pvvPAB@`O^1HJKSka;8tgyFK_yf8HASq+vejorK@}1 zGRccUnKvd${=Ra!(KC?canG`5?;r)W&KZ9#6c^6DdDv^$zL$62F#Oh?{Zrvb;-$_A ihMxY#NBZTplh_$lomjH&GdKfX%;4$j=d#Wzp$P!~Rc^EZ literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/familyfilter_clicked.png b/tribler-mod/Tribler/Main/vwxGUI/images/familyfilter_clicked.png new file mode 100644 index 0000000000000000000000000000000000000000..32809d9b4574d762320272b5d415409502a1c3cf GIT binary patch literal 332 zcmeAS@N?(olHy`uVBq!ia0vp^F+j}C!3HEhrMwacQjEnx?oJHr&dIz4a@dl*-CY>| zgW!U_%O?XxI14-?i-B5%K$!8;-MT+OLG}_)Usv{8*m)p0TZihaM-zr>V X{7zD4_2lh9e=&Hv`njxgN@xNAfxd({ literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/familyfilter_win.png b/tribler-mod/Tribler/Main/vwxGUI/images/familyfilter_win.png new file mode 100644 index 0000000000000000000000000000000000000000..19968bd512f53f26f8ebdc67083c73a4149112ad GIT binary patch literal 1484 zcmV;-1vC1IP)Px#32;bRa{vGf6951U69E94oEQKA00(qQO+^RU1rrQ68IIp!)&KwnUP(kjR7l5- zSI=t{NfiEGWgwdI&~7vnoaqHoaFb}SV58uVi0df_bMU4Y@gT@dkQgyuTqNu+Dn!B@ zqOjt@lZUWg`~@O0`~zaQc*w8G%1rl6B5w~}Q(ZOPnRTFQV{~*BXV0E(%g^_H6beaAGXU`A%a`_> zstu>Kb~CwF+@>goBLfhd$4zi+r9hOxr!+Z#Bp=Q7<;$0toSek;^fV3}IDmzP1(Zr9 zbai!MXlST~U-FeUd2^H^5t)x5+WyivA%K96#_jK01;96WdsuU@^v z)YKIE`}^TI4qm^0ZTLp*NXhr_-{a)TlVE1$(^zl7bzOY=^a&(P9%@!`V<1LNV}58=8l3WY)fmx%D-!2{g6a|f>LqO-FTuIu9V z?c2D2|9%QTp>#{V9T<2Vq$`qmzQ~FW`=XQ z98XM4@Y2!}&&T1QXt*oqYZ*Q-Gli(Jn zkHfMo4#QAA^)TdT&z`aCy4>B}&6Z{H_U+r1&9W?BTU+BW47paVvSnHPuk2T=RplE> z->|N=!;s&-d&h?lALd%E#z7G9kt0WVetzEIV9XEeDP!&1VHonRUAv4#;~eU2Sr)G> zFDGNx<905W<4UE%b!}{Qb(MR2d%2z(GvZmVr;OJ_&Zup!ot#&%UPZZF#^B%}8jS|_ z?Ac?~!Yy020025N82~`1>}N6=RTkn(9Up*1grmofV&{$>c>er3y1Kfsckf>8+xLqW z9R`;OigdHVMG$Ge@yX20n4e!jF4vQCC;-`Q+uEKx`}gn1?CdN~ojRos`Ptc73=9ka zQmm6# zQ<$Ef#@N^xqVQUc+pIT{rVNfVo})=wKe4_ z7K<1kAIIFskNCc^fw{Rk+_-TArBca=FZ6kc&m$7LPK1oPD$z>ax^)W|E?fXJ4gaDWcM~@wcWm(8(v-s6<;_Fz-%^(t&%pD&A8H;-9(j^=} zaRNaQU~zE~llSi7+SoNXj)R__9^Cx>X2eAo8UV}t>CT-yu(-G=HDC-652IGAp;Rov z^FHHe+s5_l*Ky{|aFn~Yiv=Opf;1&XbcbdDw6U?l>T)h2R9e(@)NiGFk0NoHk^0_m zGPJ(Fj?vLkjEsz+udlDARsh5@l>}Du>p>O@%{3xTG6B|a)h&2>!DN`@9M^!RH~>qF zDiLhBXxDJrkM+vvuTnH9>knxw#(EMFDnb9IY0ilVuwW?`RDvY)?f$779UUDgm&@oM z=vN;)+$;mkU_lU4Y@+7BMqb+qM66x~QhE|tmPi317cIW$>N-Wz=^r*6=cC>Px#32;bRa{vGf6951U69E94oEQKA00(qQO+^RU1rrSvEN2I}qW}N}yh%hsR7l5- zS50UXNfiEGWgu!ilpA$|V+Rz$O`@}E8vk{c6@(njT`%H6k&K8Y7%#3e?5;5q2{}Yz z#e*jgvuqZ|9}s_z<{$}7&O&e!G~|C}rhAesZx3BvT|GU?ZbQ*k@4b5OdtcYjYx?%> z8v_h50fg|w>Rl;E6+6)`wH|=RXIT~q!iXQdO`p}r6*}c}Xy{L*QYq}*xf7`$QyBVdC}e9{ z7A{@71R_9?m%qoK&>RZIPYeJiCnwilRI5Eyy{{;U_$5Kn`3yj;9yfxm5&}`gKUG_N z|0f^A^ySN!=8cA0c~w<+BTohV{~*B2M)CAW$j@K?@Hk- zLQN17i3Adf1X^2L@#f7NtxF^l7#J8pQ&SW6?c0Y}uU?_QzaP!b%}6Gbc>Vfy$ktya z0Px|%2c%LdFtfHMcg*tU)B+qXyj5)mFfdW3uT?!mTg)YjI*wr%wE^x)ychoSggU0t|)w@33< zl}{3Yl#(l(<2dX%4i}3>e)sMjCzDA}<~l5;uI;hC8k&StZm zOeWb?^QDyRxJKV`xUH>?pFMk~V?25CgfCpUpvQh3f6iL?R_i;C!)MQ)<)=@d^1{Lb zzj*P2ZQBmnUcGviCnhGsaUDltcR3!9^ZfimNH;e(#|;e)AwR`pkq;j}%(+}n%iIc| zj^i+-l8$fZ(=9mnBAhYoQ*pATUO&3DUHWNY_b*X3>7wuL@UkVBcJlsq>( z8;w~G`tf+27Zw({Y{=&4=eePwfy)&!bGaNJIdX(crBVck>z3IqyR456^CG)`{W>z4 z493UD@#)hiz196&ZQZ(cAR^SnVgP_zHIBt%#<3tm7oZ>K=&_^NvUxL}KYxz5Z{K3q zu3gx@`}@#V6QQF4p(DlIngA+ngNu%tkH?DrU6JgJu zJs2Jy_A*ei;o)Jlw6p}5t%`a2^l3;bk^LhZ!9nD4poxYNaA|1?b#--EUS2_OZ*L^I zfDjQRD+nTVMxn|)0RcjHcQ^X``!P5;h|bQ=aFt_L4VCAeh!PDYU%!4mCJF`Z>(r^A z@au2CU~zE~U%q@nM@NU&MahV^j*bp=cXwlCbQJ%rtYBnh1UGNqL^_?W`k`;%zK#36 z_jR5m5L!5~@Xbvi0C(=(!TIy&!OXaP`LcGXYegYIV6ZO`1i?lvX;cE{JRtyP!qH>L 
zA*DoJT^)W%CUvpFRVo9ITrw(EFTNk#|EL!)Uc~VeC*U{^rl+UT_uv7#I=hfeCb4zv zR^0memWRb88UVxdzh%p2OixcM4jAq2?I@K>NOz=BD11hvW#Pt+8#r^O-OD{Fdmsp~ zZ5#Xd@5kiiq*`PI2(N}_0JO5Q!n!zDE_7Pt84b5N`KPx#32;bRa{vGf6951U69E94oEQKA00(qQO+^RU1rrSvEN2I}qW}N}yh%hsR7l5- zS50UXNfiEGWgu!ilpA$|V+Rz$O`@}E8vk{c6@(njT`%H6k&K8Y7%#3e?5;5q2{}Yz z#e*jgvuqZ|9}s_z<{$}7&O&e!G~|C}rhAesZx3BvT|GU?ZbQ*k@4b5OdtcYjYx?%> z8v_h50fg|w>Rl;E6+6)`wH|=RXIT~q!iXQdO`p}r6*}c}Xy{L*QYq}*xf7`$QyBVdC}e9{ z7A{@71R_9?m%qoK&>RZIPYeJiCnwilRI5Eyy{{;U_$5Kn`3yj;9yfxm5&}`gKUG_N z|0f^A^ySN!=8cA0c~w<+BTohV{~*B2M)CAW$j@K?@Hk- zLQN17i3Adf1X^2L@#f7NtxF^l7#J8pQ&SW6?c0Y}uU?_QzaP!b%}6Gbc>Vfy$ktya z0Px|%2c%LdFtfHMcg*tU)B+qXyj5)mFfdW3uT?!mTg)YjI*wr%wE^x)ychoSggU0t|)w@33< zl}{3Yl#(l(<2dX%4i}3>e)sMjCzDA}<~l5;uI;hC8k&StZm zOeWb?^QDyRxJKV`xUH>?pFMk~V?25CgfCpUpvQh3f6iL?R_i;C!)MQ)<)=@d^1{Lb zzj*P2ZQBmnUcGviCnhGsaUDltcR3!9^ZfimNH;e(#|;e)AwR`pkq;j}%(+}n%iIc| zj^i+-l8$fZ(=9mnBAhYoQ*pATUO&3DUHWNY_b*X3>7wuL@UkVBcJlsq>( z8;w~G`tf+27Zw({Y{=&4=eePwfy)&!bGaNJIdX(crBVck>z3IqyR456^CG)`{W>z4 z493UD@#)hiz196&ZQZ(cAR^SnVgP_zHIBt%#<3tm7oZ>K=&_^NvUxL}KYxz5Z{K3q zu3gx@`}@#V6QQF4p(DlIngA+ngNu%tkH?DrU6JgJu zJs2Jy_A*ei;o)Jlw6p}5t%`a2^l3;bk^LhZ!9nD4poxYNaA|1?b#--EUS2_OZ*L^I zfDjQRD+nTVMxn|)0RcjHcQ^X``!P5;h|bQ=aFt_L4VCAeh!PDYU%!4mCJF`Z>(r^A z@au2CU~zE~U%q@nM@NU&MahV^j*bp=cXwlCbQJ%rtYBnh1UGNqL^_?W`k`;%zK#36 z_jR5m5L!5~@Xbvi0C(=(!TIy&!OXaP`LcGXYegYIV6ZO`1i?lvX;cE{JRtyP!qH>L zA*DoJT^)W%CUvpFRVo9ITrw(EFTNk#|EL!)Uc~VeC*U{^rl+UT_uv7#I=hfeCb4zv zR^0memWRb88UVxdzh%p2OixcM4jAq2?I@K>NOz=BD11hvW#Pt+8#r^O-OD{Fdmsp~ zZ5#Xd@5kiiq*`PI2(N}_0JO5Q!n!zDE_7Pt84b5N`KPx#32;bRa{vGf6951U69E94oEQKA00(qQO+^RU1rrQ68IIp!)&KwnUP(kjR7l5- zSI=t{NfiEGWgwdI&~7vnoaqHoaFb}SV58uVi0df_bMU4Y@gT@dkQgyuTqNu+Dn!B@ zqOjt@lZUWg`~@O0`~zaQc*w8G%1rl6B5w~}Q(ZOPnRTFQV{~*BXV0E(%g^_H6beaAGXU`A%a`_> zstu>Kb~CwF+@>goBLfhd$4zi+r9hOxr!+Z#Bp=Q7<;$0toSek;^fV3}IDmzP1(Zr9 zbai!MXlST~U-FeUd2^H^5t)x5+WyivA%K96#_jK01;96WdsuU@^v z)YKIE`}^TI4qm^0ZTLp*NXhr_-{a)TlVE1$(^zl7bzOY=^a&(P9%@!`V<1LNV}58=8l3WY)fmx%D-!2{g6a|f>LqO-FTuIu9V z?c2D2|9%QTp>#{V9T<2Vq$`qmzQ~FW`=XQ z98XM4@Y2!}&&T1QXt*oqYZ*Q-Gli(Jn zkHfMo4#QAA^)TdT&z`aCy4>B}&6Z{H_U+r1&9W?BTU+BW47paVvSnHPuk2T=RplE> z->|N=!;s&-d&h?lALd%E#z7G9kt0WVetzEIV9XEeDP!&1VHonRUAv4#;~eU2Sr)G> zFDGNx<905W<4UE%b!}{Qb(MR2d%2z(GvZmVr;OJ_&Zup!ot#&%UPZZF#^B%}8jS|_ z?Ac?~!Yy020025N82~`1>}N6=RTkn(9Up*1grmofV&{$>c>er3y1Kfsckf>8+xLqW z9R`;OigdHVMG$Ge@yX20n4e!jF4vQCC;-`Q+uEKx`}gn1?CdN~ojRos`Ptc73=9ka zQmm6# zQ<$Ef#@N^xqVQUc+pIT{rVNfVo})=wKe4_ z7K<1kAIIFskNCc^fw{Rk+_-TArBca=FZ6kc&m$7LPK1oPD$z>ax^)W|E?fXJ4gaDWcM~@wcWm(8(v-s6<;_Fz-%^(t&%pD&A8H;-9(j^=} zaRNaQU~zE~llSi7+SoNXj)R__9^Cx>X2eAo8UV}t>CT-yu(-G=HDC-652IGAp;Rov z^FHHe+s5_l*Ky{|aFn~Yiv=Opf;1&XbcbdDw6U?l>T)h2R9e(@)NiGFk0NoHk^0_m zGPJ(Fj?vLkjEsz+udlDARsh5@l>}Du>p>O@%{3xTG6B|a)h&2>!DN`@9M^!RH~>qF zDiLhBXxDJrkM+vvuTnH9>knxw#(EMFDnb9IY0ilVuwW?`RDvY)?f$779UUDgm&@oM z=vN;)+$;mkU_lU4Y@+7BMqb+qM66x~QhE|tmPi317cIW$>N-Wz=^r*6=cC>&YZE~f$3L^32+>GHiw8YQE?&F{1uxm&yn6KLxqn2ae}v#o5FsZKqoSAM zZVy6hJwz-OO4Un?CDz7T5ACKy5_R=S#j8@ zGduiFg1Ww>JOH7!hSk@K&9w%*-_PNjA;jUw@80t5CvTL{8=lK*>qB?v8`2R}N^)l7 zWTX)S{=^BbonMQRPb&?!zaWPxmC_s}(ah!&7y(y+TVux}sd;hk~ZY>&BSJ8NX-O~e6`ID~LeOuXFA&Ih&Wg{FS8_W$G& zl5hZlW%9hq{ToHz)Z1#~m~1W61odim`aO8RH~v1Yd6Y7J=L=J=qp7jQg~>IN(FoGC znArIDYVV3MXKHM*iPX3`1>~laO>Ddc60JSJ9cn0hLkTs(5I8&IlXH)ReYh0 zAbVZs!o>>AN!kItI7B(s$M5{3%kwJ2vFzv!7#y*_-{Vym_51)|f!_Z1^G?AN{vcgkpKVy07*qoM6N<$f`T&*xBvhE literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/fiftyUp_clicked.png b/tribler-mod/Tribler/Main/vwxGUI/images/fiftyUp_clicked.png new file mode 100644 index 
0000000000000000000000000000000000000000..332d1055f732b01e64bbcf6501828842872cbafc GIT binary patch literal 397 zcmeAS@N?(olHy`uVBq!ia0vp^;y^6G!3HG%>OYYHQjEnx?oJHr&dIz4a@dl*-CY>| zgW!U_%O?XxI14-?iy0WWg+Q3`(%rg0Ktc8rPhVH|n=B$c#-iczMV}vWull*)?g^1c$6}Hn$~ROS30$f3f*W-x&dS z-i^8{8FnwH>p#Eq_nc?pvpLKSUnet}6db#s$uM_+-8F`p#eDa~q%xK2wi1oVH(g z$LBvvm%4W3m90?rW%+BfZQ}Z8-L9~4O5kl|5DTBnu&Vz{X7blKp=u7@_xj3mdKI;Vst0Fq&#b^rhX literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/firewallStatus_state1.png b/tribler-mod/Tribler/Main/vwxGUI/images/firewallStatus_state1.png new file mode 100644 index 0000000000000000000000000000000000000000..aa24d7ec2cd3b8e644aef0d3f12fe1dd55bbdb01 GIT binary patch literal 601 zcmV-f0;c_mP)gUt!DS4f|NBjgCl33iXL2k=O7 zK}iu8Zd{BaAQ*u(DUiKNcC)b~U|i(y{N8xpr+M?<=n3Z>0LGYWU=CbyUd6y%YkgN3 zgmVsK%xB>JnS=kXFIwvx01-veHE?@jfI{!I)_0O)3Vdk}AUXdXcurRZgjX43UU`QA zUNiy1e>&hl#Df|E(W?8yXfy&~yWL`~{iE%y5X0dRfIQF3fKS=#s1fN9Q4~4P3~w88 z5{-bUeKDn!^m;vlARq_=0IaqDTp6_>q?C-uV+Mo4BVSP+09lriB+2O^`u#qW$)x;t z&av5SSgls2zuj&Fpp-%>MI6T*4u{i06wN5lbC$~`d7l5`Pt&x73BwQ}1hd(U#bR+f z2!JF>$g=F&HP%|JwS-|vr_-sluTtt|r9RPWwJINNp>DU!dcFSZy6JTKqf&|11pq1m z0POesd+&W>x7$VDA<{Ix^$wx6{wjg*z5s*}3pl<~N!q*#ibJVQ8upoSx*1IXtr@Q5sC zVBi)4Va7{$>;3=*WlCHlN`mv#O3D+9QW+dm@{>{(+%k(&%kzt}ixr%MP1rVFUI0`l z22$r-l$uzQnV+W+l9`*zU|^|lXs&Nyp{i*OG@en})5S3)qV?_cgPE-k0uJ*Rne#LH z-#WtY!{}-CW2M6W){+EHO+_H1MPvi_#ugC;HH8%#Ied@)3ru=&==#?O)_iZyB&RU@jBip3uh9H+sxD9NvhS0zdSbBU@yUfQY|op2@a~9= ucu*qnn?b3F+lEP=TjabX-^0TaKiGDtrf)1goqib@pbVa_elF{r5}E+9wW`Db literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/firewallStatus_state3.png b/tribler-mod/Tribler/Main/vwxGUI/images/firewallStatus_state3.png new file mode 100644 index 0000000000000000000000000000000000000000..18c3fca0435b4501a914c3821bc6fe61226f7ed8 GIT binary patch literal 556 zcmV+{0@MA8P)&#{h+Ty&h&L0dT5i zEC6tn2MP8P5r;_wOw$BlI2_XN_t&naDp9Rg0hmlCal!A_l8}pb6L7`8Ns6vS_th6pO`Q_X;8CbUKvFWn~GaD>|J{(RH16yN&Prc%H{#F!;2`Fbpiq zLf7>;Miu4T(P%U%l}buUJ#|rZRMP@~m=i*X z_P? literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/go.png b/tribler-mod/Tribler/Main/vwxGUI/images/go.png new file mode 100644 index 0000000000000000000000000000000000000000..d4569407a078df71cf949164b879a4bf1fa0e335 GIT binary patch literal 1690 zcmeAS@N?(olHy`uVBq!ia0vp^5+KaM1|%Pp+x`GjEX7WqAsj$Z!;#Vf4nJ za0`Jj7yt<)D`RsY0y+i^l`9GBGM3-k^34D{h912x(J1+Bobm0uK+ zpPyrg799%C!NxWqohXVBwj%VWx+IpQ+5yAELf61j*U&t~(7?*b!pguX3Q0di5t?@2 z{FKbJN`!Vp16>1)5JM9yQv)kgOQ?1vMNsWF`k;V73K(#xgZzXhu0SBbQj+1}RZv=# z1CExIWc}2f)ZEm(l44+(8KQ<+O0rd2eo<~>iCt!HVtT56L0-CzK0*+$`yj3cCJSJ) z)Q6`_nA71=NfQPl+-YJw#86sV@?~eWGy?0A14Y{AR~t*7X^#-t zt?hPXszU2Gc9DZqyPxRq5N-Lv9kZ)IICiJ;?xv%C)4|+(Ek=Zkykk{^f9tZQjCJOA-|BvQIs7I>O>?WAlGc>xdPJ z>^%S3dhfA6E9<*%etcK$eQQI9)}D+y{=6Q!^`W0r_HTZ-&+$Y@)A>KUrpr%R+%B_V z?}UCE&4jxwR#|1gCod{*mO4~#H&=hYm{dT8gUgGT%t}tys*Ez9_a|Tca{O`ol};1p z=juTWIokJ5SN%EDr0c-1r8<*M^0Bk*ywDjF&mH-?XOpl)cHkG@{>Yp+@}C}=w=P|E zQ|OpP-LrLR=ik=oEo-^{{ZjnegI>Ll=H_0G`}J1#SAtK>o@om5;!koM)wuSonK5%+ hz>KvsT<`zpes}izLvxZ!PC{xWt~$(69AsQBGdo? 
literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/goEnabled.png b/tribler-mod/Tribler/Main/vwxGUI/images/goEnabled.png new file mode 100644 index 0000000000000000000000000000000000000000..f0dd401edea9a5b77b2fb11bf3550ebf5cabb707 GIT binary patch literal 1978 zcmeAS@N?(olHy`uVBq!ia0vp^wm>Yw!3HEfnZM=$DVAa<&kznEsNqQI0P;BtJR*x3 z7`TN%nDNrxx<5ccu@cva66d1S#FEVXJcW?V+*AewOMOFgeFF2-g36P55{Irtt z#G+IN$CUh}R0X%pqSW&IqU>V-&GLRgn*>2>B7A+UlJj%*5kY?Z=IeGPmIoKrJ0J*tXQgRA^PlB=?lEmM^2?YL|ztbm$x zQ%e#RDspr3imfVamB40N0ofp7eI*63l9Fs&C5U?>d;=7m^NUgyO!N$OlMReb6im$Y zjLeNJ&CPTaf)h&=yb|*ijLa2`3=DvTk(IGI5CI(n21<5ZK-#7#CCw_x#SLmFkY}ru zQBqQ1rLSLJUanVete0Puu5V~*X{m2uq;F)T3sj+7T$xvrSfQI&tPC*(W{yj0adJ^+ z0XR%S<|ZcPmzLNnDS<441E}W`3*Z*z6+;6L7>xQAi3R$GdItJ%lYts-fPz-w*vc;o z$eGpdzlLatY z>ci6|%<1r`qzMBN?ldtTVkj*wF^b{8$iTq#)6>NO@5Dr+C6X@0vZvo8PE&DlT~Z@3h0`-#=B) ze>yd*NvKh+CCYEj>zln{^UrxYan!^{+P<)7Jal(>RfV3ehw$FsFjh-pT*fKI?E$TX-JwsxchaQa&a&#~wb6sg{DD)eohYkoxpM6THBY1{jU@v+x8!wd+c}g9k;V*ZiVe#Q)2zI{vuFmkx$i*o!2^I z?d*5FX*m779jIzq@y|cY_S_UPD%G2B@0~wAf7!;@xomDMD@!#ro`o--wrFqLLhh>t z-`REE|Nm#h{Akn9CEtbSU7v6A=(4|h(f-SKp3S|dZDB27@+hYI;StA4M-&X>GU9i$ zO*(Qg$Y}+`f~Av=ehUt;*#19UJxOrt$&)SDH8~TGM@DKkc?v0U7yq!demk{AV8v_6 zIrkp$xn@X7Bx@{v`(BwZiRa=GwL_B^ziPkN$Hmm}_}tf350~cWKkD>tJ;g6_J3UU2 zQGmN$fLnisw{mmni?=tdc&`@zxU*F4olds!%9N8bjLRRr&dMn~_d7f5(Z+LsSPx#32;bRa{vGf6951U69E94oEQKA00(qQO+^RT3la|>$;?8b(f_qtzqReirzUGH_t#>R#L zs3jg*`I*l0kLcDW)3m|T3ARq0LKc@W1Hmv6#PSso!w>`ji+KV97?h_m4?xTy#xTLe z=a?BHLVirh6btDRHn>`{5?NncvrfN9_u{9>wO@G9b{A#s>60wH`Ch0CTir|$S8UmU zj5b?sOc-2@HSuxB4gs1LnZcav$y{YBqu=OonX%HtB)lRNLV4z7hCnRWod+`Q>e=!kV2o(bgy25m0xK_ zL%Q0)U^F%81{j&K0E{kQWc|5Ud2I11_d;@yS!N-;ZXVyInI#W-ad)R6ifp_PVQnRx zv+Q=LxD3P0Vrj%wGMYm_e9cy`$B|A0gW&^*_ndRgQGsh$zU9%^-*O|vbG|3D7u@Fw zz*2ZMj5POfb!5y9LQ;w-$ax`?DJ?xYTFm3hg6M|pzoEksFk)6s>WN2+iu=HA|78YmSg0H-~b5t$F@VnRWN{p0(jHl(ut>QSG$$TWUw&>g%gL zsU@#1nv%P#uG+V;S$pbfMnm>vYZ%4&H(z_#s?^e2z+Pte2pE;GkGf7n0xo;exc+)*A9;y748#YOzIC6o!}ncw@b zER%Y4mezo@j?^MDHdV^M%h%qim*@x#XN7#B3p&Tuf8dFi&my7@Ocf2qqJl{Mq5dlb zi1>UFR6!J%qk?29WqlB*S+*Z+RpVaNxm1Z;LHp-FX3&|Bg31g4&ox|G`;+VE-e>XH zF+>F|z+JJ5qM|sWFjf>*MHMs*mxv5)`AUT{w*XfGv=FP}U|nY>q^?(4Tw?RXPw00a zFNgoB+uuDd|8a+#ue`(aAAE$MEtxrpv#0z{(PESnpb^gQLdoQ))(F{oC88pScHTy! 
zh0q!qima~QHWA~mTQ}MK>;k=SzC>1UOoh4s!dW_RpXb>#FGks?9{tN%SE58)YF$yh zYnN+lIdrT{Txz4SCZDstB4VXdS|mmJd2GW2{{kuoM&Y~CZQ}p{002ovPDHLkV1mO_ BHCq4x literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/go_clicked.png b/tribler-mod/Tribler/Main/vwxGUI/images/go_clicked.png new file mode 100755 index 0000000000000000000000000000000000000000..1794c9f1e377e543d4fcf5da3317f5ba971c0260 GIT binary patch literal 1694 zcmeAS@N?(olHy`uVBq!ia0vp^5+KaM1|%Pp+x`GjEX7WqAsj$Z!;#Vf4nJ za0`Jj7yt<)D`RsY0y+i^l`9GBGM3-k^34D{h912x(J1+Bobm0uK+ zpPyrg799%C!NxWqohXVBwj%VWx+IpQ+5yAELf61j*U&t~(7?*b!pguX3Q0di5t?@2 z{FKbJN`!Vp16>1)5JM9yQv)kgOQ?1vMNsWF`k;V73K(#xgZzXhu0SBbQj+1}RZv=# z1CExIWc}2f)ZEm(l44+(8KQ<+O0rd2eo<~>iCt!HVtT56L0-CzK0*+$`yj3cCJSJ) z)Q6`_nA71=NfQPl+-YJw#86sV@?~eWGy?_9 zr5zH2?Bbmrlbiptd+jOx^u9tiTwutJiO^6b75$VaDthMdv zl$IEuce~Fo+&Mw*Y~Z^`k2dc;fA{aa_qF`@&R>XPc33)P*^U{H!!5gmG7ovA)a`zH zQ$7D~^aow%Y+m{4%Ofj)PC2!`GvI-o=~l<3QK#o`e!lKKdyQUN#n+mxQNNlr4;7x~ z;Vs|$H`8L>>GM^W?f!2lxPIZh+}>qB(l*^IQa)I_^4y8+`dfG6f0bNIdHg!%Y-Z6R zBgdT+nUfmWeU7xAULF)V*^4ufYtiQ2LiRqddpe6G#oda2)v2~_`Kf;(wzBKLk^^u3 z!;g;+{(836@&0$d$)A|4Dn5zj#n?t@e=pd-UG9xSg3I6Yf2wBK3kX~}=)u(V@1PG` z+Vx)!Sr_J?DwqY4Bmw nWxX@&{%?DJy83SJ5Ag;4PY#{p3|<4QDHuFm{an^LB{Ts5#PTWV literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/hline.png b/tribler-mod/Tribler/Main/vwxGUI/images/hline.png new file mode 100644 index 0000000000000000000000000000000000000000..425b00d7954330e2041c8be31376ca489883ce08 GIT binary patch literal 127 zcmeAS@N?(olHy`uVBq!ia0vp^B0$WD-OtF@O1TaS?83{1ORSSB3l3e literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/hundredUp.png b/tribler-mod/Tribler/Main/vwxGUI/images/hundredUp.png new file mode 100644 index 0000000000000000000000000000000000000000..566da5116c766f0ca39b1547e7c501a5b6a540f1 GIT binary patch literal 588 zcmeAS@N?(olHy`uVBq!ia0vp^GC(ZA!3HE*&&$6DQY^(zo*^7SP{WbZ0pxQQctjR6 zFmMZjFyp1Wb$@_@VkNE-CC){ui6xo&c?uz!xv2~WmimU~`UV!Nn${p~G9VSf`DrEP ziAAXljw$&`sS0kHMXBZaMcKs)&cP;Zn=UV4U|?M0>Eak-(Yto4t+$LLL)-kw=S<7h zy?DfynN02V)tVh`{ol#!^`8#u`j!Osj$8qKryP}Pu}A5NE<7E_+yeQ&^6kCde|zu7 z8*|>ht=*vb`;fu=<8Sty+4P2Et*H~omy_#G-kkVo;~e$aDW~kR{;b}_&~i;`+kS1o z?_4qQ%kGFCI{4sxZE0dwrZZ=4#>^b?`Y^pZWnM=;)GwbOAKpB%g3sdXx5;bgZhu`Nch&R9YtC2|KiM}?=@#aUv&|Lzp9juu z+5Vu?BWAbpnf@<&Kju4`|A>B3!nwEuB9p0dR*l8+{~@sE)#5*sgdc9^cjc zg^RbC&a}1p)WmLQug<-;yycC+4bxM0(*k9Guh>%_$?{NMv6Saof| zgW!U_%O?XxI14-?iy0WWg+Q3`(%rg0Ktc8rPhVH|n=B$cCW74aR_g(U{(HJOhFF~5 zJH^rKh=EAk{%LV)%MI=&upI1-HrI=>m@U|n=1^eD!L|7t!=VRh+b+*bdZwfHDaPmM zlCwR6Id&CK{?yMI!5=tucCi2 zZ_bIsnXQ{mf3DD8bCq5GzV7$8kH22<{_0$Ea=EJ1Kki@m4qx@3m|Ul#HAl9ty^^FVdQ&MBb@05xKo2mk;8 literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/iconSaved_state3.png b/tribler-mod/Tribler/Main/vwxGUI/images/iconSaved_state3.png new file mode 100644 index 0000000000000000000000000000000000000000..ef871ec1fbaf45f96a0cb718f400ec247b3454ef GIT binary patch literal 1846 zcmaKtc{JPE7r?*N#J(HR+ESyUZ4jlkB&|yATNSOPhQ!bgN{Aqcy`*YMMPn&yYi*^5 zGIo*C#BK=ojzV zZm9^QC_S2vlxVYV5Y&*v;ma{kYI0uDU>6axRy0%GXkILKV*yDareqc;fTz)H!awY-?E{hOSx#0L9&gX7MT3 z#{%q!rGU4*IaHq||K&L+SI0s6vFffiP0BUl0}0F$L_LKYS_ti~lr+S~g*;Kp5=t0E>+8QMLPyZD zCST2Mu552_zh7TppNy=j8oLu1=z;h3H8{zV#KdSUjXKR*s1CDK9Sx=kCixUbddRkJe^M=u24L6EojMoTs1eZOjY+ zK!H(gmk$;@n-4k?(-_-1Rg%KU(frwv!V-maYjh}4`#d4M@f844ImP6_5QU29m{}w= z?e6XE)YjI*CnhGkXp=9-*LISUl8pL)IlH!a@cOmMxh!T!Zvg2rUI>zS(eFY6OqWWf zhJW7O?P}l3&d$Ek*W3G#ed>>x5_{HSe-0``5!LJSVw^n}^wskGq+22|Tj6$9#gZmo z*FLr0+8(~~{BUN=%B6Pfv@5KRHMzdWEhKoJp}TRGm-G5KS^Jst2=CJXrKvFx}R;4zWFq5 
zH86U-f)7WZIimo;3ab3auM4)u9`+qZg?9LYi&4AK*2e>EcumyuXXkPa*#ge zYa6}m_W4FBmn@inSlI5#@p9Ix{8||Ic*V%Ld}3Wuue9RuLYo}u#qYH)aLPQOQ8vm%t`*JKy0 z0Bv+ZjNEt@Dx|EZqqEcLE|!G(xuvC4UAE@Nsto20Rbo4})TJ^e$&aGO8-;$nXrdBa zth@q7^zZTU{do4i8li(s2Ju1&U_w--mL0h@b8~b2q-ZkvTYxRVhzA^@N53fOn zs+`C_$*fKfJSqY$E-HGxg)08kqy4&|ZP;(D)d2WUc5x1oGSogO54PSd?wpQ}P6BI- zVa8#~0heJCq?Nrx`Tn%Dw9j|&_(Kg=Nb85}2|H=idPcDOW?Y@~%b_y!*2LOI<1EEB zkhA>!d=_)6EE%Dq8L62w>5%$6osp=~J{1EodVHm1aN|j|Vd3ZP@B6c! zM^pAPeT6SGW1V-(tg7zQqE#|U=L`(;_(+FUH$l%0AOQfqwHX~46@@ZFA_o(>pZr2t z32g#3ki0A$mX&!rvMgFO0PX4d z0ceT*vQp}B|HpK*oYJ9B!?-2qx7}b;p2@hKcNw<%*Idxu&3QN9PCfiIS?k`szNW>} z5*UvePS`*u$E}1ZrgrlC1nzq^di{@sIq@VAFpwA?03Ef9xO(Y@ns%Ozd0SiCpOpIA z^P!TtC5mfs)PEJf|F4UN>fhN$iFO{Jia$d)MGToMz-(}*^UKQP)X+uedc;`NsWj^Q zQdLWoArQCy%q3T=oC4im4c%0a6w5U#k4HRgPx#32;bRa{vGf5&!@T5&_cPe*6Fc00(qQO+^RU0Tl@cEv5~aX8-^J;7LS5R5;6x zl+SAuK@`V7vt0shtma@rsA8lD9z+7gKOi822c=#-3D&E42!dxTh!+WkQalv2MJ(t= zq#$?*w0aPGP!p+#VzFC`iCSxuY&ZLBcE>}Ewz|va3&VrQo6o%O%wvR73IH0L85T>U zR98nxQWzQ=jz2SS<;DVlP)fmaImgSndDd4lbSVdoFZJfk(8QC=ZOCkKmCCEE=}G!i zdLlrE3L(JsBy%gZyvEXEj!2e&aIEg$RfXW%yX!r_ z3p)VYb$B!h_ijbl3DF58_-;I4;VBdgP+N-`8i=2)ssUr8@b*QbOAT`i0AKCMPL092 zBV7Riw9f~Rm%UUk06_;dbu)Qq-yQ(IO3Feh#n$P;@xJC}p~rX8#NTO*RBsxD1}&4< z7WHfEPx#24YJ`L;(K){{a7>y{D4^000SaNLh0L01FZT01FZU(%pXi00007bV*G`2iXJ^ z3^xcw*ibD100GKLL_t(I%Z-#lYZE~f$A7asF(FtYc<>~8ks@B|r3bw%dh(W|c#w)8 zK*dYHf*@GDmmYelC!^AXMKJbcdugKyYKyV07O@h$+az_(&g}EBA|%Zw`NLyi=Dq*> zy@44eA^@=Y&SPg|ii7>L3{%vh6^5nbH}B5?C=r3$>J-)0IrhFWs{Wwx+hOI@^rHzS zLE{9A^IOfGdck!Z@)>uifsF+e9Ci#J8VC0rHmeit)(dWI$*UMl1d~^jH6`5GpsZoc zg^a{S&6k=-6p;sr;-uB3Yj9x_7UyAmqc4r~Z*Bb!(O3&MI+|5D2{-P-r!`o50zsqy zY+}*GCc#Y8_(eD~4sVyBTsnGgEDpwkG2PLtTpqk4+<6G6&voZogGTINl5TdeJ>=G+ z6Ozv0+OQ4FFOs|IRcz3RfDACGu0UxP_J5|r>D1z1d$8~rKG%j0sWBZNz=)ugN_Jl^ z^hZyT6X;yoX0qKAjbd;t3qadi-znUlUO&WnlTivQf~XN%XLI1Xd~Ali{?VFo0P5#2 z7Xzg$&B&r;lqVcg`Q8YgehqFtc(qbi|Nj%<`SmN_n07rsG6Oy8?P&ekZctucd+!7M Y1q`d{*W_Pyi~s-t07*qoM6N<$g4iYkfB*mh literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/line.png b/tribler-mod/Tribler/Main/vwxGUI/images/line.png new file mode 100644 index 0000000000000000000000000000000000000000..5e0d60d57ec675e389dd79aa0abcd7719a2626e2 GIT binary patch literal 139 zcmeAS@N?(olHy`uVBq!ia0vp^%s?!{!2~3a&lETfq*&4&eH|GXHuiJ>Nn{1`B}-f* zN`mv#O3D+9QW+dm@{>{(JaZG%Q-e|yQz{EjrrH1%$$GjthDd}bmlPLo-m<0U-ycin k38C@v^QTQS6NzLvx8LpfyZv_xfC?ErUHx3vIVCg!0DM&{h5!Hn literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/line2.png b/tribler-mod/Tribler/Main/vwxGUI/images/line2.png new file mode 100644 index 0000000000000000000000000000000000000000..5e0d60d57ec675e389dd79aa0abcd7719a2626e2 GIT binary patch literal 139 zcmeAS@N?(olHy`uVBq!ia0vp^%s?!{!2~3a&lETfq*&4&eH|GXHuiJ>Nn{1`B}-f* zN`mv#O3D+9QW+dm@{>{(JaZG%Q-e|yQz{EjrrH1%$$GjthDd}bmlPLo-m<0U-ycin k38C@v^QTQS6NzLvx8LpfyZv_xfC?ErUHx3vIVCg!0DM&{h5!Hn literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/line3.png b/tribler-mod/Tribler/Main/vwxGUI/images/line3.png new file mode 100644 index 0000000000000000000000000000000000000000..5e0d60d57ec675e389dd79aa0abcd7719a2626e2 GIT binary patch literal 139 zcmeAS@N?(olHy`uVBq!ia0vp^%s?!{!2~3a&lETfq*&4&eH|GXHuiJ>Nn{1`B}-f* zN`mv#O3D+9QW+dm@{>{(JaZG%Q-e|yQz{EjrrH1%$$GjthDd}bmlPLo-m<0U-ycin k38C@v^QTQS6NzLvx8LpfyZv_xfC?ErUHx3vIVCg!0DM&{h5!Hn literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/line4.png b/tribler-mod/Tribler/Main/vwxGUI/images/line4.png new file mode 100644 index 
0000000000000000000000000000000000000000..5e0d60d57ec675e389dd79aa0abcd7719a2626e2 GIT binary patch literal 139 zcmeAS@N?(olHy`uVBq!ia0vp^%s?!{!2~3a&lETfq*&4&eH|GXHuiJ>Nn{1`B}-f* zN`mv#O3D+9QW+dm@{>{(JaZG%Q-e|yQz{EjrrH1%$$GjthDd}bmlPLo-m<0U-ycin k38C@v^QTQS6NzLvx8LpfyZv_xfC?ErUHx3vIVCg!0DM&{h5!Hn literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/logo4video.png b/tribler-mod/Tribler/Main/vwxGUI/images/logo4video.png new file mode 100644 index 0000000000000000000000000000000000000000..22035c3092a29933485e4513380d24f6c8e10a35 GIT binary patch literal 5890 zcmb7IcT`hLx5tnm9Yg`?qJjzm6h*~g5R{8_1QG&DiFCM9lnwzx^@^g12}qG9#pD1% z3{6S^kt-NNF%$)YBBA$Q^P=y5-&^mG@2&ODTIY9W_UxIp&&;0R>^*;(o9YYjO7e1W za0nROykWt?aRADG3EW(4N%@Nq68i&o(=pNE;3)h166DboyDs6chw``darF;E-*@5A zarU|Ea?Ic!+Reqn1??Q-_sK z76C`xu5_%)NX>F<9PSE!AGqdt0K~@zZs>2|z(Tm8Y&b^xZvcz2OJK1m3g|Zoqu8bY=`4&A zJn*~ye<`6pNeMQk2d|VAZtc|+YGMjKM&K*6G@8#vs+mv*v%AGDGBBvoBO+wzf>4C- zP;gcPC~+R-x$*=0Y&jJEI_U~4Jv(Kw-HV0i7%OhO&T4*A8abM>LOnd;cVhvg#Yd>C z#T(8ty?cj7ywV*a0|AeV^MTp&ATJrho2Qi-%$}y1s-kTc#zR%Hy3`?H7J{eb#TjK^ zq87ngz;r*+etJoa;+V{Yjw27$er)!=$paUkzGQf%)IoY5qruk{j9lRNDkc+Z{mLGb z9$nC9t?dCGyr4u@%`&ZU>wZ<;oMEuy4a+So#rDA)`-Xk9-nvG_{6*Kv5B)p3d{_lw z-EHiRVQOb&ZXCtd$peJ%aZq?CjH>G?vTpb0;#C2QnPoMem2yHT|57+SRn>l(_?1Tm z+;`;yB*$O%6+(@w#6Rx#5Zw$)>YC6u@F&=jWb0RlnBW}pr0r?kuL~9#8lE1 zAIsTtLyL9sc}N!?sJV+gDaWf&p9d=T3;{Xnuq|TpTnv>tnoLoOKVtz(JnRfH1&e`* zyf%ty>S+PTNSB*1Sj3T(Jcw9X@_1N4U24lUdu%JuG6&Z+M+BNdVEd~lfi$E7#vQ58k$@W0^;)tqA1`PV?9-EnkGr8HMEq*w({OT zgsD99Zu^B3u~r1-|FcQRWTjh^+5*prCMaX|gmCzzP4DqwCfC8%Z0z@8a#I)ECZEeZKlm)X*IKI=IiuMS>BFFUo(CXSyXGD6)ilRem zP;aM$d%AL13nHPvGJwPf#UlKJ6{Q0E;_MOQfs zghcHz5*Qh0F(@r%rTw_d#6z+0uK^cdg<|!ON=EDKHnIAfkweVZL1M|4Ob7ImN-^8S zyNjN_BLFq6G=J~DN9F!@Fw*2IhUb^xMkRB0e38zP{J%NP_?wiVPV3uIyVQRD0ST>tz0Dc!-$`mwe>=c%*dM)YC5hT5 z5rEQ4=@^%St<32cN<4s)GGNl?DB1xWeoDD(P@pw!=O|BdgcNPm*DTL zCd7UOe~o2Y;UzZj{5(#WtK2cb`m#l`d&7 zFrJtAkm=}Lef_Vo@}5=rUJ@;na<|53mv5~EOLl)?Rw0blI47<&+i3gldi@ty^b-tw z%Zoqd<-@k-saKU$PF?27HEx`GD;A4PbA(X?X8--BvN{$fn%iR zMJ_O&IeJ>%`1|W6Ycd#_R4-}|5#y_UcXe;tb1*XBK$X-beP>X;A;b$k?vXR;gxecvu(P3K}URoq29JH94JU=`Pk6tO^#3`J`1TxnPc|s@iSDm*&SB!zrkb6&4JQ}0_6Y_=3}Jy;Yf?C3AMi{Os4}i}q0#-gVL3))g z4V2g%g@wUsAfo_;KPo_ToxpVc-dng=h!zBSwW9+HIyGaiTWi}r$&qUoMc^svc|!<7 zNM9sZpddLAp|XccR?E@C3t%_J!FqStD>YoGs>okMOoV6hWYJW zzm-P(SWzJKh{%2tISb6fI&N@IUV-34JbXHwNRh}tEf-_zWWuS%_vV?V2V&kAaPkVnvW z+}f`Qas!Hv` zOPnFJr&~XlsLb6uMe1um!u9YYz2r*ed7X+`nU)aqg6#I!E?9ElvT*bnU?vOZs}T6E zk+u8xz=655`P3V@+9pCTYI8_#%?CRaDLcJ+ZH;dxe>e&-W z2sf~~Sh!9~zf|s7m)0A*7s}fFH2&u3VTG4&ikfWTS8Sbq4d*iq4$O-hiGpB*BlbOD8xcAXja(#_* zIW(OZi%?ST^%h1!p$O#mvsh7X;75Tbo-Q0eOHbARAlRBZ{`d*Hm4(#1N$lAGnb>heDds=4;k?Fa=x;PrH1@-FktAu^qA<(+Ug&@d7 zUyYw#U1u!3lX4FA7gdMv*2k=V8?&`jrzRep zG#tcuA!U(TX*QD0%BnhnerCS!?X%@H#JscbH`dB(SZta-I$?A@PS0n5d!um6{e8n+ z#KIUmo((GJ3sYrm!on)dtHaAPJ^i}X9`}Fp`}+c^&cbi*Pl1ZaH{K7755M&?mg^HP z-L2>(wFgcd_c)BC8HxIijR1LPzVn^vm;pqPYLHRppDdASOah`3Lv1m16bR^6yK*k< z(YGv8N9#qT`}kc}|Hd8$?aWrvw8wl}y4|aoLy>%^Nk+J#?5YRm0w?nJr~9B<@q)x$ z)phAIxUMoTmn4FUDLw}$#je+-nyqIWKZ6|h zK1}@Ks8LaVRljZHT?j?@Bkl20R9pxLT&A&tbr+ZHXIitkqEsUBQU}mi`&slY!YxqN z;?rva%4+@G&Nny6b3^ZLSOyRA@$Il-$NpKLkrdj%aqUYJXAcat!I|g%S4fY-A~Gg8 z9W4>2o8EbFAa@APO?cW#ZC-XyIW0GPXLk!ZqNPE+Yq(fTa8iwo#1swF=60e(eTc(w zRdsdkg_^!R?I{Z(jM>&+XBkvhCT!WbW= zA|e96VZy&^-674$=e{iT5r1V0+5SjAN#gQE(oc&UWG|R0TnKc$iprbXE7ns8smzxb z?dQLjb{@!GaHVJY%nZ#}QXkc6%M5gwj!m^solk5G>Xy>zXdnK9phY%t^Cz351%Av& zWPKYhe1xjJJ4Ljqc4r|}WQQKe@Bqzf;f1-0is%xDP>@la%X{VXfh#D-Ty@6*H6gPz 
z;;3T&;^*&U>t?A6oJ6Wjtq9X&eR#FLtmTelccTm5`BUuJ#h^*qwIe8=T)l;Bg59MO&A((sq+}XGW!qbbm+u0}P525&r=bHWQjbxioc{Mm^xZ0QSL3x1yf;9AJqwiYz zKrl7H*KtEXNdzi#G8xSY7n&W!s_s(8oBrV5t+HGFF1BCc;{BwL82 z6Gp@DQkB~-A7Ho|Ophha2s=1d-Ht~hO-wcm4K!KDjjCOBH^LogvZ?~ZfmuP|zH-rI zp<68K6r#~8-nM_rHc`KuR&XYj(4FG>NFxglxQIE81vhHVPfXYkOM1)ogj27~V&?cj zUJ4Q%1BDeh>khx!&`c{X57oDUp{dI|v3QCSGW|f|b$|0LX}Ccl(dlIbu^hjdm4K(k zQIr;RVSjQBb!pAcqAKmSH>=w{fi{ASKnd85K}TMh#_WTUM7}v6htIMBFNBhO`shh6 zvQoG%515VSC7oqdg@x>^XBY!t=7Z$X>HEt8>l)hUcQV?1$5-^#F4pd2%{nicJsRre z7yJ`uKUw`(d5B1R9w%qEAm?Vt=A8EYhU<_yvB*iTVnM<;1Ljjx5;;`)bA$PVdb!wT zn-~wT)GQp#=C}TP<~1X08fqj)%a(1JMb%%Nc)OUZAr+OM3hzcJ1uuNh-q_eJbh}zkTNFA=FOi2_P;ZJvNG$ zeelVI*7;RQgm+p@U;{{pwUcfsfnvVOjis(@0N?Q|JSi93HAbu*=QOjAkqF#COJ#|x zyzvDe9HSTrq5A^XG)Y4?s1)yBA?nj{2Wo50b8>MDaH;`x<{9^(% z4WD%`GNg@+yCaL3V;7G2ac$;L91~LxUwXN?2*81W)G5S@7=}{U3x13!p{T`98 z&F4u95=Lgfc&8(Eou!)#K~%G=k*DwFb47BuqHhORlzWREkwPZ)yZikS;+fPHeLn+R zbJ1wZS+3q7*m&ja!O1)t&tvb6W5o@pLpU!P4m^w*-L5;gApCLJKa;M5!rb1D_kQtK z{$TFfJepi^UN*iW@+eZfk5n#IaIZ_p*UrN=#@Hk+K7vS?i^EsE{2&#~e&qf1swr~boj@NaR{ d-Q7kGPL5!Y{dN%WK<}5#K+p6>sjg%6e*uL3Pv`&u literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/logo4video2.png b/tribler-mod/Tribler/Main/vwxGUI/images/logo4video2.png new file mode 100644 index 0000000000000000000000000000000000000000..53ef47ae7c69633d11882ef1465ed3583347ebe3 GIT binary patch literal 1317 zcmV+=1={+FP)Px#32;bRa{vGf6951U69E94oEQKA00(qQO+^RU10N0=1C@nbH2?qvw@E}nR9M69 zn0-uDWgN#phkN1jrV<1K38Vo_aQT90Enh}TWArwBA$6J42DwVnWi-^mtj%=Va>AT! z>6|(U%*@S+Rzqruq}_Ol)KGju3Q0i_CEhNlKOW$Y&SBoe?z!AQe73uL&hvYo=X}od zJm>fOK0is0VLY%E=P9X@977E7s}n)_yWoW7F_dGVJiNhkl#@WseFO5!;l~yzX@UGo zh@CfrP>FBB&SMZc1zZf^;sO!RL3R-D()sJ#nFeQ$sXVwIem@7jy@Tu9+u-019lySTUJ^vlfT^>ETd4j8Dt;DA zJ9Rd!_!uTc!__L-n*}u$I(~f)QVGUkbu+r!9$p@{g2ke3_+dePRseBHwzWO54RSIZ z5c~Yb+TM2D1V^a^$@Zq*T~4T{Or8#c(lAkYjdl=}yM`F(8{hyaNrEwcwzm%$Cp4F? 
zP+p_q$sl#xl`3W37~i4wm))SEXQ;n0u8rZRR=zjN`b<8E??}opwDIfS@ zl5mzIk_viwA2OC$m!Yi%*1c-;rqwU&a`6JBFBJYLWsV3bI+n+YSm)8`c7rl`3?0=+ z`9Ml0L`{bUsSy9BQUs=C!n4!t2xans;wD(Ojv;|xNPxglSo9twELB3iy9Fl33>!*0 z9q()w4^CVz@|uM?uxve4m5KyxPKHSBv_G)UT;_fV41xVyq3+7y7g)7XBt2T1;LA_p z#%0Ld2Y;5slv!{p556ya#7teQwg%CvQW5jwli~H1U@ifpQQU`wi*^&I!yo5GZejZo zaQCo|wfYlSk_KO8ix|55BzSs@(6ur{sMO|08)oXvw|(-q6prkH7yIPz7aN3D3VQl~ zGPo3u?iG2dO}SQBfN68VV1U(Iz}rV0I6@(&9ZU%WZ5cLynO~A3{-Ok&xj;{ z_#^>#@2=I>)ByfLkiJM9D57G;^7Tgsn>Tt_hmami)8Oz0Ko(KIAGTiw$TG;i0_4OE zNd5o{vf*xn)xKx5P@xCP;IJ9iZintp<#;xGbXSuQ2R+?jbQ6Qiy<%^;n!wU6B(e;+ znZ$-~yfxTvv4}Q7p`!ntzXgn&yU<|Dq^=AENOBBez;%ro0=-IGf0(HQ%ALfSpOVaU zx=3bZU_H<|!a&Wy#(|A4nfVELt#27eqErI&Br{hPnI<^~H((|Z1{fRw(*x81ze#2W bvbFyKeujE7mY8g-00000NkvXXu0mjf?p|nC literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/logo4video2_win.png b/tribler-mod/Tribler/Main/vwxGUI/images/logo4video2_win.png new file mode 100644 index 0000000000000000000000000000000000000000..f8646f52854aa9d14fc4cb3c0c2882223d148198 GIT binary patch literal 1524 zcmVPx#32;bRa{vGf6951U69E94oEQKA00(qQO+^RU1rrP-ErG>CHUIzxhDk(0R9M69 zn0aheRTRd5@6EK+(z4c}6ibx?jZjc_A%dhz5zuJZDxg5X6p6~BBBnsvAPGyrNN7XA z2qBcPibz7(BrJ)v$Rc*Afb6T#mZhaoXs7e8f837K>3h)kCJfZXn@sxdJ@=jay>m{_ z`R-A~wey0k>C~Aq^LrF=;)q~W*SZ60cM&ReaQqMI>3|B4gnISs2&g{q!J1>RzZmuv z!`eLP_U1DLHSj}NxET^Uf@%X*g*Gok=5}~v$P)o&x3t@4hbap|Q4BPNz?^lk=SO&0 z2F~Z1cg_F2r51innXWFn;rTM_sx601p_{SF|Wdn%RzEqc!YIdL^Xp^Uj;Fy zE}a5TrFCGf+XoR;$!*xW$~vfmD;7ZsVeNp0lVR^>gM+Wa@m$a}|Mjv`Shc{y0Rp=w zESl@VJ(xBG61zjE-ZCxZ{|0~jDqY*DH;nmO{!^TF!O{#kd)%~oO%o`d{6j(=)4k4= z7|*T(?82)1ygA8idAVN#9*=qFX&QDx0HEfshonK~_O6@fCAlup@?Yklt6`9sDVwIT2ieEfhw#9m7+ky zDD(T9#!Aj*-c+cTQ$j=ocrM0RcE+V&YuM!RbjQ$rR$Z6a-T2GPN+I=iD1V@rwd)q- zJk@KU@wzJ)?v=plJiV+(au5UEW{18h#M>^s=PE@?Db!*;-5`4FYF#!{B7! z7Wtf)qnA6In=eaPv6O0qaWjl%c?aO?Ir)5Ow{aF9pCJo*t(MX(ki54?L&7V@vh`WM z=Q~AkaF>2hi#GCS=KD1>pc*@2;yh#7xqR5O$$#yqW&ZQ)mtbOlxNyQ$pxbK=DMf*4 zS;m=J2v{^#f0;P29ZnvF<}G1ECS-prQ-)c(CeR+gqwg?C9%d}tmJPW(4FbahpZ0@_ zhw@@-rOapod#&DkA;Gu;Up@^BCO!JSySD=(6#`)*(AO@8Yv=U`emO`c%O^cF%|xKz zzYA%@eQqc%hAHou3fjOdg~>d1;;_6{tFmAeR zdKBM+A7;bFQ;@zK{yGL7d%=!1@XLl;EOkELS43SovgReHz`JAN=swv(OiqCot>wP9 zUE$9|vbQjQBZP(fbWQjMhEIldnX-m1-Ujs|W$7B5CRyt30uz?n`*Rh&&4GF?9NS}?ACQ~#=x|p@uLaAb43Mc|l1VSjfd9`gwu|QOfo!j aLhuhs1S@x(2Ot;#00002Pe^Dxjp) z=y8iUdc@8bM+UijBE3VUJiLAV1K~1KQc^Nf{$6kyTOA8^ zi=Z>!e*PCQ!QM8QOIJKGNKbt)8DpaZhLI=1n^E4O?oyGcfWVNGk#L#cFi(QtGe1_7 zk@|f}C=xF7H$HY2R#Ios!QN8Zsv0Vu>RQ@T+PbQmdYbxry2?^E?x9kq?txO8dQzI| z>fjg6lUjP`q1wMb{Qt!7Z>&MJ0M%0MpL+Q} zsQ*)x|0m!4i|_v@;Qt2te|bpGUWX(ANC6j3PF;!2S;CD6Zm9R=85B`fsQZ*kU)Bd( z=xD2Hv`MhImFz)({k3zc$9AHyQuiY&icjd0X9FhU=@kqeI*G9xKT9T`2YnIqRbbgB zRLK2tb)F0{_@1*%wV6B6L#_)ls=u?*sFKr*`YVP70AkYD@Ov2y8OA>SXu|ynlBM0stTW{6!%d3%Bz@3$!q<7hXo7aE0^qn3wk? 
zFyMi!9EG&~fnJDi3GK5uX6^;}>0IuqLjW*+Jt;qqrBWQHR@2wqBG3GliyHF|TQ6?u zL|WdVo`(awr@7$NzB7m5)Sd!%00{C1kuOU7q<@bu=rYDN;5V=r_ojrY;BDnSLY^sU zUK+q%cV4oXc7+qXDzmVQiJ<~Cn~Ao`%5f&2M#ZE}id?`wQj=Qh&?BrC~9 zv^z)t9k+txZHD`5$$+_Hx9jB(z4E|0!uDM3`#T^M=9#{IPym>F^iBWxD9kFgNRIzc zImM{^-&B0S>&Wx}ech99`cKll0%5NNeFdc26ngaUYxu%O_JJ5)NizzU%HQ<$8QhWp z07P%n7@+?2X8?BH{p~13+B_s4P@D3EiCv^T{Lc$1Nm2Jm>A(aVV|#;m(e(1^4j$<8 zAT~zSsEAfT`}`l&Op6NPXfJI5lL@gUqzxhX%=mGsVyWvKbAx~;OI`7Q(LS9m6vl4M zPK}bmrbABH-i)Ig*4ab!1*E#^P%#^*?#us%J}#u=maIYfd5_oRQzs?_VXtbtK0=T> zX(3g7MHsRwhEuEiEyBJt6mF(`NgWC@oh!JwL(&&aZ;XzrrA@{=@)zLA6$%a1U;>VLs@kdr{X0bLgsE25WK*X8ZVZ?{Bi#t5sr|Lh~ppCuXsaZ`bhn| z9{-^;tIJ8tth!dTDK&41!=h_4r}DYqKVU* z167*v08(Hli_l`x{_Oe)=oV6-z21fXNWv<}1PY74q78h3kW85vmRCd-x%9T*1@$8K z8mQC2fe0Sp?5(hFw#3WrjM@8;%o^B!eg2|9=l;f~gAwoNU5c3G$bVr7=){Wvrbt+1 ze)<=P;%EKU-dRX^FyaeG-{N;oXGABbw&=8I5_U?^@o(#S5y1urk7}58haxV^BHHP9 zc;(tAg!-rG}*7NPCA!P(T7SgrqbCGDvE7>_hKgY}gJ=GC+P=lvc_*OLj2x{k;@ zI{j(4Um?iypMqxU@#0ygGS>Yi3zF5%(dQGN#gn20EUhq5;fNOJJP}^ccjj354k)d@ z|J3`smj!vtLVOS!kSXU6|4YDIIe!9`HQg+pNvUQ) ze~z$}nOi(sBUO}Z;dX*y7-ARp;rj1$C_7od8G`$*f<`$#fYm#`0tk*46+!ZWrsZ!o zI~Nsszr*(xY`@_+;*_lt{A7d;w8@Bf^=4^O$27jdw^pDTf&&1GZ(N41z&=BAYOegf-sUNTk z(8Z>+{Xoc@CVhd)$0>72lN*7t5&a9L{^{BkAXw5?znaXsr(47T%AWdqFGvO^soOE|6AR z1&}?iN~GM{_qzEg#(rC$UFKw%RGZy1wk3B^gbj=O55Eg!g||C@bBhCA2amRn0z>5& zj%SX-PIL^ZP{X=Rx!}W92c;@UW-7^q7T3Auv0ao!FC%{Lw_OtpxS)vl4>8>w9R@qC z+Ki`Pc^g!CI_j%=ptY8SYoJ|n8Bps!vbBU|nl-xEY@iC>w&LmecG1h_^tiT-sEUBU zre;PqYJ47l5<+XTtmKwPXtDJ~8htxMv0+lNYTu*@%DHc(B(Ba7;g5W>e4#bq@DI{y zj_OR%N?wRa{#yF}nwf@B2H0MEe?J?Gi@Nq5r!0?EdFY?N;o8f1N8Wf6a68(9d%3sl zqSelw!ID;4d4jAx)EAx)@`{t`cYhf=ivSOFSWJ@vdR_!=aSFf~(_d@zn}4~;5va>@ zH(LUdtXza~op?bLS-w<@@)7n#Sq&xl9o!*uzqSouuZS~py*PvKF7YEB>bDm8fqT1i zWMoM?y2Vw8ox}=eI`5c1kX(R-BEH>ueCc2xbq*6#z(b%Ky|!N^kKav^J9oTi`ySJH zE6-4(#@t9g`H2k+JHuWMTDeo4C!^c5-3~1aw042Qi|5j&-Ei3EU9MnD15DRM-%}&=wQE=QJD@6b?P7HYnc`H`*!gbdJ z6k^0WZOpx8eS6{zdU?V~^K9$ZVLL6f!=tkObYJ8Ax!8K9m&-}6&;(YNyPkouQgJkE zv0rZ0ML?6>?A6G?Xh2pCpx!HC?db{j%(5dTT_R_$8QJed3sg!-@~1vjQs}B)1(d59VkcT27Wp4id-f5I=VUL z_;-Z+PCXsomS^T$z$MMTy5NEtYKx`dL66MG8i^ zWur-3^xHH+W-<*ZPYMIk#UqYq3d|2C$^;t}#^NB)+3wz0Ig&!z4fu_0^0wS zz+&FstQe-ZTJ@Bc$k(^)vHlLTqgTclMnUV{YtQYvsQlt9ASO?_mSDS0t0?f%!@M_o zNL2nvJ)3FgzUDfnVS`chr8M+#p}-XNJlUy0e36>sBT)*pi}ZpH-k&W(=y_wvt9!4Bw%4oTT5UDW^c z0-XOh9}Z;b0Y*5bQZ9cmL31s=i{!+1pmxf^Tp?#=!duI&%xv#TJLp8~TWuVwbhNW& z8Ovr(#tFul{NY^{GX$w{OgSB6eU|Pp^)co{|F#J;E@OW5vk~l%IwDWG%^v|Hp!l(o zEMU9pgF3POl?4{p#!)WbemTdYH+&5(3ir9yHaWOyWBF(*b91bTq2uX>8pDdPEic9> z-TKbKEJVOeFqMsvDdS~g7-(ut5QKr=99(D=?whjTK z{0Sc-hg~2p9^5iAX8wexYdsttTz`GT(^3@l+nsB}1*MHS=~b zjK$ASYQexDpF!4m6}FYCBdeG&Z^`7q&Wtr1;i{lx_>;1*hGGf5tTpFAPQW_&6w~22i$o6;*Jj2OeTS$hr=Nb;mS70H094SjNiHY;48d2a@Kw^ z*?>ip`PmSbTq+hb^%ji*0|@_fY-1GtY=oRI^hXm_=%d)HoqN#XydJVv*5-;&j_B^^ zY~>_hC?gsZeNzGxy*RpCbId{}Y#Y0r`#wFlhPEaVWmK_R|C__@^E_A=LC58rO(+pR z8`ScpoU>}Z_K>gvLNp|xg1n&z7cCmO9Jc}gw40iHYC2g0Lz8?PYkA#_ZGl!azV_Tv zlJTsUEX>Sf3gR!J05FTHYWCEXMc```3lh6o8pMyi@?^s$^Pg@Sy;EJglz*bda=vW& z(%82+q`g3|A=y$=609*yD`F|u0j@lBxE|bDk7X+AF+8{T?}{ZN$P^0Z`omdyE+x+U zsDZMoS3L-F&1c?z>b+iy(O-!5`75EiLJg>K|Mh+@OfrZL%Z<@i09{F zrPh)-o{#FHknp5}r-Ry+HfIWA1i!SMdTw)Lkfg9#f5QbbV@o&;tY$=Q9 z1R)Y!z-$;KzP2{c2kMtwMr*XisunD-#IJgENe5|I0l)htWehxt&qMLzAiYj7+@54g0)4s9^OuPR9D0ALqm`L%Dv z_GUEa-Oj+Q3!Kl#$xq`|Jexyzu&On^4#|#`tqgfj2mZfsiL_+#kFuEa2@)96D_XO# z)jo+hm0cHe372CDg2R%2B|^X=W$^;M`D58bFUkOa7g;{?*yQl=UkmikO&Mu(J+9?C zOG2sjKI#oV_kmTQtc*2r7hf152PWn&-`>k(L&|sX^=)Ka9He4Fg0sOi_3ZF?}@zUq01JT5&r zUmyLLFCckltFhUe`z5WhF@I`nk{p(f2W{X*M6|QUA2!~>Z`@y(kP7V~z9)^C)EA!U 
z0%i3+EOd-fSYQ3{=Lff~tJkm=)kEW>^AP9GKhJ1V@vj;^b)C)bOu^8J4Q5jdSo*TU1}3CsZ|6p(qJ-|gF$61_5jWEHT49zQ z_-!kTgCl<^O^4UGra6sQgzd}J_2d&I;nWsOipza$3;H(iABAoYX%B7lWI?p z&{8p~7EQl=%p3#*_T7z%QNm2%))NC*zFeU z6AQVPQx{w_q<@ISAD_xtx}$QGV$0ZNwytQBwj|dX7XIU0n~max25qw}Hov28bN*5<=xy?0^0|{1%>N^YVBekcPo#lA9W6^0}1{k?h8w$H>5Eh;XjyR#u+He zzQE#SB1-@KGxNk39mY!k!w_b%wt+!aVAtc)&*!(6fCWm%JI)!u@F``weNN_ zKEC>w4-u4S-?&3>Iy{PU?6#YEcWN$`_{xSQ9)!~%f))mRohM$@C%a^uX7@`;-}Yu# zQ7f3NO1ZW$S?3kbnA9{45@UQ(_u@fOBdBXk-jj+;X~OOAvl3-eJE5mL@D)0e$9{py zK2~YH3KDb&tV?$1N8e$`*RdxkJHE065g=2pI1C}NR_e*1>Qa0Fr{&*W)%>9W>%!F8 zAL%r0l*B97kLn-Fm?gt1XPz!l-cWf4ZH%k!$EKy456Jc0`OHyS)yR%`7uQ=9MYKro z!s4xihX0cvH#88oS#$QmMyKSE{=39KqE zhKNuN$Yko+p3t_Dd!{_^=@j?X?>UT+*2!@&r-4(AO6!Mri|Ck$U=RvG_+}UYQ0qW2 z(F;!!9C|6AqmOIs%&t$M9J>M?T9n!8-wa*ilKc;Rk4+Qmyn@wLDrn!h&upT(pu2&- zmg575-})aDVwz!4__@h4-1%N+W%xq*!L8f8XV25Um&KM0GQ5O76v+)Os<7`hnh-9r zcZ{-Bc^&l9lInU23+7o#_hgYQZvX8uX!Uu^Q7lgwY zv-jJWdgte7cj7#&Sq6Ct93V_=wdPM}N4_WT9p2@5zrQo3zZv??@ux46o$+DU^}1JX z;;$jr_b4&DbDR(6lviTLF-ZkR;r>EGf;Q4LE}?MyNZJ;S+3cPo+RQjrXS|bsz{f<@ z*K1HpwM}m3#7fJm?m^gO%W>4`~UWHl6$;$-c z8R;&0Y=?I7y=*okQ*``CbpPV^NS{G+o{CvN*!*bJV0o-;3-bazrkzgyBJ$xlHT_veBVSxNB-T-4`mMO`P+w;iUrN4ZOKeMb^ErfNyfJ9E z=y*6e2jrws=5@fYBrR8riSsJwIi9JSbO$f~Cv*#}lIlv!RW#hf)#Ut6cduKVgeLLl zAvN|%)HN(h^91+Del-NT!cPs*ft4s;W5Z4+>@G%aQh)Q}@z4NGEtX`j#Pv6eE!7uJ zpVfj4!Y(MP7*sh1Z{C^In;!+7@Z`fW^-DXBvDqv5&x_}(S-ys>b%!WEVkhQB_Xu(D zd-77)FTR#I7TH@cH;+z~z}S9Uu&^Y#g6U}{Frd9WT)~_HPwSm?jvNWSUglaOPq;S! z*o#mma_GGcRTT_B*!8P&%)F7p(C9n@ZTiDPp=-vPGXigPoP z7cDCUneMFRu?`EBj_ij0jW|viCn?t^mDN+m#fvjX9wl|N&4t|{W>4Xq8BV19z74p=yKM)r;1s3RAnjERAuG}m-YM}{npH6?sAaa3$kk(M5BMNqL zqxB!LfTrSRdCubfEc695xJr*vxk>_1r=~=OPFs+gI#19eudh*@ZCuRHYhZ9 z(k<@C`vpX-UtY884rC-(58e}#smt>&Thu|T(90kox+glSo9QAd=IhTPo*S&T3F&n= zF;ihbVVx1qQ;WqRZiD6U5`X^XcdD93hpC%j_v~zicx$P_Le_V5ME?fTGk==#to{8d zjHA4jDWoc)`0lyf&na509?FF<-^ya)r>Gv?VAy;U|wUJ*NK0kI)6)myxbFzm$o#(!o*27u?PGZffNze4;91 z!09;U^KKPaOZ&t^VNdA-XgJRy^_hqKN4JwNXG5;*-%d=_ie+6;=4cLLeEJXI zkm_&rMNivJ)z2}*7(h|46C82wt^)3dv{Nw?S^KK|-g#mT%2w3<=x@6o!DjI6gXs3% z`JVcfbwN~FtYqNLiLzgV+cg#F?gM^W(jOv1!XKiz5)+)X%p?98g zwi-;gtKZEildlMlX5n5Mp!7FB_qMhO@OATcSnxgzNSaS<_4p&nb5bsZG6M(uBZqM4 z!Eu3I32zHiaq-B|VR4W!r~GC%?1jlz?+@-s^tj0cGgoV$FImM&nD^w7XxGFM?gT(Y>#kfDER`RQS%6-dAw`$*{cp z!YBhSst6j*Bh2}4aR+^&ZVqwsqD)ri{bdVw@dx-i?P}}vvFR|sTZD<@1bs$0vmO7X zj5>UBH=`4EFM4PIfBw=oC6>r|vOE9h*w%qnqwY*d@!_^Cx9#Xuq0e4+CTm7C?q6?R z37470@0_zhHjmUx;S1aFsojJC=24M}-rUn4BZ2}oAf{;G$rx{tnOl+I=#C#>)Cl)n ztaM}8H2PM2bky)`I`Cwil5{wp*o&lky3W6$0jw8@N@w!;R{ zEu7J9!@7Ev$=|+w<^?x!R`3Ye?uQoebIfXQjK2f;3>?aeE{zw2sPHa;XWVfS{WZ?d z(>&uE{uX`fG7Fj=~t1^i`HM0>#vDao~LiSI-%AuYOBN~Zi|aV zzVM!BQdb80Y45~wW7{$3+OPYcQRTC5^Jx<*@@kE=;g|AxS8&19#iOetWVfEc4Nl=g z=5vqG#p=#sFY)+wK4$XHBySF~Iv$498E6uHqBuE+I@z>hO^$;U>pCooNlpsKX2=RY zb!6K|&hKxOr91o5;|9y|7Z|PBA*3tgy5BQ|@%=4ArDb)}rs>9?@C)PEHJj#0W{Ox_ zNfC8;SxPaJsdy#=`cXvwo<8gJ<*}@v!`yv=8@lTKIKC6j&u8rFdUR!K_t?juud=49 zb1bRgn0IQ(PKnb>)`ett;vPs>5D|#X ztw0xkC!J!Q#+!QEstXI(^7x-#xowG1OL13VR>{{ZV9>)-yu%IIb^dkj!(3mM+QP_d6PaDGc-k)8V;Qn>e*Und zR#05$cLlCcl!6^?V&qSt`tMxT@NW0HubE-nvA3TjfW(r_o)*ER2&H^#!E#Mkqt}b0nb~_Rc0ex1}Wq1r^wVp zDpfeGs-&&5*cUvn3!n0@`}u}2_s)iOxJt$p`dL^1{&oN}etFadt-7j2(s<17ZYMjKS#DpLsMw zm!l3(45Mr=qdaT(MmAL&Csk@Q=$lS1vrpPT*bY<>=hI*+cl>U}TkxQv`?5zvd@(p` z!SVhUiI<fH!CY1QB{9L+ zab+x)uoD73)Q118SbkjbOdzcaaHR(#XO37rC6a6JNUvh1(WP=Dp1}|QdXeB7w-QE( zA8crtf+kolmU;haCu<&mP4-pnJI#|Jv1!xx)ox(wB7h+!WdS>RGBW1Op!|#fpcXLY ze5D2s4eW%OUc@FQ(mn>`vVcvBOwTjz>pCzO9jL^Y>vl-2Uo(L`{$TELy#Ikx`E$A5 zL4%K(R!A77j>)HN`n7sQ`YW0%D)ud512F!0KnLowEV!!dYXFW{*y|@m1uy7==OOSD 
z;?Ra;j>;=k4RD)-&)*!Fl&&@Ssp*=x_<92)pNUYsXnHD?42Kgl_U+B9ZJZc;(b6bp zWj0kxEpRg?t&<1k6nUsj#?C!==E1@R59tPdn+A*jue`h$LsY_EmlDMp{d$D%`iIpw zrr*|FFmNy6`8wvD8r@vCV^|Z2pSad*$vN1?mz5i@AHz;l;GyO<<5&7di5Vg9HPfj& zsNM64Blkr&2b0Q7FO-KHecM%q+nr}HQ0cje%EFMnT=3`}53t7XIQ!9I)w-W-Bhbel z{dS>lYklI5bJnzL_S!7rV#FpzCC~IhxbquzW90i*`(Xv5ef3`gA=%M~*vj7#eeRBQ zVRO3qGl({dd{&-sCh$w^Wnf$yWabiLX2_t5XRJ&fOD=f;Iqn+%f?DB&1(R?UgRQzP zm-K0TK_;xxKA#4*dM)TJTCyA&zVC(DQRm}#N18g((E@fe3Dc4|lEipR+r~{?p!Vy1 zTMfvAwPzOZQt2KR*z?`kJaEKBpKqNRVQlLNVDVs7Cv1)m0w*?3S6PrM%vkSw(`uGR zlrYU=d*iv+#$Tx!*InQ!{^mC2T`R!eAKbhxVYU#@`D=0dVb3yg$Y5t=4C7*Ou|WIK zpjvigs@$>TGM7&$@)!CDa+~!&($Oj)N8~p?{tb452?c-bawlLYlYS!Ub zh7f&KfF9+Ik2dy?ZhC)e@Z?H^({{|u2vs<)NB7XXQjUfwcQp?)o55EHq!UB@w_}B3 zhNSlriA&6>9GQ4)x$xZoAqcW*^37D~psq&TcM>m-NFCl`{o&@K1^csK&dSUt*f^=dDX*I4 z=56(>Z0X$xMi#_!ww+!^7{N#GZ7~c)6VsTbkJGaZ9;}kGJlbXxon0_G#;~TK7pbuK9v)Jaa%Ih+%SVFOXwezkT&d=h+o9DdLxqUuiY2*`FXLZXI;SZ9qDS} zX)mGvR9q_K{Q9De$(7= zhHPUfi_Ym0Hs!{2+8K%yW;md~1){N2d_~Ha1NMGYJyY8kFSikYT$4L4s=Gz=+Uf%6 zM{hD*6zTrwZm6mj2BGYEK`e5z*Ial$oV_(&*ZB)AxrIXPb1a9vSG>2p9B8xM*;B-} zv)!anfA3X7T=M{hy!*x__p$4_ZTsAs2}^@tiy^LDaa6;)cDKcO6=4?vJW zr!n?PHqFh-!J^ZoT`P3N1jRW%`8+qrBu?$$awo;U8p9Tn6+HSvnfAVSz^HV* zUE$HbgJiSj>z=pGge(q+F3zKObj~68hh#7hNQFS;>YL-p1! z7{wZWyX8`uA1!Up+_2;64Ew;04n6I56hrc0h-gRY;Wt3zq<1|ea&$Xn2=&$T{yWym z>#!d`Bg1VcgmbIL{cc4xd%CEiA3d->nj&~izZ-vWB!rz2ti1CaGr2rceLiAWC4REp zS^J29mx`H`aeLar;>t5=vG+oY@L#{e;i4Zh3XHP&+%@sOk*|4-^(+r_m^XNwU}&Gig2rZaXimmagd?x)v0G^}8? zEM{zXtDuM1@0L5y(S@uZVjbg$=@?t%ZixA4*oSVXzP8w&J!N#U3ciWsI`rlE&LaQq z*_qHG(!0^_Xjs^HC0!=p><_1?l?mmly?3`4VWwm*rX>lViDd50}3uKuw{tYy< zDC+dy2!mLsj3t7bN`xpOqV*1nFv6CGY<)$KqHleHkulMHlkP<4EwFGJZx86?4b(w* za+?y$o60e&voD;EB)wbC!HX!MK0AEDCMp}e#^etBlU-Q6FZ(dc9lhD85-|m|5l_EA z;&ay}f9A%kNyEOSzPx5Yl286o!-#hDVsl!HdC6}3X}{vSF4pCeD=0Z^NM#b_Lw)Cj z2+P)v&exte)WMpEZJ7F|q;ALPr6xIP9pZe*VA{;tQ}2YW9-pZ-6UXD?9%AuEJ+XqF zs3v?Oo7=Io1|+$Bu5J#yW{T3OYm`=YOa;8es9dK6bTwwzg7q!o`~%sGRGNbHhd?%J zkP+VgQ29N&a#bdP5wDO2ap=|DuNx<=UufSCKaLOl8DV_Otuf;1Bdfrfq(-Zn1?N5f z=|xrig{E3YS_&NK0NLh*Qs?z*NaNUja$Io%10QXYI`)J6ri$THpV9N@)PdB$hbGi4| z(<}2e<=&}w~)XDsnZUa8b>th)jn`aq*kS3Jy2|5TiF`hBtFy?n_VF>{c? 
zL3@9H4HgCF*y)$p3Z7oiC^^#{Snf1l+nYfiC$0P>4X-UOFNzzXx@eBN2t}?}wF^>s zr@Fl1DVI7uTl*Yh*w~mT;#T=d=Z$BV?>XLhwsC)I5wn9Cm@d%f9lV<$vtyLP!?HXe zE&sfO&c1X|mO&v7+%o#cKq3J^VqqO4o3%f{mbb5u?lC{N-&zWMgh(7Np6p zmpK33>8H41{nEm-+tyIjgZuFkKN}!x{SnVQjJizqn4I5q)aLbYxZeNqNHvlmpCGAQw zX5y92=EpC`&aOLZ5guuwLOgMi)?znhQ?Wc9Hy&{+ zkVOgf<6C`5tt;9}Qf)Qbd{;KAV$uENCyOcQXvYs5Vig#p-Z$)|J3cIajlJsJNe94y zdLrdLPY1Di)wq9;vbD8*diTkdJtdVc;}=n%dUny75Zy-uY&MSGtW+=$jHbM&*4AFD zvfi=%bnNK`N-5gj*!U^;E!QgcvD9?mZmd#%uhv2<&E0InK^9At1WkHB3BO#a)GGoY zlh1tpwMo4)z~c!xm9`M z(c9p6VB3A)2#zPeAHgN@G@0~k!rjV-F%_xFwulN>T&?vHoziua1kD6FJqZQ&0K#NvW_zMOa*!r5E1r`9jXglOQxgxK(swmPo!s zKk8(>$YoA;0Lq%R0AWh7Vh&Y7EOA?$tw`jU1uDq`sit}R5KMN8u9w55`-E5)+Zy?MKz4myG@j7Z<1 zkLq~c_O`=`{GN@L8dWA9hcCW17x!yF@a7tYt_IYZFkEXi3c6v_wP0bsDgyS8NQXHJ z>hoP3^-vK`C<1k91Kzpzf_N`aoU36NVO5kD`eYYsa-*EeTS-?9xTR%q9ZVqEgN}Tl zt%Hvna7rt?)(JAlUWV;wjsd=oIfX=U6vrbabMten^F86r+ViY!`c=hT9?4hbIm_M2RF^Rk`6e&_Qs55FZH|d=~pa8WmK2g$6%{_s{v(+bv%QpW&wvNyRaYqH~q6d?d~;2^pKK- zW2lV+dj!3LeAby0J@SZ@GfiwrFF6n zjZJ)>gw5eeIe*|f33wtj5EJF(va7pW96MRxex?sfx}61_bfC|Rco{9W>8EMI*m&?D=}({4 z+nD7@L$5+`%V$l@oPF61vA6yZ5ryR;dLtRsHs8%L?YM4k-uF*o6Hfe6FI(eHci^r& z8ym7cjg5R+6%vQEfELzl&XoS-PeNWl+bWbFrW!d@|oSHfGNG3{nj)`mP=h)y}nt#*r7~Z_4^0`!vZMS@z*S< zUckOw?OcKY-*FALwHEWEZnlRpCT4A?gETFS`8}OXS)McFYs%;(01lKMPm!tGiC?&I zl|>iu{esg;%wBZa6wW$fj9Y<+bz^zM)6X?Q==j>y{g9RgaYQCSz1o_rWGkq;dY}tTjaV zY?yb#)}Nnpl)!>(W&yy+J4nE8N9i$dgYoW4^~nmU+T(f%rM0(ubs2Y<;*e;QTp z@np$l>Kc5b28`t%!nhV{sPb5$EDLF9T$I|k3oCo=)6R3fVYjxwPvD)Jmaj<0?bQlZ+MSF!2SK$`){E~~OIb^CHaR#L`8@Yx4 z&~VYc#S9-#ze5le=}KY!t^MU!t$96yIfA?PDHhlUr}H5QkfJB|xWWCTuXCFfrQP`f zt7{7zhBHhNA8#a@SFT7(nc-?fibvg|v+K;U`;bb#o<{F>V?EX+{+oSbAT3QlESbwl zni$fH(oIH_mcJHct8e@OwXazO)8%k}QUuUnRr6*kzF*K)ubAw~FdkQiU{(ArfY`E2 z1Gkw8we#BBznMYl9{?xo_S{%sO-N5%47d_KW$z!crjt@t%!0`d;Fv3d(2a-gLr;B> z>^O*Zet*pK`q6Q7zy%3%BJ3J-%j)Y~Y(-ucperz4v4*uhX~f(oGOf9reTlz8Tu*2j zCs(YXc>lKg{g8Sbi-Q&x5BYE=@Zbqk4tG^iE=`MzA0+(_f_n%V^x$6FSs&+HrC?e?w?W_}+Bf)74g}c$bBmSZ1+~OV0Lyeh zCk&cJ!L`JmZ(yBv3%1N&X{$)62PZ+($~8LsVe*7Z*!~{sughaCKo(Cyg%YzRHr=&! z9Za=OoUf3CJ02*ICkz(@q)@OIQeCC%4@@&NAO=02k#9awllIh#nceyN>Ol+0pQFIX z9lJ5%+SBR9fUmw0wfqHpdn@Va)Mp>)ZCwnLi zlX!bJM6Os;1jYgo6eGt$?t;;aHt&BFTXLf$O(HHwNeo2l8x*_mx69(^nmG)cri0yi zqgF?7GY<@yEw;VU_~$^f4}=+9c0Wh})JJUFpE;hVgY^Q0IU&IoMveXkD}ni_4vvCc zn|0Ex3fO_Zmpm*(Cc{q>G}wgiJ_%roDW{ZafMhW2N-kC?W0k;tTNnd;%tz!0c^`1! z?&sGM7Y1#sQqNNOw>ZyKcnjSvmUe448~C^eKHd_Z zxVxxfuM>ZCPJ>g9tpSm{r%Dtra{QGdo|)H6)&n2$GJKGGIOP$xQV1fQCZQdk1)f28 zn^XsR^zsyr4v$an55$fYxK@}cK3_Z=+*kfE=(vo(F2vL-P>b~eU%}%DZCZVDRJMlI zO$;C3@u}wR$e3cH&(U|zb*wn{lcb{3iUHo6Ni&@S+LFTtEL2cY6S6u_#` z_yyPl6f~HW9w^i;P&@@T5Go?(bxx?|>x-liA()nT2Y1TuTZMx=wME5^4qxQ0a0)%o z2uj9xjY?}`dg~+PWu-1A$@CoKlxD0rWr?m7>E|}D=oiqsGiJ#?HY+aM1>#yy&qC6Cq){*>Vu7qQ}kc0=vB**_#`BEjE|D5 z?*l*Dee}7PP}@SxE9uq5jj8C$jYpWKktzd?=!IoYa9^8zwvz?SMt$<c~p#_udLTxu_j|YBD_5rOZri{kcY^&YXBDaph}(JCFm~9jH-)J+uCh zG4=u~N1yfAs=#V{rbf|(7AN;kVteq5VY-PG5nE^bt&tV9FaA^+5yax-W)Cyi!nojr z(E{4*+VOEg=!<ieG^&yIgGEbAK{uIt zoF*8ena^jG-!E6uCJ?_RGzEUEO>(i!2hVK)+rxy6ld(_2Poc6Gzj(0uO}nz635%ff zKW}b66}>AK;8ue}(mG_n8E~s4q6+B2fE_g;Cj+d4E6#jvyX(VIQw(_yl3b94D%9db!fKk= zvld+0M@8OcOyi1H$cL0%m){|=Inb3sr$CEaRc9&^8bUsWVM6S&%&XQHem%@c;h10Kd6! 
z>>yHbw>>+!ftaP+$rkdb4%#Lnt3ykhf#=)q8D;fpfKzI}O zId)WaZjtR4YT6sRu%7c!!zu~e;FsZ9dG7qT&R)gt-htlNVa0|owB@>J)uV^D486}t zC2m=~sup?xdASm!&yF964hlO0cQ*02_^CAtJ%3XIK#=_0&^+Mtu-qTrcGzy0D1!l{ z1xx?}-<9IWv*MnlmgGmQn1#T{5BGXN9kcCM%?aa1!-e&)7x%{55}h~(B@0oWjSrTpD31zyjCX?%Ay78+F~`%#h!wKL1`hNjfKEX%P(e>Bb(aR``> zuFSlfb;OMry6FSXH)FI46U%yui29|V@g}-8(dbHGu;aNPFJSl8@EE)6Zi1aC?EN1X zI~2n&-)+qffNBW9ymVoK`RDyLUme8dqa_9Jx%|+zpa=xQF}?5}S8u=;Zp75Mi`hq3 zO&dkP$HqEeDei=!@OrI`od$#1sowLefk#h8kK(!@^ikc5gOl;g?pAsyoemvb{&Xkx z81J#$IsUw-9lF?`^v{t;J!79~G3Y%drHn)PXAq}trX(A5QEc*H&`Ruhek<0Mm4+%{ zbX*EO)Jni zJjo!ehDD-7Qchn5B}SS01sE{(jKw-)1au|j3D(nj%dLH)+IKb#2yJ0_wB{{t)dlrVQCF?q>W%9&4*HKhhEL_Po^3&ahAJDKSKKJ-H7ku?F%Q&NLZ=X)(U+ zI6AYvSH$c90t7vEqv(6!{sL}caXD~Jox8*rs~q;@qZRGwtBB{FGqd?28;B=%tc-Y_ zgG+zKz@&ir+Fh@N^hK+$D#CgC#xrrIfh

A8eINWL6ShPOVNr8!kB>2=nj-AD~ zABp@``#xt`G$mCK{L&3PY050<$~gZ2+I#YUsMqhGF$~IDUD>jw#VsUDi?vcnC@K}9 z+>-3+TE;Si7PkdSxDh^OX{Us0FyksxG2z-XreujQvJA$|e9wEh_iyar)#ejZiuv$ErKZt&*H0WZnX)IG) zK`dCCSeZ?@du&-wJ^u0uSD{vIEAdKVr0L+3>lZ#d1vn7fusjts6Ecb|jdyNt=J@PCxB$8!@o#omziS@#tE|>>dbR;nmc9?y9yVS$~zgG~6-p1lFR# zZSsR(+I?yM&2-O6+g-Hz1*+@cxWmj%SE-G`%^QV>=2xzdT*F0PxmaM-P0%9Vxf%r_KzqOx2zv-by4G52gv zA!Yt7;bU1cK$a$cVN-p*&}F679+2|LnRTU5Z=>o>Kc2ETRtyX1k=CPp57C;zTZwB19tR?vmi=;Q-M&U8KP_ti~^XwLqtWI zPq>Aj@HbzwykEFQvdqtexPf_>UVPijx!L&*T>emPsF8qbP*_4Y*vfH2l3ap0Ut;rRWqjo}reKQSMzZ7`r#TepQjm25P_BBDSpx`LV z-?LU&edHrqzS&pVQsL5g9uJvj(nD4-8AOt4TGk@g4Y5KHX_HDDhhc7svxV3RshM5N zzx5}Ry{}wV@UX?Jv>3;(e!f1TiNaPUwhFdIW<%b_AjDn|Lp@e}BfS~-UVg`oOvz1l z&F3i8Yvf|(o>s+&&AsDMj)FW`#PC{_tYzujS}Y4}&_Z2S{fy=nBeBB;vI-J6!YRu~$=O#E6|t0Tw>3 z1V2iJsOq3!Tt0i?M$EIba_P{oUjEp$4T+fM4bSF=7X6VZ5p|7%|xRtNFAt z1>CV^Ng4RHe;kMq`IZx|TzmEy?Ox{h$W-6UBiDfIRRxf_rh%csz)Md~w-hhOKNruD z#238U;juz?o%)+KXG%Ts*)3feURlj@^pR_|3z%-}dx{ZG3?qp6nv(69EE5}+9T(lr zZM%lFmBMm2GWXT8-;X2^GmEiy2O!wTA7gS!buemAp*$`wOtt?pSxsx3C&5yDhi1)s z2IrJ7a|6+3{C<{7HN^?j!vv58`eE{fhOY=7d?3rLNb^3|(m;~B8@F{}w`LvPL2GGy zL^-2#BJxrwh40=swg?+E3c`-YI6I5&#^Z9zg!$^iHV@=+;jSLDQCGcji9fIGk#>?0 ztdQfr=LRoMda~h>7ZB9|z@l+XF)84tqw896ntozXA9WE|m&f0E(37rb}bY1=|jt7?!7Le>ZL45uhZ=ZEB2B z7p`*>`symhwA5^%7Sun-F-*-|_@oxuO9F5{ZMh7Uj-tkJe1yr{l2DA}y?0{bT}+6M zxOK)Pq5bPjmN#nW^ek{lL)}x1q@;+PA@8H)(<0y2#sJ~!NXGvJk2z-lj(_(hnOco}AvSEH0=bMCkFe2S5y9(Rxk!X9(L*KowZRhnW1{8EEzfp2)4*N?t2n{<`ax0U7cYvp@3FH>n*xEaDOgGbasG#HZI zA7)AZs2%TFHd&C;-xw43Mm{YlCeb9c!cKu-7QiOOk_0Ghk2N>`-yN-V@VYurm9sZX z#LaWqbOYSqjWGxEz0VQ}^G^yW{02YsqZTD%yahFC!{}+0X_zE<1=`uv21$Ls12I*N zQwMVtMIgTa2;Lx$x7Io=NGRk|d=|N?3zlePx>5HYwMdcq+O*O_U!-;N{S0-anJ>OL zv$Vei`H=_){ZMavKM9)sBqx8Yz}=OoK4K@g-(bo-FO!%3LA(^w5CzarpVUK772ko! z*SOu6+=QpuUbD#rZ)hHg?8hruBb;a?HNR9)vuQE zLj1eq%gKv4Y@qc+J$~%A$xjg}^1Y73z>z=u!X0S_b{dZRV*g#(Lm$svf-P)nD?WJ@ zmsE_tHZ6tVV#6A6*4~i6cdd8kpu6Bu5P5YwWJvDCP)JX9JQm%WPoDPTHU$9!{=A8k zD_$7Ilz{F~t^&&Q5>pSIU-}3QPKeC-OFypeO=umDn4# z^48S5hyENJ>F=sUAhL~w^1oZ1l-MjGNM(epWFTD2eD-V=25TIFuQ6=QXHS;1ZMjso zo8vNZ^$el23!)wBn+x>OQ#lMn#e)r0v#cjvzO6&RBX*lF*+gg47P0w}@rCuLzn;J| zuL!n(wUx!cHOF_}L(ZRffJ_4DCAURvpO)U5g^^}|?E-YhVd%V6?xd=jH{&zJzei+5 z%(BONOw`rP{h?u7dg`#C<`Q198ZW>FwqSIbwIh@_LDyhVT8__oO+oJ;`(MP=_wH?O zG7+S;!q}~`6qp;2%`IZbaeyMYiII0yX9!4eb&@SLZT8|j7Jw)+eng#c>wRp1FrGp|Md zHo8^41tp&%5=%$jYsaI0?W@!lPtW&muuo5|ME@K(1=FKfm_YYIQ{z#Gl7^=9bhE@v z-2TltXrCrQ$MQVDddG^o9YChHwNlC^Nck%wGC;Mw8K!$xR!yTIqEav@a8(HCtxHrI zDW$2ScZbsmOGEFDyoCvMjJ6=61U6w-KDSmr?EuNEn;Q_+pk2b>z6x3bnw#?-9OtaC%wl*< zKIRj^`GEo1Of%1WDa>^i=V_PzrhdU@Cw6I9-?tEq3rn|?Agkc5pbMQBxRY=3-g{3> zO=O=AUY#Z^Q62c*b3vo#hJ$C_Fw7D%JQe3x9`D&re!P}!Jt``*(+KByd{=8?*L+fF(;{&n!<1S{Ev?CqybP z-B||rQBB8;b<2Y0k2G}Uk*R1R7VV59`JhR~`3)g^$@XFxE#C|qTRdL!#R3U(mSD|- z7^)Gsn9|z=1z5i=SNq7tom04AiV66NoNR8-WVke9>44H|Dw;5O+}+_#$b2G{A8*H7 zSbj{WeO6MdG5Ix6{?e&~%q@37>ls`*!oCNW_w{HTN?_7IGZ}(K)@e*^HCe@G->IVB zy{aZTv-{@@viICkxxKKA^PV_<=D6IfFCK-(y6b~(2**|FCVtkbg*B!WM|wMYAq=X( zy;WAu6z>zCIUENR{Pg&lpFGVUsiNlHSthfy0eaQF z+jK407GfNEMOD;zqq*M=C?c?k5}xT2i{aaACDQQk7+IV1q|p~~Um1vg9C*OSP!t&D zd#~qbhwg_LO3ynHQYqMsD%Ze4z9zmFTR$<{y?H1>n*aOxgC6#2=aZXWq}YdzJ%SxW zIOs0#bK08rHeE9+2HJ|*1Oa4EMEF1E&Fx*5C4qR~b_&~erlc{$S&7?m&fE+{a#k&O zfW#a4-xDY5nj^adA}Y$`j_={^pWrLL&bgiMZIk!6Bd$IlUYpVMW6__QBbw3tb`cMs zN-Y>|Z0f?XV)4yNbuFG2NG$XGvtS=a(2-Y4j*~J*Y6r{^Edsj=ZF8Jbqu_jokQfKd2Z$SL|B2KsWo;(GCL5P)ij%7E&9_ zilP~bQFH!;dGm)*GaCUMzMWJ?0+S#4M*h_87rGXVSB?7+?=ZARJx3YN{t~Q$n8D>K zxWH5a&2+qtd4m7#K%m+Ay%yh>%AC3#tyyCoXMxcDBR-SR@MpFhs6oO3U=aw+EUW;J z9B7{zY@4I{muB+{%{}#EGQ4Pn-0ii#+a5ZtdL3k9LzNRWED{iZ8-cI5b(u+)s*ljt 
zfi*R1dTVXITU|Qg>R&AcK3&Rv^RD0r>GAuArBR>;D8daZOx%ta4tPdbI;<1}Gil4I z0sfW2lRCodG8rMk&8Aise`L>@aFSgQrDO4 z<8Z9O=i1O#7XdmuUt3Tt$Y!6Y1@_|BS~|u%_W|n;iW*mWJxP-u1A#cuR|8|2m;yNT zns&a}Q-YgApg~FzL}bNJ09|Ha5x}GmWE8fwr*EN#2OBy^^kX@&*ruOTg>(Ci*TDI=RgKPd(FDK_>tz`V zsC>j8M-lG1tj+#ZCwk60SEkmPV(4i#~#8KVD8EcOHete2I7#g?q7r zW-sosP8=~OUX<`O!hLlqaOzP-6kK4x#V-{jQ$PONG4{b8cf_C_YH#&%>t%6hW}^|S zjUKce7-8})aB)Z6p~=!Vm*E+D`<63}Z5O5qXCihNRSToQjkjO5C9r!<6;O_D2f8kW#PRD<0 zOPVIoEQL_Jv`_h2RzSpamVy-eq?)0KoW&0}b{(c0ahJ<(=T+MtskV;51FOgrK58Pe zWFy%J~o%6ZILj=u8F*c*8 zE3l2Zbs5Cp!M7_?*53h>K%ylV$vr}d)I2&@cAfZO3iw+GvP2igGLEqVG6*=|iJ1v##*}K3+tg8{@pSO^JlEl|%#~vw&X_Yc z;811SWl+!w7&SPD@K#ruM_w~hqMgqD1X-4qx4O3TH8m1|dC>itX7utTTm#cYqj!64 z-fTOehf}TAIa6}4#Lxe&&vI0G(TJs6!n-yNNOp-f1oLhI6WiJw&Wg^gFuK*Eh$F57 z5I-6FbaVagnSb05jR{`)AXh_-b}K?o7sY=%FKK^q1oA}uUK#F!qiem7>BIuUu6^?C5pt&Kkf){| zH4DA)z1XIUTyK<8NTap8UYlbHs3(!DDJ;xkX2W!cew;jF_gH4_AC1jaA*5qP8P;`V z49xfvz)27YX@B3xGW^zujnih7COcn~6dzXIYIG#e6B|6ALB;~P_+Cuq$SC~`%o~=r z3v$L{dl^%{tN@<^ao(5rgI`}=d;YP|k9Z8x6rS1f9xDj!>#?5#2a@_$o}8)i-gwAx zr6nvq2+gL|G=5jqMznQ}v>rt?^3I6?q{Cbl0@~=3Yt-G4J@GfMxEh)M&rWD+>W-uT ziZhfm6Sqal_q^kxZ)j?@aQB6ggb-P~y+k;tmWcuU=U>RQDU}j$-SQ*3%?v5;=z2y)~i;^A-huroh0a{EqyCw?#IK<58(c)MM3d z5PZxdMwpJ45YdJwcdd<4zpzHhLbqBBP~4c~ZZ)Ts%fUkz8ehz{uFQt`!NtqIr>h7n7~sKq^wACjv&20Cq^W`G@T&~(%2acRV>(P8+#*TEK0nX)#25K9^YdSTuy z&s>BCnOr)4zw9*f#N%ktdqBSTXQfBzyw+;KlEcsBe+MNJ=1R>i#BSyq9K0!vn4C{E zYQ5aD8^5+>(;?$?#sJG95v_p`04?J2l&CdiPQ-wKbbsgfDcdqPI-6+<6xi^l+Vr0- zj=UO?PI|M={T2aPklI&~IB%^1<6DgdZroaIp6K?sB(W1V+$l!$dYK&k)>vLho9YDU z!VMn_*Zi{>Z}6^rDF56<;eNv<%9l>O(nhX{5mQ_ZjfQZ{LGn@}^k1aXJs`DyWnhLR zjf0~ZzU&@POiq40Iz2&xyUN+a5c9degjf`muHCfYHtmTiHA-g_$iZ;byK@z|M`KB{ zX1##9V+a4;>1@$Wp%uo`nQU?Lgh%tdVGCT)4CxxA#saz+evWxG1v?4%_PQ|q^e$mJ z^#92|NRzZ@MzKKY3JMwTDl8tucZgk|U{(L0`21ZaioZF@b z4IIkK)0UL6d9=<*Rc&1g%KPOKnx*C+*4-PE z*ox$VGoiD?OzLRAgDy-rL%^&@={HB$BU+gthaB^fu zM?Hi&aC<6kLw8&IJZlIjgEb=Xv0f_x}8kkWh52*{wKJZFkj&|sNwjFcsZ2Xg`07}8%<&f^B5?>q!63nw04dr`j zY~Pb)h6~)?+0ARh@3PDtB~|$V7MXfjO@nmJQAW-SM>^PArA_@kNLB^yV8= z<5A^VK>RAbd0Iday&E+V^GbWpnR>DP_O3CVc~SZ z>Hg(8Kl1yt?@S0EWa*8HaG@WFXy_@?pCKq zxYBQKtIbEg>Yg)ZIO>AvA+y8Z3C`vO4)s{NcpeverrFR46%2iDw4Q`9pAU{<-2kC4 zy%1y1H%DC|Al)>*1kU*b(Vn_iAG!6q5vhvyK9cicQ7}-sTHs6%k^?+lv>JH4{Bf1y zlN-6_U2RF$Hs76{Y%b6}Vb#?~7byaV_Iu*z_|yvH&b=;Ih1L^2Ci&EtogCWrH!{K- z4ksH9TwQYC@4F29eO=wp`y=ybg(hlUo_8DYNkx@^^vsa%)eT$Ca@}#{?4V~t9%I!n z-M6FBP`?RU-B0VMYDMmChcACL&&ocw*}RPOS{lt94cLID&Eh$4@yv}>DEbEETl=N2udn|UQk|2O=UVZ@gdvwov&Tol@c(%ms2TnuT9bT05 z<=&Qen=8yoOVJN{nFs>rWxw`nn{6)XD@c?Ia9(qS5L;3fkX<4s@`28AZY4~WbeVUC z^XGZ9XL$0%Bz7Y2czLvtMQxcdsy0XNQ;ny8FDM(^A|DwQrEs}=yu0m)TTS3=zi9qB z!>Y;g+oud(8oq-jpM5=^v%jY{dPaYLt5>sCMcT76k~BNOJtHkrn1()0vyjJ(D3kJ7 z@_m|8=N-|Z=7%xW6#6#FngP49wQW1AN}OakuFOABTSU9p3 zXDVLzB&=A_t*_~Byo4IkbuOM7!k^b;&nx&>TnfLAN0lGa@Z>M5#3F_HDh5Nl2t znNX-wh(JOQH`a-w(!nr}dYS|uq(zZ-wiI>%;E>OvIN&y6o^o19cZm>95F_aVtzI6B z26%9uW(ff;0DEwc9LK?);ak)zvBh_@%bbt|+?%M#y;g5w;!rEVGNfMiv^+v4x2r&Y zq3DL8LrA4wFthn^7X~EuE2Her;?x7<+8dDIb(X7&%Hp?3m5?QXBp8|1w(IZ$ILud3 z+ZMmgMrkdAsKh%fjgbXbnpWVv_+y0GFAMV&*@gbIj34FIKl@{!K$`@u4b2|^v%jcU zM;77Z0X2+Oz&Ef>(iUAj+zQAy8$J-if&B)5J_`E<$*5tU7G&)6zyJ6T vB|ruL5yOA{0c7x>WWYq?e=_m^jZCDG(!BL-G|%w_W#Klv?Jb|~^oadGzd~Vi literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/mt.png b/tribler-mod/Tribler/Main/vwxGUI/images/mt.png new file mode 100644 index 0000000000000000000000000000000000000000..7c36e5bdc701ffd4b12bb7b9c65961c35d456a82 GIT binary patch literal 934 zcmV;X16lluP)X0{{R3&}<}x00001b5ch_0Itp) z=>Px&08mU+MWv;sU0q#)fq`vpZ9SU(L_|axW&8^Z3#GmN9cui&%jp3D0byZb8C&}` 
[GIT binary patch data — this portion of the patch adds new PNG icon files under tribler-mod/Tribler/Main/vwxGUI/images/. The base85-encoded image payloads cannot be rendered as text and are omitted here; the span opens and closes in the middle of such payloads belonging to the adjacent entries. New files whose diff headers are legible in this span (new file mode 100644 unless noted; "literal" is the size of the encoded payload):

  mtEnabled_clicked.png            literal 949
  mt_clicked.png                   literal 934
  mute_clicked.png                 literal 232
  my_filesEnabled.png              literal 1372
  my_filesEnabled_clicked.png      literal 1372
  my_files_clicked.png             literal 1372
  my_files_win.png                 literal 1120
  my_files_winBlank.png            literal 1120
  my_files_winEnabled.png          literal 1208
  my_files_winEnabled_clicked.png  literal 1208
  nextpage2.png                    literal 1304
  nextpageEnabled.png              literal 593   (mode 100755)
  nextpageEnabled_clicked.png      literal 627
  nextpage_clicked.png             literal 1748
  play_button.png                  literal 1700
  play_button_clicked.png          literal 1697
  playbigEnabled.png               literal 2034
  playbigEnabled_clicked.png       literal 949
  playbig_clicked.png              literal 928
  playsmallEnabled.png             literal 1560
  playsmallEnabled_clicked.png     literal 1560
  popularity1.png                  literal 1378
  popularity4.png                  literal 1392
  popularity6.png                  literal 1393
  popularity9.png                  literal 1377
  prevpage.png                     literal 1744  (mode 100755)
  prevpageEnabled_clicked.png      literal 621
  prevpage_clicked.png             literal 1744
  real_clicked.png                 literal 1708
  remove.png                       literal 782
  search_clicked.png               literal 722
  select_files.png                 literal 2824
  select_files_clicked.png         literal 2823
  settings.png                     literal 257
  settingsEnabled.png              literal 259
  settingsEnabled_clicked.png      literal 259
  settings_clicked.png             literal 259
  settings_win.png                 literal 1245
  sixhundreddDown.png              literal 566
  sr_average.png                   literal 2904
  sr_poor.png                      literal 2608

Several further image entries in this span have diff headers that are no longer legible and are therefore not listed. The sr_poor.png payload continues past the end of this span.]
zSbc02RTlr<*J|mSmSOb?uNGJKQU5431z-4x#nQy(>j43RMS*D7R{ThiN((8)c0qYs zKZ>bUfri}zhO#RWmW0Jbmjok3Ow9_^$hu;X(n2Y1shxQPLh1eOA2V~`%xep-%O*SN z^v!+e-gD3Icg{WM%o7(bTwo*;!N(tejIORO3>`WYxw*Mmw{9H*fdB%507j144IM~8~A*X!7N-~TL+nujazyt6I{0Z2e?FGVIlYK-HU>P0!*4T z3CE5d!`7`^F@F4b95`?QX=!N~J$f`g`|Pt{Y~!w7yO5HSg1K|&-V)4dX=(8L{U|Fd zgRbj{L?TE_OG9I0BmNiH&*dO~-hYdmwl8nRmHg1Jz+rde`qS$n+FOvgy$UiT8&~U( z;{WQJJb5xTP2;!UeoNCdX__W$Yin6mRYlV@sc9O=j2Xj;6DKkl406Yg9W+gojg5_* zK7Bezj2J<`-_Hjhe2@nZ9#m^Wp%9;Y?m0gD?6bV{&O13_!UQGD%*luwk)w;1`$8y^A=^Q?MIE#vk*xA|HPiH6;QhU#tGl!a{QPVUoUAlBo z-0|~F(^R;1U1uZ`VP<9~@4D+QW@ctG5{XdP^&4_xnkG-4Jjte}CJ+Apcl0K5lMI1< z^E0e*)TEq!FF7%hrlIrNdli(k(#eU5l(W)#?Y#<`hCaB=wwobP&d*@)9}3u8`NU0a z_&K+4;lhOgfGJa^ARG>3<;s;vPEN*C%a(z(@@!jM8x}8KjA%59%F0TRL`6jf8XFt2 zdi83&{r1~9dGaJSY}laIoH=s_ZEbDH$;m-qUmv8DN_OGG1r!t%AR3Kg-MV!kiMqNv z?A*B%3l=QEm@%1XZf>@+1PKzLBqXVHT)TD+-+%u-nogX+#fuj)bLLDC#Jlgli$jMF zq5eZEdZRIs2(+No=g%2mlzFJ`yjz z^imw|8il*snK#m!M^fS5ym>RKtE-Wfm4$8F>XDU|h3e{R)Ya92lml}jEv%0}{y1`T zb8Q~tZ>ji^BxrMNt(Bg+9()&DAb#wG4EzR(f3Cr8^%2PX$Kj`{-H`W>!)^5uB>ts# zaM=g7%5HJ)G(^Wa^gS?jU>hZGVtbsOx?%9di4)wiWebaoi&b`*H*X#dL#L)`95-$p z4a4C0@#Cp!8V$o>S63Gs8X8z$Ue395=c){27zQ;>V@5^>dwP1**pdydFE1~Tnx^sm z`Sa9uoxxy`nx;`V3>wa@8%m%1&dA8%kt0WF7zV@Pa6fr*ak1AfU1w`+D>Y4HZf>r} zM%^$J2iBgt2U}NX7aMjCgxfpMt#j(ssnj%$&CSg;41;IR{6J09n3tEQ=2@KRF-~+B z?i<~@KB!E0IMWR>1kWvVQ+fH|UiK|{fDD1r)*oo-2BR%MkRkB;(h&QWx@3k|W*C-E zIc+$No*uPNys!E_8}asf$BCOaZ9;o{JIc$;k(88#-s{&fdD0|&{`u!{4pu@!0w@We z&!;j+X=y3;?c0Zvk`fdb7pumd(mFY*si_b`448cq5)wd4rB9OR?d`QroV~`8K@t>k zjTL}kFo;8k4&n0U%gD*eK~Yf=zWnk_Bqb$b?AWn*=bd*{50;ve>ahc^XSa>6#fgh!5cZ$9kF>wAKAZtPa)+{&E4G@z}m4KruXK=auabar-P!h{K4BH?{Lb?OuV;J*9r!>6Bq+HdceBL*pl z9ug&Ta&qv^H{aluS6;!eVZ+eT(GlBMy4j7blO&Rplc8xEPMnwlE4wVek5JpAy(kdjDANx`{u=dgXpcC@s#xPr_& zV1{Af?AeyVaof%^3c!GiP(R^DH`bK8N0znejbLS!9U&k!dKp=qCt5)I4l`9xBWC+^Z+tJ#?6 z7`u1x#us0Fp?=ySB?v-FLV}Qs*Iy^AeXG@zmy0Wmtlay8v5yX{6&*N&r8u z#1$yDRtQ&3qivCh?M3U@+VccP{dN?B!5|L){h%jv#PnMDoYO91wJ}a#|8;;>`h^e! 
z)K{0~!g~;f;?^6KLOAx=+Y12&H{Ny^*Zpj;6~rtM9yhp>t80?it4<7HAZ+m%y^kJF z-2hcQ#qzpUvYpZ(KuA~k0E8eRmE|&43Y3p5u-=L)VSCi-J?wr#IEy9w{fJ5sq0j^P z=%bIY`-9!^`FzOC%*2apUyPk9;pnrS=&7IZ~ebVG7wVRUJ4ZXi@?ZDjy3IWI9cFEBVIDLaS&00E&% zL_t(Ijir=7PuoBk#((ErkV1okXvILsilqw+6HDBUuh6k`KSZt{q7oZY)wQY!RAoUp z3`kHIsHj>A(RL{+iU^5{<3F);9h`u3i75?_WZgaQ^S-~cEZahB4FJb`HeU}Fm-RB^ zt5%OtvoDncAhg!7v#$8ESK-(9dvFu^Sp2wXy?VN`p@cqgY<8;K&C_p4OHj;S%Tq)4-F0Rgs+iwDDYu0FlwXx|^&omWFF) zy(g=`34Gfk@W6M6uQbH0JzuyvXwK0^|%9l-8r$eX>RhW`Pi@BloQdW2)O9zIs#L;*qab=^yoMaW5xJa~7B zvP!Q%)~===sQ~LgpV0`luh>6tvDQSrZBP^aU+#x_QJ#^gSYwaNKJVbXX`rf2Y5;!$ XjRMewppR*s00000NkvXXu0mjfvIPAa literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/tenUp_clicked.png b/tribler-mod/Tribler/Main/vwxGUI/images/tenUp_clicked.png new file mode 100644 index 0000000000000000000000000000000000000000..719841b80a8285d21f74ee54d14d6d7724394d10 GIT binary patch literal 376 zcmeAS@N?(olHy`uVBq!ia0vp^;y^6G!3HG%>OYYHQjEnx?oJHr&dIz4a@dl*-CY>| zgW!U_%O?XxI14-?iy0WWg+Q3`(%rg0Ktc8rPhVH|n=B%{44jLkR_y=^{qS^g46!)9 zb&9VSv!TfGdf}p(maQ*&OP(HEJ6rGVvL+)*jU8G|PItdCv|jSvK6j_<`J|wE-F}8= zYcfio@%MkIzrSaXil89Fj|#H})^$Iuy$&qPU(KW-C|EH+N@216lb1=!!j*F+65Rhw zWbQdF!8Y@l<>jop7@70a3uDD!zc%PEp=S-&y9NI#7isX^{#)h&clE)ZH*9N; QfPx#24YJ`L;wH)0002_L%V+f000SaNLh0L01FZT01FZU(%pXi00007bV*G`2iXJz z4HGV_F)?QV00EatL_t(I%cYb*OB_)Y#eZ*{O)zdT7_bSXvq@tk2x-*H+A^heAeavj zCVm1Xje><>pJr9Ca}_kKXkmzGW0JDA?i@JW^A7hN9=!Li z5)lCSR-5PS*rwUA=pRlaVqxU%$0YzIBCxkn<+L`*<#&q0$Kc1Go84C{n^Q`Ts`Kns z7M+W8OhZvFnSBRGiJ%nx3nMBQSMQZ9e`xaci}k-1Q!Wh@K6wHd!2XrvnACq-crImt z)dSr%_gd?<{JxQ`1K%-caMT_L0bZ< literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/threehundredDown_clicked.png b/tribler-mod/Tribler/Main/vwxGUI/images/threehundredDown_clicked.png new file mode 100644 index 0000000000000000000000000000000000000000..ab5c7022f337bac12a20f3f95306cf992cc4567a GIT binary patch literal 390 zcmeAS@N?(olHy`uVBq!ia0vp^;y^6G!3HG%>OYYHQjEnx?oJHr&dIz4a@dl*-CY>| zgW!U_%O?XxI14-?iy0WWg+Q3`(%rg0Ktc8rPhVH|n=B%{48rjh!H0mRuz9*ThFF~5 zI>ph8*-)fye~YEVdc)-#ikr{z-q}%j`BvdVZpqfB*+Eo$sk) z!Zw6JR4<%a}{a*0PzkHRu zA|=qLf%WfWrGv93Jzn5raP0h#s;A!HpSwKI<}tB$c5b*i^ZpLweU8^_x}~x+Dym;8 zU#aR}x9ccN_Tl4a;-b29_pnWR@Z^5S&dM8qtaB=6UN4Ljyz*}~W3R*E%BRYD+ug(O zE}Hc6#B_PB{Tohnp0n1N*=NW9WwL#(oAqX^Q~YatSD(1wnAqQI{<3Ozrul9j3G35O i98SwFf1S(rfjL@m;hI@bX9)rWiNVv=&t;ucLK6VEFr)bZ literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/thumb.png b/tribler-mod/Tribler/Main/vwxGUI/images/thumb.png new file mode 100644 index 0000000000000000000000000000000000000000..9709f05998564292aa7d2cc3ffaab2b2a0456d53 GIT binary patch literal 1241 zcmV;~1Sb25P)4GvO#JOC3{~i3r!_qGG!hW-mlhq~wL70@>cyk&Q;efH0(P5HsSC zXPTQ1^h`>a`4WJqo0&x02%C^)AFR?!lR%hIo^#+!(lrX44#Ke_EFvHTZ7u1qHDjZD z3^0wtm!*FJ@>*{mqaXWFI;o||TTOEoJIAdUj4^z?}GU1gc;e6^z8yiX>0Fk#X zy{a+kNjfFWa}to*&7D5s&}4Q=Pl7ys!joh(hq?VD{B36D&5;TxLTv1gXn5k!th$c} z^;9ygrh?K4!chrNPRswDFe!3PVWly?E=t6Q&QFNkJel~w(uNqHP5rq8U)!MTJjO+4q#x~c=2i^MwKF_A{v(mQ;B{Fr*lDYN+Uj= zG2QAMn1=mig|q(Q(@3P=*+7^Jo&RoLAtUJs=8qmv;UtEq3Xde*hMy8nNv3Um zhHOl#x-}4%0BHpoRl>Xgv%s;gW&l-2hca$cxDCIqFoD_>4wA?)DPb$qU-ML9aMc#W z$1qWQl&efeD%^%IVMqdHvBVaLk|tto97btRp`_W_p3>06z~!`zRrHr2?cuP&nJJuS z3`MD@a~fgs*$+E8IXOQ+zrMb%a^-t=c6NDr3B&vQ`=_U;UwSy7Z=dGySwYA_s$5r@#KbdL&e!4W0%5!{&SPn)z#I_&5bqr`1trXJ|kyU^I;1g zj8g-K$N?{1yskjxzQ}qJr%GoB%40-p`%*v_0*eos699 z9YcKkot{t*+tOB7c^W*woliDm)4i#&Nv}c$UK0pT-e)J>LfSZdpJFU@(4YoC1;JUG zqGXmQ3o=M^&#WoUB*K!+K&5Z;y>qBtra?yj@4pMwfTT1@7$Ay~3d9fKQ;lkM+O&P~ zR|E@y8i23<+(`fd{Mi6t>aN+LZw6&*q5$X(YCctfG8#bHA$Hw>LEkJaEO;+>TuRns 
zu_q@dbAzRa<#};RS(?94p_QVKu4UwEs+(5?N=Zr*3eWbI3r)aR=Ldo#q{&2Rb?Ksn z9+G$63@sue;v2EOw^ymN;R`4P*+kC7#Qg8R=MauJggri zIFi7HAY^YjIk0RJZDO`4?hnp|I!{N5IL_ z_`XN{sJDD~?fis1(|jE}Y=mvwsIt<0@#Dg+ks&j#vQnHKhgIn<7D%@J)CyE65*f^C z70*6wlnrec7tFpbELZ)X_Le-1j+E95FKW=wzwqvetBW}<9hYuX>G!uTEn2_V5BLuw zHvCP7Mn+PoYxoE)F+K6W$cA;|o;n!TM8#44@veWTfGl_aNHUArM-)3!5+?gANF|m! zRkLO1E}6Ih`6bIigw?)O;SZ3l%ZrOARa{{QnsI&~+V*yKew}t2ixFfY$duI7)ptJF z>6$S^0|O(v&(4aG7aLzDsC8;rk}W&Qa(;iOEVz!Puqs#%@v(=;_eHnM82*^~PCczY zOAj%lcOH1d8wT_a5<~gaP*C6ZE#2UWIXj5F$!W8a=o+3BV>{qenVvb!5fV~Yym(Foq37q2tv&j zTGAEnFyprBHTVVU0bDvSxo;R>5=vfrjxd^tHY;pV_39=S;?kg=$~0xPR_US)X+Ro!q-~usu<*7xTfKNJ!|Ifg zbo7{f&!73F*XyC9^-&HEj&+P3K0}lc^O>XF#=8Pm&Vw7)H&gQ!c*f~h?8LXJ-j#RP zM-{dKDfw0U;_~-HFJ`YEv_(&Q*qKJsr*G`1DAMW0#RZ8(tS?zi?l@igL~Whx{=vf1 z(p^#`px##ryX+hsgSs({xK&tLSy?CM!HNsjTu5DsQ)%`vk8DfE2e$G6t=#I8aWyvW z71Ekpf)Z={iJn&lYuT8{Zb+X0;FF*|mt<0EvAm*U)9$RBW8Au~2+dpye>Q(o_dCt?~^O@*vj4>3X3nft#b$PaVohlhtFJKA?J4njQD4>ZHOQ0Hs? z88$U$`w;|i#M#o4^(MT%hMHwMg*r$wZ>LAXXP;itj)#Zm>!mSDmVlO;!O6dQA@`h3 z_Wkl+^nFnj|6ThR7us4?1)@H5njeFvNDa=Tl2TF=-mU`$ogE!1wxc`W=pjWdnMBz7 z@cT#C08x*D6k<}i_MqdM(Q&1)9f+SAd5P1gnR%W6`LtayNMm+XIh4g+VI zfAt7go)dVUHs3ll@5*edt*NQ0t}e~AN&e;IAmr#$Ew+ePA(YMi>hRErWv!spt54X& z)ANM2=itEiVtt~_P~36ZXx2Lt#>5#&Jp6OEXWJ2;=wGEgp1T07Fnxv#&Ul*`;%BpK>rf42U!#M@Fqv{0DVsK* zS4OJ|zbg7xuFM@`$PJbGZ%a?hNb+NH9=jS~Cw*oGYZ?@W-L@qxH1zl3ufgb5lXrxp zBmZ8Ti|h~O`|IlxzvqsbhPSUq(QFqp@`**`Uu3MN=I8CStTE%%{#Myo0AY&x|_mR&{+#y+)a=qUP>GyEs3N za#r&kf(#<3uS)S@;Y9r2#r84%crS^SAWGzVst=Fn3LEJz$qn81$j0b9)^!AGEf0pOB8yG?~C{6v|8DCBJ#^t;6CF}P= zIX~Oi+QlIB@2M+PzgCx^5<^Eee3{bH9Et96jdnSy_Q;szM%C1wdHz-}&kpkQGqMTZ zw3CCQFTLoDnybp)bJso&O-)VB%{^1&c$*abH?xAcS>^Yzd+il`ibtM(qz5sM6h{4Dd4+vTAJp3pKEb%VE_CZ9vUiEVtt3G*u)dYo})k%RX0Ya z%tNxEZkstJC4zXc*KDHP!x}5_%|-a0<6>=>EE^I3JAz~ZEr=i&pcV)b_dWyIdL@S{5?t(x(jowodGV&)-(uCC_Kuwa!EW{sWJ zNU)ceS1G<H@=R77z#CSM%KuDdH-{~NCVVm6nEO`@c{ zgJj>bsj1X^B~Q0t`s%5h=4H{lI^uQqnyuoa)13uKahfrIDj_>=%5>XiBX;9G4wJ^- zEcD0|PIB*VvL@ykDo99jGvLS7)?K#|F0_RhXNF%-k5d5|P z`ql~A!-8&#U)6<2enCktz@I!rv03`{Dz1?Oj%4EJiWujlX=w8QX}ISc*`AUHf_%kY zBlO_Z&|~Q#w;)hyDKP&hdmH+n_BH~lE_pvjq{9*!F$$?8wOKR+v|v^4^lLr0c`1RA z%(|@H=@d7eUWt3L(FUD%<`f4|#xr<6Gm7n+0J@ae9Gt1acPP(exy@< zx<@!U3DBXkOZq{eqLg&XFQCX9Fg2y5Qn;3 literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/topBG.png b/tribler-mod/Tribler/Main/vwxGUI/images/topBG.png new file mode 100644 index 0000000000000000000000000000000000000000..70619ed26db6f79f18025893fc9286b9a9c942e0 GIT binary patch literal 405 zcmeAS@N?(olHy`uVBq!ia0y~yU;#22qBxj<i(P=vF< zBeIx*fm;}a85w5HkpK#^mw5WRvftp8my{4u0sz4On*#KpA^+>Um(4K$8$g_u#DxGg(|Bk9EnplZ zG@SteSk(WwfdDeIvHk<0TqI>hp;lqx;3!D_paMAlgRoshHC%-4ZEa2MTmZsOriL!2 zCPeO*E*3=MlClcwfe8OG69FVe1XVoNuY9f3Xr*n7508F+=oul684Qg78M6?2sjG_o zW~y{^&x0MA&tyG*KHKi3QZ5(&UHq@20t@?p)LVF}ej_0L1=h zK?43a4($IMB>y)!{tq_ta+Fs@LPbJCT6q3Scz0J66BXt2b@^&}cqd-FGwt(L#^;Ab zNSKg;tg1-Z>*9C)HeO3O*J~m&Ji0T--?yXtldY(kkfMm3pt&*=R~mX#vx0d4H1F!y ze_=jaD<-Da-%E(-#w&9?8m)#uql27sa&gd!@p%&$x_?vhG#+;o`)_3hy>{j<$+_2J za{7zo<@U+d)6S{yLC4>j&r$57)$41pzULF_BrTVpCzWdN1?W$IVC*YN|Gw_yGZX%e zUh{QFFuHB7dxM?N^_qRF8@}B)(DnFo(98c#so&vwDym0+KVLBK>td~0$Xz?Lucz%I6UnR!+E5jg7WW!SZ^JwmyZnhEpkmXbLybPd&BLe{tJt1(g{9b=@Pg#0?xVK)xDBvXj;_|~>X(~Us;M8OLI5?>@JMOC3nq1*RU;nKX^}OEk z`N$N`FSm%z69WQm*jNTv@}oN>i4hmfCOPLrLHQi_1o?EXb#h>X zzRomHZ=F5%TwZg0al2yK*5@|id|1y1p~kwjb75JF5#F-;n`QbwLH<~}^Wwe}OU>~U za{I~S<*@JUHcRz2EG^f^M?aUZmMI9{a5ghH&v8g*ZU({*v7*RIhK>HaA#RG6!dKnB z`wQ-^xq<2*&<$d1rK8YShmt3q*(Vo+KimJtC<^-`VqWUlPuF}(JYl5vM) zxv^#}A+=%F(`{>1qjh!qym!tEOJ5dy8u>GQ^LpI_GcCxL^DEDzh6!iS2E|s>$jQ_> 
zIF4|J*Y)?VXAbBrkk^W712HDji9dR%i5?s2yk2+vp#PBT zsG&`f>fi_|YTWSp3eW_)Spq{$dI~AXRWsGHzm4Nx`aq1X(*?%RH;~jJP zX=$w@-wWtrp}zaEC#`uamm!frz+$deffVFY=YDAg#w=BFUwWsU!5RqaEu1j*5@=ke z&>hBL!Zbc=V6r#55SYI>p7p|81$u#rOO{K=qr!wfJE{leB zn;8yukQj)l(V25>(Sv0y>d^|yv^-7?S^2jFYQf5&t~7?{)bZNKQOhj6R#^a0MXRmN z&ga|BUIqWqK@qcl(t1BjFsWy3XayuWGH?6a+4p~`*lj;I4Dv>{Als~h<{;|Z5FG-X z{JWa8mSDN98~e;pB@P=2uVq%ntGuGIDHM{DKm)n(So@$%_EsV3NH#<#D^dOsC0VT~ z#ZQAj>S$tuQ>LF^l7NtUOL3-ya`Z6OvGvxxE8qp-af9868bpvSZ$(rR1Jt5bsF(e_ zB~JUfy_{RVbw*_j-THndP=TMJF(Q8OXjLl|>HJmd;7oBJ83fgt%7n_F(u|U<32acq zJ{Q;mf)^NYJ3WQAD3ucNfnhI^qJq_w7`G}=QW5YZsgP8r{)5qjqFoZUAO$RTw61w56{mUE9pA{imcoZ7T&2+-+JS`_dzfIPltnA)6@2L{1VG+YNH z<|>{jLe#>_v{7O2mBaT7NVSFh%2Z_jE(jjb+KE-;r63><#8g>wkMxgzE@%T?qJ=_f z14S8@Z8@KcTzGPYtkNBg_$#FWeyiqqG(ygmODc0$b_aq12(A3W{(=Te zRVjb&0CpQ%1a5V0!`mP-VlRo(18E)78-xsEZok2!4O<;W;-uF^|DMA0*r$u=ybdOB z5ByE0H+e{7X>p{Av#aCLz(zdYkd5i&mgM>coMmATBU87Vm?0#>Dbo z2H>X_7XHnjTMY^T?)8$w6l^Qu>PY5D_p1zFF2%7*3xN%yH&cZ?J1WfPkumHw5}PxS zM7RwCq}263s=32TR8MmjG?TBKAp$#!r4nilic(#(4Wl{2Sz^OuxYWeAj_ zN!6kasBz0;ZT405w(<82F^4F%#AIC4Ek*Zr!n|LkEl^iF8@z9F$JJ~%#IE99S`b21 z^XFYyLU|j#^SqGAI$V{%m}d1;pT}r}&nvB#5TBkHcPYTq>FW)#k*gO~f^GU5G}BVg zoFd6erf$lDAcO88iDKd1$%8;n9Ab*2WG(fnm6f7s51ibGL5+#Z?t2UFwGTSR@wAs1 z?M^u~x9r%SO?WDzv~a1yQDE562iV3(AsKk?y4iX>%}?e*i`pY;R#J{b4Ifm%jLca2 zd|@(^4S&%DI6oaKMXWhvxGk|P@a10$;SL^xmTA!5P#tNN$-J&UOwiY`mL{(i2tJ9) zv_UWITUJh|n%2R2+xJF)iCnq8DYAMgTb-AVG{x0Os;z0C8_ivlS)BG1H_w?z6=^Lk zwHjVzb&z;s;f5HjPpE}pNF~^IH}=1Lv;RDn@@LGiF)2U9I;P-`7LIyiUcR>8l>L*1 zJA2|=;iYS@*?tok5Q*IgXfYHoMYLmsOwqQOP<&}>=oYtoT`Wzk@;D?zdt&o4^*6&J zhmD`WrQJEWlG~rQtb&DJ*X-<@yD8%!z9M{T;-g)>>)`(TYcA%)z@mBmTL;(9My+y4 zE1%Y?Jj_x{9LKGfq@(H*E0Z&a-3d1I=C!XgfQ8qZ9D@{!gb>>;mUV?U{X%XQ_aH+{ zv7$}<=Q?M0fnWbSSJL1=Rn_}9OQjE_&(&P}58YY~#L}@h%BNa2F~I%p4E2(C8~)d} zJJQF4A$C|?p%pApWfQ>P>EO*0geKx8tvPm??@H9Y!M{}J0amt1W_gCA;~isI@8H^X{UvTp4%tM+1EIoZ9rB+UAMvGRe{z&|N;PHU%#p%Mb-kJ!G7Eakshj& zH56L;xh0^#J2Xl9c_#m(jGPYE88!6h;8h0iR!uYxEQ|a+{F|CRt6CpRgTY?kg{kgp2LWtj)-|bcw>F|$u`yj&h+|g@{t=FiB zX;m01Z#~1uq4&n4e;j_ev9ph8nmuRRWbZ~yUk6CM8%zg+J?zCQf91{NTbSEH;^gSL z%LOWib#}DXQA>j}=8ie8(&uIkKW8wXw5!bDs*io|p;+&H!OmJLTkWN5#e*+`;Tr!h zN{zaqfV|8w^ZeN?F{c|alCqql+}D9p$9V51(Y3d&RcCn~O@%)P)ydbsV0YMyvxu?L zxMF>3y;EHwR~~~o?O!Bv?|kX^Bkny(&LugFJ){3=nVU`-&W*!^nY$5nes9|u@V57I zFaNs--d?R_*S`QsTs1oaQi3|Z?YTz&@4L9Cv&L64fR7Apcd&+C%z2i?S5C&qU+2#^ zQC&t9ySed**0Zu3u5yb0?KGPGocXFU{u$7KuTGlZm-Q6F8^qr`Wr*CzKe|dKS4`4>lJVEgeP8%@8}LXndU`m0QznnFgK+Eg1WfGA{zHd9I**-V3i` zw|5=h_PuTUYu3$!?um0hu;%4*$obc_ z``Q}4&*%Y3{Dyy;C?B+gi-MEyvqP8q&|A#(=3Ri>f=f^TJa0q@#!hF}=)E4={d2>K z|Ids6$_ndu7UF2Bzqu{Z&Ry@$Qv`p?XPvp{V7zC=%M1TaoX1M{)cdymXMJ9aeb%eg z((a9B-w4`p@#*y^EcPm!7O0Y{U*pjJxieAG)<;|4D*&qL&TA3)p~_ zxM`{%v#=G*b9JdT{_3diQ0pHB+1{*My}0TTUoPZ0zn`tX>f2V` zk{?}f797?KHYP2W&RgY4e)XlFpqGz-g!=x@KP%BY4E)R9X|-7jy|;ELlTc0aZZASl z8R%E+QYYM#dc*7_K3ND=KV@5(op03?T(=hV_IzXTAtSWoH;da#X=~xWg$HKUcIXbZ zy|35qTradOK^W^-LVj}*ygJwJ&#!w>9r_>VxPHAGo(_8c`{wtfhul4;-S02vr;X2- z-J$P_hN554SMRY0Z$(CBX2<6RIiKeT)!*luDi4DgN7btV{wqJ0z8mg*&&>Rubswa; zMLlhLGyBaq0bT38KY*7X)6loaw4b4tYjNwj>$jfU%Ndo18tw5O*ZLlUtgfv8ROZ_D zQ-wZf<-7e`-p^a!(;((UwS|Q}dMf9P69~pn=#OzqAB(6D>5CS4NT+J2w7?DAX-3ni zt`OeFjn^~o+sL=wGm*=V+wyBCiC#})E&sLGP8NxuwbK2tx;HbkZ$Oq0opifnS@D^N z-Bd$wHAr5h3c((>J*2BPen())_B<^gE8FW5rWWr8Ca74m-uKm`X0Gk>sLJjP*ZB;A zO6d>*vfhFIh@6-Y;x!UCnA6ulHO@AE)RLyB1rMJOicV{$esGmq-y?VKb4In#>Do?z z=iS~;b1jA&UGa8*`K=vp1IrR0cRJ9X;|?;H9BePZdG0=c3kv^cgk9iX+e)uRUCbeGu|f zHUTi>7Pwfw-Un!r8ef#p?3|}f$o1MqR#4GrzX#u~(+fpH_Jd8kJ!Qy6u;?GTZ3n2_ zyu4^L`hwa=>d5i z+XV3-2!?=k3Zp&t`^eBMxBd5up4~iJj3UdiYWT9#b%kvZy65_h?rz$qk2e^k^$k2& 
zuuYCbZUt|6myOTsg|SIqkcJ8p<{b^D+DP|;l_WUwee6La+npDQac4T(AANe1nrP3mZ`w@z6h!-c@>nIVr}uqQskhNksu?#`G%E#Om~7kn z%>C1*rH6x%c3M~w1%jKi?5J0%m=P;%I;GYJ6A#~N@3#oOQh*FaK^v#!6V@L5>cMZ2!E6^G4%`oix`c70UG*s{01Qw^Tmv`-F zuH*B23icclooC=y(W@>ETi1>*5RSMdPJPs8Tp9wgO(XK1spsLkW*yyd%cuA%)*r`^NW zoU&HNsh~C2QKu7^eE$~D*v~6_f9+OP6Vx^8N_(jJyja4U3KVhVFyxUF@!&dmsnhIn z>Ag2Q`6BQviAHja5l!~-&VcW3F=|$^5!sqQlUssak>Ib?q4!~eev(0Iw;|ZSP;?3c zZu%Pt0SKSQcJ-yxRRUlpdu^vIy%Z^_qS6agnhtm7ZOOh6tHt2Ik@v@CQon&jq}xTk z#`~ug&oxKKo25Q#j_ZaeH;iFw)3DJUI3HN!PTBE}%BWARIID;{*x^DCE$Y~`T_3?^@t8YUdd5WL zLnrtsfh0=<1zUg7M~^FkvWw$ih$&2p`rYVU)xQ%s@Oq;uwyG75f+%$`--*5hv_;z@ zH>2;$fHV;5*7+&DzZy(1b|0ts5$4l>BEh+5yEjmtYd4gaRsFKMlE<%_h1_`QFJHJ> zc)*IQM=@!&AXc!94`t^rqCyWm5hGt+fJO2j-J^n7O>KrkS%1osi!WgcZ=~+r(IRvm z^Vt|(hq8DftFmdcR+}S8zOlk+QQQwTl7y@I_Jl4W&~&|rsJHoid@rLxHEWw?W3l(S zbf6dcjA<@A5v-wNw*l?Q+z4?PY&D?H+4;%}kdX;9x&@UuMUtfh0q{LwucWE8h6wje z_AS#hGbe{0B-274qn*e8pQHZCux0#goB8W4r52`J%ViFYAN?A*d^x{1CZ7m`>uzRSwIq3KXH$eCF%5D0Z1um4i@tE;i91aj=+Cc*rd zKLwhUSE?NCato3Ek?u|JRdA})X^9rKi7sC18Vl1jZEx@d9g|jU#WqSZN}thQcF=qt zG$rxJ(CTYY(y*aL+2q4oqKg~}0#|l70tY-+!Ot7;-FYqM*Wz}?%8lKx`mPF=1Zq0q zsYw0QsyaTJi5@vO$|W%a$zyTilbqz(pi0XPH%~J#yliLEA3xG`XYHXVhg!)LeT!^N!wZPq38fgGyU=%3(Cx=>G26K#M^$t{R=GnZ|@?viZSdZ27c(fpzNhqb!f#xU`BM^&k`#1_yZM zs0a~5>HZXD^dbCc$c_~1f90#8s7nO02mFAa*rOOQ3guW@$6k0W53ORA9>!7{_PJ3K zKwvPovLtxxRC>2v5XQw#?0mkni?(|`7BfJ9iArm;K_eEY(|iQNPpwi8MEUxt)$Y5@ zR~jGw77|(LBRHq%t~t^o_QLHqtwPuq1m62|FmoEzTXV$7`KR>}m8)+;S*I;Hrxa6< zQw3X82RQxZnb<=zm50HZ>&r!#{oTcTpv>9U;ki2FTH)Ao;_g1H4bV<*Vz5RfiuWspl_=h$v!Sl#)?T-LWGdxw3gmkb!rOCYh`Sx<)f~W!tvnt zg?&3JD%43znN#eYycLIhrR2vO+JLEGI4fRtNRY1%ob)~C2e9Hu_}|?%K~S)RMbPcK zvNCocj_$Te^*^o1ogCF??Zj-03H5Zpfi%}1D-NoUdy^mm^m<)7=2iUK9Qq(`tZbbP z1y0q9SJmoRs)V)sWv$1Wq#_a5y_JUvsnjUh`sx@-LK8gIsV(pYsQG&g>(>e01t{|Y z$aEz;GN_34D?V&o)GPH_I3EZg4*si{AF?dqY`6RIG~&(a#9nLHr*JeY{FRs5_uOM) zXEKNzpdI=0-pk6fPuCdzHi{H#Q$}DgsW3%Oie?PeZGaxJV#75|Q<6VxOJ?H4NPaoT z7Y~W6*eQIe%~vvyauD_K*S2xVDG9jn$vVR*>U^mQ8-_vYNlKkkFbBsGid#C=QOblY zaD!&;ZlY7@&3_)757;s$kkuquy5)La59!;Fb4xL}ogi9C`irlJLctZJoGi(QFsNtH@^P7HKF zrhZfN@i=o%5pzFqJU7quLb5{!CiSq)xiw51aBPA7eEPK^Ri_AYnaDWWr>)PI%PhG9 z$FiSty1DT4C6YJGlhWNJ<*62s$})3OEl|CPLm*T`XQcY=pBegoRWq9kO!a(>38{q9 zhkR>$A3m4H-GYX5l|R&Svi!s?=+d%92a~%wc{w^8@IoGr@*-(j%;!s!6N#pie@j7@ z?20M!B2m7%1CyEF3ToOZR~p5g&$B-T0VctI5)rB6(3y*q80y2Ck+ZB8DW$0*g3ST1 zE2h7_hy!RtAZwp=U70LP8jV2ASjX@dxS@B0im147U1Hn0^8RKTq@H|Uiqz=_xp5&v zvted`nP3Rhx8S{JFzLgClEVl^dV@_fLJ*PCS}c*9#FvD{0g!Yc5p>!T7s%pG1{a7sh%lc8-U^5~108;e!`jF0PsB!DYS z($E2*q+#o5;~e|c9t+Olp#Xh7!+n)ij>zuS>v`(c6oEGwIfioqy~9Wvuy!dn&Lo~u z)1SyEzee1km$RvU(X3#c=s#1cT~biphvCPh_r5RHMkW9IlL}wPgq3f=3YJe~v8Jeu zQh`OBG998^fmV`uSV;+l><39E7j1P6SWkr?viVkLC>I5b_Wjkc67mHP`C##J?`umQZ9yFqL1;;e2d+W-L-Jj)f3%;+EPeczEZJ zT9ALe7iG%h%i&%OsT~>tkNs23~$3*B$cpR_WHSkeI-)u{1j=WZy{_%otk#)9+hFX3okq+N~Yn}ZIo|` z9r6#S=WdmMjT#pcvB)eI@VJ}2}Aj>lQjj`QR8e7^&qaOWrH;bgR31Um!O^fhXS z+~A}&&^}6L+PqGpd?_%7D*JC5v%rEs)~YNuR=O#Aw#bi3Mo61Hf`ejAvUmDIwrN0@ zwcLGC*9RHA&Ys+Hdjo1Eg=j^tFe0+9(93LQyPi)e+;nN$W0)rl8j~lZP=x2`B-&zY zEFSk?Msw39F1KzTb!Xjt#wXM7mrrh04f+sO;In8~vc?*aV2IrwMSWB=FmpC3hP+|& z&FH0!ONA#xwx{?QH}T~QF<4D|DoyPYTJbzzFg1;59UP3#%@6ZxLPSd78{F97(gK*j z{*_tun-5OaIZ@g9LMMb%0C|ZUFDUW0ZM@45x=d z43XPK#fzu-7ndq|{L~M2v|#}cUf5p~Bmw9nB7O_47#l0(te@P%`%H_UaDl~#9 zm4rNtCMQ&oS>;M8(ZHBUzvS~ukBdsXVsuiVn~aEEi1ZPO0IqN^Zu@?*A3scQoCy4(uqVCv?C+IhA4;R_EV7+J)>HShkcPM zYjR)ulGLw1@17uJb;i}}WH9oy13NlSWS8+o>@y+35+Jn!p^34TbbY>2-3tbAl|2yV zQFhN3yE(WK-30Z+jg?3=HEMz|MqgyjuAFQF7B`I#q%}g7<^^vtI?nNFp-_k468Ao! 
zk)~A3@b70tvQN(2Mvzy$zw&jWxRH4%`kQfwtm^8USfth~+QcNs<*qqFmWvrUX22Ig zuyM%4Ub~~ccBdQ@}UiGRaPot4<*uPod`fd@(qD^fkmZ{wr_1xJy<)nxUn-KXB zw3yaBK+}R0!1r~Z*4q_}t`_`fujriO$sC*4CT8E3D6p)p+Ezw6!XHhpNQrX4N%0eLhSL6}~=F6twqPGdZ zLbQ-TB?JRPO_VVcgv#j&jzjT3llZxSKgS;aY0Uu!Df4;rm2IUvDc@^yC-DA%BreE- ze`hPV9^DP@z*Vsdu`C8>0xA$e?BP_L`z?dR0CoM#J=E9|-M2xg%8L$FL09WOmqgo% zxl(qW)MH~#Ytu^Ax)4viTRFx3j;Pa@9JWFXrfZ79*rKUYng$R_Uorz#@xKl274O6= z%M%ZaC82M-!BFmMYQiSyzPial92v$6Ta}x(-xSu_-nr^}!|n=fwniYue?ss&*gq zutv5&_e-gwB$IYz)PHY7V?QDH>QsHybiX_pY!a02LV#00A8ZAPnQKvH0I(<2Eh_4z zLhyg%#TWGp2#s{D9+W`jdkWreO*Dg`KF9;R#^xhV8;3XFQ?TEq)rIsSC$=pL2?^G5 z=GQ>kAYp8APl7a)DU(YqlNgu<->*yu0?K1Phw1$^R~2X21x*m^c6VC{LZJ)H>l#AJ zGa+4CqXEDja$ThL^@1$DonZ7%LlGg3u3iPANDEgjS8 z19O=^|CBi_TF+n8S^W!#wvmVSMxgIZj&9$dRma-AcHqMe@|9>t?XZ`KD{tX^wU$Xj zRpFvX!4q%#dzdtg#1x^>7nr|oS^Pr!07FG!E8m`{6qrNQVG3h98>0!jAtA`9#4UYk zV2g8V=ah$mEg5g za)WN!4kA-q4EU*7#^CfwuO&h+%%`$B!O(37zL*&~WJ$2H1rM#it2F21nMbz~A)p3X zw2l<79`S(96}Q7^6ub;#ZdDVB<|9EaboCO`BB3D$3^bpusdz3tQ{};l8QoKKNHN1G zy-(oUQm>0_%`+7MaBZcEu#?J_4@=O*+q|hY#;Uo-#E@uBmq~^)p^ebg(xyB(@X4t8 zV6|yU(VE1){}f!Po(ejU6@~0&R##-LzvD<=aiRTCMFIxDmDaz5&aLP5k=9MZFSF39tM$TFs&!Sn#?SHZNt!4vcQvt@_&z0i_X9q7Eqv4JL)< zIrf^M0p(g^GQE*ujK*BBnS*K5@t&Pc?M|gxdAKGzWIw&bUO9UWJjbFkOfm zYVP{h0z#NP~)ojIJ(iZ*8Ki$+y{&|f8aVbT?I5R8vmxImP zUrDGb@mW2c^M$I@;1i26L^&Wkqy}Be-)At_1O?yN`9KBNCkw^xbd`ERsa159)5}0U zGR1y~E8n=?!~eW{Xy%-0kPGq5g1!k8)G1ia;Nw-KZ-_PXoq-%GZ@|Cfn1#u0dBiTQ z`*w{C%(ykVMTNxV5qHk?xD+P^kFFK>po8J3ahHdrSnVTx!k4H^RAOem=V9e15cNz5 zZNPN;!Xx2>p_#oVbP8$kvMhQ}yI`6eBI8b^?_!2<_kmrRKK7XsgKr+x34P*}^)RM<* za)?P*fLuPtmCHOahEJv{~7j)p8z0Z2)5 zwTy=dDL0twRt4R-PN~?$%>MCSy5I&HnuxhlxfL_0%Mkh+dl3JIwXUv$lysx!bhGkB z-Y=z`OL|Vc_^1eEV+p~ibq;+7uP`{aO2-XQkd2FH{6}_Mq!ID6tqm?PQyhBa#X=VX zF=d+A#vI>15bP=pDYy@>Ukw3|GPXE?{hVxUBQlwS!*qY>OCtsi@ME+!H37aVgZm2E zqYiqo7xMzCkHru@^27ote1N&UYc^3*4IZDYV0j}R3*k%WiU zkpxU(@e{%E(TooADRht|ohlNMwJ!rlBu^A=9jNs;RG;NMN=8hg8bED))GP`~ZSX{x zI!YEEr9m%ex3~8%xTTPOR~%B^k~CGs$ffij_9A`)FrX1okx@`o7eFhu`i1?H=`u&s zO7*>cX&*yl#GT`QzgS17s9jr7UsRe!o-_TN(ziRaDJC zxlP1b%P;(TWZF{qa&g`Cdo0kU-4hnh2#PUs#e>R2CxaW$UpNy9nHZNj4AF$|GgFu? zSj)dWAg&uG4Vb1*UKhg;3D9`k#y);d*wscNd9TZY4F~wbT$dEXyj##KrNfD!o3RN{ zB|$0dr>-<(q$WlV!F@NG0F!{+yxL*fGqj93x3Q(VL}Al)xo zLiR8{bytC!T$Z(FK#=8lu89Ne2FAkV%F8e{Vr9mSjKOw-zYswcb@PSjqU>T8s(Dy2 zYe^pw2r?JwLBwA_qi!;wDvY=G!0Wah$qSXq3~U~UG>uK8?IqFb_XTFFuQ%QK6>mUb%kg{{=S z#xAcHGq;11t1w{SK0FT^=3pkd+D5Q|7am*mH;1VhtYQc3Nq{7cM1GV`xO(=kvJI^> zJoGwk`GB(1WK60|&0^k+09;poWmB!|=|9mJLl0X9gha}xz&=*Qq{(@|M}uQr^h)KL z$J7npVvez3E~GN**gGZjS&2JUrY|;m==*xT%=o=tV#dPF(jkZzH{@c&MCpF65-gA& zuzsv+t6xp1AqcRDE$YxlCQsCA!di_a?c_8+NBZ9-N)`;J<>ySSC?5^vNQ;=odn}u5 zK8kq-^zxS3T1!KEjyMd$=($WUgU0g~6@C9*o${U`d<^G^Mn(+5mCiCa2)!C`XkSUxpXeH=1uHHN23+EjHV9ngG?dE#~2u0lb2O1hm<@? 
z9tps{6Oq^(cY>w+myXKn(y)S)bFmzeC|rfhWF~m4WYvo)w1QypQWxxuW?vJYg>q5x zokS4X%AKOEBZ60={Nzr;y^=k57z2PHfey-Jdq9mj%fWwR0#SKwH%AUm3#!6{zcbcJ zFAb_XTPx{ZC33Ebv$I&<;Zt05IZ+n}H)@%ZtTc)igh|qnziQ&*_dJ#YL7};33M9R{ z}zmaPQR1ErH@YMuiZkm$)xN;lVuf~SMmhGo5=eRx~V_B3Zul+U&92^!1XIx zQ^z!c*j4Cwvy&wm_1MkJ@9R%gt3#Ypn;!%H?pf;bLy4BH=0pHI9|#lf8T2N1JGNLo z`y{6=rzT2<)$5RYm!@rhn@7f^-)WGL<2s7FSOeD!}->&XmAo(qFXl=D|j0m=M7x;0f8g2khbevvMXqrnw7R9 zn^X{N`#K(W7Jv`@;DqOp^Qgn(}qdidaWoNgeHkkvo4Itm`X98ES69DOEcS>hE8_bd&L7w(AvMZ z#=Ng899{j&;3I01@|T}vxSPNL-VFoHTsF;0Dai?koc$i=nppKpmqN)EK-ekQu@^7i z=8>0d6DEByMUz;BB7DzUeo(EVBkIz^EPr_*&1L`>n3%x~H2L`#4vL&sx);IxO!{Qr4@9=7AqL*=$7?=Y<~WN!WoQ(2r87!R zn&}56Ds#KdSfT)8_hiT!6=-Y(R3wuHbW`%WFrBBh+Jf>Cwt}9~<5w54(0~{r*LlB7E)VphU`AJ1R@q1N@lvzB|Q6jF1sfOmnZidBRpOn`&X;gwo zx}XwMNPZp+I1(8#H1l}oGYL0t(LZwl!=5%%VKc{FGuNj3zee+UI2T&0W?+Thg$~Ho z+cLpBQ@W^Iz`wPlF)^svM7B&eK6WqGE=K1Dc$UiT`gy^?-y3{LvFND2ztN!RnWQP+ ziR#M^CI3zENH5@^C7Kv!1*?U?g_4AdRnf}V5D<)Ch;S7jz|d+;R+HtJZDimS+=;m| zkb}P#WO7c!_J4bNvQ1Bs%M@c{3|JM{B(u=^IN?NHv8oTXk?5hXcdrZ7Ec+2&end7z zgYC&gE0O)O{f9@Cis~m+GzY2L*lsIshd%$!hg}#&MDLDMM_&=^8Zu2)kz6(2qL7Z; zIIn}AvxP}4J*FF8+C4-LD9FhpcV`I|m@g4cVrp}=x^Tobnbjr|${^~$6e!q3rKxZp zDXSI~{}aIaKL}6#?@_S-&qm%xK>TB!I5^FiXg4Bxct9`hRUo>jE|9k1;L)Cx#{QCENR+fI!JJ?Y) zaQxOt<}0Zg_wcn_63Z&zs887uIpcpFdFk5%q2Md`-n+6=qKUqq;c; z$48ydA89?e99Z>-pytAP$*lO&RF93wlD^aKZ%y-hp+zsvi6$SrLAT3}2x6u!Sk6SU za^MXrjYtu#7=McOIm3{rTj@L zOpjo-KQlfzZ8$PZhSLA~^R0V(ih0sK1+Q&UuBC%}CyM=m9Z)ZMHnmT#CK6yg&Uz*J z9kY~|ixGu>}VtEsv-`R&XjLjca zku_HO2c0L`E*!_n!sfs&1(lJb7MG#rGbcCvp5LlcY)wrk)Z#k7%E4I*;=QoH!{C@T z$(Urk&caE04E@S|)_nReYoS{4*GaII_n+$`!hMd|KT^L=oZlY&-#kTCR7^w`n~7X$ zh4A^qp{k?Ce7>X$B)?v({zbgMmPC||O_Y7U_98#M$He&fx^?%olIX`N*3Ja5*Q{`> zXU}BS5~>Wp^%mWKe#Nbn#npbk20hqnhQ^+Zt#}{3@hRpzq;wGwB0k(DeR|}@ne%~$ zt_Ty(+-oI0FzVmnWGFs)nd`(yj8&995bhsvPY1*A5Z2yMfVq5r)Vtqix%w7NI?uVPsYe*qj8slL z-#klhyelwoTOaMnwu1EbBzU&2r9V#WxVPI)wT8m#^Qm|F?&pxqBKGW{i5%f^%n?Dauk9uNQo&a6>iogjeA5l*jJE#Ah(a(JKO#j?+=!QT~I?kM!}BZ=X^hST#IO{?LGi`SZQZWlc-i(ocmO6+m{jp z%fQilrWZ@FAUeQLKx|pl+j(sAd+hf$5kMaFI(&Qx{qzJj9Zp$S*%ii>)%o>01=NYI zslY%neW-KaPTw7^0JMrZy_hF2gPhVl5xZSz+xLy@;>e#9 zY_QfZIBYSopQoE2Q_R;~X$h_bvW-%k^rt-KYKGzk2uV(dR-5pW>gEp3}$hh~_PXs_`|GscOaWz2{a2(f_pe3v9SLhK@9xUfJ1X7))i~#ZplXAXaoW+&@4tsMCcc2f zG%<*klEDI42i3vSnI|(@TtS+ov)MFZLcf`@2e~z#2wE7$IMfVhwlc_uVGn{`Py>XG z61jHN0!_964B4$_JWDrPAtxAvT>)n$C}Gt>gcUk?dMPmiz=3MRPuH4Okn~y#qz?(x z!Ff$bP7XHFN`{L+F_&5aMrK5y`~VIx7fHz9ixOJAwIF1qyQ8cQ>Io@QTYTjcr@*b} zkiRxF2NKpt(jYJcld|8v65w|S2jevw=SB_qeG!-D}Cklm(d3BzB zQNeFV;RR<~G)#kxrrgmA+eiX{qIZRp2|#oGy?ihgW}DyOsass4`rm-UT5zJMgZ^W1G{Jn~a^AuK z_>kn&QPfcEhOvPxr-6PL^UoNBfcXMbi8@vSc>DgA0)Q*np4Gz{D$?=LJ1a5lHnSO* zd4aUAdj@s8z--?j5-1?(A@!QM!^Eo~K#%?l9+Zihd|Gx93e^q+Fbf?eF`v6ze5VU0 zcnPcMr$CqQZbD{9BXR%u-^H`cX7UT1{2UuU=-IZ=4n| z1Ksud1@4iGx*wZ(NJN$tlW5XemMJ3|4UYf&l}>_b%pb&Jz<@rubPWKbSE>9_?g3`W zzdrUm@{0bFC+vqW`J12r8tV0HxC1SQbJc}ya1$d6{V&8cV&7<_Tt9xa zTvowN_VjzuyL6ciK|gl)tK=pee$4-CxEG@kPGY9q$Xp|x0}Tt}wkCTs>A6uc>U%BQ z!qHD@c&U;lH?ITwe|Z5SrY8)^)kq(lez#3R2tbf@TMSX5ErkkzQwy6oGZn{Bq~u$Z zTj=(KSRtiVoBZkb@;H!1z-pX?Wu3YVJjKR>#I+E~LfH|l&@lDDD2suisFd)BemOWl z#GXYv69S16x9FTbfu-tDxl5Dz1op(a$5n!glreK-S6CGaLtaagB+`!XfvZw9q4s4w zw2zSp2XbKHE!4@9%tosQN#d*FLWhp{!Z8J)0iZVmz{p_^fiLNm*k#e7DWLf{dghEJ z6CPKjH%Sb%;D*CEn51JJHvlFgSV6(3c-|zYKL$6+r}~qDf{&)g%7Bi>3cSKbv^q?4 zYj7M#Q&9LrSD6a+=@3eV(<5%6OGI5TnIOnDlFAX-4>@DjlU7f%MRJeYfXyUYFY(3yQ{6c|SK*2fUYEZ*}+cv|KQL4Gvs*Q6;^Y!0SGT7HkO+g2NpK zP29hVQkA{IHQ50w^ENC9mV;4#h8zIKu2K*;8Q%T)5g0>Ote(jiKK1X1;0ZaCvCAEf@ajCzlgbrIQ}mcBCSvF?&*;xL2pb`! 
zbrW|izSXl^1zH&ceg`&j!Ib=Y&u!x3)4Ph}wo=!oz$YCFXkR5JMYpEq7{M8XzP1j{ zg3U#*W{>~Z?u?A_wY5)Ay7OjRMbz6&5{Zt1QiyZF9v&l%L=b>19t#scuEfZ+Gi4lL z4ZMxr@9PFWPmTa&%ev(Yd?}Vvqeasy;xs~_4A&qZ3P2EX{VBh2DnS?xjgTnf-fj(N zN%mMpDp~6f7;M zqpdIvM=C0&E7+9{9C?JB(MofShArFmk`Ia{{hoTyWK_fHKq1~qLyKNVLKEC$h0dxM z!+C02(HzSw@AO-?z<>cOF_|9M`R^ zN~R?jL;@U=^Lo>a9_3V1{C5zhtILq|^Opu3gBQqLSB;E%9mCR_p2b^Bk25Sf5TZsh z(jo$Gjk{0>r!~^B9}QIRVy^q42W4;&0}z;=ifLYn)l)UZ8bAb28)NLEUIJUk%y#9@ z^;36~chZi5kU_U6>oUk(y;woz%gkm_>Y|ZW=z_*_vF2C>aS-Dl#HqGiLWA*ggJ`*zS`#=k4@eR z&RhPZ)3XoRW@F|GTF*;*N)3VrfS~ap^uyxSjq zOG(7g(R)k$!{>;dSu*9Sq)^SUPQChe(1{fFjR-$}SM6_FCtL(CmaBqm0T-1HX!~|B z@ATv0>dBvBhN2sGj0>mxIQ1ZqY}`dP^gX0B9mp>!_tIAdfCgwGtaq&zeR(|jONfH2 z8ErfLB1b=#cG;Vf$Y_vaE=pZ0m9!k&>HH!D$@o0*eklFJ_V1ClWQ!aCm*Ohp>v1wL z6!eB@b;|Y{a?0NbF`$KhaLz9CO0LSO;e#^^kgiT^dPb%dZ4kqq)0E{XhM-T5!;tFK zGWF#a*8xfyKG zFs!G0N);0g)0C)@jDUU{_V+r3Z~X`rrGk&^leHn=N%nG}ws#{adU(rzWGw2-wNqa@ zrX+_~i-0eNXHb(XO|B5?t&xz!kGg8EBeXbYYYz1R@m0{F6GCB{B4{yftdi4zZ$Nw4 zg|x>pU3h`Dm9OGaE_xF?Bof0fp#~LJ$&phw2d~nGlOHabuZ%Qy*|*xwE=(v;&r=Qh z(ZZrixsj~?@tEVw?$*eLtM~;5&K1E8+5nfz=Yj^|yOi!oWGWv(_8!Dqghr*iVfzThB|?`H>}`p2+m|HW zD5LsIKeWPu!68=<0!>Fx#l~o&2BgfJ1KCf}e+F@I?x`vUvY!z}q+7FVUdJqmLv!iN zsDs8WcbEQUmxT~LD$nq`2E4+9pEjjc@4a=#8Y^xTyP3ixsCU5$2}{d-{xA=WU4P@Z z5wlm+u%k$-guv1jVI@%3i{FcOOH~KOb3F@AuC)@a;8owLJ8OuxB{AzvB~d3CiaqQy z__A_v9u&2r$Op4RAkdr5p&RJR;E*rk>8o*tn?Fk3y!@J%Mlkg)t}B>T4aDD1;o#f!xiMGynUDMWWh7>LxnZ{P9 zjNkZ#Ml@aKL(;)e1$x8Xa)%d9Pp9aoYW_f|9u;s`(MSp=OfoQ*NTrf;#vSi7jZqA# zC{)sQVzp{k*Pkjvs)bYyV$GXd)Y&9M7Y>CgOKOMN5KJWRV8b~0fFK!{yZ2AOLpTZ> zip+IZLi2s;{)&*h969B1&zZsjs0vX|UcFh6$2KlhgQpeWtahXptbdISLq0y{b(iV) z2$d-qGi42>^*8CiTQ3A-^*Kre9FlrlE|FTtFy!vo?R#i9o^}(S6kk;&-+=-hwpM=d zqvR`TC_t+6F*2(dW1k$8*xrT`_qQy3fw)|@W({z)PhQf~w4~vhJ0+B>klB-;MrViN zNT9J(f1nA&-T!FSyTEoe3kiOX&8CzSKRE*0O!^2Gj(?8$?4i~Olo>F6C#r7MwQUW2u{#SD6pn<|T-!r>YB2`Dub zqst|Umi|y=79R03=qCJApL}gao7c3Dd9lE9fdxZ<=?V&6+=u9`U}wzpGpkTXT}*}O z+3r3pNMho5333*vlzsIn6M?Os87<~)e18l@3lB9OB(L$1U<#bS6q~Qipr@k~GJztB zsF$U{6No1_LG=M*o+3zn;*CdI1rSAkn>uG4qtE0skeB)Fc29%-XmbIGr)oIjYH99j zSH(!4YUdWp-;6|dM6ybWLIHAufkzCD1KTe2l!mVZ_8H4;XAFZJz-H(})8i`Ymid#| z+t3_y(R6M@(bJ$^3zjt{tD4LP#sVoVH<`*l@EU6F9!{>2P8O^^6J%W4BL&S516id- z4pNYA$mPjtGai!{@eR=#5(cWG0}RD>2ZI$pSg<-O=KU$~OY0lrAA+$mEfn!G_VJ=% z(hy@iP1X^k&Zv?=9(|ZXE$JZRxN#ZTz=Fwo7%Dai$)ahj-O3!bctxGY-W`xr;WJnTtU_jO)U3uU>AnEE)hu|fK79yLi* zrsHbzT&=sO_;qpU`A;hz%QAfhAqgAJq|XeYmv&=zxhF1{iPL+kXv1KR@2a}zG-iLJ zAP@-j#i5kBbQ#!N0NQ=3b;iRi^1tVU8FQ<>T5tkYh=fN?t?F%E65Twva|#xS|5#>= z>i5x-u(i1Umz7M4tuY(E(eK7@(-TcS56A=^8l9w?7Gcoi6Nb__F2@dX{CEwxYt0=` z%k~GOE1geXxDoR+mg&@TqWdjK%#OQrHKY&Jq!;kLWtdlIMi2t7?sF&+lS7bE<|O>0 zAOM3}#-;u${e$Ojkj*d9w7r~bNHhKf{6FjjPhR-NnoZk(qSxvVUuuV^x}M#gJYud* zVyaf^aaA;7>zw^&EO`-t9}82&M9b}(8}M87!MN1^6(cN|T5wh6X#of&8(5cbe3_#R zhv)UELOJ}{NMVI~_>ozumv z9)D({F;!W!>gJXYEp9${z;F!m0s_OW(~{H7m%|WVD-ltHk{DnhBI- zA`;P4ADx(iU4LD5_opm;PZbU%`)HdJ< zVlzU6hnXVv27Rspgm3+R?^yKc6*xpPJY*r48IJMh?F{!N9wmom5+7 zhDwEnhbv_iU4<)lZ5=tN<&u@mQ#JP8@6YOF*o&C(jAv?hGir z*syW}*3ooIM7c+(bHa>SxP-rAWHpw=|c&lfAoKkNgIzJ2JN%1xNII45^n>Jk2byDI3)nT9$AZ&t8s6 zVXn@~jOX_Kq>0HVbZjF%^KhUQW64%w-VB%rQ|ic6rB|x5-O3@{ z)jpE|7$SNeHv89t#g;kz^N>xtBT;IkVaJfFa1%S6!L(WrQ=OKxS&Rr&dM{A7$@qW~ z+UM(075fII?5*9+n`0;y^_NByFbx3;aXF2Nw@)?$cQ$sO}BzHUGeO?J<8dLVyE?p znmJ;3cR$J)nRV1J*2>~eKKx}o zlZuB|sK&ox_RTkmJat{hAx)sdkl)-)Dstgvwy8{(1O-%kNk(J6qRfOrrxuFl%BWG0 zaGg%m6bvgWf){bn$fABo18(D=+F)ax?gl8O_+}fWtsY4+Dw+q_Xs+lrOv|N561?e0 z==Nz-xl!F8q4F$!fU+sO{AWbKqo_r@x)E9pac#f1{Ip?gz10*A|C$rDGf%Ou^B{7? 
z0s&g-6OqR3A-`ygW1EUqT33@r_iR6C2!fhs@Z)|z-8Eym)xn55fBvpZH|SUqruJJY zqfxKjYRM0jq5Aip{6tLsMu)}>`)!s4LlQy*%bM-;?+gik>CgP%k_|UeTM1WNNe)}f zywkSQ^+SOagXkLdV#+F)t3gWs3Cp1K#lE;z;%Y;c0zKbl`_=20}t3YUBYK4R)BbtvR#aV-`M z3UBUAS6O$B+Sak`{YLi9#wof0ZyU`o`|N7 zqjb0gpDz56BC=Y(dbiVeg(JMcm4{?^rsS3b5$F?+c2%(*E&Fcd;T9OR8PFE-r}e@B zTCYxQKMm`rO|8UR1r~ipbXpLtv(G!AxLX)Jn;jd`;tb8(i)zMdx~tPhi=N>*{?%K< z0)GPwF#3{!t>Ro7n^Lh6j;)a(C(e#QS==I{%6B7Lusn`o>&oWQ9(Ra$g+N>EpT$ZT z=@Zc;91WXbEGEh(DQEv#<)*-$I`o@R86(r{Z~Af`{3#T87laxRUCqrSFq>LTgbFnN zejPQoSpgqM)|;y97eVYism|lk>rb+yO*Zq4LSS;I%K+cK+p9lYG`#w-ZXZtmjxi+q zNBh9R%j+<*#k}^h<0_QPDSz~C(^dy=hr(6H^#nsN#vTD`h&!!)-o3h z_Jfbs<<2AuOByIE3EE2EKCM~Biu2v6GA*-{2n6t_6&$?%L0B$h)tmgS&-4mR1;
  • D^KJgwgQzS;gWK@s#2H<+=q~p52k;> z&5EmqdFXmCeS=pqyUV&=HvK1$CI1Yr?eI^%HCX!lE6B4OWm_H9m7f%WZ(TcSR8uxE z<8--<+pj_r1!y9==c`yG>*{$vD!DX@j>Vx})*C{t+7pQrS|gC9`UUOPq9fTSM4d~h zawhWz*WBewa;Q57SUk)H9g3Zk+W{}s)=fRsK3@w~eIG@yE$t#7EZ&>|b;)+Z0so%n zQdMv49!ndw9Ir+O#4MQqQkNQnGkio4{nF+3`RUh3GEkx)oR=QKj_d^8xTg_E4G8>1 zTmD9w8$(DQzJyHQ7S=u|>dM+s<*p7;dAf!Sj1rx;R!KvPYId&r={Y!!U5Rdl1QM_N zIIExONYTeH`xbMZkQSwzn016qQMzwqs+%7V-KtMY-=zNH4?2sk@F6|X^nSUz_~wMn zD(6F?XrWb1rlLEE4waG)<8HP)3CA(F54ZOrLLz1d^>lU&<1$jiN@`2=JK9N)wzgH;mQ>9&RG9 z8w0g1^jbkR=h)k??fY}dKX4P|+)kXlm{om*o4U0>|M^+!pEUpNLaY2aV5u;Y)>O$Y zDC!oF+V(ppDZ=kQH;E;!dVAf-+g=UM+|*{}Agfz(!2EPyT=s2~ID15x5RmaKOx_sV zD)eORZNmp(31w}w{F1_T%yWZ+Sd}Z8G%=~^0W$}jLp!$6nD2~sN`LS~>kLh2^Xhzn zL_EHi!;HKK1-e142XK3@uSKGSddAyMPyqjU3>|YJb?%=ZrlV5$9DIc9Tsq=LEhXiw zBLjV=fNbP)x8xgixQ<#|0kgpo^`IR9(H^;$z>qo|d>whSiq#d;u2{W^@4;%|aUYZV zQkt?1R9$5)yKAClhaDN7ugmpA{;P66yc>8}!>$G7=4I5$a_T1`wU044Ui^l{+M@Ef ze7`}LbHjqf_|)^CP};V%+O^;P%5FE&E#48z*_{0PD7Ro|&D0mcsdK)zuK-CI5rl zkDj&N%dqezTMj@jP8P z+o4x^q(?`(_4VGsBJ~KFhhAp?z!k>;IKvyQwa_wty)RUhj_`y$mU-Np=OQWY{@tF1 z2cx|G6p(|I)2A@pBWd1ekDM*Y#i@*JsiCLtZs$IDwu7SD>nr+VMkl1I{(D{iU}3Kr zjeXo*M6v#zoAAjrnU)|=q($I?@`{6j=iFX$4tFi2AxWj^ODmkC z2rV+;3t7ks>iR4D>%rtpON5&C9fRzrh2Ndi!`Kd?>YwJLYyQ*O%OSM4^bM$XIIQj3 z_K}^GfcGR;?;XSU%$D(1UO&>c4|WBRtJ4nDoc~6h&}pcjkMY$mRvT3yTEpOCpX-wN zjdAy$FXw)J-q@0`pMrDuZ_sC3WBhC;qsYvhaD8x~x>%s?(@iPs7-^A>t>WE`mHC}Ahe4CJO-g1GqIl5x%+?X0Xk~eWM*&yT;@;A z`5)wVvt;)*7$OdA52Dxqp#?rm43F(_nzPyQar_Id{Xe0%^6}WeWE|yldOweoqO^a- zdj+NiOn*#ooiKfU#9Md!j@;+F8!c<)zX`X~gD1fsDGCEe#JBPMw|pc9A8A?5@4GoY zMJLx6YbQ@DpGQ2^lb;9L)i1^FkL3bLCI%bBtSog_FB`dca63N&jxxm_irbzSt-exB z)>^$Cw*Gs>I%q6?5leq$-nbXiK26D|3>*=D;LLwtvVJYH5^|9gIo%AHS@F+cee-3z z{JQVf-ixMsHVC&7+4bea6`}3?njV;B?a6mD_BPu744?ILRm>Cq6bP_>I-Bje9tDDZ zUG~wQuX@J`-m6p+@1*PUN2HkjzR13wM?2s8W}jPBO5u((gs*j>eu}hzIvPF7Rw9wq?Ffdf z*F%e4E-s7Bdv&hPW_$e2;6V?6%DofVUE%MzF`(-7YV&VPyougbTaK72b%VZ!9%_T7 zBZZ~QM>Yoq(=a{008OCO^57=XY9{3Iw)J8*L><28_nOMYnZ!Sb&AwUQZ2FDqxF;>s zld}GarCIiUnW{^w_i=_5$q^K~6>bafhO^dLIW?;926YHuqCQxn5qHm`{Fgmo;#HrI z=5ANpE24B|=cjt{v2qdvR`ZHTF+@ zCbGmI^+ftXWCwohI}_{cl{mPXz0P32bCQ>QunvwZr70_SRc(%pc`euc(1?n0OhKSM_&sX@KamkC{b_r3W(X!&LMU z#h_z-)>`g#6zSyH&_Rq4?|l#N`iJ-vVo)%x&CznmT>eh(FFJbXyo0b8%kgriqB}Z- z!*x8~-6A;Kr|3aM^Jt=@x8IKk-*&-4`(U-{=8}7V{xOT2vqBQRi~Wz2Iii~GQ#vpD z8_e0!{tsm{G~Xum+1%Q$_&*}y3w6D+aj@H5w{@V_To?I7la2f96ofDXk*402H0x`8 z^J=Jbdob3T*ZsGhSKv$DD8Ab+?x`=lM9}hOC1=2C!8E^r>#+tJ6}U7}Fmh`-7x(Fi z?W-FZ&oPM|$?$^9#jWnn!?S98ja%65ZINWJz7H$~6D{Zk=l&w3tMIP71NR=oIwvAx zg6J~)5M<&Qx~Nri@I?3ZUM^C<7zOuu&!6*_4BHX$$8mD4(nBYNZpN+Ee8mu?Yy|)y ztNqW7fZqJJ_)T*C#1K?JD^cy=G`FU(dTSm7ZTyipi3na>b(=TBg}+3nHRmeMm99VK z3920-m+}C*)-U7`-YaRk&Ij!Wl3rP<=Aqu9>%*bc@ZLBIDedzGGZ9@xhMFqbnv3Am4qh`CpW#uwFyf4g@hOHzno6 zb25b6GlOqRj*~@EMx>sfZ7?_%Piw$?vJ3=T|vb^E5>)y3jsa1h+|Q7_M`Do z-Hi6v!ge|E0gBOqxWF-i7Qijs%@KLwMqZi0vW}0bwf7Ki^cjP1pwY`Hd08yMBlcUD^M`3B@+!Js8TAc_?cLv#=zjC|W;2TT*6CyaG8olSGVv-RgeWr zyFcLX5GwN6-TIKLeZtDRonlb$G1d=gqcjmn!AbN199-aR$@7MwA}1 zdcMSbWyKrIpbG+;pMVY*w>3EA)QzpaZ6!JN-N&U1$Rkb`b)blO6-#~t|HzyD53mXw z$qOliX^fGOy;WWC^f#>k^7iI-=<4o}&@}7xMM2dPhSv5}`8Ma^w(6l$eLk1fk&9!n zfOkBEVtXV)#+VtpP+sW8w}rB@e2o)k9O?cbsStNsM_jq1s&rW8TyamD>XVaH-f)sy zpCiwf#P$sh0H(H*RpSryLob1g5LYbm>ZRbS7|;;;TEiLHhIabBPZI_rR@lnjZxNng z#urFEsK_9`xrPF35PAOt5+vkqP-}xGg%vFt3QZUe8oJ;)mwIkh}kj`FdLEO&1SY+Dt|Iu1L;|rmQUI`s-jw%Q)4;gi?{=*7)&BbhqyMPl&w+ zvs$(lnC9-D7^;R@0$^e$%g6Imm4-q^aJR7%_P&rHc1h#RmV<8EN~gLrm++FN4$A zWotofJn6)hf|StDr;#uW_7K{%fFIiWveQdD&X)R*)7lC$d!OTMeKUgg$#)TXboDwG z=zL#~b-i+BD}nG>XDG~f+?z5@`N{9sjo)jGt@BAf2!4lCH+7in+`gt$6<|}V+XOm0 
z1>dHb+DuZWMQrTOc|%}}UwDcRLnM_s(+EUxmgujnV)7KEyjF%y*3I4-`7?)06^oX_ z@&xRxNpqU_20X5cwW#BmH;pM=FInZ`{AU=kpMdIp$_V4Nyn@3{Vk^ViS=wkq{&aSf zc;a;l?6d0k66luf`D2<^f5=iRfe&Oqy8^qWePIErIinfp@Py%3`Xb$Elms-M^1GAq z3eza)e-;SQl41YzdzP?JD2tB7ux;UUPQgB2l*`qbJ+swvd(RAdpGg}c*qmpP^RuM$ z1$qA*GOoKs6=9nawm@_nLGVIGb5Oy7B`(B$FC{c^Dd^pp#C?nmh_@75&I=b{M1QhV zyy*J^K9@KYcVbf#ZMeYDu9s?1>aw!5^4gO)5#LUPA7GCv3*g$;L4XsMN*j|+GH1)! z--=F8XLx5uowMA)1xn*+IH)GmRjZ0)7gBQHW$bw&&3P0UcWFn(q{}$=B?{(LcUudo zX0L)&SzwxW;dh1B^)N5+hJ$jClE3P!fIlgQ#Hu%>Z@IN9xGCkCltTtB$$!rGQbxaO zRU!~MluM7C+83H25Zsq5h(L9xTFz>qE&3-udd778sT$4WP6q9FN=xe~MussAq{%G( zbBmUMq%|C2zav-q!@e)%*Pa?ZoF^1}nTIFbmv5SJhRH&fs+(67O<-LqOuVLY2-<%i zxGbNWFnn;9@d{Z+K}1)1wfo(-E)_XE(V6r-UBYg4i48qMfnN;Dm*wNG1EyC64guKH zv@Ju)q55IcK$N)LvNn3ngciX~8V*vmKDzAV#;s%|z8W`gxsEkB6LAL@+D|0rZpyoi zlYjAP#3f-uGHdl5lpHPCYeqGL{YCW*GZ{68BR|?P3&V4HcIPpFa(dEG+_J)3HCzi+ z>uC9fAd1z9oeZLwViSg;X+z5iK~8CxRv{S%=AOA)uRpLCaw~w1uotgbyG%CA@N_%= zMj!{W5Q1T?Bl|kaN#J}V5OCfcIgzV$D3k;H8=)zi`+ZEGwJ4r}!JHJi(V9$+Kct|D z@UiiP^HL*#uxRwgOm9BLIPUzd*uiyRrzFgHIajE!kem3$NQRH-+3X<38#BBltg>9S znOeOOg~Xy(9Hx!C*`pJ3leZy%x7J=3DltCWCA@YU&cd%=h~#?e8pYDdXK^sBSF?cE zB{Mv3`RnUs*|WAFbXWWH%F2~0AD-y2YtvW1p;SaM4Bylx-GsHAncPrSLa8NArJuV`{tLJAz=2@Vj?eVB(+$ZFiKB6sEXqmy{4u0sz4On*#KpA^+>Um(4K$8$g_u#DxGg(|Bk9EnplZ zG@SteSk(WwfdDeIvHk<0TqI>hp;lqx;3!D_paMAlgRoshHC%-4ZEa2MTmZsOriL!2 zCPeO*E*3=MlClcwfe8OG69FVe1XVoNuY9f3Xr*n7508F+=oul684Qg78M6?2sjG_o zW~y{^&x0MA&tyG*KHKi3QZ5(&UHq@20t@?p)LVF}ej_0L1=h zK?43a4($IMB>y)!{tq_ta+Fs@LPbJCT6q3Scz0J66BXt2b@^&}cqd-FGwt(L#^;Ab zNSKg;tg1-Z>*9C)HeO3O*J~m&Ji0T--?yXtldY(kkfMm3pt&*=R~mX#vx0d4H1F!y ze_=jaD<-Da-%E(-#w&9?8m)#uql27sa&gd!@p%&$x_?vhG#+;o`)_3hy>{j<$+_2J za{7zo<@U+d)6S{yLC4>j&r$57)$41pzULF_BrTVpCzWdN1?W$IVC*YN|Gw_yGZX%e zUh{QFFuHB7dxM?N^_qRF8@}B)(DnFo(98c#so&vwDym0+KVLBK>td~0$Xz?Lucz%I6UnR!+E5jg7WW!SZ^JwmyZnhEpkmXbLybPd&BLe{tJt1(g{9b=@Pg#0?xVK)xDBvXj;_|~>X(~Us;M8OLI5?>@JMOC3nq1*RU;nKX^}OEk z`N$N`FSm%z69WQm*jNTv@}oN>i4hmfCOPLrLHQi_1o?EXb#h>X zzRomHZ=F5%TwZg0al2yK*5@|id|1y1p~kwjb75JF5#F-;n`QbwLH<~}^Wwe}OU>~U za{I~S<*@JUHcRz2EG^f^M?aUZmMI9{a5ghH&v8g*ZU({*v7*RIhK>HaA#RG6!dKnB z`wQ-^xq<2*&<$d1rK8YShmt3q*(Vo+KimJtC<^-`VqWUlPuF}(JYl5vM) zxv^#}A+=%F(`{>1qjh!qym!tEOJ5dy8u>GQ^LpI_GcCxL^DEDzh6!iS2E|s>$jQ_> zIF4|J*Y)?VXAbBrkk^W712HDji9dR%i5?s2yk2+vp#PBT zsG&`f>fi_|YTWSp3eW_)Spq{$dI~AXRWsGHzm4Nx`aq1X(*?%RH;~jJP zX=$w@-wWtrp}zaEC#`uamm!frz+$deffVFY=YDAg#w=BFUwWsU!5RqaEu1j*5@=ke z&>hBL!Zbc=V6r#55SYI>p7p|81$u#rOO{K=qr!wfJE{leB zn;8yukQj)l(V25>(Sv0y>d^|yv^-7?S^2jFYQf5&t~7?{)bZNKQOhj6R#^a0MXRmN z&ga|BUIqWqK@qcl(t1BjFsWy3XayuWGH?6a+4p~`*lj;I4Dv>{Als~h<{;|Z5FG-X z{JWa8mSDN98~e;pB@P=2uVq%ntGuGIDHM{DKm)n(So@$%_EsV3NH#<#D^dOsC0VT~ z#ZQAj>S$tuQ>LF^l7NtUOL3-ya`Z6OvGvxxE8qp-af9868bpvSZ$(rR1Jt5bsF(e_ zB~JUfy_{RVbw*_j-THndP=TMJF(Q8OXjLl|>HJmd;7oBJ83fgt%7n_F(u|U<32acq zJ{Q;mf)^NYJ3WQAD3ucNfnhI^qJq_w7`G}=QW5YZsgP8r{)5qjqFoZUAO$RTw61w56{mUE9pA{imcoZ7T&2+-+JS`_dzfIPltnA)6@2L{1VG+YNH z<|>{jLe#>_v{7O2mBaT7NVSFh%2Z_jE(jjb+KE-;r63><#8g>wkMxgzE@%T?qJ=_f z14S8@Z8@KcTzGPYtkNBg_$#FWeyiqqG(ygmODc0$b_aq12(A3W{(=Te zRVjb&0CpQ%1a5V0!`mP-VlRo(18E)78-xsEZok2!4O<;W;-uF^|DMA0*r$u=ybdOB z5ByE0H+e{7X>p{Av#aCLz(zdYkd5i&mgM>coMmATBU87Vm?0#>Dbo z2H>X_7XHnjTMY^T?)8$w6l^Qu>PY5D_p1zFF2%7*3xN%yH&cZ?J1WfPkumHw5}PxS zM7RwCq}263s=32TR8MmjG?TBKAp$#!r4nilic(#(4Wl{2Sz^OuxYWeAj_ zN!6kasBz0;ZT405w(<82F^4F%#AIC4Ek*Zr!n|LkEl^iF8@z9F$JJ~%#IE99S`b21 z^XFYyLU|j#^SqGAI$V{%m}d1;pT}r}&nvB#5TBkHcPYTq>FW)#k*gO~f^GU5G}BVg zoFd6erf$lDAcO88iDKd1$%8;n9Ab*2WG(fnm6f7s51ibGL5+#Z?t2UFwGTSR@wAs1 z?M^u~x9r%SO?WDzv~a1yQDE562iV3(AsKk?y4iX>%}?e*i`pY;R#J{b4Ifm%jLca2 
zd|@(^4S&%DI6oaKMXWhvxGk|P@a10$;SL^xmTA!5P#tNN$-J&UOwiY`mL{(i2tJ9) zv_UWITUJh|n%2R2+xJF)iCnq8DYAMgTb-AVG{x0Os;z0C8_ivlS)BG1H_w?z6=^Lk zwHjVzb&z;s;f5HjPpE}pNF~^IH}=1Lv;RDn@@LGiF)2U9I;P-`7LIyiUcR>8l>L*1 zJA2|=;iYS@*?tok5Q*IgXfYHoMYLmsOwqQOP<&}>=oYtoT`Wzk@;D?zdt&o4^*6&J zhmD`WrQJEWlG~rQtb&DJ*X-<@yD8%!z9M{T;-g)>>)`(TYcA%)z@mBmTL;(9My+y4 zE1%Y?Jj_x{9LKGfq@(H*E0Z&a-3d1I=C!XgfQ8qZ9D@{!gb>>;mUV?U{X%XQ_aH+{ zv7$}<=Q?M0fnWbSSJL1=Rn_}9OQjE_&(&P}58YY~#L}@h%BNa2F~I%p4E2(C8~)d} zJJQF4A$C|?p%pApWfQ>P>EO*0geKx8tvPm??@H9Y!M{}J0amt1W_gCA;~isI@8H^X{UvTp4%tM+1EIoZ9rB+UAMvGRe{z&|N;PHU%#p%Mb-kJ!G7Eakshj& zH56L;xh0^#J2Xl9c_#m(jGPYE88!6h;8h0iR!uYxEQ|a+{F|CRt6CpRgTY?kg{kgp2LWtj)-|bcw>F|$u`yj&h+|g@{t=FiB zX;m01Z#~1uq4&n4e;j_ev9ph8nmuRRWbZ~yUk6CM8%zg+J?zCQf91{NTbSEH;^gSL z%LOWib#}DXQA>j}=8ie8(&uIkKW8wXw5!bDs*io|p;+&H!OmJLTkWN5#e*+`;Tr!h zN{zaqfV|8w^ZeN?F{c|alCqql+}D9p$9V51(Y3d&RcCn~O@%)P)ydbsV0YMyvxu?L zxMF>3y;EHwR~~~o?O!Bv?|kX^Bkny(&LugFJ){3=nVU`-&W*!^nY$5nes9|u@V57I zFaNs--d?R_*S`QsTs1oaQi3|Z?YTz&@4L9Cv&L64fR7Apcd&+C%z2i?S5C&qU+2#^ zQC&t9ySed**0Zu3u5yb0?KGPGocXFU{u$7KuTGlZm-Q6F8^qr`Wr*CzKe|dKS4`4>lJVEgeP8%@8}LXndU`m0QznnFgK+Eg1WfGA{zHd9I**-V3i` zw|5=h_PuTUYu3$!?um0hu;%4*$obc_ z``Q}4&*%Y3{Dyy;C?B+gi-MEyvqP8q&|A#(=3Ri>f=f^TJa0q@#!hF}=)E4={d2>K z|Ids6$_ndu7UF2Bzqu{Z&Ry@$Qv`p?XPvp{V7zC=%M1TaoX1M{)cdymXMJ9aeb%eg z((a9B-w4`p@#*y^EcPm!7O0Y{U*pjJxieAG)<;|4D*&qL&TA3)p~_ zxM`{%v#=G*b9JdT{_3diQ0pHB+1{*My}0TTUoPZ0zn`tX>f2V` zk{?}f797?KHYP2W&RgY4e)XlFpqGz-g!=x@KP%BY4E)R9X|-7jy|;ELlTc0aZZASl z8R%E+QYYM#dc*7_K3ND=KV@5(op03?T(=hV_IzXTAtSWoH;da#X=~xWg$HKUcIXbZ zy|35qTradOK^W^-LVj}*ygJwJ&#!w>9r_>VxPHAGo(_8c`{wtfhul4;-S02vr;X2- z-J$P_hN554SMRY0Z$(CBX2<6RIiKeT)!*luDi4DgN7btV{wqJ0z8mg*&&>Rubswa; zMLlhLGyBaq0bT38KY*7X)6loaw4b4tYjNwj>$jfU%Ndo18tw5O*ZLlUtgfv8ROZ_D zQ-wZf<-7e`-p^a!(;((UwS|Q}dMf9P69~pn=#OzqAB(6D>5CS4NT+J2w7?DAX-3ni zt`OeFjn^~o+sL=wGm*=V+wyBCiC#})E&sLGP8NxuwbK2tx;HbkZ$Oq0opifnS@D^N z-Bd$wHAr5h3c((>J*2BPen())_B<^gE8FW5rWWr8Ca74m-uKm`X0Gk>sLJjP*ZB;A zO6d>*vfhFIh@6-Y;x!UCnA6ulHO@AE)RLyB1rMJOicV{$esGmq-y?VKb4In#>Do?z z=iS~;b1jA&UGa8*`K=vp1IrR0cRJ9X;|?;H9BePZdG0=c3kv^cgk9iX+e)uRUCbeGu|f zHUTi>7Pwfw-Un!r8ef#p?3|}f$o1MqR#4GrzX#u~(+fpH_Jd8kJ!Qy6u;?GTZ3n2_ zyu4^L`hwa=>d5i z+XV3-2!?=k3Zp&t`^eBMxBd5up4~iJj3UdiYWT9#b%kvZy65_h?rz$qk2e^k^$k2& zuuYCbZUt|6myOTsg|SIqkcJ8p<{b^D+DP|;l_WUwee6La+npDQac4T(AANe1nrP3mZ`w@z6h!-c@>nIVr}uqQskhNksu?#`G%E#Om~7kn z%>C1*rH6x%c3M~w1%jKi?5J0%m=P;%I;GYJ6A#~N@3#oOQh*FaK^v#!6V@L5>cMZ2!E6^G4%`oix`c70UG*s{01Qw^Tmv`-F zuH*B23icclooC=y(W@>ETi1>*5RSMdPJPs8Tp9wgO(XK1spsLkW*yyd%cuA%)*r`^NW zoU&HNsh~C2QKu7^eE$~D*v~6_f9+OP6Vx^8N_(jJyja4U3KVhVFyxUF@!&dmsnhIn z>Ag2Q`6BQviAHja5l!~-&VcW3F=|$^5!sqQlUssak>Ib?q4!~eev(0Iw;|ZSP;?3c zZu%Pt0SKSQcJ-yxRRUlpdu^vIy%Z^_qS6agnhtm7ZOOh6tHt2Ik@v@CQon&jq}xTk z#`~ug&oxKKo25Q#j_ZaeH;iFw)3DJUI3HN!PTBE}%BWARIID;{*x^DCE$Y~`T_3?^@t8YUdd5WL zLnrtsfh0=<1zUg7M~^FkvWw$ih$&2p`rYVU)xQ%s@Oq;uwyG75f+%$`--*5hv_;z@ zH>2;$fHV;5*7+&DzZy(1b|0ts5$4l>BEh+5yEjmtYd4gaRsFKMlE<%_h1_`QFJHJ> zc)*IQM=@!&AXc!94`t^rqCyWm5hGt+fJO2j-J^n7O>KrkS%1osi!WgcZ=~+r(IRvm z^Vt|(hq8DftFmdcR+}S8zOlk+QQQwTl7y@I_Jl4W&~&|rsJHoid@rLxHEWw?W3l(S zbf6dcjA<@A5v-wNw*l?Q+z4?PY&D?H+4;%}kdX;9x&@UuMUtfh0q{LwucWE8h6wje z_AS#hGbe{0B-274qn*e8pQHZCux0#goB8W4r52`J%ViFYAN?A*d^x{1CZ7m`>uzRSwIq3KXH$eCF%5D0Z1um4i@tE;i91aj=+Cc*rd zKLwhUSE?NCato3Ek?u|JRdA})X^9rKi7sC18Vl1jZEx@d9g|jU#WqSZN}thQcF=qt zG$rxJ(CTYY(y*aL+2q4oqKg~}0#|l70tY-+!Ot7;-FYqM*Wz}?%8lKx`mPF=1Zq0q zsYw0QsyaTJi5@vO$|W%a$zyTilbqz(pi0XPH%~J#yliLEA3xG`XYHXVhg!)LeT!^N!wZPq38fgGyU=%3(Cx=>G26K#M^$t{R=GnZ|@?viZSdZ27c(fpzNhqb!f#xU`BM^&k`#1_yZM 
zs0a~5>HZXD^dbCc$c_~1f90#8s7nO02mFAa*rOOQ3guW@$6k0W53ORA9>!7{_PJ3K zKwvPovLtxxRC>2v5XQw#?0mkni?(|`7BfJ9iArm;K_eEY(|iQNPpwi8MEUxt)$Y5@ zR~jGw77|(LBRHq%t~t^o_QLHqtwPuq1m62|FmoEzTXV$7`KR>}m8)+;S*I;Hrxa6< zQw3X82RQxZnb<=zm50HZ>&r!#{oTcTpv>9U;ki2FTH)Ao;_g1H4bV<*Vz5RfiuWspl_=h$v!Sl#)?T-LWGdxw3gmkb!rOCYh`Sx<)f~W!tvnt zg?&3JD%43znN#eYycLIhrR2vO+JLEGI4fRtNRY1%ob)~C2e9Hu_}|?%K~S)RMbPcK zvNCocj_$Te^*^o1ogCF??Zj-03H5Zpfi%}1D-NoUdy^mm^m<)7=2iUK9Qq(`tZbbP z1y0q9SJmoRs)V)sWv$1Wq#_a5y_JUvsnjUh`sx@-LK8gIsV(pYsQG&g>(>e01t{|Y z$aEz;GN_34D?V&o)GPH_I3EZg4*si{AF?dqY`6RIG~&(a#9nLHr*JeY{FRs5_uOM) zXEKNzpdI=0-pk6fPuCdzHi{H#Q$}DgsW3%Oie?PeZGaxJV#75|Q<6VxOJ?H4NPaoT z7Y~W6*eQIe%~vvyauD_K*S2xVDG9jn$vVR*>U^mQ8-_vYNlKkkFbBsGid#C=QOblY zaD!&;ZlY7@&3_)757;s$kkuquy5)La59!;Fb4xL}ogi9C`irlJLctZJoGi(QFsNtH@^P7HKF zrhZfN@i=o%5pzFqJU7quLb5{!CiSq)xiw51aBPA7eEPK^Ri_AYnaDWWr>)PI%PhG9 z$FiSty1DT4C6YJGlhWNJ<*62s$})3OEl|CPLm*T`XQcY=pBegoRWq9kO!a(>38{q9 zhkR>$A3m4H-GYX5l|R&Svi!s?=+d%92a~%wc{w^8@IoGr@*-(j%;!s!6N#pie@j7@ z?20M!B2m7%1CyEF3ToOZR~p5g&$B-T0VctI5)rB6(3y*q80y2Ck+ZB8DW$0*g3ST1 zE2h7_hy!RtAZwp=U70LP8jV2ASjX@dxS@B0im147U1Hn0^8RKTq@H|Uiqz=_xp5&v zvted`nP3Rhx8S{JFzLgClEVl^dV@_fLJ*PCS}c*9#FvD{0g!Yc5p>!T7s%pG1{a7sh%lc8-U^5~108;e!`jF0PsB!DYS z($E2*q+#o5;~e|c9t+Olp#Xh7!+n)ij>zuS>v`(c6oEGwIfioqy~9Wvuy!dn&Lo~u z)1SyEzee1km$RvU(X3#c=s#1cT~biphvCPh_r5RHMkW9IlL}wPgq3f=3YJe~v8Jeu zQh`OBG998^fmV`uSV;+l><39E7j1P6SWkr?viVkLC>I5b_Wjkc67mHP`C##J?`umQZ9yFqL1;;e2d+W-L-Jj)f3%;+EPeczEZJ zT9ALe7iG%h%i&%OsT~>tkNs23~$3*B$cpR_WHSkeI-)u{1j=WZy{_%otk#)9+hFX3okq+N~Yn}ZIo|` z9r6#S=WdmMjT#pcvB)eI@VJ}2}Aj>lQjj`QR8e7^&qaOWrH;bgR31Um!O^fhXS z+~A}&&^}6L+PqGpd?_%7D*JC5v%rEs)~YNuR=O#Aw#bi3Mo61Hf`ejAvUmDIwrN0@ zwcLGC*9RHA&Ys+Hdjo1Eg=j^tFe0+9(93LQyPi)e+;nN$W0)rl8j~lZP=x2`B-&zY zEFSk?Msw39F1KzTb!Xjt#wXM7mrrh04f+sO;In8~vc?*aV2IrwMSWB=FmpC3hP+|& z&FH0!ONA#xwx{?QH}T~QF<4D|DoyPYTJbzzFg1;59UP3#%@6ZxLPSd78{F97(gK*j z{*_tun-5OaIZ@g9LMMb%0C|ZUFDUW0ZM@45x=d z43XPK#fzu-7ndq|{L~M2v|#}cUf5p~Bmw9nB7O_47#l0(te@P%`%H_UaDl~#9 zm4rNtCMQ&oS>;M8(ZHBUzvS~ukBdsXVsuiVn~aEEi1ZPO0IqN^Zu@?*A3scQoCy4(uqVCv?C+IhA4;R_EV7+J)>HShkcPM zYjR)ulGLw1@17uJb;i}}WH9oy13NlSWS8+o>@y+35+Jn!p^34TbbY>2-3tbAl|2yV zQFhN3yE(WK-30Z+jg?3=HEMz|MqgyjuAFQF7B`I#q%}g7<^^vtI?nNFp-_k468Ao! 
zk)~A3@b70tvQN(2Mvzy$zw&jWxRH4%`kQfwtm^8USfth~+QcNs<*qqFmWvrUX22Ig zuyM%4Ub~~ccBdQ@}UiGRaPot4<*uPod`fd@(qD^fkmZ{wr_1xJy<)nxUn-KXB zw3yaBK+}R0!1r~Z*4q_}t`_`fujriO$sC*4CT8E3D6p)p+Ezw6!XHhpNQrX4N%0eLhSL6}~=F6twqPGdZ zLbQ-TB?JRPO_VVcgv#j&jzjT3llZxSKgS;aY0Uu!Df4;rm2IUvDc@^yC-DA%BreE- ze`hPV9^DP@z*Vsdu`C8>0xA$e?BP_L`z?dR0CoM#J=E9|-M2xg%8L$FL09WOmqgo% zxl(qW)MH~#Ytu^Ax)4viTRFx3j;Pa@9JWFXrfZ79*rKUYng$R_Uorz#@xKl274O6= z%M%ZaC82M-!BFmMYQiSyzPial92v$6Ta}x(-xSu_-nr^}!|n=fwniYue?ss&*gq zutv5&_e-gwB$IYz)PHY7V?QDH>QsHybiX_pY!a02LV#00A8ZAPnQKvH0I(<2Eh_4z zLhyg%#TWGp2#s{D9+W`jdkWreO*Dg`KF9;R#^xhV8;3XFQ?TEq)rIsSC$=pL2?^G5 z=GQ>kAYp8APl7a)DU(YqlNgu<->*yu0?K1Phw1$^R~2X21x*m^c6VC{LZJ)H>l#AJ zGa+4CqXEDja$ThL^@1$DonZ7%LlGg3u3iPANDEgjS8 z19O=^|CBi_TF+n8S^W!#wvmVSMxgIZj&9$dRma-AcHqMe@|9>t?XZ`KD{tX^wU$Xj zRpFvX!4q%#dzdtg#1x^>7nr|oS^Pr!07FG!E8m`{6qrNQVG3h98>0!jAtA`9#4UYk zV2g8V=ah$mEg5g za)WN!4kA-q4EU*7#^CfwuO&h+%%`$B!O(37zL*&~WJ$2H1rM#it2F21nMbz~A)p3X zw2l<79`S(96}Q7^6ub;#ZdDVB<|9EaboCO`BB3D$3^bpusdz3tQ{};l8QoKKNHN1G zy-(oUQm>0_%`+7MaBZcEu#?J_4@=O*+q|hY#;Uo-#E@uBmq~^)p^ebg(xyB(@X4t8 zV6|yU(VE1){}f!Po(ejU6@~0&R##-LzvD<=aiRTCMFIxDmDaz5&aLP5k=9MZFSF39tM$TFs&!Sn#?SHZNt!4vcQvt@_&z0i_X9q7Eqv4JL)< zIrf^M0p(g^GQE*ujK*BBnS*K5@t&Pc?M|gxdAKGzWIw&bUO9UWJjbFkOfm zYVP{h0z#NP~)ojIJ(iZ*8Ki$+y{&|f8aVbT?I5R8vmxImP zUrDGb@mW2c^M$I@;1i26L^&Wkqy}Be-)At_1O?yN`9KBNCkw^xbd`ERsa159)5}0U zGR1y~E8n=?!~eW{Xy%-0kPGq5g1!k8)G1ia;Nw-KZ-_PXoq-%GZ@|Cfn1#u0dBiTQ z`*w{C%(ykVMTNxV5qHk?xD+P^kFFK>po8J3ahHdrSnVTx!k4H^RAOem=V9e15cNz5 zZNPN;!Xx2>p_#oVbP8$kvMhQ}yI`6eBI8b^?_!2<_kmrRKK7XsgKr+x34P*}^)RM<* za)?P*fLuPtmCHOahEJv{~7j)p8z0Z2)5 zwTy=dDL0twRt4R-PN~?$%>MCSy5I&HnuxhlxfL_0%Mkh+dl3JIwXUv$lysx!bhGkB z-Y=z`OL|Vc_^1eEV+p~ibq;+7uP`{aO2-XQkd2FH{6}_Mq!ID6tqm?PQyhBa#X=VX zF=d+A#vI>15bP=pDYy@>Ukw3|GPXE?{hVxUBQlwS!*qY>OCtsi@ME+!H37aVgZm2E zqYiqo7xMzCkHru@^27ote1N&UYc^3*4IZDYV0j}R3*k%WiU zkpxU(@e{%E(TooADRht|ohlNMwJ!rlBu^A=9jNs;RG;NMN=8hg8bED))GP`~ZSX{x zI!YEEr9m%ex3~8%xTTPOR~%B^k~CGs$ffij_9A`)FrX1okx@`o7eFhu`i1?H=`u&s zO7*>cX&*yl#GT`QzgS17s9jr7UsRe!o-_TN(ziRaDJC zxlP1b%P;(TWZF{qa&g`Cdo0kU-4hnh2#PUs#e>R2CxaW$UpNy9nHZNj4AF$|GgFu? zSj)dWAg&uG4Vb1*UKhg;3D9`k#y);d*wscNd9TZY4F~wbT$dEXyj##KrNfD!o3RN{ zB|$0dr>-<(q$WlV!F@NG0F!{+yxL*fGqj93x3Q(VL}Al)xo zLiR8{bytC!T$Z(FK#=8lu89Ne2FAkV%F8e{Vr9mSjKOw-zYswcb@PSjqU>T8s(Dy2 zYe^pw2r?JwLBwA_qi!;wDvY=G!0Wah$qSXq3~U~UG>uK8?IqFb_XTFFuQ%QK6>mUb%kg{{=S z#xAcHGq;11t1w{SK0FT^=3pkd+D5Q|7am*mH;1VhtYQc3Nq{7cM1GV`xO(=kvJI^> zJoGwk`GB(1WK60|&0^k+09;poWmB!|=|9mJLl0X9gha}xz&=*Qq{(@|M}uQr^h)KL z$J7npVvez3E~GN**gGZjS&2JUrY|;m==*xT%=o=tV#dPF(jkZzH{@c&MCpF65-gA& zuzsv+t6xp1AqcRDE$YxlCQsCA!di_a?c_8+NBZ9-N)`;J<>ySSC?5^vNQ;=odn}u5 zK8kq-^zxS3T1!KEjyMd$=($WUgU0g~6@C9*o${U`d<^G^Mn(+5mCiCa2)!C`XkSUxpXeH=1uHHN23+EjHV9ngG?dE#~2u0lb2O1hm<@? 
z9tps{6Oq^(cY>w+myXKn(y)S)bFmzeC|rfhWF~m4WYvo)w1QypQWxxuW?vJYg>q5x zokS4X%AKOEBZ60={Nzr;y^=k57z2PHfey-Jdq9mj%fWwR0#SKwH%AUm3#!6{zcbcJ zFAb_XTPx{ZC33Ebv$I&<;Zt05IZ+n}H)@%ZtTc)igh|qnziQ&*_dJ#YL7};33M9R{ z}zmaPQR1ErH@YMuiZkm$)xN;lVuf~SMmhGo5=eRx~V_B3Zul+U&92^!1XIx zQ^z!c*j4Cwvy&wm_1MkJ@9R%gt3#Ypn;!%H?pf;bLy4BH=0pHI9|#lf8T2N1JGNLo z`y{6=rzT2<)$5RYm!@rhn@7f^-)WGL<2s7FSOeD!}->&XmAo(qFXl=D|j0m=M7x;0f8g2khbevvMXqrnw7R9 zn^X{N`#K(W7Jv`@;DqOp^Qgn(}qdidaWoNgeHkkvo4Itm`X98ES69DOEcS>hE8_bd&L7w(AvMZ z#=Ng899{j&;3I01@|T}vxSPNL-VFoHTsF;0Dai?koc$i=nppKpmqN)EK-ekQu@^7i z=8>0d6DEByMUz;BB7DzUeo(EVBkIz^EPr_*&1L`>n3%x~H2L`#4vL&sx);IxO!{Qr4@9=7AqL*=$7?=Y<~WN!WoQ(2r87!R zn&}56Ds#KdSfT)8_hiT!6=-Y(R3wuHbW`%WFrBBh+Jf>Cwt}9~<5w54(0~{r*LlB7E)VphU`AJ1R@q1N@lvzB|Q6jF1sfOmnZidBRpOn`&X;gwo zx}XwMNPZp+I1(8#H1l}oGYL0t(LZwl!=5%%VKc{FGuNj3zee+UI2T&0W?+Thg$~Ho z+cLpBQ@W^Iz`wPlF)^svM7B&eK6WqGE=K1Dc$UiT`gy^?-y3{LvFND2ztN!RnWQP+ ziR#M^CI3zENH5@^C7Kv!1*?U?g_4AdRnf}V5D<)Ch;S7jz|d+;R+HtJZDimS+=;m| zkb}P#WO7c!_J4bNvQ1Bs%M@c{3|JM{B(u=^IN?NHv8oTXk?5hXcdrZ7Ec+2&end7z zgYC&gE0O)O{f9@Cis~m+GzY2L*lsIshd%$!hg}#&MDLDMM_&=^8Zu2)kz6(2qL7Z; zIIn}AvxP}4J*FF8+C4-LD9FhpcV`I|m@g4cVrp}=x^Tobnbjr|${^~$6e!q3rKxZp zDXSI~{}aIaKL}6#?@_S-&qm%xK>TB!I5^FiXg4Bxct9`hRUo>jE|9k1;L)Cx#{QCENR+fI!JJ?Y) zaQxOt<}0Zg_wcn_63Z&zs887uIpcpFdFk5%q2Md`-n+6=qKUqq;c; z$48ydA89?e99Z>-pytAP$*lO&RF93wlD^aKZ%y-hp+zsvi6$SrLAT3}2x6u!Sk6SU za^MXrjYtu#7=McOIm3{rTj@L zOpjo-KQlfzZ8$PZhSLA~^R0V(ih0sK1+Q&UuBC%}CyM=m9Z)ZMHnmT#CK6yg&Uz*J z9kY~|ixGu>}VtEsv-`R&XjLjca zku_HO2c0L`E*!_n!sfs&1(lJb7MG#rGbcCvp5LlcY)wrk)Z#k7%E4I*;=QoH!{C@T z$(Urk&caE04E@S|)_nReYoS{4*GaII_n+$`!hMd|KT^L=oZlY&-#kTCR7^w`n~7X$ zh4A^qp{k?Ce7>X$B)?v({zbgMmPC||O_Y7U_98#M$He&fx^?%olIX`N*3Ja5*Q{`> zXU}BS5~>Wp^%mWKe#Nbn#npbk20hqnhQ^+Zt#}{3@hRpzq;wGwB0k(DeR|}@ne%~$ zt_Ty(+-oI0FzVmnWGFs)nd`(yj8&995bhsvPY1*A5Z2yMfVq5r)Vtqix%w7NI?uVPsYe*qj8slL z-#klhyelwoTOaMnwu1EbBzU&2r9V#WxVPI)wT8m#^Qm|F?&pxqBKGW{i5%f^%n?Dauk9uNQo&a6>iogjeA5l*jJE#Ah(a(JKO#j?+=!QT~I?kM!}BZ=X^hST#IO{?LGi`SZQZWlc-i(ocmO6+m{jp z%fQilrWZ@FAUeQLKx|pl+j(sAd+hf$5kMaFI(&Qx{qzJj9Zp$S*%ii>)%o>01=NYI zslY%neW-KaPTw7^0JMrZy_hF2gPhVl5xZSz+xLy@;>e#9 zY_QfZIBYSopQoE2Q_R;~X$h_bvW-%k^rt-KYKGzk2uV(dR-5pW>gEp3}$hh~_PXs_`|GscOaWz2{a2(f_pe3v9SLhK@9xUfJ1X7))i~#ZplXAXaoW+&@4tsMCcc2f zG%<*klEDI42i3vSnI|(@TtS+ov)MFZLcf`@2e~z#2wE7$IMfVhwlc_uVGn{`Py>XG z61jHN0!_964B4$_JWDrPAtxAvT>)n$C}Gt>gcUk?dMPmiz=3MRPuH4Okn~y#qz?(x z!Ff$bP7XHFN`{L+F_&5aMrK5y`~VIx7fHz9ixOJAwIF1qyQ8cQ>Io@QTYTjcr@*b} zkiRxF2NKpt(jYJcld|8v65w|S2jevw=SB_qeG!-D}Cklm(d3BzB zQNeFV;RR<~G)#kxrrgmA+eiX{qIZRp2|#oGy?ihgW}DyOsass4`rm-UT5zJMgZ^W1G{Jn~a^AuK z_>kn&QPfcEhOvPxr-6PL^UoNBfcXMbi8@vSc>DgA0)Q*np4Gz{D$?=LJ1a5lHnSO* zd4aUAdj@s8z--?j5-1?(A@!QM!^Eo~K#%?l9+Zihd|Gx93e^q+Fbf?eF`v6ze5VU0 zcnPcMr$CqQZbD{9BXR%u-^H`cX7UT1{2UuU=-IZ=4n| z1Ksud1@4iGx*wZ(NJN$tlW5XemMJ3|4UYf&l}>_b%pb&Jz<@rubPWKbSE>9_?g3`W zzdrUm@{0bFC+vqW`J12r8tV0HxC1SQbJc}ya1$d6{V&8cV&7<_Tt9xa zTvowN_VjzuyL6ciK|gl)tK=pee$4-CxEG@kPGY9q$Xp|x0}Tt}wkCTs>A6uc>U%BQ z!qHD@c&U;lH?ITwe|Z5SrY8)^)kq(lez#3R2tbf@TMSX5ErkkzQwy6oGZn{Bq~u$Z zTj=(KSRtiVoBZkb@;H!1z-pX?Wu3YVJjKR>#I+E~LfH|l&@lDDD2suisFd)BemOWl z#GXYv69S16x9FTbfu-tDxl5Dz1op(a$5n!glreK-S6CGaLtaagB+`!XfvZw9q4s4w zw2zSp2XbKHE!4@9%tosQN#d*FLWhp{!Z8J)0iZVmz{p_^fiLNm*k#e7DWLf{dghEJ z6CPKjH%Sb%;D*CEn51JJHvlFgSV6(3c-|zYKL$6+r}~qDf{&)g%7Bi>3cSKbv^q?4 zYj7M#Q&9LrSD6a+=@3eV(<5%6OGI5TnIOnDlFAX-4>@DjlU7f%MRJeYfXyUYFY(3yQ{6c|SK*2fUYEZ*}+cv|KQL4Gvs*Q6;^Y!0SGT7HkO+g2NpK zP29hVQkA{IHQ50w^ENC9mV;4#h8zIKu2K*;8Q%T)5g0>Ote(jiKK1X1;0ZaCvCAEf@ajCzlgbrIQ}mcBCSvF?&*;xL2pb`! 
zbrW|izSXl^1zH&ceg`&j!Ib=Y&u!x3)4Ph}wo=!oz$YCFXkR5JMYpEq7{M8XzP1j{ zg3U#*W{>~Z?u?A_wY5)Ay7OjRMbz6&5{Zt1QiyZF9v&l%L=b>19t#scuEfZ+Gi4lL z4ZMxr@9PFWPmTa&%ev(Yd?}Vvqeasy;xs~_4A&qZ3P2EX{VBh2DnS?xjgTnf-fj(N zN%mMpDp~6f7;M zqpdIvM=C0&E7+9{9C?JB(MofShArFmk`Ia{{hoTyWK_fHKq1~qLyKNVLKEC$h0dxM z!+C02(HzSw@AO-?z<>cOF_|9M`R^ zN~R?jL;@U=^Lo>a9_3V1{C5zhtILq|^Opu3gBQqLSB;E%9mCR_p2b^Bk25Sf5TZsh z(jo$Gjk{0>r!~^B9}QIRVy^q42W4;&0}z;=ifLYn)l)UZ8bAb28)NLEUIJUk%y#9@ z^;36~chZi5kU_U6>oUk(y;woz%gkm_>Y|ZW=z_*_vF2C>aS-Dl#HqGiLWA*ggJ`*zS`#=k4@eR z&RhPZ)3XoRW@F|GTF*;*N)3VrfS~ap^uyxSjq zOG(7g(R)k$!{>;dSu*9Sq)^SUPQChe(1{fFjR-$}SM6_FCtL(CmaBqm0T-1HX!~|B z@ATv0>dBvBhN2sGj0>mxIQ1ZqY}`dP^gX0B9mp>!_tIAdfCgwGtaq&zeR(|jONfH2 z8ErfLB1b=#cG;Vf$Y_vaE=pZ0m9!k&>HH!D$@o0*eklFJ_V1ClWQ!aCm*Ohp>v1wL z6!eB@b;|Y{a?0NbF`$KhaLz9CO0LSO;e#^^kgiT^dPb%dZ4kqq)0E{XhM-T5!;tFK zGWF#a*8xfyKG zFs!G0N);0g)0C)@jDUU{_V+r3Z~X`rrGk&^leHn=N%nG}ws#{adU(rzWGw2-wNqa@ zrX+_~i-0eNXHb(XO|B5?t&xz!kGg8EBeXbYYYz1R@m0{F6GCB{B4{yftdi4zZ$Nw4 zg|x>pU3h`Dm9OGaE_xF?Bof0fp#~LJ$&phw2d~nGlOHabuZ%Qy*|*xwE=(v;&r=Qh z(ZZrixsj~?@tEVw?$*eLtM~;5&K1E8+5nfz=Yj^|yOi!oWGWv(_8!Dqghr*iVfzThB|?`H>}`p2+m|HW zD5LsIKeWPu!68=<0!>Fx#l~o&2BgfJ1KCf}e+F@I?x`vUvY!z}q+7FVUdJqmLv!iN zsDs8WcbEQUmxT~LD$nq`2E4+9pEjjc@4a=#8Y^xTyP3ixsCU5$2}{d-{xA=WU4P@Z z5wlm+u%k$-guv1jVI@%3i{FcOOH~KOb3F@AuC)@a;8owLJ8OuxB{AzvB~d3CiaqQy z__A_v9u&2r$Op4RAkdr5p&RJR;E*rk>8o*tn?Fk3y!@J%Mlkg)t}B>T4aDD1;o#f!xiMGynUDMWWh7>LxnZ{P9 zjNkZ#Ml@aKL(;)e1$x8Xa)%d9Pp9aoYW_f|9u;s`(MSp=OfoQ*NTrf;#vSi7jZqA# zC{)sQVzp{k*Pkjvs)bYyV$GXd)Y&9M7Y>CgOKOMN5KJWRV8b~0fFK!{yZ2AOLpTZ> zip+IZLi2s;{)&*h969B1&zZsjs0vX|UcFh6$2KlhgQpeWtahXptbdISLq0y{b(iV) z2$d-qGi42>^*8CiTQ3A-^*Kre9FlrlE|FTtFy!vo?R#i9o^}(S6kk;&-+=-hwpM=d zqvR`TC_t+6F*2(dW1k$8*xrT`_qQy3fw)|@W({z)PhQf~w4~vhJ0+B>klB-;MrViN zNT9J(f1nA&-T!FSyTEoe3kiOX&8CzSKRE*0O!^2Gj(?8$?4i~Olo>F6C#r7MwQUW2u{#SD6pn<|T-!r>YB2`Dub zqst|Umi|y=79R03=qCJApL}gao7c3Dd9lE9fdxZ<=?V&6+=u9`U}wzpGpkTXT}*}O z+3r3pNMho5333*vlzsIn6M?Os87<~)e18l@3lB9OB(L$1U<#bS6q~Qipr@k~GJztB zsF$U{6No1_LG=M*o+3zn;*CdI1rSAkn>uG4qtE0skeB)Fc29%-XmbIGr)oIjYH99j zSH(!4YUdWp-;6|dM6ybWLIHAufkzCD1KTe2l!mVZ_8H4;XAFZJz-H(})8i`Ymid#| z+t3_y(R6M@(bJ$^3zjt{tD4LP#sVoVH<`*l@EU6F9!{>2P8O^^6J%W4BL&S516id- z4pNYA$mPjtGai!{@eR=#5(cWG0}RD>2ZI$pSg<-O=KU$~OY0lrAA+$mEfn!G_VJ=% z(hy@iP1X^k&Zv?=9(|ZXE$JZRxN#ZTz=Fwo7%Dai$)ahj-O3!bctxGY-W`xr;WJnTtU_jO)U3uU>AnEE)hu|fK79yLi* zrsHbzT&=sO_;qpU`A;hz%QAfhAqgAJq|XeYmv&=zxhF1{iPL+kXv1KR@2a}zG-iLJ zAP@-j#i5kBbQ#!N0NQ=3b;iRi^1tVU8FQ<>T5tkYh=fN?t?F%E65Twva|#xS|5#>= z>i5x-u(i1Umz7M4tuY(E(eK7@(-TcS56A=^8l9w?7Gcoi6Nb__F2@dX{CEwxYt0=` z%k~GOE1geXxDoR+mg&@TqWdjK%#OQrHKY&Jq!;kLWtdlIMi2t7?sF&+lS7bE<|O>0 zAOM3}#-;u${e$Ojkj*d9w7r~bNHhKf{6FjjPhR-NnoZk(qSxvVUuuV^x}M#gJYud* zVyaf^aaA;7>zw^&EO`-t9}82&M9b}(8}M87!MN1^6(cN|T5wh6X#of&8(5cbe3_#R zhv)UELOJ}{NMVI~_>ozumv z9)D({F;!W!>gJXYEp9${z;F!m0s_OW(~{H7m%|WVD-ltHk{DnhBI- zA`;P4ADx(iU4LD5_opm;PZbU%`)HdJ< zVlzU6hnXVv27Rspgm3+R?^yKc6*xpPJY*r48IJMh?F{!N9wmom5+7 zhDwEnhbv_iU4<)lZ5=tN<&u@mQ#JP8@6YOF*o&C(jAv?hGir z*syW}*3ooIM7c+(bHa>SxP-rAWHpw=|c&lfAoKkNgIzJ2JN%1xNII45^n>Jk2byDI3)nT9$AZ&t8s6 zVXn@~jOX_Kq>0HVbZjF%^KhUQW64%w-VB%rQ|ic6rB|x5-O3@{ z)jpE|7$SNeHv89t#g;kz^N>xtBT;IkVaJfFa1%S6!L(WrQ=OKxS&Rr&dM{A7$@qW~ z+UM(075fII?5*9+n`0;y^_NByFbx3;aXF2Nw@)?$cQ$sO}BzHUGeO?J<8dLVyE?p znmJ;3cR$J)nRV1J*2>~eKKx}o zlZuB|sK&ox_RTkmJat{hAx)sdkl)-)Dstgvwy8{(1O-%kNk(J6qRfOrrxuFl%BWG0 zaGg%m6bvgWf){bn$fABo18(D=+F)ax?gl8O_+}fWtsY4+Dw+q_Xs+lrOv|N561?e0 z==Nz-xl!F8q4F$!fU+sO{AWbKqo_r@x)E9pac#f1{Ip?gz10*A|C$rDGf%Ou^B{7? 
[base85-encoded binary image data omitted]

literal 0
HcmV?d00001

diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/unlimitedDown.png b/tribler-mod/Tribler/Main/vwxGUI/images/unlimitedDown.png
new file mode 100644
index 0000000000000000000000000000000000000000..f442eab95d722e7d37e3cec71737fcd6cc9bfccc
GIT binary patch
literal 688
[base85-encoded binary image data omitted]

literal 0
HcmV?d00001

diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/zeroDown.png b/tribler-mod/Tribler/Main/vwxGUI/images/zeroDown.png
new file mode 100644
index 0000000000000000000000000000000000000000..1b0bb7167599c7f6725be554fc3a0267095f6779
GIT binary patch
literal 542
zI>nIhkO7Zt{HlB11#D?-Y-K752@Pyk1_}b!%+CCmz6Hh2;+nRg%;5^tCsl8`ee)Nu zS8;P;cu=>cf!VEQy;Q*&GtmY?!5y+P6F%C%o%w#w#M7skRR3SMIBG6gIAv)?K6`F= z@b&oXQ9FDyr0uS8*Tj`{x1~8P{oz*Lx6j{l@}s>%@9uw>f4ElY-uJCh6;~qU_+|IJ z4?ZUm>DM;5^5WOqD}ODLj`Z_7;8x)hDrC1>qKLCH&^_Y8iEb;-neUuUuC5PcF#Ax! Z^nEc)I$ztaD0e0s!_feKi08 literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Main/vwxGUI/images/zeroUp.png b/tribler-mod/Tribler/Main/vwxGUI/images/zeroUp.png new file mode 100644 index 0000000000000000000000000000000000000000..1b0bb7167599c7f6725be554fc3a0267095f6779 GIT binary patch literal 542 zcmV+(0^$9MP)?OMyizT=g$*vJkGylMuZ-$?l5n5{ixI8ksK2uc6S$d~b_E8hdISW8& zt>K`kxH`)5bpMCk5C?;Yx9ZBwT2TpoYMC4q_CM}!kcJ?Wl066eEsYTH>G>4yUKS)L zdpT||kj;QhstLl@97m3pVyUWSjM4cqQ=MlE$Kf>LB_@1|kI!Sy_ zBWblD2;$|algN+UoQ}+ia6HvN=LwK#Z2@LeL#ordUj&jK1iMbz$1^;$Hs5qfw7=Y)X^|b3s6b=(n*G|u g+IUChE4l>m1;@RtQV;s)QUCw|07*qoM6N<$f>@2OYYHQjEnx?oJHr&dIz4a@dl*-CY>| zgW!U_%O?XxI14-?iy0WWg+Q3`(%rg0Ktc8rPhVH|n=B%{jC`u8d{IE5vz{)FAr_~% zPO;@Wr!Yk3#Au;@uMr~L7^JUuzEC@y=!qyJLNEoKY4M|N$kGRUpY z{aYx=b)A#nJAcoC_Q=~AD~`E$ZB8uS^PKhC5({yT^1J)w + + + 0,0 + 300,720 + + wxVERTICAL + + wxEXPAND|wxFIXED_MINSIZE + 0 + + + wxHORIZONTAL + + wxEXPAND|wxFIXED_MINSIZE + 0 + + + 3,3 + 10,21 + + + + wxFIXED_MINSIZE + 3 + + + images/triblerpanel_topcenter.png + 13,3 + 280,21 + # 0 + #ffffff + + wxHORIZONTAL + + wxTOP|wxEXPAND|wxFIXED_MINSIZE + 5 + + + + + 0,5 + 280,16 + # 0 + #ffffff + + + + + + + wxEXPAND|wxFIXED_MINSIZE + 0 + + + 275,3 + 10,21 + + + + + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + 0,21 + 300,170 + + wxHORIZONTAL + + wxFIXED_MINSIZE + 3 + + + 0,0 + 62,16 + #ffffff + # 0 + + + + 105,170 + wxEXPAND|wxFIXED_MINSIZE + 3 + + + + + + + wxEXPAND|wxFIXED_MINSIZE + 6 + + + 3,194 + 300,28 + # 0 + + wxVERTICAL + + 290,10 + wxEXPAND|wxFIXED_MINSIZE + 3 + + + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + wxHORIZONTAL + + wxLEFT|wxEXPAND|wxFIXED_MINSIZE + 10 + + + 10,10 + 75,18 + + + + wxLEFT|wxEXPAND|wxFIXED_MINSIZE + 10 + + + 88,13 + 75,18 + + + + wxLEFT|wxEXPAND|wxFIXED_MINSIZE + 10 + + + 95,10 + 75,18 + # 0 + + + + + + + + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + 0,219 + 300,348 + #ffffff + + wxVERTICAL + + wxTOP|wxLEFT|wxBOTTOM|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 3 + + + wxHORIZONTAL + + wxTOP|wxEXPAND|wxFIXED_MINSIZE + 4 + + + wxVERTICAL + + wxLEFT|wxEXPAND|wxFIXED_MINSIZE + 5 + + + wxHORIZONTAL + + wxEXPAND|wxFIXED_MINSIZE + 6 + + + + + 8,7 + 79,18 + # 0 + + + + wxEXPAND|wxFIXED_MINSIZE + 10 + + + + + 87,7 + 131,18 + # 0 + + + + + + wxLEFT|wxEXPAND|wxFIXED_MINSIZE + 5 + + + wxHORIZONTAL + + wxEXPAND|wxFIXED_MINSIZE + 6 + + + + + 8,25 + 79,18 + # 0 + + + + wxEXPAND|wxFIXED_MINSIZE + 10 + + + + 87,25 + 131,18 + # 0 + + + + + + wxLEFT|wxEXPAND|wxFIXED_MINSIZE + 5 + + + wxHORIZONTAL + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + + + 8,43 + 79,18 + # 0 + + + + wxRIGHT|wxFIXED_MINSIZE + 3 + + + 87,43 + 11,14 + #ffffff + + + + wxRIGHT|wxEXPAND + 4 + + + + 105,43 + 50,18 + # 0 + + + + wxRIGHT|wxFIXED_MINSIZE + 3 + + + 175,43 + 11,14 + #ffffff + + + + wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 4 + + + + 169,43 + 50,18 + # 0 + + + + wxLEFT|wxFIXED_MINSIZE + 2 + + + 248,43 + 11,12 + + + + + + + + wxEXPAND|wxFIXED_MINSIZE + 4 + + + wxHORIZONTAL + + 10,10 + wxTOP|wxLEFT|wxBOTTOM|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 3 + + + + + + + + wxTOP|wxEXPAND|wxFIXED_MINSIZE + 3 + + + 3,315 + 294,15 + #cbcbcb + + wxHORIZONTAL + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + + + 0,0 + 199,15 + #cbcbcb + # 0 + + + + + + + wxTOP|wxLEFT|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 6 + + + + + 6,88 + 265,73 + # 0 + + 8 + modern + + normal + 0 + Verdana + + + + + 
wxEXPAND|wxFIXED_MINSIZE + 10 + + + 0,161 + 20,15 + + wxHORIZONTAL + + wxLEFT|wxEXPAND|wxFIXED_MINSIZE + 10 + + + + + 10,0 + 204,14 + #cbcbcb + # 0 + + + + + + + wxTOP|wxLEFT|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 6 + + + 6,182 + 284,125 + + + + + wxEXPAND|wxFIXED_MINSIZE + 10 + + + 3,310 + 20,15 + + wxHORIZONTAL + + wxLEFT|wxEXPAND|wxFIXED_MINSIZE + 10 + + + + + 3,3 + 204,15 + #cbcbcb + # 0 + + + + + + + wxTOP|wxLEFT|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 6 + + + 3,325 + 284,125 + + + + + + + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + 3,275 + 300,5 + + + + + diff --git a/tribler-mod/Tribler/Main/vwxGUI/personsDetails.xrc b/tribler-mod/Tribler/Main/vwxGUI/personsDetails.xrc new file mode 100644 index 0000000..9b93756 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/personsDetails.xrc @@ -0,0 +1,424 @@ + + + + 0,0 + 300,620 + # 0 + #d8d8bf + + wxVERTICAL + + wxEXPAND|wxFIXED_MINSIZE + 0 + + + wxHORIZONTAL + + wxEXPAND|wxFIXED_MINSIZE + 0 + + + 0,0 + 10,21 + + + + wxFIXED_MINSIZE + 3 + + + images/triblerpanel_topcenter.png + 13,525 + 280,21 + # 0 + #ffffff + + wxHORIZONTAL + + wxTOP|wxEXPAND|wxFIXED_MINSIZE + 5 + + + + + 0,5 + 280,16 + # 0 + #ffffff + + + + + + + wxEXPAND|wxFIXED_MINSIZE + 0 + + + 288,0 + 10,21 + + + + + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + wxHORIZONTAL + + wxFIXED_MINSIZE + 3 + + + 3,24 + 62,16 + #ffffff + # 0 + + + + 18,3 + wxEXPAND|wxFIXED_MINSIZE + 3 + + + + + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + 0,21 + 20,86 + # 0 + + wxHORIZONTAL + + 10,10 + wxEXPAND|wxFIXED_MINSIZE + 3 + + + + wxTOP|wxBOTTOM|wxEXPAND|wxFIXED_MINSIZE + 3 + + + 10,3 + 80,80 + + + + + + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + 0,211 + 300,18 + # 0 + + wxHORIZONTAL + + wxLEFT|wxEXPAND|wxFIXED_MINSIZE + 10 + + + 10,0 + 75,18 + # 0 + + + + wxLEFT|wxEXPAND|wxFIXED_MINSIZE + 10 + + + 95,0 + 95,16 + # 0 + + + + + + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + 0,125 + 298,238 + #ffffff + + wxVERTICAL + + wxTOP|wxLEFT|wxBOTTOM|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 3 + + + wxHORIZONTAL + + wxTOP|wxEXPAND|wxFIXED_MINSIZE + 4 + + + wxVERTICAL + + wxLEFT|wxEXPAND|wxFIXED_MINSIZE + 5 + + + wxHORIZONTAL + + wxEXPAND|wxFIXED_MINSIZE + 6 + + + + + 8,7 + 110,18 + # 0 + + + + wxEXPAND|wxFIXED_MINSIZE + 10 + + + + + 118,7 + 111,18 + # 0 + + + + + + wxLEFT|wxEXPAND|wxFIXED_MINSIZE + 5 + + + wxHORIZONTAL + + wxEXPAND|wxFIXED_MINSIZE + 6 + + + + + 8,25 + 110,18 + # 0 + + + + wxEXPAND|wxFIXED_MINSIZE + 10 + + + + + 118,25 + 111,18 + # 0 + + + + + + wxLEFT|wxEXPAND|wxFIXED_MINSIZE + 5 + + + wxHORIZONTAL + + wxFIXED_MINSIZE + 3 + + + + + 8,43 + 110,18 + # 0 + + + + wxLEFT|wxBOTTOM|wxRIGHT|wxFIXED_MINSIZE + 3 + + + 121,43 + 14,14 + + + + wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 10 + + + + + 138,43 + 86,18 + # 0 + + + + + + wxLEFT|wxEXPAND|wxFIXED_MINSIZE + 5 + + + wxHORIZONTAL + + wxEXPAND|wxFIXED_MINSIZE + 6 + + + + + 8,61 + 110,18 + # 0 + + + + wxEXPAND|wxFIXED_MINSIZE + 10 + + + + + 118,61 + 171,18 + # 0 + + + + + + + + wxFIXED_MINSIZE + 4 + + + wxHORIZONTAL + + 10,10 + wxEXPAND|wxFIXED_MINSIZE + 3 + + + + wxLEFT|wxEXPAND|wxFIXED_MINSIZE + 3 + + + 238,3 + 55,55 + + + + + + + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + 0,61 + 20,15 + #cbcbcb + + wxHORIZONTAL + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + + + 0,0 + 241,15 + #cbcbcb + # 0 + + + + + + + wxTOP|wxLEFT|wxBOTTOM|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 6 + + + 6,103 + 284,85 + # 0 + + + + + wxEXPAND|wxFIXED_MINSIZE + 10 + + + 0,194 + 20,15 + + wxHORIZONTAL + + wxLEFT|wxEXPAND|wxFIXED_MINSIZE + 10 + + + + + 10,0 + 204,15 + #cbcbcb + # 0 + + + + + + + wxTOP|wxLEFT|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 6 + + + 6,215 + 284,160 
+ # 0 + + + + + + + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + 0,543 + 300,5 + + + + + diff --git a/tribler-mod/Tribler/Main/vwxGUI/personsItem.xrc b/tribler-mod/Tribler/Main/vwxGUI/personsItem.xrc new file mode 100644 index 0000000..a8b5e45 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/personsItem.xrc @@ -0,0 +1,61 @@ + + + + 0,0 + 82,155 + + wxVERTICAL + + wxEXPAND|wxALIGN_CENTER_VERTICAL|wxALIGN_CENTER_HORIZONTAL|wxFIXED_MINSIZE + 3 + + + images/1p_82x82.gif + + 0,0 + 82,82 + #000000 + + + + wxTOP|wxEXPAND|wxFIXED_MINSIZE + 3 + + + + + 0,85 + 82,28 + + 8 + modern + + normal + 0 + Verdana + + + + + wxTOP|wxEXPAND|wxFIXED_MINSIZE + 1 + + + + + 0,116 + 82,28 + #808080 + + 8 + modern + + normal + 0 + Verdana + + + + + + diff --git a/tribler-mod/Tribler/Main/vwxGUI/personsTab_advanced.xrc b/tribler-mod/Tribler/Main/vwxGUI/personsTab_advanced.xrc new file mode 100644 index 0000000..b683951 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/personsTab_advanced.xrc @@ -0,0 +1,144 @@ + + + + 0,0 + 300,350 + #ffffff + + wxVERTICAL + + wxTOP|wxLEFT|wxBOTTOM|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 3 + + + wxHORIZONTAL + + wxTOP|wxEXPAND|wxFIXED_MINSIZE + 4 + + + wxVERTICAL + + wxLEFT|wxEXPAND|wxFIXED_MINSIZE + 5 + + + wxHORIZONTAL + + wxEXPAND|wxFIXED_MINSIZE + 6 + + + + + 8,7 + 150,18 + + + + wxEXPAND|wxFIXED_MINSIZE + 10 + + + + + 158,7 + 60,18 + + + + + + wxLEFT|wxEXPAND|wxFIXED_MINSIZE + 5 + + + wxHORIZONTAL + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + + + 8,25 + 150,18 + + + + wxEXPAND|wxFIXED_MINSIZE + 4 + + + + 158,43 + 60,18 + + + + + + wxLEFT|wxEXPAND|wxFIXED_MINSIZE + 5 + + + wxHORIZONTAL + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + + + 8,43 + 150,18 + + + + wxEXPAND|wxFIXED_MINSIZE + 4 + + + + 158,43 + 60,18 + + + + + + + + wxEXPAND|wxFIXED_MINSIZE + 4 + + + wxHORIZONTAL + + 10,10 + wxTOP|wxLEFT|wxBOTTOM|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 3 + + + + wxLEFT|wxFIXED_MINSIZE + 3 + + + 240,3 + 55,55 + + + + + + + + 105,60 + wxTOP|wxLEFT|wxBOTTOM|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 3 + + + + + diff --git a/tribler-mod/Tribler/Main/vwxGUI/profileDetails.xrc b/tribler-mod/Tribler/Main/vwxGUI/profileDetails.xrc new file mode 100644 index 0000000..7e15135 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/profileDetails.xrc @@ -0,0 +1,116 @@ + + + + 0,0 + 300,495 + + wxVERTICAL + + wxEXPAND|wxFIXED_MINSIZE + 0 + + + wxHORIZONTAL + + wxEXPAND|wxFIXED_MINSIZE + 0 + + + 3,3 + 10,21 + + + + wxFIXED_MINSIZE + 3 + + + images/triblerpanel_topcenter.png + 13,3 + 280,21 + # 0 + #ffffff + + wxHORIZONTAL + + wxTOP|wxEXPAND|wxFIXED_MINSIZE + 5 + + + + + 0,5 + 280,15 + # 0 + #ffffff + + + + + + + wxEXPAND|wxFIXED_MINSIZE + 0 + + + 275,3 + 10,21 + + + + + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + 0,21 + 298,445 + #ffffff + + wxVERTICAL + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + wxHORIZONTAL + + 10,66 + wxEXPAND|wxFIXED_MINSIZE + 3 + + + + wxTOP|wxLEFT|wxBOTTOM|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 3 + + + 18,3 + 260,420 + + + + 10,340 + wxEXPAND|wxFIXED_MINSIZE + 3 + + + + + + + + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + -10,506 + 300,5 + + + + + diff --git a/tribler-mod/Tribler/Main/vwxGUI/profileDetails_Download.xrc b/tribler-mod/Tribler/Main/vwxGUI/profileDetails_Download.xrc new file mode 100644 index 0000000..c9da6a5 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/profileDetails_Download.xrc @@ -0,0 +1,402 @@ + + + + 0,0 + 300,650 + + wxVERTICAL + + wxEXPAND|wxFIXED_MINSIZE + 0 + + + wxHORIZONTAL + + wxEXPAND|wxFIXED_MINSIZE + 0 + + + 3,3 + 10,21 + + + + wxFIXED_MINSIZE + 3 + + + images/triblerpanel_topcenter.png + 13,3 + 280,21 + # 
0 + #ffffff + + wxHORIZONTAL + + wxTOP|wxEXPAND|wxFIXED_MINSIZE + 5 + + + + + 0,5 + 280,15 + # 0 + #ffffff + + + + + + + wxEXPAND|wxFIXED_MINSIZE + 0 + + + 275,3 + 10,21 + + + + + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + 0,21 + 298,550 + #ffffff + + wxVERTICAL + + 105,8 + wxTOP|wxLEFT|wxBOTTOM|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 3 + + + + wxLEFT|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 6 + + + + 6,14 + 284,-1 + + 8 + modern + + normal + 0 + Verdana + + + + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + wxVERTICAL + + wxTOP|wxEXPAND|wxFIXED_MINSIZE + 3 + + + 0,30 + 20,-1 + #cbcbcb + + wxHORIZONTAL + + wxTOP|wxEXPAND|wxFIXED_MINSIZE + 3 + + + + + 0,3 + 241,15 + #cbcbcb + + + + + + + wxTOP|wxLEFT|wxBOTTOM|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 6 + + + + 6,51 + 284,-1 + + 8 + modern + + normal + 0 + Verdana + + + + + wxLEFT|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 6 + + + + + 6,64 + 284,18 + + 8 + modern + + bold + 0 + Verdana + + + + + wxLEFT|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 6 + + + + 6,88 + 284,-1 + + 8 + modern + + normal + 0 + Verdana + + + + + wxTOP|wxLEFT|wxBOTTOM|wxFIXED_MINSIZE + 6 + + + 6,112 + 100,16 + + + + + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + wxVERTICAL + + wxTOP|wxEXPAND|wxFIXED_MINSIZE + 3 + + + 0,142 + 20,-1 + #cbcbcb + + wxHORIZONTAL + + wxTOP|wxEXPAND|wxFIXED_MINSIZE + 3 + + + + + 0,3 + 241,15 + #cbcbcb + + + + + + + wxTOP|wxLEFT|wxBOTTOM|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 6 + + + + 6,171 + 284,-1 + + 8 + modern + + normal + 0 + Verdana + + + + + wxLEFT|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 6 + + + + + 6,178 + 284,18 + + 8 + modern + + bold + 0 + Verdana + + + + + wxLEFT|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 6 + + + + 6,208 + 284,-1 + + 8 + modern + + normal + 0 + Verdana + + + + + wxTOP|wxLEFT|wxBOTTOM|wxFIXED_MINSIZE + 6 + + + 6,224 + 100,16 + + + + + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + wxVERTICAL + + wxTOP|wxEXPAND|wxFIXED_MINSIZE + 3 + + + 0,254 + 20,-1 + #cbcbcb + + wxHORIZONTAL + + wxTOP|wxEXPAND|wxFIXED_MINSIZE + 3 + + + + + 0,3 + 241,15 + #cbcbcb + + + + + + + wxTOP|wxLEFT|wxBOTTOM|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 6 + + + + 6,278 + 284,-1 + + 8 + modern + + normal + 0 + Verdana + + + + + wxLEFT|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 6 + + + + + 6,279 + 284,18 + + 8 + modern + + bold + 0 + Verdana + + + + + wxLEFT|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 6 + + + + 6,315 + 284,-1 + + 8 + modern + + normal + 0 + Verdana + + + + + wxTOP|wxLEFT|wxBOTTOM|wxFIXED_MINSIZE + 6 + + + 6,336 + 100,16 + + + + + + + + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + 3,475 + 300,5 + + + + + diff --git a/tribler-mod/Tribler/Main/vwxGUI/profileDetails_Files.xrc b/tribler-mod/Tribler/Main/vwxGUI/profileDetails_Files.xrc new file mode 100644 index 0000000..19aa18f --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/profileDetails_Files.xrc @@ -0,0 +1,157 @@ + + + + 0,0 + 300,500 + + wxVERTICAL + + wxEXPAND|wxFIXED_MINSIZE + 0 + + + wxHORIZONTAL + + wxEXPAND|wxFIXED_MINSIZE + 0 + + + 3,3 + 10,21 + + + + wxFIXED_MINSIZE + 3 + + + images/triblerpanel_topcenter.png + 13,3 + 280,21 + # 0 + #ffffff + + wxHORIZONTAL + + wxTOP|wxEXPAND|wxFIXED_MINSIZE + 5 + + + + + 0,5 + 280,15 + # 0 + #ffffff + + + + + + + wxEXPAND|wxFIXED_MINSIZE + 0 + + + 275,3 + 10,21 + + + + + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + 0,21 + 300,348 + #ffffff + + wxVERTICAL + + 105,8 + wxTOP|wxLEFT|wxBOTTOM|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 3 + + + + wxTOP|wxLEFT|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 6 + + + + 6,20 + 284,-1 + + 8 + modern + + normal + 0 + Verdana + + + + + wxTOP|wxLEFT|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 6 + + + + + 6,52 + 284,18 + + 8 + modern + + bold + 0 + 
Verdana + + + + + wxLEFT|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 6 + + + + 6,57 + 284,-1 + + 8 + modern + + normal + 0 + Verdana + + + + + wxTOP|wxLEFT|wxBOTTOM|wxFIXED_MINSIZE + 6 + + + 6,76 + 100,16 + + + + + + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + 3,475 + 300,5 + + + + + diff --git a/tribler-mod/Tribler/Main/vwxGUI/profileDetails_Persons.xrc b/tribler-mod/Tribler/Main/vwxGUI/profileDetails_Persons.xrc new file mode 100644 index 0000000..32d4072 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/profileDetails_Persons.xrc @@ -0,0 +1,148 @@ + + + + 0,0 + 300,500 + + wxVERTICAL + + wxEXPAND|wxFIXED_MINSIZE + 0 + + + wxHORIZONTAL + + wxEXPAND|wxFIXED_MINSIZE + 0 + + + 3,3 + 10,21 + + + + wxFIXED_MINSIZE + 3 + + + images/triblerpanel_topcenter.png + 13,3 + 280,21 + # 0 + #ffffff + + wxHORIZONTAL + + wxTOP|wxEXPAND|wxFIXED_MINSIZE + 5 + + + + + 0,5 + 280,17 + # 0 + #ffffff + + + + + + + wxEXPAND|wxFIXED_MINSIZE + 0 + + + 275,3 + 10,21 + + + + + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + 0,21 + 298,413 + #ffffff + + wxVERTICAL + + 105,8 + wxTOP|wxLEFT|wxBOTTOM|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 3 + + + + wxTOP|wxLEFT|wxBOTTOM|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 6 + + + + 6,20 + 284,-1 + + 8 + modern + + normal + 0 + Verdana + + + + + wxLEFT|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 6 + + + + + 6,52 + 284,18 + + 8 + modern + + bold + 0 + Verdana + + + + + wxLEFT|wxBOTTOM|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 6 + + + + 6,70 + 284,-1 + + 8 + modern + + normal + 0 + Verdana + + + + + + + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + 0,434 + 300,5 + + + + + diff --git a/tribler-mod/Tribler/Main/vwxGUI/profileDetails_Presence.xrc b/tribler-mod/Tribler/Main/vwxGUI/profileDetails_Presence.xrc new file mode 100644 index 0000000..35e3a79 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/profileDetails_Presence.xrc @@ -0,0 +1,289 @@ + + + + 0,0 + 300,660 + + wxVERTICAL + + wxEXPAND|wxFIXED_MINSIZE + 0 + + + wxHORIZONTAL + + wxEXPAND|wxFIXED_MINSIZE + 0 + + + 3,3 + 10,21 + + + + wxFIXED_MINSIZE + 3 + + + images/triblerpanel_topcenter.png + 13,3 + 280,21 + # 0 + #ffffff + + wxHORIZONTAL + + wxTOP|wxEXPAND|wxFIXED_MINSIZE + 5 + + + + + 0,5 + 280,15 + # 0 + #ffffff + + + + + + + wxEXPAND|wxFIXED_MINSIZE + 0 + + + 275,3 + 10,21 + + + + + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + 0,21 + 298,550 + #ffffff + + wxVERTICAL + + 105,8 + wxTOP|wxLEFT|wxBOTTOM|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 3 + + + + wxLEFT|wxBOTTOM|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 6 + + + + 6,14 + 284,-1 + + 8 + modern + + normal + 0 + Verdana + + + + + wxTOP|wxEXPAND|wxFIXED_MINSIZE + 3 + + + 0,36 + 20,-1 + #cbcbcb + + wxHORIZONTAL + + wxTOP|wxEXPAND|wxFIXED_MINSIZE + 3 + + + + + 0,3 + 241,15 + #cbcbcb + + + + + + + wxTOP|wxLEFT|wxBOTTOM|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 6 + + + + 6,57 + 284,-1 + + 8 + modern + + normal + 0 + Verdana + + + + + wxLEFT|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 6 + + + + + 6,84 + 284,18 + + 8 + modern + + bold + 0 + Verdana + + + + + wxLEFT|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 6 + + + + 6,94 + 284,-1 + + 8 + modern + + normal + 0 + Verdana + + + + + wxTOP|wxLEFT|wxBOTTOM|wxFIXED_MINSIZE + 6 + + + 6,118 + 100,16 + + + + wxTOP|wxEXPAND|wxFIXED_MINSIZE + 3 + + + 0,143 + 20,-1 + #cbcbcb + + wxHORIZONTAL + + wxTOP|wxFIXED_MINSIZE + 3 + + + + + 0,3 + 241,15 + #cbcbcb + + + + + + + wxTOP|wxLEFT|wxBOTTOM|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 6 + + + + 6,131 + 284,-1 + + 8 + modern + + normal + 0 + Verdana + + + + + wxLEFT|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 6 + + + + + 6,300 + 284,18 + + 8 + modern + + bold + 0 + Verdana + + + + + 
wxLEFT|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 6 + + + + 6,354 + 284,-1 + + 8 + modern + + normal + 0 + Verdana + + + + + wxTOP|wxLEFT|wxBOTTOM|wxFIXED_MINSIZE + 6 + + + 6,230 + 100,16 + + + + + + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + 3,475 + 300,5 + + + + + diff --git a/tribler-mod/Tribler/Main/vwxGUI/profileDetails_Quality.xrc b/tribler-mod/Tribler/Main/vwxGUI/profileDetails_Quality.xrc new file mode 100644 index 0000000..3baf472 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/profileDetails_Quality.xrc @@ -0,0 +1,150 @@ + + + + 0,0 + 300,280 + + wxVERTICAL + + wxEXPAND|wxFIXED_MINSIZE + 0 + + + wxHORIZONTAL + + wxEXPAND|wxFIXED_MINSIZE + 0 + + + 3,3 + 10,21 + + + + wxFIXED_MINSIZE + 3 + + + images/triblerpanel_topcenter.png + 13,3 + 280,21 + # 0 + #ffffff + + wxHORIZONTAL + + wxTOP|wxEXPAND|wxFIXED_MINSIZE + 5 + + + + + 0,5 + 280,15 + # 0 + #ffffff + + + + + + + wxEXPAND|wxFIXED_MINSIZE + 0 + + + 275,3 + 10,21 + + + + + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + 0,21 + 298,200 + #ffffff + + wxVERTICAL + + 105,8 + wxTOP|wxLEFT|wxBOTTOM|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 3 + + + + wxLEFT|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 6 + + + + 6,14 + 284,-1 + + 8 + modern + + normal + 0 + Verdana + + + + + wxTOP|wxLEFT|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 6 + + + + + 6,33 + 284,18 + + 8 + modern + + bold + 0 + Verdana + + + + + wxLEFT|wxBOTTOM|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 6 + + + + 6,51 + 284,-1 + + 8 + modern + + normal + 0 + Verdana + + + + + + + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + 0,23 + 300,5 + + + + + diff --git a/tribler-mod/Tribler/Main/vwxGUI/settingsOverview.xrc b/tribler-mod/Tribler/Main/vwxGUI/settingsOverview.xrc new file mode 100644 index 0000000..659bf8c --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/settingsOverview.xrc @@ -0,0 +1,500 @@ + + + + 0,0 + 1000,760 + #FFFFFF + + wxVERTICAL + + 0,20 + + + + wxHORIZONTAL + + 50,0 + + + + wxVERTICAL + + + + + 10 + swiss + + bold + 0 + Sans + + + + + 0,10 + + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + wxHORIZONTAL + + wxLEFT|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 15 + + + 18,84 + 80,80 + + + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + wxVERTICAL + + wxTOP|wxEXPAND|wxFIXED_MINSIZE + 2 + + + + + 110,70 + 89,23 + + + + 89,17 + wxEXPAND|wxFIXED_MINSIZE + 3 + + + + wxFIXED_MINSIZE + 3 + + + 110,130 + 40,16 + + + + + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + wxVERTICAL + + wxTOP|wxLEFT|wxBOTTOM|wxEXPAND|wxFIXED_MINSIZE + 2 + + + + + 201,66 + 99,18 + + + + + + + + 0,10 + + + + + 45,0 + + + + wxVERTICAL + + + + + 10 + swiss + + bold + 0 + Sans + + + + + 0,10 + + + + wxHORIZONTAL + + 20,0 + + + + + + + + + + 0,0 + + + + wxHORIZONTAL + + 20,0 + + + + 70,20 + + + + + 3,0 + + + + + + + + 5,0 + + + + 23,16 + + + + 3,0 + + + + 23,16 + + + + 3,0 + + + + 28,16 + + + + 3,0 + + + + 62,16 + + + + + + 0,10 + + + + wxHORIZONTAL + + 20,0 + + + + + + + + + + + wxHORIZONTAL + + 20,0 + + + + 70,20 + + + + + 3,0 + + + + + + + + 5,0 + + + + 23,16 + + + + 3,0 + + + + 23,16 + + + + 3,0 + + + + 28,16 + + + + 3,0 + + + + 62,16 + + + + + + + wxHORIZONTAL + + + + + + 100,0 + + + + wxVERTICAL + + + + + 10 + swiss + + bold + 0 + Sans + + + + + 0,10 + + + + wxHORIZONTAL + + 20,0 + + + + wxVERTICAL + + 0,5 + + + + + 50,20 + + + 10 + default + + normal + 0 + + 1 + + + + + + 5,0 + + + + 33,32 + + + + 5,0 + + + + wxVERTICAL + + 0,7 + + + + + + + + + + + + 0,5 + + + + wxHORIZONTAL + + 20,0 + + + + + 200,50 + + 10 + swiss + + bold + 0 + Sans + + + + + + + + + + + 0,70 + + + + wxHORIZONTAL + + 50,0 + + + + wxVERTICAL + + + + + 10 + swiss + + bold + 0 + Sans + + + + + 0,10 + + + + wxHORIZONTAL + + 10,0 + + 
+ + + + + + + + 0,0 + + + + wxHORIZONTAL + + 10,0 + + + + 300,20 + + + + + 3,0 + + + + wxVERTICAL + + 0,2 + + + + 55,16 + + + + + + + + + + + + 0,50 + + + + wxHORIZONTAL + + 419,0 + + + + 62,32 + + + + + + 0,30 + + + + wxHORIZONTAL + + 390,0 + + + + images/iconSaved_state3.png + 1 + + + + + + + \ No newline at end of file diff --git a/tribler-mod/Tribler/Main/vwxGUI/settingsOverviewPanel.py b/tribler-mod/Tribler/Main/vwxGUI/settingsOverviewPanel.py new file mode 100644 index 0000000..d681d08 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/settingsOverviewPanel.py @@ -0,0 +1,468 @@ +from time import localtime, strftime +# Written by Richard Gwin +# see LICENSE.txt for license information +import wx +import wx.xrc as xrc +import sys, os + +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility +from Tribler.Main.vwxGUI.IconsManager import IconsManager, data2wxBitmap +from Tribler.Main.Dialogs.GUITaskQueue import GUITaskQueue +from Tribler.Main.Dialogs.socnetmyinfo import MyInfoWizard +from Tribler.Main.globals import DefaultDownloadStartupConfig,get_default_dscfg_filename +from Tribler.Core.simpledefs import * + + + +#fonts +if sys.platform == 'darwin': + FONT_SIZE_PROFILE_TITLE=12 + FONT_SIZE_SHARING_TITLE=12 + FONT_SIZE_FIREWALL_TITLE=12 + FONT_SIZE_FILE_TEXT=12 + +else: + FONT_SIZE_PROFILE_TITLE=10 + FONT_SIZE_SHARING_TITLE=10 + FONT_SIZE_FIREWALL_TITLE=10 + FONT_SIZE_FILE_TEXT=10 + + + + + +class SettingsOverviewPanel(wx.Panel): + def __init__(self, *args, **kw): +# print " tribler_topButton in init" + self.initDone = False + self.elementsName = ['profileTitle', \ + 'sharingTitle', \ + 'firewallTitle', \ + 'fileText', \ + 'myNameField', \ + 'thumb', \ + 'edit', \ + 'firewallValue', \ + 'firewallStatusText', \ + 'firewallStatus', \ + 'uploadCtrl', \ + 'downloadCtrl', \ + 'zeroUp', \ + 'fiftyUp', \ + 'hundredUp', \ + 'unlimitedUp', \ + 'seventyfiveDown', \ + 'threehundredDown', \ + 'sixhundreddDown', \ + 'unlimitedDown', \ + 'diskLocationCtrl', \ + 'portChange', \ + 'iconSaved', \ + 'Save'] + + + self.elements = {} + self.data = {} #data related to profile information, to be used in details panel + self.mypref = None + self.currentPortValue = None + + self.reload_counter = -1 + self.reload_cache = [None, None, None] + + # SELDOM cache + self.bartercast_db = None + self.barterup = 0 + self.barterdown = 0 + + if len(args) == 0: + pre = wx.PrePanel() + # the Create step is done by XRC. 
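+            # Two-stage creation: the XRC subclass factory builds the real wxPanel,
+            # PostCreate() attaches it to this Python instance, and _PostInit() is
+            # run after EVT_WINDOW_CREATE fires (see OnCreate below).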
+ self.PostCreate(pre) + self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate) + else: + wx.Panel.__init__(self, *args, **kw) + self._PostInit() + + def OnCreate(self, event): +# print " tribler_topButton in OnCreate" + self.Unbind(wx.EVT_WINDOW_CREATE) + wx.CallAfter(self._PostInit) + event.Skip() + return True + + def _PostInit(self): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","settingsOverviewPanel: in _PostInit" + # Do all init here + self.guiUtility = GUIUtility.getInstance() + + self.standardOverview = self.guiUtility.standardOverview + + self.defaultDLConfig = DefaultDownloadStartupConfig.getInstance() + + + self.firewallStatus = xrc.XRCCTRL(self,"firewallStatus") + + + self.utility = self.guiUtility.utility + # All mainthread, no need to close + self.torrent_db = self.guiUtility.utility.session.open_dbhandler(NTFY_TORRENTS) + self.peer_db = self.guiUtility.utility.session.open_dbhandler(NTFY_PEERS) + self.friend_db = self.guiUtility.utility.session.open_dbhandler(NTFY_FRIENDS) + self.bartercast_db = self.guiUtility.utility.session.open_dbhandler(NTFY_BARTERCAST) + self.mypref = self.guiUtility.utility.session.open_dbhandler(NTFY_MYPREFERENCES) +# self.Bind(wx.EVT_MOUSE_EVENTS, self.mouseAction) +# self.Bind(wx.EVT_LEFT_UP, self.guiUtility.buttonClicked) + for element in self.elementsName: + xrcElement = xrc.XRCCTRL(self, element) + if not xrcElement: + print 'settingsOverviewPanel: Error: Could not identify xrc element:',element + self.elements[element] = xrcElement + + self.getNameMugshot() + self.showNameMugshot() + #self.getGuiElement('myNameField').SetLabel('') + + + #set fonts + self.elements['profileTitle'].SetFont(wx.Font(FONT_SIZE_PROFILE_TITLE, wx.SWISS, wx.NORMAL, wx.BOLD, 0, "UTF-8")) + self.elements['sharingTitle'].SetFont(wx.Font(FONT_SIZE_SHARING_TITLE, wx.SWISS, wx.NORMAL, wx.BOLD, 0, "UTF-8")) + self.elements['firewallTitle'].SetFont(wx.Font(FONT_SIZE_FIREWALL_TITLE, wx.SWISS, wx.NORMAL, wx.BOLD, 0, "UTF-8")) + self.elements['fileText'].SetFont(wx.Font(FONT_SIZE_FILE_TEXT, wx.SWISS, wx.NORMAL, wx.BOLD, 0, "UTF-8")) + + self.elements['zeroUp'].Bind(wx.EVT_LEFT_UP, self.zeroUp) + self.elements['fiftyUp'].Bind(wx.EVT_LEFT_UP, self.fiftyUp) + self.elements['hundredUp'].Bind(wx.EVT_LEFT_UP, self.hundredUp) + self.elements['unlimitedUp'].Bind(wx.EVT_LEFT_UP, self.unlimitedUp) + + self.elements['seventyfiveDown'].Bind(wx.EVT_LEFT_UP, self.seventyfiveDown) + self.elements['threehundredDown'].Bind(wx.EVT_LEFT_UP, self.threehundredDown) + self.elements['sixhundreddDown'].Bind(wx.EVT_LEFT_UP, self.sixhundreddDown) + self.elements['unlimitedDown'].Bind(wx.EVT_LEFT_UP, self.unlimitedDown) + + self.elements['uploadCtrl'].Bind(wx.EVT_KEY_DOWN, self.uploadCtrlEnter) + self.elements['downloadCtrl'].Bind(wx.EVT_KEY_DOWN, self.downloadCtrlEnter) + + #self.elements['firewallValue'].Bind(wx.EVT_KEY_DOWN,self.OnPortChange) + self.elements['diskLocationCtrl'].Bind(wx.EVT_KEY_DOWN,self.diskLocationCtrlEnter) + + self.elements['Save'].Bind(wx.EVT_LEFT_UP, self.saveAll) + + + + + self.showPort() + self.setCurrentPortValue() + + + + self.showMaxDLRate() + self.showMaxULRate() + + self.showDiskLocation() # sic + + self.initDone = True + +# self.Update() +# self.initData() + self.timer = None + + wx.CallAfter(self.Refresh) + + +# print " in onshow in settingsOverviewPanel" +# if evt.show: +# print " settingsOverviewPanel is visible" +# self.timer.Start() #restarts the timer +# else: +# print " settingsOverviewPanel is visible" + #pass + #wx.CallAfter(self.reloadData) + + def 
getNameMugshot(self): + self.myname = self.utility.session.get_nickname() + mime, data = self.utility.session.get_mugshot() + if data is None: + im = IconsManager.getInstance() + self.mugshot = im.get_default('personsMode','DEFAULT_THUMB') + else: + self.mugshot = data2wxBitmap(mime, data) + + def showNameMugshot(self): + self.getGuiElement('myNameField').SetLabel(self.myname) + thumbpanel = self.getGuiElement('thumb') + thumbpanel.createBackgroundImage() + thumbpanel.setBitmap(self.mugshot) + + + + def getGuiElement(self, name): + if not self.elements.has_key(name) or not self.elements[name]: + return None + return self.elements[name] + + + + def sendClick(self, event): + source = event.GetEventObject() + source_name = source.GetName() + if source_name == "edit": + self.OnMyInfoWizard(event) + elif source_name == "browse": + self.BrowseClicked(event) + + + def setCurrentPortValue(self): + self.currentPortValue = self.elements['firewallValue'].GetValue() + + + + def OnPortChange(self, event): + keycode = event.GetKeyCode() + if keycode == wx.WXK_RETURN: + self.saveAll() + else: + event.Skip() + + + def diskLocationCtrlEnter(self, event): + self.elements['diskLocationCtrl'].SetForegroundColour(wx.BLACK) + event.Skip() + + + + def showPort(self): + self.elements['firewallValue'].SetValue(str(self.guiUtility.get_port_number())) + + def show_message(self): + self.elements['portChange'].SetLabel('Your changes will occur \nthe next time you restart \nTribler.') + self.guiserver.add_task(lambda:wx.CallAfter(self.hide_message), 3.0) + + + def hide_message(self): + self.elements['portChange'].SetLabel('') + + def updateSaveIcon(self): + self.guiserver = GUITaskQueue.getInstance() + self.guiserver.add_task(lambda:wx.CallAfter(self.showSaveIcon), 0.0) + + def showSaveIcon(self): + self.elements['iconSaved'].Show(True) + sizer = self.elements['iconSaved'].GetContainingSizer() + sizer.Layout() + self.guiserver.add_task(lambda:wx.CallAfter(self.hideSaveIcon), 3.0) + + def hideSaveIcon(self): + self.elements['iconSaved'].Show(False) + + def showMaxDLRate(self): + maxdownloadrate = self.guiUtility.utility.config.Read('maxdownloadrate', 'int') #kB/s + if maxdownloadrate == 0: + self.elements['downloadCtrl'].SetValue('unlimited') + else: + self.elements['downloadCtrl'].SetValue(str(maxdownloadrate)) + + def showMaxULRate(self): + maxuploadrate = self.guiUtility.utility.config.Read('maxuploadrate', 'int') #kB/s + if maxuploadrate == -1: + self.elements['uploadCtrl'].SetValue('0') + elif maxuploadrate == 0: + self.elements['uploadCtrl'].SetValue('unlimited') + else: + self.elements['uploadCtrl'].SetValue(str(maxuploadrate)) + + def showDiskLocation(self): + path = self.defaultDLConfig.get_dest_dir() + self.elements['diskLocationCtrl'].SetValue(path) + + + def zeroUp(self, event): + self.resetUploadDownloadCtrlColour() + self.elements['uploadCtrl'].SetValue('0') + #self.saveAll() + + def fiftyUp(self, event): + self.resetUploadDownloadCtrlColour() + self.elements['uploadCtrl'].SetValue('50') + #self.saveAll() + + def hundredUp(self, event): + self.resetUploadDownloadCtrlColour() + self.elements['uploadCtrl'].SetValue('100') + #self.saveAll() + + def unlimitedUp(self, event): + self.resetUploadDownloadCtrlColour() + self.elements['uploadCtrl'].SetValue('unlimited') + #self.saveAll() + + def seventyfiveDown(self, event): + self.resetUploadDownloadCtrlColour() + self.elements['downloadCtrl'].SetValue('75') + #self.saveAll() + + def threehundredDown(self, event): + self.resetUploadDownloadCtrlColour() + 
self.elements['downloadCtrl'].SetValue('300') + #self.saveAll() + + def sixhundreddDown(self, event): + self.resetUploadDownloadCtrlColour() + self.elements['downloadCtrl'].SetValue('600') + #self.saveAll() + + def unlimitedDown(self, event): + self.resetUploadDownloadCtrlColour() + self.elements['downloadCtrl'].SetValue('unlimited') + #self.saveAll() + + + + def resetUploadDownloadCtrlColour(self): + self.elements['uploadCtrl'].SetForegroundColour(wx.BLACK) + self.elements['downloadCtrl'].SetForegroundColour(wx.BLACK) + + + + + def uploadCtrlEnter(self, event): + self.elements['uploadCtrl'].SetForegroundColour(wx.BLACK) + if self.elements['uploadCtrl'].GetValue().strip() == 'unlimited': + self.elements['uploadCtrl'].SetValue('') + event.Skip() + + + def downloadCtrlEnter(self, event): + self.elements['downloadCtrl'].SetForegroundColour(wx.BLACK) + if self.elements['downloadCtrl'].GetValue().strip() == 'unlimited': + self.elements['downloadCtrl'].SetValue('') + event.Skip() + + + def saveAll(self, download = True, upload = True, diskLocation = True, port = True): + saved = True + maxdownload = None + maxupload = None + + valdown = self.elements['downloadCtrl'].GetValue().strip() + if valdown != '': + if valdown == 'unlimited': + maxdownload = 'unlimited' + elif valdown.isdigit() and int(valdown) > 0: + maxdownload = 'value' + else: + saved = False + self.elements['downloadCtrl'].SetForegroundColour(wx.RED) + self.elements['downloadCtrl'].SetValue('Error') + + + valup = self.elements['uploadCtrl'].GetValue().strip() + if valup != '': + if valup == 'unlimited': + maxupload = 'unlimited' + self.utility.ratelimiter.set_global_max_speed(UPLOAD, 0) + self.guiUtility.utility.config.Write('maxuploadrate', '0') + elif valup == '0': + maxupload = '0' + self.utility.ratelimiter.set_global_max_speed(UPLOAD, 0.0001) + self.guiUtility.utility.config.Write('maxuploadrate', '-1') + elif valup.isdigit(): + maxupload = 'value' + self.utility.ratelimiter.set_global_max_speed(UPLOAD, int(valup)) + self.guiUtility.utility.config.Write('maxuploadrate', valup) + else: + saved = False + self.elements['uploadCtrl'].SetForegroundColour(wx.RED) + self.elements['uploadCtrl'].SetValue('Error') + + + if not self.elements['firewallValue'].GetValue().isdigit(): + saved = False + + if not os.path.exists(self.elements['diskLocationCtrl'].GetValue()): + saved = False + self.elements['diskLocationCtrl'].SetForegroundColour(wx.RED) + self.elements['diskLocationCtrl'].SetValue('Error') + + + + + # save settings parameters + if saved: + + # max download + if download: + if maxdownload == 'unlimited': + self.utility.ratelimiter.set_global_max_speed(DOWNLOAD, 0) + self.guiUtility.utility.config.Write('maxdownloadrate', '0') + else: + self.utility.ratelimiter.set_global_max_speed(DOWNLOAD, int(valdown)) + self.guiUtility.utility.config.Write('maxdownloadrate', valdown) + + # max upload + if upload: + if maxupload == 'unlimited': + self.utility.ratelimiter.set_global_max_speed(UPLOAD, 0) + self.guiUtility.utility.config.Write('maxuploadrate', '0') + elif maxupload == '0': + self.utility.ratelimiter.set_global_max_speed(UPLOAD, 0.0001) + self.guiUtility.utility.config.Write('maxuploadrate', '-1') + else: + self.utility.ratelimiter.set_global_max_speed(UPLOAD, int(valup)) + self.guiUtility.utility.config.Write('maxuploadrate', valup) + + # disk location + if diskLocation: + self.defaultDLConfig.set_dest_dir(self.elements['diskLocationCtrl'].GetValue()) + self.saveDefaultDownloadConfig() + + + # port number + if port and 
self.elements['firewallValue'].GetValue() != self.currentPortValue: + self.currentPortValue = self.elements['firewallValue'].GetValue() + self.utility.config.Write('minport', self.elements['firewallValue'].GetValue()) + self.utility.config.Flush() + self.guiUtility.set_port_number(self.elements['firewallValue'].GetValue()) + self.guiUtility.set_firewall_restart(True) + self.guiserver = GUITaskQueue.getInstance() + self.guiserver.add_task(lambda:wx.CallAfter(self.show_message), 0.0) + self.elements['firewallStatus'].setSelected(1) + self.elements['firewallStatusText'].SetLabel('Restart Tribler') + tt = self.elements['firewallStatus'].GetToolTip() + if tt is not None: + tt.SetTip(self.utility.lang.get('restart_tooltip')) + + + self.updateSaveIcon() + + + + + def BrowseClicked(self, event = None): + dlg = wx.DirDialog(self,"Choose download directory", style = wx.DEFAULT_DIALOG_STYLE) + dlg.SetPath(self.defaultDLConfig.get_dest_dir()) + if dlg.ShowModal() == wx.ID_OK: + self.elements['diskLocationCtrl'].SetForegroundColour(wx.BLACK) + self.elements['diskLocationCtrl'].SetValue(dlg.GetPath()) + #self.saveAll() + else: + pass + + + def saveDefaultDownloadConfig(self): + dlcfgfilename = get_default_dscfg_filename(self.utility.session) + self.defaultDLConfig.save(dlcfgfilename) + + + def OnMyInfoWizard(self, event = None): + wizard = MyInfoWizard(self) + wizard.RunWizard(wizard.getFirstPage()) + + def WizardFinished(self,wizard): + wizard.Destroy() + + self.getNameMugshot() + self.showNameMugshot() + + #self.saveAll() + + diff --git a/tribler-mod/Tribler/Main/vwxGUI/settingsOverviewPanel.py.bak b/tribler-mod/Tribler/Main/vwxGUI/settingsOverviewPanel.py.bak new file mode 100644 index 0000000..23624bf --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/settingsOverviewPanel.py.bak @@ -0,0 +1,467 @@ +# Written by Richard Gwin +# see LICENSE.txt for license information +import wx +import wx.xrc as xrc +import sys, os + +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility +from Tribler.Main.vwxGUI.IconsManager import IconsManager, data2wxBitmap +from Tribler.Main.Dialogs.GUITaskQueue import GUITaskQueue +from Tribler.Main.Dialogs.socnetmyinfo import MyInfoWizard +from Tribler.Main.globals import DefaultDownloadStartupConfig,get_default_dscfg_filename +from Tribler.Core.simpledefs import * + + + +#fonts +if sys.platform == 'darwin': + FONT_SIZE_PROFILE_TITLE=12 + FONT_SIZE_SHARING_TITLE=12 + FONT_SIZE_FIREWALL_TITLE=12 + FONT_SIZE_FILE_TEXT=12 + +else: + FONT_SIZE_PROFILE_TITLE=10 + FONT_SIZE_SHARING_TITLE=10 + FONT_SIZE_FIREWALL_TITLE=10 + FONT_SIZE_FILE_TEXT=10 + + + + + +class SettingsOverviewPanel(wx.Panel): + def __init__(self, *args, **kw): +# print " tribler_topButton in init" + self.initDone = False + self.elementsName = ['profileTitle', \ + 'sharingTitle', \ + 'firewallTitle', \ + 'fileText', \ + 'myNameField', \ + 'thumb', \ + 'edit', \ + 'firewallValue', \ + 'firewallStatusText', \ + 'firewallStatus', \ + 'uploadCtrl', \ + 'downloadCtrl', \ + 'zeroUp', \ + 'fiftyUp', \ + 'hundredUp', \ + 'unlimitedUp', \ + 'seventyfiveDown', \ + 'threehundredDown', \ + 'sixhundreddDown', \ + 'unlimitedDown', \ + 'diskLocationCtrl', \ + 'portChange', \ + 'iconSaved', \ + 'Save'] + + + self.elements = {} + self.data = {} #data related to profile information, to be used in details panel + self.mypref = None + self.currentPortValue = None + + self.reload_counter = -1 + self.reload_cache = [None, None, None] + + # SELDOM cache + self.bartercast_db = None + self.barterup = 0 + self.barterdown = 0 + + if len(args) 
== 0: + pre = wx.PrePanel() + # the Create step is done by XRC. + self.PostCreate(pre) + self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate) + else: + wx.Panel.__init__(self, *args, **kw) + self._PostInit() + + def OnCreate(self, event): +# print " tribler_topButton in OnCreate" + self.Unbind(wx.EVT_WINDOW_CREATE) + wx.CallAfter(self._PostInit) + event.Skip() + return True + + def _PostInit(self): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","settingsOverviewPanel: in _PostInit" + # Do all init here + self.guiUtility = GUIUtility.getInstance() + + self.standardOverview = self.guiUtility.standardOverview + + self.defaultDLConfig = DefaultDownloadStartupConfig.getInstance() + + + self.firewallStatus = xrc.XRCCTRL(self,"firewallStatus") + + + self.utility = self.guiUtility.utility + # All mainthread, no need to close + self.torrent_db = self.guiUtility.utility.session.open_dbhandler(NTFY_TORRENTS) + self.peer_db = self.guiUtility.utility.session.open_dbhandler(NTFY_PEERS) + self.friend_db = self.guiUtility.utility.session.open_dbhandler(NTFY_FRIENDS) + self.bartercast_db = self.guiUtility.utility.session.open_dbhandler(NTFY_BARTERCAST) + self.mypref = self.guiUtility.utility.session.open_dbhandler(NTFY_MYPREFERENCES) +# self.Bind(wx.EVT_MOUSE_EVENTS, self.mouseAction) +# self.Bind(wx.EVT_LEFT_UP, self.guiUtility.buttonClicked) + for element in self.elementsName: + xrcElement = xrc.XRCCTRL(self, element) + if not xrcElement: + print 'settingsOverviewPanel: Error: Could not identify xrc element:',element + self.elements[element] = xrcElement + + self.getNameMugshot() + self.showNameMugshot() + #self.getGuiElement('myNameField').SetLabel('') + + + #set fonts + self.elements['profileTitle'].SetFont(wx.Font(FONT_SIZE_PROFILE_TITLE, wx.SWISS, wx.NORMAL, wx.BOLD, 0, "UTF-8")) + self.elements['sharingTitle'].SetFont(wx.Font(FONT_SIZE_SHARING_TITLE, wx.SWISS, wx.NORMAL, wx.BOLD, 0, "UTF-8")) + self.elements['firewallTitle'].SetFont(wx.Font(FONT_SIZE_FIREWALL_TITLE, wx.SWISS, wx.NORMAL, wx.BOLD, 0, "UTF-8")) + self.elements['fileText'].SetFont(wx.Font(FONT_SIZE_FILE_TEXT, wx.SWISS, wx.NORMAL, wx.BOLD, 0, "UTF-8")) + + self.elements['zeroUp'].Bind(wx.EVT_LEFT_UP, self.zeroUp) + self.elements['fiftyUp'].Bind(wx.EVT_LEFT_UP, self.fiftyUp) + self.elements['hundredUp'].Bind(wx.EVT_LEFT_UP, self.hundredUp) + self.elements['unlimitedUp'].Bind(wx.EVT_LEFT_UP, self.unlimitedUp) + + self.elements['seventyfiveDown'].Bind(wx.EVT_LEFT_UP, self.seventyfiveDown) + self.elements['threehundredDown'].Bind(wx.EVT_LEFT_UP, self.threehundredDown) + self.elements['sixhundreddDown'].Bind(wx.EVT_LEFT_UP, self.sixhundreddDown) + self.elements['unlimitedDown'].Bind(wx.EVT_LEFT_UP, self.unlimitedDown) + + self.elements['uploadCtrl'].Bind(wx.EVT_KEY_DOWN, self.uploadCtrlEnter) + self.elements['downloadCtrl'].Bind(wx.EVT_KEY_DOWN, self.downloadCtrlEnter) + + #self.elements['firewallValue'].Bind(wx.EVT_KEY_DOWN,self.OnPortChange) + self.elements['diskLocationCtrl'].Bind(wx.EVT_KEY_DOWN,self.diskLocationCtrlEnter) + + self.elements['Save'].Bind(wx.EVT_LEFT_UP, self.saveAll) + + + + + self.showPort() + self.setCurrentPortValue() + + + + self.showMaxDLRate() + self.showMaxULRate() + + self.showDiskLocation() # sic + + self.initDone = True + +# self.Update() +# self.initData() + self.timer = None + + wx.CallAfter(self.Refresh) + + +# print " in onshow in settingsOverviewPanel" +# if evt.show: +# print " settingsOverviewPanel is visible" +# self.timer.Start() #restarts the timer +# else: +# print " 
settingsOverviewPanel is visible" + #pass + #wx.CallAfter(self.reloadData) + + def getNameMugshot(self): + self.myname = self.utility.session.get_nickname() + mime, data = self.utility.session.get_mugshot() + if data is None: + im = IconsManager.getInstance() + self.mugshot = im.get_default('personsMode','DEFAULT_THUMB') + else: + self.mugshot = data2wxBitmap(mime, data) + + def showNameMugshot(self): + self.getGuiElement('myNameField').SetLabel(self.myname) + thumbpanel = self.getGuiElement('thumb') + thumbpanel.createBackgroundImage() + thumbpanel.setBitmap(self.mugshot) + + + + def getGuiElement(self, name): + if not self.elements.has_key(name) or not self.elements[name]: + return None + return self.elements[name] + + + + def sendClick(self, event): + source = event.GetEventObject() + source_name = source.GetName() + if source_name == "edit": + self.OnMyInfoWizard(event) + elif source_name == "browse": + self.BrowseClicked(event) + + + def setCurrentPortValue(self): + self.currentPortValue = self.elements['firewallValue'].GetValue() + + + + def OnPortChange(self, event): + keycode = event.GetKeyCode() + if keycode == wx.WXK_RETURN: + self.saveAll() + else: + event.Skip() + + + def diskLocationCtrlEnter(self, event): + self.elements['diskLocationCtrl'].SetForegroundColour(wx.BLACK) + event.Skip() + + + + def showPort(self): + self.elements['firewallValue'].SetValue(str(self.guiUtility.get_port_number())) + + def show_message(self): + self.elements['portChange'].SetLabel('Your changes will occur \nthe next time you restart \nTribler.') + self.guiserver.add_task(lambda:wx.CallAfter(self.hide_message), 3.0) + + + def hide_message(self): + self.elements['portChange'].SetLabel('') + + def updateSaveIcon(self): + self.guiserver = GUITaskQueue.getInstance() + self.guiserver.add_task(lambda:wx.CallAfter(self.showSaveIcon), 0.0) + + def showSaveIcon(self): + self.elements['iconSaved'].Show(True) + sizer = self.elements['iconSaved'].GetContainingSizer() + sizer.Layout() + self.guiserver.add_task(lambda:wx.CallAfter(self.hideSaveIcon), 3.0) + + def hideSaveIcon(self): + self.elements['iconSaved'].Show(False) + + def showMaxDLRate(self): + maxdownloadrate = self.guiUtility.utility.config.Read('maxdownloadrate', 'int') #kB/s + if maxdownloadrate == 0: + self.elements['downloadCtrl'].SetValue('unlimited') + else: + self.elements['downloadCtrl'].SetValue(str(maxdownloadrate)) + + def showMaxULRate(self): + maxuploadrate = self.guiUtility.utility.config.Read('maxuploadrate', 'int') #kB/s + if maxuploadrate == -1: + self.elements['uploadCtrl'].SetValue('0') + elif maxuploadrate == 0: + self.elements['uploadCtrl'].SetValue('unlimited') + else: + self.elements['uploadCtrl'].SetValue(str(maxuploadrate)) + + def showDiskLocation(self): + path = self.defaultDLConfig.get_dest_dir() + self.elements['diskLocationCtrl'].SetValue(path) + + + def zeroUp(self, event): + self.resetUploadDownloadCtrlColour() + self.elements['uploadCtrl'].SetValue('0') + #self.saveAll() + + def fiftyUp(self, event): + self.resetUploadDownloadCtrlColour() + self.elements['uploadCtrl'].SetValue('50') + #self.saveAll() + + def hundredUp(self, event): + self.resetUploadDownloadCtrlColour() + self.elements['uploadCtrl'].SetValue('100') + #self.saveAll() + + def unlimitedUp(self, event): + self.resetUploadDownloadCtrlColour() + self.elements['uploadCtrl'].SetValue('unlimited') + #self.saveAll() + + def seventyfiveDown(self, event): + self.resetUploadDownloadCtrlColour() + self.elements['downloadCtrl'].SetValue('75') + #self.saveAll() + + def 
threehundredDown(self, event): + self.resetUploadDownloadCtrlColour() + self.elements['downloadCtrl'].SetValue('300') + #self.saveAll() + + def sixhundreddDown(self, event): + self.resetUploadDownloadCtrlColour() + self.elements['downloadCtrl'].SetValue('600') + #self.saveAll() + + def unlimitedDown(self, event): + self.resetUploadDownloadCtrlColour() + self.elements['downloadCtrl'].SetValue('unlimited') + #self.saveAll() + + + + def resetUploadDownloadCtrlColour(self): + self.elements['uploadCtrl'].SetForegroundColour(wx.BLACK) + self.elements['downloadCtrl'].SetForegroundColour(wx.BLACK) + + + + + def uploadCtrlEnter(self, event): + self.elements['uploadCtrl'].SetForegroundColour(wx.BLACK) + if self.elements['uploadCtrl'].GetValue().strip() == 'unlimited': + self.elements['uploadCtrl'].SetValue('') + event.Skip() + + + def downloadCtrlEnter(self, event): + self.elements['downloadCtrl'].SetForegroundColour(wx.BLACK) + if self.elements['downloadCtrl'].GetValue().strip() == 'unlimited': + self.elements['downloadCtrl'].SetValue('') + event.Skip() + + + def saveAll(self, download = True, upload = True, diskLocation = True, port = True): + saved = True + maxdownload = None + maxupload = None + + valdown = self.elements['downloadCtrl'].GetValue().strip() + if valdown != '': + if valdown == 'unlimited': + maxdownload = 'unlimited' + elif valdown.isdigit() and int(valdown) > 0: + maxdownload = 'value' + else: + saved = False + self.elements['downloadCtrl'].SetForegroundColour(wx.RED) + self.elements['downloadCtrl'].SetValue('Error') + + + valup = self.elements['uploadCtrl'].GetValue().strip() + if valup != '': + if valup == 'unlimited': + maxupload = 'unlimited' + self.utility.ratelimiter.set_global_max_speed(UPLOAD, 0) + self.guiUtility.utility.config.Write('maxuploadrate', '0') + elif valup == '0': + maxupload = '0' + self.utility.ratelimiter.set_global_max_speed(UPLOAD, 0.0001) + self.guiUtility.utility.config.Write('maxuploadrate', '-1') + elif valup.isdigit(): + maxupload = 'value' + self.utility.ratelimiter.set_global_max_speed(UPLOAD, int(valup)) + self.guiUtility.utility.config.Write('maxuploadrate', valup) + else: + saved = False + self.elements['uploadCtrl'].SetForegroundColour(wx.RED) + self.elements['uploadCtrl'].SetValue('Error') + + + if not self.elements['firewallValue'].GetValue().isdigit(): + saved = False + + if not os.path.exists(self.elements['diskLocationCtrl'].GetValue()): + saved = False + self.elements['diskLocationCtrl'].SetForegroundColour(wx.RED) + self.elements['diskLocationCtrl'].SetValue('Error') + + + + + # save settings parameters + if saved: + + # max download + if download: + if maxdownload == 'unlimited': + self.utility.ratelimiter.set_global_max_speed(DOWNLOAD, 0) + self.guiUtility.utility.config.Write('maxdownloadrate', '0') + else: + self.utility.ratelimiter.set_global_max_speed(DOWNLOAD, int(valdown)) + self.guiUtility.utility.config.Write('maxdownloadrate', valdown) + + # max upload + if upload: + if maxupload == 'unlimited': + self.utility.ratelimiter.set_global_max_speed(UPLOAD, 0) + self.guiUtility.utility.config.Write('maxuploadrate', '0') + elif maxupload == '0': + self.utility.ratelimiter.set_global_max_speed(UPLOAD, 0.0001) + self.guiUtility.utility.config.Write('maxuploadrate', '-1') + else: + self.utility.ratelimiter.set_global_max_speed(UPLOAD, int(valup)) + self.guiUtility.utility.config.Write('maxuploadrate', valup) + + # disk location + if diskLocation: + self.defaultDLConfig.set_dest_dir(self.elements['diskLocationCtrl'].GetValue()) + 
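# persist the new default destination directory to the dlconfig file on disk
+                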
self.saveDefaultDownloadConfig() + + + # port number + if port and self.elements['firewallValue'].GetValue() != self.currentPortValue: + self.currentPortValue = self.elements['firewallValue'].GetValue() + self.utility.config.Write('minport', self.elements['firewallValue'].GetValue()) + self.utility.config.Flush() + self.guiUtility.set_port_number(self.elements['firewallValue'].GetValue()) + self.guiUtility.set_firewall_restart(True) + self.guiserver = GUITaskQueue.getInstance() + self.guiserver.add_task(lambda:wx.CallAfter(self.show_message), 0.0) + self.elements['firewallStatus'].setSelected(1) + self.elements['firewallStatusText'].SetLabel('Restart Tribler') + tt = self.elements['firewallStatus'].GetToolTip() + if tt is not None: + tt.SetTip(self.utility.lang.get('restart_tooltip')) + + + self.updateSaveIcon() + + + + + def BrowseClicked(self, event = None): + dlg = wx.DirDialog(self,"Choose download directory", style = wx.DEFAULT_DIALOG_STYLE) + dlg.SetPath(self.defaultDLConfig.get_dest_dir()) + if dlg.ShowModal() == wx.ID_OK: + self.elements['diskLocationCtrl'].SetForegroundColour(wx.BLACK) + self.elements['diskLocationCtrl'].SetValue(dlg.GetPath()) + #self.saveAll() + else: + pass + + + def saveDefaultDownloadConfig(self): + dlcfgfilename = get_default_dscfg_filename(self.utility.session) + self.defaultDLConfig.save(dlcfgfilename) + + + def OnMyInfoWizard(self, event = None): + wizard = MyInfoWizard(self) + wizard.RunWizard(wizard.getFirstPage()) + + def WizardFinished(self,wizard): + wizard.Destroy() + + self.getNameMugshot() + self.showNameMugshot() + + #self.saveAll() + + diff --git a/tribler-mod/Tribler/Main/vwxGUI/standardDetails.py b/tribler-mod/Tribler/Main/vwxGUI/standardDetails.py new file mode 100644 index 0000000..e590ade --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/standardDetails.py @@ -0,0 +1,2090 @@ +from time import localtime, strftime +# Written by Jelle Roozenburg, Maarten ten Brinke, Lucian Musat +# see LICENSE.txt for license information + +import wx +import wx.xrc as xrc +from binascii import hexlify +from time import sleep,time +import math +from traceback import print_exc, print_stack +import cStringIO +import urlparse +from wx.lib.stattext import GenStaticText as StaticText + +import threading + +from Tribler.Core.Overlay.MetadataHandler import get_filename + +from font import * +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility +from Tribler.Main.vwxGUI.IconsManager import IconsManager, data2wxBitmap +from Tribler.Main.vwxGUI.filesItemPanel import loadAzureusMetadataFromTorrent,createThumbImage +from Tribler.Main.Dialogs.GUITaskQueue import GUITaskQueue + +from Tribler.Main.Utility.constants import COL_PROGRESS +from Tribler.TrackerChecking.TorrentChecking import TorrentChecking +from Tribler.Video.VideoPlayer import VideoPlayer +from Tribler.Main.vwxGUI.tribler_List import DLFilesList +from Tribler.Main.vwxGUI.FriendsItemPanel import peer2status + +from Tribler.Core.API import * +from Tribler.Core.Utilities.utilities import * +from Tribler.Main.vwxGUI.TriblerStyles import TriblerStyles +from Tribler.Main.vwxGUI.bgPanel import ImagePanel +from Tribler.Core.Utilities.unicode import bin2unicode, dunno2unicode + +# Sort of LAYERVIOLATION. It's a meta DBHandler actually. 
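+# (the GUI importing Core.CacheDB directly, instead of opening the handler via
+# Session.open_dbhandler() as is done elsewhere in this file)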
+from Tribler.Core.CacheDB.CacheDBHandler import GUIDBHandler +from Tribler.Core.CacheDB.EditDist import editDist +from Tribler.Video.utils import videoextdefaults + +DETAILS_MODES = ['filesMode', 'personsMode', 'profileMode', 'libraryMode', 'friendsMode', 'fileDetailsMode','subscriptionsMode', 'messageMode'] + +DEBUG = False + +def showInfoHash(infohash): + if infohash.startswith('torrent'): # for testing + return infohash + try: + n = int(infohash) + return str(n) + except: + pass + return encodestring(infohash).replace("\n","") + +class standardDetails(wx.Panel): + """ + Wrappers around details xrc panels + """ + def __init__(self, *args): + + self.bartercastdb = None + self.top_stats = None + + if len(args) == 0: + pre = wx.PrePanel() + # the Create step is done by XRC. + self.PostCreate(pre) + self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate) + else: + wx.Panel.__init__(self, *args) + self._PostInit() + + def OnCreate(self, event): + self.Unbind(wx.EVT_WINDOW_CREATE) + wx.CallAfter(self._PostInit) + event.Skip() + return True + + def _PostInit(self): + # Do all init here + self.subscr_old_source = None + self.guiUtility = GUIUtility.getInstance() + self.utility = self.guiUtility.utility + self.torrent_db = self.utility.session.open_dbhandler(NTFY_TORRENTS) + self.friend_db = self.utility.session.open_dbhandler(NTFY_FRIENDS) + self.triblerStyles = TriblerStyles.getInstance() + self.peer_db = self.utility.session.open_dbhandler(NTFY_PEERS) + self.superpeer_db = self.utility.session.open_dbhandler(NTFY_SUPERPEERS) + #self.optionsButtonLibraryFunc = rightMouseButton.getInstance() + self.iconsManager = IconsManager.getInstance() + #self.gui_db = GUIDBHandler.getInstance() + self.playList = [] + + + self.mode = None + self.item = None + self.bartercastdb = None + self.lastItemSelected = {} #keeps the last item selected for each mode + self.data = {} #keeps gui elements for each mode + for mode in DETAILS_MODES+['status']: + self.data[mode] = {} #each mode has a dictionary of gui elements with name and reference + self.lastItemSelected[mode] = None + self.currentPanel = None + self.videoplayer = VideoPlayer.getInstance() + + self.addasfriendcount = 0 + self.addasfriendlast = 0 + + + # videodata + self.videodata = None + + + ## self.addComponents() + + #self.Refresh() + self.modeElements = {} + for elem in DETAILS_MODES: + self.modeElements[elem] = [] + self.modeElements['settingsMode'] = ['profileTitle'] + + self.modeElements['filesMode'] = ['titleField', 'simTitlesField', 'popularityField1', 'options', 'popularityField2', 'creationdateField', + 'descriptionField', 'sizeField', 'thumbField', 'up', 'down', 'refresh', + 'download', 'tabs', ('files_detailsTab','tabs'), ('info_detailsTab','tabs'), + 'TasteHeart', 'details', 'peopleWhoField', 'recommendationField'] + self.modeElements['personsMode'] = ['TasteHeart', 'recommendationField','addAsFriend', 'commonFilesField', 'commonFiles', + 'alsoDownloadedField', 'alsoDownloaded', 'info_detailsTab', 'advanced_detailsTab','detailsC', + 'titleField','statusField','thumbField', 'discFilesField', 'discPersonsField'] + self.modeElements['friendsMode'] = ['TasteHeart', 'recommendationField','addAsFriend', 'commonFilesField', 'commonFiles', + 'alsoDownloadedField', 'alsoDownloaded', 'info_detailsTab', 'advanced_detailsTab','detailsC', + 'titleField','statusField','thumbField', 'discFilesField', 'discPersonsField'] + self.modeElements['libraryMode'] = ['titleField', 'simTitlesField', 'popularityField1','options', 'popularityField2', 'creationdateField', + 
'descriptionField', 'sizeField', 'thumbField', 'up', 'down', 'refresh', + 'files_detailsTab', 'info_detailsTab', 'details', 'upload_detailsTab', 'uploadTab_details', + 'peopleWhoField'] + self.modeElements['profileMode'] = ['levelPic', 'uploadedNumber', 'downloadedNumber'] + + + self.modeElements['fileDetailsMode'] = ['titleField', 'receivedToday', 'subscrTodayField', 'receivedYesterday', 'subscrYesterdayField'] # 'receivedTotal'] + + self.modeElements['subscriptionsMode'] = ['titleField', 'receivedToday', 'subscrTodayField', 'receivedYesterday', 'subscrYesterdayField'] # 'receivedTotal'] + + self.tabElements = {'filesTab_files': [ 'download', 'includedFiles', 'filesField', 'trackerField'], + 'personsTab_advanced': ['lastExchangeField', 'timesConnectedField','addAsFriend','similarityValueField'], + 'libraryTab_files': [ 'download', 'includedFiles'], + 'profileDetails_Quality': ['descriptionField0','howToImprove','descriptionField1'], + 'profileDetails_Files': ['descriptionField0','howToImprove','descriptionField1','takeMeThere0'], + 'profileDetails_Persons': ['descriptionField0','howToImprove','descriptionField1'], + 'profileDetails_Download': ['descriptionField','Desc0','descriptionField0','howToImprove0','descriptionField1','takeMeThere0','Desc1','descriptionField2','howToImprove1','descriptionField3','takeMeThere1','Desc2','descriptionField4','howToImprove2','descriptionField5','takeMeThere2'], + #'profileDetails_Presence': ['descriptionField','Desc0','descriptionField0','howToImprove0','descriptionField1','Desc1','descriptionField2','howToImprove1','descriptionField3','Desc2','descriptionField4','howToImprove2','descriptionField5','takeMeThere0']} + 'profileDetails_Presence': ['descriptionField','Desc0','descriptionField0','howToImprove0','descriptionField1','Desc2','descriptionField4','howToImprove2','descriptionField5','takeMeThere0'], + 'uploadTab_details': ['t4t_peers', 'g2g_peers']} + + + + self.statdlElements = ['statusHeader','Downloading', 'st28c','down_White','downSpeed','up_White','upSpeed','download1','percent1','download2','percent2','download3','percent3','download4','percent4','playList'] + + self.guiUtility.initStandardDetails(self) + + + + def addComponents(self): + self.SetBackgroundColour(wx.Colour(102,102,102)) +# self.SetBackgroundColour(wx.Colour(255,51,0)) + self.hSizer = wx.BoxSizer(wx.VERTICAL) + self.SetSizer(self.hSizer) + self.SetAutoLayout(1) + self.Layout() + #print "tb" + #print self.GetSize() + + + def setMode(self, mode, item = None): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: setMode called, new mode is",mode,"old",self.mode + + if self.mode != mode: + #change the mode, so save last item selected + self.lastItemSelected[self.mode] = self.item + self.mode = mode + self.checkGraphTabVisible() + ## self.refreshMode() + if item: + self.setData(item) + elif self.lastItemSelected[self.mode]: + self.guiUtility.selectData(self.lastItemSelected[self.mode]) + else: + self.setData(None) + + def getMode(self): + return self.mode + + def refreshMode(self): + # load xrc + self.oldpanel = self.currentPanel + #self.Show(False) + + self.currentPanel = self.loadPanel() + assert self.currentPanel, "Panel could not be loaded" + self.currentPanel.Layout() + self.currentPanel.SetAutoLayout(1) + #self.currentPanel.Enable(True) +# self.currentPanel.SetBackgroundColour("red") + + + self.currentPanel.Show(True) +# print self.mode + + + if self.mode == 'filesMode' or self.mode == 'libraryMode' or self.mode=='personsMode' 
or self.mode == 'friendsMode' or self.mode == 'profileMode' or self.mode == 'subscriptionsMode' or self.mode == 'fileDetailsMode' : +# print 'tb' + self.currentPanel.SetSize((-1,5)) + self.currentPanel.Hide() + +# + if self.oldpanel: + self.hSizer.Detach(self.oldpanel) + self.oldpanel.Hide() + #self.oldpanel.Disable() + + + self.hSizer.Insert(0, self.currentPanel, 0, wx.ALL|wx.EXPAND, 0) + + +# self.currentPanel.Layout() + wx.CallAfter(self.hSizer.Layout) + wx.CallAfter(self.refreshStandardDetailsHeight) +# wx.CallAfter(self.currentPanel.Refresh) + #self.Show(True) + + + def refreshStatusPanel(self, show): + pass + ##if show: + ## statusPanel = self.data['status'].get('panel') + ## if not statusPanel: + ## statusPanel = self.loadStatusPanel() + ## self.data['status']['panel'] = statusPanel + #statusPanel.Enable() + ## statusPanel.Show() + ## self.hSizer.Insert(1, statusPanel, 0, wx.TOP|wx.EXPAND, 6) + ## self.hSizer.Layout() + ##else: + ## # Remove statusPanel if necessary + ## if self.data['status'].get('panel'): + ## statusPanel = self.data['status']['panel'] + ## try: + ## self.hSizer.Detach(statusPanel) + ## statusPanel.Hide() + ## #statusPanel.Disable() + ## except: + ## print_exc() + + def setListAspect2OneColumn(self, list_name): + try: + ofList = self.getGuiObj(list_name) + ofList.ClearAll() + if False: # sys.platform == 'win32': + ofList.SetWindowStyleFlag(wx.LC_REPORT|wx.NO_BORDER|wx.LC_NO_HEADER|wx.LC_SINGLE_SEL) #it doesn't work + else: + #ofList.SetSingleStyle(wx.LC_REPORT) + ofList.SetSingleStyle(wx.LC_NO_HEADER) + ofList.SetSingleStyle(wx.LC_SINGLE_SEL) + ofList.SetSingleStyle(wx.NO_BORDER) + ofList.InsertColumn(0, "Torrent") #essential code + # ofList.SetColumnWidth(0,wx.LIST_AUTOSIZE) + except: + # Arno, 2008-08-21: wxPython 2.8.8.1 doesn't like LC_REPORT anymore, + # for unknown reasons. Our hack around is this exception handler, + # and we MANUALLY added the style parameters to the .xrc files. 
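For reference, setListAspect2OneColumn above boils down to turning an existing wx.ListCtrl into a headerless, single-select, one-column report list, with a guard for newer wxPython builds that refuse to change the report-mode bit at runtime. A minimal standalone sketch of that setup follows; the helper name is illustrative and not part of the patch, and it assumes the module's usual "import wx" and "from traceback import print_exc":

def make_one_column_list(list_ctrl, heading="Torrent"):
    # Reduce an existing wx.ListCtrl to a headerless, single-select,
    # one-column report list, as done for peopleWhoField/simTitlesField.
    list_ctrl.ClearAll()
    try:
        list_ctrl.SetSingleStyle(wx.LC_NO_HEADER)
        list_ctrl.SetSingleStyle(wx.LC_SINGLE_SEL)
        list_ctrl.SetSingleStyle(wx.NO_BORDER)
        list_ctrl.InsertColumn(0, heading)  # a report list needs at least one column
    except Exception:
        # wxPython 2.8.8.x can assert when a style mode bit is toggled at
        # runtime; in that case the styles have to come from the .xrc file.
        print_exc()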
+ """ + Traceback (most recent call last): + File "C:\Python252\Lib\site-packages\wx-2.8-msw-unicode\wx\_core.py", line 14555, in + lambda event: event.callable(*event.args, **event.kw) ) + File "C:\build\mainbranch\Tribler\Main\vwxGUI\standardDetails.py", line 131, in _PostInit + self.guiUtility.initStandardDetails(self) + File "C:\build\mainbranch\Tribler\Main\vwxGUI\GuiUtility.py", line 294, in initStandardDetails + self.standardDetails.setMode('filesMode', firstItem) + File "C:\build\mainbranch\Tribler\Main\vwxGUI\standardDetails.py", line 155, in setMode + self.refreshMode() + File "C:\build\mainbranch\Tribler\Main\vwxGUI\standardDetails.py", line 171, in refreshMode + self.currentPanel = self.loadPanel() + File "C:\build\mainbranch\Tribler\Main\vwxGUI\standardDetails.py", line 269, in loadPanel + self.setListAspect2OneColumn("peopleWhoField") + File "C:\build\mainbranch\Tribler\Main\vwxGUI\standardDetails.py", line 220, in setListAspect2OneColumn + ofList.SetWindowStyleFlag(wx.LC_REPORT|wx.NO_BORDER|wx.LC_NO_HEADER|wx.LC_SINGLE_SEL) #it doesn't work + File "C:\Python252\Lib\site-packages\wx-2.8-msw-unicode\wx\_core.py", line 9140, in SetWindowStyleFlag + return _core_.Window_SetWindowStyleFlag(*args, **kwargs) + wx._core.PyAssertionError: C++ assertion "nModes == 1" failed at ..\..\src\msw\listctrl.cpp(380) in wxListCtrl::MSWGetStyle(): wxListCtrl style should have exactly one mode bit set + """ + print_exc() + + + def getVideodata(self): + return self.videodata + + + def setVideodata(self, videodata): + self.videodata = videodata + + + + def loadPanel(self): + currentPanel = self.data[self.mode].get('panel',None) + modeString = self.mode[:-4] + #[11.05.07]: small hack as the friends mode has no details panel, but we still want to know that this is friends mode + if self.mode == "friendsMode": + modeString = "persons" + if not currentPanel: + xrcResource = os.path.join(self.guiUtility.vwxGUI_path, modeString+'Details.xrc') + panelName = modeString+'Details' + currentPanel = self.loadXRCPanel(xrcResource, panelName) + # Save paneldata in self.data + self.data[self.mode]['panel'] = currentPanel + #titlePanel = xrc.XRCCTRL(currentPanel, 'titlePanel') + + if self.modeElements.has_key(self.mode): + for element in self.modeElements[self.mode]: + xrcElement = None + name = None + if type(element) == str: + xrcElement = xrc.XRCCTRL(currentPanel, element) + name = element + elif type(element) == tuple: + name = element[0] + xrcElement = xrc.XRCCTRL(self.getGuiObj(element[1]), name) + if not xrcElement: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardDetails: Error: Could not identify xrc element: %s for mode %s' % (element, self.mode) + pass + if name: + self.data[self.mode][name] = xrcElement + else: + self.modeElements[self.mode] = [] + + # do extra init + if modeString in ['files','library']: + self.getGuiObj('up').setBackground(wx.WHITE) + self.getGuiObj('down').setBackground(wx.WHITE) + refresh = self.getGuiObj('refresh') + refresh.setBackground(wx.WHITE) + refresh.Bind(wx.EVT_ENTER_WINDOW, self.updateLastCheck) + self.setListAspect2OneColumn("peopleWhoField") + self.setListAspect2OneColumn("simTitlesField") + infoTab = self.getGuiObj('info_detailsTab') + infoTab.setSelected(True) + self.getAlternativeTabPanel('filesTab_files', parent=currentPanel).Hide() + + # "upload" tab is added, by Boxun + self.getAlternativeTabPanel('uploadTab_details', parent=currentPanel).Hide() + + if modeString == 'files': + 
self.getGuiObj('TasteHeart').setBackground(wx.WHITE) + """ + if modeString == 'library': + graph_parent = self.getAlternativeTabPanel('Tab_graphs', parent=currentPanel) + graph_parent.Hide() + #swap the dummy Graph panel with the plot panel + dummy_graph_panel = self.getGuiObj('Graph', 'Tab_graphs') + #optionsButton = self.getGuiObj('options') + + emsg = None + try: + from graphs import StatsPanel + graph_panel = StatsPanel(graph_parent) + except ImportError, msg: + graph_panel = None + emsg=msg + if graph_panel is None: + def setData(item): + pass + dummy_graph_panel.setData = setData + def setVisible(isVisible): + pass + dummy_graph_panel.setVisible = setVisible + dummy_graph_panel.vSizer = wx.BoxSizer(wx.VERTICAL) + dummy_graph_panel.title =wx.StaticText(dummy_graph_panel,-1,"",wx.Point(0,0),wx.Size(300,300)) + dummy_graph_panel.title.SetBackgroundColour(wx.WHITE) + dummy_graph_panel.title.SetFont(wx.Font(10,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + dummy_graph_panel.title.SetMinSize((300,300)) + dummy_graph_panel.vSizer.Add(dummy_graph_panel.title, 0, wx.BOTTOM, 3) + dummy_graph_panel.title.SetLabel(str(emsg)) + dummy_graph_panel.SetSizer(dummy_graph_panel.vSizer); + dummy_graph_panel.SetAutoLayout(1); + dummy_graph_panel.Layout(); + dummy_graph_panel.Refresh() + else: + self.swapPanel(dummy_graph_panel, graph_panel) + #also set it as an object of Tab_graphs + self.data[self.mode]['Tab_graphs'+'_'+'Graph'] = graph_panel + graph_panel.SetMinSize(wx.Size(300,300)) + graph_panel.SetSize(wx.Size(300,300)) + """ + + + elif modeString in ['persons','friends']: + self.getGuiObj('TasteHeart').setBackground(wx.WHITE) + self.getGuiObj('info_detailsTab').setSelected(True) + self.getGuiObj('advanced_detailsTab').SetLabel(" advanced") + #get the list in the right mode for viewing + self.setListAspect2OneColumn("alsoDownloadedField") + self.setListAspect2OneColumn("commonFilesField") + self.getAlternativeTabPanel('personsTab_advanced', parent=currentPanel).Hide() + ofList = self.getGuiObj("alsoDownloadedField") + cfList = self.getGuiObj("commonFilesField") + ofList.setOtherList(cfList) + ofList.setFieldsUpdateFunction(self.updateNumFilesInTextFields) + + elif modeString == "profile": + self.data[self.mode]['profileDetails_Overall'] = currentPanel #also add first panel as an named element in the data list +# self.item = "profileDetails_Overall" #the name of the panel that's currently selected + self.getAlternativeTabPanel('profileDetails_Quality', parent=self).Hide() #parent is self because it is not a tab, it replaces the details panel + self.getAlternativeTabPanel('profileDetails_Files', parent=self).Hide() #parent is self because it is not a tab, it replaces the details panel + self.getAlternativeTabPanel('profileDetails_Persons', parent=self).Hide() #parent is self because it is not a tab, it replaces the details panel + self.getAlternativeTabPanel('profileDetails_Download', parent=self).Hide() #parent is self because it is not a tab, it replaces the details panel + self.getAlternativeTabPanel('profileDetails_Presence', parent=self).Hide() #parent is self because it is not a tab, it replaces the details panel + return currentPanel + + def loadStatusPanel(self): + currentPanel = self.loadXRCPanel(os.path.join(self.guiUtility.vwxGUI_path, 'statusDownloads.xrc'), 'statusDownloads') + + mode = 'status' + for element in self.statdlElements: + xrcElement = None + name = None + if type(element) == str: + xrcElement = xrc.XRCCTRL(currentPanel, element) + name = element + elif type(element) == 
tuple: + name = element[0] + xrcElement = xrc.XRCCTRL(self.data[mode][element[1]],name) + if not xrcElement: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardDetails: Error: Could not identify xrc element: %s for mode %s' % (element, mode) + pass + if name: + self.data[mode][name] = xrcElement + + # header styling + self.data['status']['downSpeed'] + self.triblerStyles.titleBar(self.data['status']['statusHeader']) + self.triblerStyles.titleBar(self.data['status']['Downloading']) + self.triblerStyles.titleBar(self.data['status']['down_White']) + self.triblerStyles.titleBar(self.data['status']['downSpeed']) + self.triblerStyles.titleBar(self.data['status']['up_White']) + self.triblerStyles.titleBar(self.data['status']['upSpeed']) + # content styling + self.triblerStyles.setDarkText(self.data['status']['download1']) + self.triblerStyles.setDarkText(self.data['status']['percent1']) + self.triblerStyles.setDarkText(self.data['status']['download2']) + self.triblerStyles.setDarkText(self.data['status']['percent2']) + self.triblerStyles.setDarkText(self.data['status']['download3']) + self.triblerStyles.setDarkText(self.data['status']['percent3']) + self.triblerStyles.setDarkText(self.data['status']['download4']) + self.triblerStyles.setDarkText(self.data['status']['percent4']) + + + +# self.triblerStyles.titleBar(self.data['status']['statusHeader'], text= self.data['status']['statusHeader'].GetName()) + + + return currentPanel + + + + def loadXRCPanel(self, filename, panelName, parent=None): + try: + currentPanel = None + if not os.path.exists(filename): + dummyFile = os.path.join(self.guiUtility.vwxGUI_path, 'dummy.xrc') + filename = dummyFile + panelName = "dummy" + res = xrc.XmlResource(filename) + # create panel + if parent is None: + parent = self + currentPanel = res.LoadPanel(parent, panelName) + if not currentPanel: + raise Exception() + return currentPanel + except: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardDetails: Error: Could not load panel from XRC-file %s' % filename + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardDetails: Tried panel: %s=%s' % (panelName, currentPanel) + print_exc() + return None + + + def getData(self): + return self.item + + def getIdentifier(self): + if not self.item: + return None + try: + if self.mode in ['filesMode','libraryMode']: + return self.item['infohash'] + elif self.mode in ['personsMode','friendsMode']: + return self.item['permid'] + elif self.mode in ['subscriptionsMode']: + return self.item['url'] + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardDetails: Error in getIdentifier for mode %s, item=%s' % (self.mode,self.item) + except: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardDetails: Error in getIdentifier for mode %s, item=%s' % (self.mode,self.item) + + print_exc() + + def setData(self, item): + self.updateCallback(item) # update callback function on changing item + self.item = item + if item is None: + item = {} + if self.mode in ['filesMode', 'libraryMode']: + #check if this is a corresponding item from type point of view + if item.get('infohash') is None: + return #no valid torrent + torrent = item + + ##titleField = self.getGuiObj('titleField') + title = torrent.get('name') + title = title[:77] + ##titleField.SetLabel(title) + ##titleField.Wrap(-1) # doesn't appear to work + +# self.setTorrentThumb(self.mode, torrent, 
self.getGuiObj('thumbField')) + + + + elif self.mode in ['personsMode', 'friendsMode']: + #check if this is a corresponding item from type point of view +# if item.get('permid') is None: +# return #no valid torrent + + titleField = self.getGuiObj('titleField') + titleField.SetLabel(item.get('name') or '') + titleField.Wrap(-1) + + #set the picture + try: + bmp = None + # Check if we have already read the thumbnail and metadata information from this torrent file + if item.get('metadata'): + bmp = item['metadata'].get('ThumbnailBitmap') + elif 'permid' in item: + mime, icondata = self.peer_db.getPeerIcon(item['permid']) + if icondata: + bmp = data2wxBitmap(mime,icondata) + + if not bmp: + superpeers = self.superpeer_db.getSuperPeers() + if 'permid' in item and item['permid'] in superpeers: + bmp = self.iconsManager.get_default('personsMode','SUPERPEER_BITMAP') + else: + bmp = self.iconsManager.get_default('personsMode','DEFAULT_THUMB') + + thumbField = self.getGuiObj("thumbField") + thumbField.setBitmap(bmp) + width, height = thumbField.GetSize() + d = 1 + thumbField.border = [wx.Point(0,d), wx.Point(width-d, d), wx.Point(width-d, height-d), wx.Point(d,height-d), wx.Point(d,0)] + thumbField.Refresh() +# wx.CallAfter(thumbField.Refresh) + + except: + print_exc() + + + if self.getGuiObj('info_detailsTab').isSelected(): + + if item.get('simRank'): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'SimRank of peer: %s' % item['simRank'] + self.setRankToRecommendationField(item['simRank']) + self.getGuiObj('TasteHeart').setRank(item['simRank']) + + + # Peer status = online status + frienstatus + label = peer2status(item) + self.getGuiObj('statusField').SetLabel(label) + + if 'num_peers' in item: + n = unicode(item['num_peers']) + if not n or n=='0': + n = '?' + self.getGuiObj('discPersonsField').SetLabel(n) + if 'num_torrents' in item: + n = unicode(item['num_torrents']) + if not n or n == '0': + n = '?' 
+ self.getGuiObj('discFilesField').SetLabel(n) + + if 'friend' in item: + fs = item.get('friend') + if fs == FS_MUTUAL or fs == FS_I_INVITED: + isfriend = self.iconsManager.get_default('personsMode','ISFRIEND_BITMAP') + isfriend_clicked = self.iconsManager.get_default('personsMode','ISFRIEND_CLICKED_BITMAP') + self.getGuiObj('addAsFriend').switchTo(isfriend,isfriend_clicked) + else: + self.getGuiObj('addAsFriend').switchBack() + + self.fillTorrentLists() + + elif self.getGuiObj('advanced_detailsTab').isSelected(): + if item.get('last_connected') is not None: + if item['last_connected'] < 0: + self.getGuiObj('lastExchangeField', tab = 'personsTab_advanced').SetLabel("never seen online") + else: + self.getGuiObj('lastExchangeField', tab = 'personsTab_advanced').SetLabel('%s %s'%(friendly_time(item['last_connected']),'ago')) + else: + self.getGuiObj('lastExchangeField', tab = 'personsTab_advanced').SetLabel('') + if item.get("connected_times") is not None: + self.getGuiObj('timesConnectedField', tab = 'personsTab_advanced').SetLabel(str(item["connected_times"])) + else: + self.getGuiObj('timesConnectedField', tab = 'personsTab_advanced').SetLabel("") + if item.get("similarity") is not None: + self.getGuiObj('similarityValueField', tab = 'personsTab_advanced').SetLabel("%.1f" % item["similarity"]) + else: + self.getGuiObj('similarityValueField', tab = 'personsTab_advanced').SetLabel("") + + addAsFriend = self.getGuiObj('addAsFriend', tab = 'personsTab_advanced') + if addAsFriend.initDone: + if item.get('friend') is not None: + fs = item['friend'] + if fs == FS_MUTUAL or fs == FS_I_INVITED: + isfriend = self.iconsManager.get_default('personsMode','ISFRIEND_BITMAP') + isfriend_clicked = self.iconsManager.get_default('personsMode','ISFRIEND_CLICKED_BITMAP') + addAsFriend.switchTo(isfriend,isfriend_clicked) + else: + addAsFriend.switchBack() + + elif self.mode == 'subscriptionsMode': + if item.get('url') is None: + return #no valid url + subscrip = item + rssurl = subscrip.get('url') + + if self.subscr_old_source is not None and self.subscr_old_source == rssurl: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: setData: subscriptionMode: Not refreshing" + return # no need to refresh + self.subscr_old_source = rssurl + + titleField = self.getGuiObj('titleField') + titleField.SetLabel(rssurl) + titleField.Wrap(-1) + + bcsub = self.utility.lang.get('buddycastsubscription') + if rssurl == bcsub: + rssurl = 'BC' + + # Gather data for views + torrents = self.torrent_db.getTorrentsFromSource(rssurl) + todayl = [] + yesterdayl = [] + now = long(time()) + sotoday = long(math.floor(now / (24*3600.0))*24*3600.0) + soyester = long(sotoday - (24*3600.0)) + for torrent in torrents: + if torrent['insert_time'] > sotoday: + todayl.append(torrent) + elif torrent['insert_time'] > soyester: + yesterdayl.append(torrent) + + todayl.sort(reverse_torrent_insertime_cmp) + yesterdayl.sort(reverse_torrent_insertime_cmp) + + # Update Today view + todayField = self.getGuiObj('receivedToday') + todaystr = " Today ("+str(len(todayl))+")" + todayField.SetLabel(todaystr) + + todayList = self.getGuiObj('subscrTodayField') + if sys.platform == 'win32': + todayList.SetWindowStyleFlag(wx.LC_REPORT|wx.NO_BORDER|wx.LC_NO_HEADER|wx.LC_SINGLE_SEL) #it doesn't work on mac + else: + todayList.SetSingleStyle(wx.NO_BORDER) + todayList.SetSingleStyle(wx.LC_REPORT) + todayList.SetSingleStyle(wx.LC_NO_HEADER) + todayList.SetSingleStyle(wx.LC_SINGLE_SEL) +# 
todayList.SetWindowStyle(wx.LC_REPORT|wx.NO_BORDER|wx.LC_SINGLE_SEL|wx.LC_NO_HEADER) + if todayList.GetColumnCount() == 0: + todayList.InsertColumn(0, "Torrent",wx.LIST_FORMAT_LEFT,280) + todayList.DeleteAllItems() + + today_infohashes = [] + for torrent in todayl: + todayList.Append([torrent['name']]) + today_infohashes.append(torrent['infohash']) + todayList.setInfoHashList(today_infohashes) + + # Update Yesterday view + ydayField = self.getGuiObj('receivedYesterday') + ydaystr = " Yesterday ("+str(len(yesterdayl))+")" + ydayField.SetLabel(ydaystr) + + ydayList = self.getGuiObj('subscrYesterdayField') + if sys.platform == 'win32': + ydayList.SetWindowStyleFlag(wx.LC_REPORT|wx.NO_BORDER|wx.LC_NO_HEADER|wx.LC_SINGLE_SEL) #it doesn't work on mac + else: + ydayList.SetSingleStyle(wx.NO_BORDER) + ydayList.SetSingleStyle(wx.LC_REPORT) + ydayList.SetSingleStyle(wx.LC_NO_HEADER) + ydayList.SetSingleStyle(wx.LC_SINGLE_SEL) + if ydayList.GetColumnCount() == 0: + ydayList.InsertColumn(0, "Torrent",wx.LIST_FORMAT_LEFT,280) + ydayList.DeleteAllItems() + yesterday_infohashes = [] + for torrent in yesterdayl: + ydayList.Append([torrent['name']]) + yesterday_infohashes.append(torrent['infohash']) + ydayList.setInfoHashList(yesterday_infohashes) + + elif self.mode == 'profileMode': + if len(item) == 0: + return + tab = None + + # -------------------------------------------------------------------------------------------------------------------------------------------------------- + ## --- Overall performance !!!! we'll leave it probably out!!! + if self.currentPanel == self.getGuiObj('profileDetails_Overall'): +# self.getGuiObj('descriptionField0').SetLabel(item.get('overall_rank')) + picture = self.getGuiObj("levelPic") + if item.get('overall_rank') == "beginner": + picture.setIndex(0) + if item.get('overall_rank') == "experienced": + picture.setIndex(1) + if item.get('overall_rank') == "top user": + picture.setIndex(2) + if item.get('overall_rank') == "master": + picture.setIndex(3) + + # -------------------------------------------------------------------------------------------------------------------------------------------------------- + # --- Quality of tribler recommendations + elif self.currentPanel == self.getGuiObj('profileDetails_Quality'): + tab = 'profileDetails_Quality' + count = item.get('downloaded_files',0) + text = self.utility.lang.get("profileDetails_Quality_description", giveerror=False) + text1 = self.utility.lang.get("profileDetails_Quality_improve", giveerror=False) + if count < 10: + only = self.utility.lang.get("profileDetails_Quality_description_onlyword", giveerror=False) + else: + only="" + self.getGuiObj('descriptionField0', tab = 'profileDetails_Quality').SetLabel(text % (only,count)) + self.getGuiObj('descriptionField1', tab = 'profileDetails_Quality').SetLabel(text1) + + # -------------------------------------------------------------------------------------------------------------------------------------------------------- + # --- Discovered Files + elif self.currentPanel == self.getGuiObj('profileDetails_Files'): + tab = 'profileDetails_Files' + count = item.get('discovered_files',0) + count2 = self.utility.session.get_torrent_collecting_max_torrents() + text = self.utility.lang.get("profileDetails_Files_description", giveerror=False) + text1 = self.utility.lang.get("profileDetails_Files_improve", giveerror=False) + self.getGuiObj('descriptionField0', tab = 'profileDetails_Files').SetLabel(text % count) + self.getGuiObj('descriptionField1', tab = 
'profileDetails_Files').SetLabel(text1 % count2) + + # -------------------------------------------------------------------------------------------------------------------------------------------------------- + # --- Discovered Persons + elif self.currentPanel == self.getGuiObj('profileDetails_Persons'): + tab = 'profileDetails_Persons' + count = 0 + count = item.get('discovered_persons',0) + text = self.utility.lang.get("profileDetails_Persons_description", giveerror=False) + text1 = self.utility.lang.get("profileDetails_Persons_improve", giveerror=False) + self.getGuiObj('descriptionField0', tab = 'profileDetails_Persons').SetLabel(text % count) + self.getGuiObj('descriptionField1', tab = 'profileDetails_Persons').SetLabel(text1) + + # -------------------------------------------------------------------------------------------------------------------------------------------------------- + ## --- Optimal download speed + elif self.currentPanel == self.getGuiObj('profileDetails_Download'): + tab = 'profileDetails_Download' + text = self.utility.lang.get("profileDetails_Download_info", giveerror=False) + self.getGuiObj('descriptionField', tab = 'profileDetails_Download').SetLabel(text) + + maxuploadrate = self.guiUtility.utility.config.Read('maxuploadrate', 'int') #kB/s + if ( maxuploadrate == 0 ): + text1 = self.utility.lang.get("profileDetails_Download_UpSpeedMax", giveerror=False) + text2 = self.utility.lang.get("profileDetails_Download_UpSpeedMax_improve", giveerror=False) + else: + text1 = self.utility.lang.get("profileDetails_Download_UpSpeed", giveerror=False) + text1 = text1 % maxuploadrate + text2 = self.utility.lang.get("profileDetails_Download_UpSpeed_improve", giveerror=False) + # maxuploadslots = self.guiUtility.utility.config.Read('maxupload', "int") + # if ( maxuploadslots == 0 ): + # text2 = self.utility.lang.get("profileDetails_Download_UpSlotsMax", giveerror=False) + # else: + # text2 = self.utility.lang.get("profileDetails_Download_UpSlots", giveerror=False) + # text2 = text2 % maxuploadslots + # maxdownloadrate = self.guiUtility.utility.config.Read('maxdownloadrate', "int") + # if ( maxdownloadrate == 0 ): + # text3 = self.utility.lang.get("profileDetails_Download_DlSpeedMax", giveerror=False) + # else: + # text3 = self.utility.lang.get("profileDetails_Download_DlSpeed", giveerror=False) + # text3 = text3 % maxdownloadrate + # text = "%s\n%s\n%s" % (text1,text2,text3) + self.getGuiObj('descriptionField0', tab = 'profileDetails_Download').SetLabel( text1) + self.getGuiObj('descriptionField1', tab = 'profileDetails_Download').SetLabel(text2) + + count = item.get('number_friends',0) + text = self.utility.lang.get("profileDetails_Download_Friends", giveerror=False) + self.getGuiObj('descriptionField2', tab = 'profileDetails_Download').SetLabel(text % count) + text = self.utility.lang.get("profileDetails_Download_Friends_improve", giveerror=False) + self.getGuiObj('descriptionField3', tab = 'profileDetails_Download').SetLabel(text) + + nat = item.get('nat_type') + if self.guiUtility.isReachable(): + text1 = self.utility.lang.get("profileDetails_Download_VisibleYes", giveerror=False) + text2 = self.utility.lang.get("profileDetails_Download_VisibleYes_improve", giveerror=False) + self.getGuiObj('descriptionField4', tab = 'profileDetails_Download').SetLabel(text1) + self.getGuiObj('descriptionField5', tab = 'profileDetails_Download').SetLabel(text2) + else: + text1 = self.utility.lang.get("profileDetails_Download_VisibleNo", giveerror=False) + text2 = 
self.utility.lang.get("profileDetails_Download_VisibleNo_improve", giveerror=False) + self.getGuiObj('descriptionField4', tab = 'profileDetails_Download').SetLabel(text1 % nat) + self.getGuiObj('descriptionField5', tab = 'profileDetails_Download').SetLabel(text2) + + # -------------------------------------------------------------------------------------------------------------------------------------------------------- + ## --- Reachability + elif self.currentPanel == self.getGuiObj('profileDetails_Presence'): + tab = 'profileDetails_Presence' + text = self.utility.lang.get("profileDetails_Presence_info", giveerror=False) + self.getGuiObj('descriptionField', tab = 'profileDetails_Presence').SetLabel(text) + + count = item.get('number_friends',0) + # use text that is also used in 'optimal download details + text = self.utility.lang.get("profileDetails_Download_Friends", giveerror=False) + self.getGuiObj('descriptionField0', tab = 'profileDetails_Presence').SetLabel(text % count) + text = self.utility.lang.get("profileDetails_Download_Friends_improve", giveerror=False) + self.getGuiObj('descriptionField1', tab = 'profileDetails_Presence').SetLabel(text) + + current_version = self.utility.getVersion() + text = self.utility.lang.get("profileDetails_Presence_VersionUnknown", giveerror=False) + new_version = item.get('new_version',text) + update_url = 'www.tribler.org' #item.get('update_url','www.tribler.org') + compare_result = item.get('compare_result',-3) + if compare_result == -1: #newer version locally + text1 = self.utility.lang.get("profileDetails_Presence_VersionNewer", giveerror=False) + text1 = text1 % (current_version, new_version) + text2 = self.utility.lang.get("profileDetails_Presence_VersionNewer_improve", giveerror=False) + text2 = text2 % update_url + elif compare_result == 0: #same version + text1 = self.utility.lang.get("profileDetails_Presence_VersionCurrent", giveerror=False) + text1 = text1 % current_version + text2 = self.utility.lang.get("profileDetails_Presence_VersionCurrent_improve", giveerror=False) + text2 = text2 % update_url + elif compare_result == 1: #newer version on website + text1 = self.utility.lang.get("profileDetails_Presence_VersionOlder", giveerror=False) + text1 = text1 % current_version + text2 = self.utility.lang.get("profileDetails_Presence_VersionOlder_improve", giveerror=False) + text2 = text2 % (new_version,update_url) + else: + text1 = self.utility.lang.get("profileDetails_Presence_VersionError", giveerror=False) + text1 = text1 % current_version + text2 = self.utility.lang.get("profileDetails_Presence_VersionError_improve", giveerror=False) + text2 = text2 % update_url + self.getGuiObj('descriptionField4', tab = 'profileDetails_Presence').SetLabel(text1) + self.getGuiObj('descriptionField5', tab = 'profileDetails_Presence').SetLabel(text2) + else: + tab = "error" + if tab != "error": + if self.reHeightToFit(tab): + + #print " do panel ",tab,"relayouting" + self.currentPanel.SetAutoLayout(1) + self.currentPanel.Layout() + self.hSizer.Layout() + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: setData: No entry for mode",self.mode + +# self.currentPanel.Refresh() + + def setRankToRecommendationField(self, rank): + recommField = self.getGuiObj('recommendationField') + assert recommField, "No recommendationField found" + + if rank != -1: + + if rank == 1: + recommField.SetLabel("%d" % rank + "st of top 20") + elif rank == 2: + recommField.SetLabel("%d" % rank + "nd of top 20") + elif rank == 3: + 
recommField.SetLabel("%d" % rank + "rd of top 20") + else: + recommField.SetLabel("%d" % rank + "th of top 20") + else: + recommField.SetLabel("") + + def reHeightToFit(self, tab=None): + """the idea is to iterate through all objects mentioned in the list of + object for current tab and to reposition them on y axis so that all of + them are fully visible -> update, the repositioning should be done automatically by + autolayouting the sizer, all that has to be done is wrap the elements that don't + have the ST_NO_AUTORESIZE flag set + returns true if elements have been repositioned so that the layout be redone""" +# if DEBUG: +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: trying to reheight panel for mode",self.mode,"and tab",tab + bElementMoved = False + VERTICAL_SPACE = 3 + try: + if tab is None: + list = self.modeElements[self.mode] + else: + list = self.tabElements[tab] + #check to see it it's worth trying to reposition elements + if len(list)>0: + prevElement = None + for elementName in list: + currentElement = self.getGuiObj(elementName, tab) + if isinstance(currentElement,wx.StaticText): + style = currentElement.GetWindowStyle() +# if DEBUG: +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: element",elementName,"has style",style + #print 'Style if %s has flag: %s' % (elementName, (style & wx.ST_NO_AUTORESIZE)) + if (style & wx.ST_NO_AUTORESIZE)==0 : + currentElement.Wrap(284) + bElementMoved = True + prevElement = None + if prevElement is not None: + prevPos = prevElement.GetPosition().y + prevHeight = prevElement.GetSize().height + new_pos = prevPos + prevHeight + VERTICAL_SPACE + # print " element",list[index],"is at",currentElement.GetPosition().y,"and has height",currentElement.GetSize().height + if new_pos != currentElement.GetPosition().y: + #reposition element as it overlaps the one above + currentElement.SetPosition(wx.Point(currentElement.GetPosition().x,new_pos)) + prevElement = currentElement + # Set size of standardDetails to size of content + + + except: + print_exc() + return bElementMoved + + def setDownloadbutton(self, torrent, tab = None, item = ''): + if item == '': + self.downloadButton2 = self.getGuiObj('download', tab = tab) + else: + self.downloadButton2 = item + + if self.downloadButton2: + if torrent.get('myDownloadHistory', False): + bitmap, bitmap2 = self.iconsManager.getDownloadButton('library') + elif torrent.get('web2'): + bitmap, bitmap2 = self.iconsManager.getDownloadButton('play') + else: + bitmap, bitmap2 = self.iconsManager.getDownloadButton('download') + + self.downloadButton2.setBitmaps(bitmap, bitmap2) + self.downloadButton2.Refresh() + + def getGuiObj(self, obj_name, tab=None, mode=None): + """handy function to retreive an object based on it's name for the current mode""" + if tab: + obj_name = tab+'_'+obj_name + if not mode: + mode = self.mode + #print 'Available objects: %s' % self.data[mode].keys() + return self.data[mode].get(obj_name) + + def show_loading(self, list_obj): + list_obj.DeleteAllItems() + index = list_obj.InsertStringItem(sys.maxint, "Searching..") + font = list_obj.GetItemFont(index) + font.SetStyle(wx.FONTSTYLE_ITALIC) + list_obj.SetItemFont(index, font) + list_obj.SetItemTextColour(index, "#555555") + + def fillSimLists(self, item): + # show loading.. 
+ self.show_loading(self.getGuiObj('peopleWhoField')) + self.show_loading(self.getGuiObj('simTitlesField')) + + + guiserver = GUITaskQueue.getInstance() + guiserver.add_task(lambda:self.updateSimLists(item), 0, id='fillSimLists') + + def updateSimLists(self, item): + def cmpfunc(x, y): + return int(10000*(editDist(x[1], name) - editDist(y[1], name))) + + infohash = item['infohash'] + name = item['name'] + gui_db = GUIDBHandler.getInstance() # LAYERVIOLATION + + sim_files = None + sim_titles = None + + try: + sim_files = gui_db.getSimItems(infohash, 8) + except: + print_exc() + wx.CallAfter(self.fillSimTorrentsList, sim_files) + + try: + sim_titles = gui_db.getSimilarTitles(name, 30, infohash) # first get a subset of titles + sim_titles.sort(cmpfunc) + except: + print_exc() + wx.CallAfter(self.fillSimTitlesList, sim_titles) + + def fillSimTorrentsList(self, sim_files): + """fills the list of torrents from library or file view with the files that are similar to the currently selected one""" + # jie.done: fill similar torrent list + # future.work: smooth the recommendation, solve the data sparse and cold start problem + + sim_torrent_list = self.getGuiObj('peopleWhoField') + if not sim_torrent_list: # user already switched to another page + return + sim_torrent_list.DeleteAllItems() + if sim_files is None: + self.errorLoadData('peopleWhoField') + return + + try: + #sim_files = self.gui_db.getSimItems(infohash, 8) # [(infohash, title)] + sim_torrent_list.setInfoHashList(None) + + torrent_list = [] + if len(sim_files) > 0: + for infohash, name, status_id, coocurrence in sim_files: + if coocurrence <= 1: # don't show too irrelevant torrents. set it to 0 if you want to show all co-occurent torrents + continue + if status_id == 0: # good + color = "blue" + elif status_id == 1: # unknown + color = "black" + elif status_id == 2: # dead + color = "red" + continue + name = dunno2unicode(name) + index = sim_torrent_list.InsertStringItem(sys.maxint, name) + sim_torrent_list.SetItemTextColour(index, color) + torrent_list.append(infohash) + # TODO: show a tip string on this listitem. SetToolTipString? 
+ sim_torrent_list.setInfoHashList(torrent_list) + + if len(torrent_list) == 0: + index = sim_torrent_list.InsertStringItem(sys.maxint, "No similar files found yet.") + font = sim_torrent_list.GetItemFont(index) + font.SetStyle(wx.FONTSTYLE_ITALIC) + sim_torrent_list.SetItemFont(index, font) + sim_torrent_list.SetItemTextColour(index, "#222222") + + except Exception, e: + print_exc() + sim_torrent_list.setInfoHashList(None) + index = sim_torrent_list.InsertStringItem(0, "Error getting similar files list") + sim_torrent_list.SetItemTextColour(index, "dark red") + + try: + sim_torrent_list.onListResize() #SetColumnWidth(0,wx.LIST_AUTOSIZE) + except: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: could not resize lists in sim_torrent_list panel" + + + def fillSimTitlesList(self, sim_titles): + """fills the list of torrents with similar titles""" + # jie.done: fill sim title list + + sim_torrent_list = self.getGuiObj('simTitlesField') + if not sim_torrent_list: + return + sim_torrent_list.DeleteAllItems() + + if sim_titles is None: + self.errorLoadData('simTitlesField') + return + + try: + sim_torrent_list.setInfoHashList(None) + + torrent_list = [] + if len(sim_titles) > 0: + for infohash, name, status_id in sim_titles: + #if infohash == item['infohash']: + # continue + name = dunno2unicode(name) + index = sim_torrent_list.InsertStringItem(sys.maxint, name) + if status_id == 0: # good + color = "blue" + elif status_id == 1: # unknown + color = "black" + elif status_id == 2: # dead + color = "red" + continue + sim_torrent_list.SetItemTextColour(index, color) + torrent_list.append(infohash) + sim_torrent_list.setInfoHashList(torrent_list) + + if len(torrent_list) == 0: + index = sim_torrent_list.InsertStringItem(sys.maxint, "No similar files found yet.") + font = sim_torrent_list.GetItemFont(index) + font.SetStyle(wx.FONTSTYLE_ITALIC) + sim_torrent_list.SetItemFont(index, font) + sim_torrent_list.SetItemTextColour(index, "#222222") + + except Exception, e: + print_exc() + sim_torrent_list.setInfoHashList(None) + index = sim_torrent_list.InsertStringItem(0, "Error getting similar files list") + sim_torrent_list.SetItemTextColour(index, "dark red") + + try: + sim_torrent_list.onListResize() #SetColumnWidth(0,wx.LIST_AUTOSIZE) + except: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: could not resize lists in sim_torrent_list panel" + + + def fillTorrentLists(self): + """fills the lists of torrents from persons detail view with common + and history files for the selected person""" + + ofList = self.getGuiObj("alsoDownloadedField") + if not ofList: + return + cfList = self.getGuiObj("commonFilesField") + if not cfList: + return + + ofList.setInfoHashList(None) + if ( self.mode != "personsMode" and self.mode != "friendsMode" ) or \ + self.item is None or self.item.get('permid') is None: + return + + self.show_loading(ofList) + self.show_loading(cfList) + + guiserver = GUITaskQueue.getInstance() + permid = self.item.get('permid') + guiserver.add_task(lambda:self.updateTorrentLists(permid), 0, id='fillTorrentLists') + + def updateTorrentLists(self, permid): + common_files = None + other_files = None + gui_db = GUIDBHandler.getInstance() + + try: + common_files = gui_db.getCommonFiles(permid) #[name] + except: + print_exc() + wx.CallAfter(self.fillCommonList, common_files) + + try: + other_files = gui_db.getOtherFiles(permid) #[(infohash,name)] + except: + print_exc() + 
wx.CallAfter(self.fillOtherList, other_files) + + def fillCommonList(self, common_files): + cfList = self.getGuiObj("commonFilesField") + cfList.DeleteAllItems() + + if common_files is None: + self.errorLoadData('commonFilesField') + return + + if len(common_files) == 0: + index = cfList.InsertStringItem(sys.maxint, "No common files with this person.") + font = cfList.GetItemFont(index) + font.SetStyle(wx.FONTSTYLE_ITALIC) + cfList.SetItemFont(index, font) + cfList.SetItemTextColour(index, "#222222") + cfList.isEmpty = True # used by DLFilesList to remove "No common files with this person." + else: + cfList.isEmpty = False + for name in common_files: + cfList.InsertStringItem(sys.maxint, name) + + def fillOtherList(self, other_files): + ofList = self.getGuiObj("alsoDownloadedField") + ofList.DeleteAllItems() + + if other_files is None: + self.errorLoadData('alsoDownloadedField') + return + + if len(other_files) == 0: + index = ofList.InsertStringItem(sys.maxint, "No files advertised by this person.") + font = ofList.GetItemFont(index) + font.SetStyle(wx.FONTSTYLE_ITALIC) + ofList.SetItemFont(index, font) + ofList.SetItemTextColour(index, "#222222") + else: + torrent_list = [] + for infohash, name in other_files: + ofList.InsertStringItem(sys.maxint, name) + torrent_list.append(infohash) + ofList.setInfoHashList(torrent_list) + + def errorLoadData(self, obj_name): + flist = self.getGuiObj(obj_name) + flist.DeleteAllItems() + if isinstance(flist, DLFilesList): + flist.setInfoHashList(None) + index = flist.InsertStringItem(sys.maxint, "Error: cannot load the data") + flist.SetItemTextColour(index, "#222222") + + try: + flist.onListResize() + except: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: could not resize lists in person detail panel" + + def updateNumFilesInTextFields(self, cfList, ofList): + numItems = [cfList.GetItemCount(), ofList.GetItemCount()] + self.getGuiObj('commonFiles').SetLabel(self.utility.lang.get('commonFiles') % numItems[0]) + nprefs = max(self.getData().get('nprefs',0), numItems[1]) + self.getGuiObj('alsoDownloaded').SetLabel(self.utility.lang.get('alsoDownloaded') % (numItems[1], nprefs)) + + def checkGraphTabVisible(self, tab2check='Graph', selectedTab=None): + # just some generic way of making sure that a certain panel is informed when it is or not visible + #the function must be there! 
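The fillSimLists/fillTorrentLists code above follows one pattern throughout: run the potentially slow database query on the GUITaskQueue worker, then hand the result back to the wx main loop with wx.CallAfter, delivering None on failure so the fill* methods can show their "Error: cannot load the data" row. A minimal sketch of that pattern, assuming the same GUITaskQueue API used above; the helper name and its arguments are illustrative, not part of the patch:

def run_query_async(query_func, gui_callback, task_id):
    # query_func() runs on the GUITaskQueue worker thread;
    # gui_callback(result) runs on the wx main loop. A failed query
    # delivers None, which the fill* methods translate into an error row.
    def task():
        try:
            result = query_func()
        except Exception:
            print_exc()
            result = None
        wx.CallAfter(gui_callback, result)
    GUITaskQueue.getInstance().add_task(task, 0, id=task_id)

Used the way fillSimLists schedules its work, a call would read run_query_async(lambda: gui_db.getSimItems(infohash, 8), self.fillSimTorrentsList, 'fillSimLists').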
+ graph_panel = self.getGuiObj(obj_name='Graph', tab='Tab_graphs', mode='libraryMode') + if graph_panel is None: + return + if self.mode == 'libraryMode': + if selectedTab is None: + #find currently selected tab + tabButtons = { 'files_detailsTab':self.getGuiObj('files_detailsTab'), + 'info_detailsTab':self.getGuiObj('info_detailsTab'), + 'graphs_detailsTab':self.getGuiObj('graphs_detailsTab') } + for key in tabButtons.keys(): + if tabButtons[key].isSelected(): + selectedTab = key + break + if selectedTab == 'graphs_detailsTab': + graph_panel.setVisible(True) + return + graph_panel.setVisible(False) + + def tabClicked(self, name): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardDetails: tabClicked: %s' % name + #self.checkGraphTabVisible(selectedTab=name) + + if self.mode == 'libraryMode': + tabButtons = { 'files_detailsTab':self.getGuiObj('files_detailsTab'), + 'info_detailsTab':self.getGuiObj('info_detailsTab'), + 'upload_detailsTab':self.getGuiObj('upload_detailsTab')} + # 'graphs_detailsTab':self.getGuiObj('graphs_detailsTab') } + tabPanelNames = { 'files_detailsTab':'filesTab_files', + 'info_detailsTab':'details', + 'upload_detailsTab':'uploadTab_details'} + #'graphs_detailsTab':'Tab_graphs'} + #TODO: change from currentPanel to the string name of the current selected details panel + #get the currently selected panel + current_name = 'details' + panel_name = 'details' + for key in tabButtons.keys(): + if name == key: + panel_name = tabPanelNames[key] + if tabButtons[key].isSelected(): + current_name = tabPanelNames[key] + panel1 = self.getGuiObj(current_name) + panel2 = self.getGuiObj(panel_name) + if panel1 is not None and panel2 is not None and panel1 != panel2: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: switching from "+current_name+" to "+panel_name + self.swapPanel(panel1, panel2) + + for key in tabButtons.keys(): + try: + if key == name: + tabButtons[key].setSelected(True) + else: + tabButtons[key].setSelected(False) + except: + print "tab %s has no button??" 
% key + self.currentPanel.SetAutoLayout(1) + self.currentPanel.Layout() + self.hSizer.Layout() + elif self.mode == 'filesMode': + tabFiles = self.getGuiObj('files_detailsTab') + tabInfo = self.getGuiObj('info_detailsTab') + infoPanel = self.getGuiObj('details') + # sizer = infoPanel.GetContainingSizer() + filesPanel = self.getGuiObj('filesTab_files') + + if name == 'files_detailsTab' and not tabFiles.isSelected(): + tabFiles.setSelected(True) + tabInfo.setSelected(False) + self.swapPanel( infoPanel, filesPanel)#, sizer, 3) + + elif name == 'info_detailsTab' and not tabInfo.isSelected(): + tabFiles.setSelected(False) + tabInfo.setSelected(True) + self.swapPanel( filesPanel, infoPanel)#, sizer, 3) + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardDetails: %s: Unknown tab %s' % (self.mode,name) + return +# relayout the details panel to accomodate the new panel + + + self.currentPanel.SetAutoLayout(1) + self.currentPanel.Layout() + self.hSizer.Layout() + + elif self.mode in ["personsMode","friendsMode"]: + tabAdvanced = self.getGuiObj('advanced_detailsTab') + tabInfo = self.getGuiObj('info_detailsTab') + infoPanel = self.getGuiObj('detailsC') + advancedPanel = self.getGuiObj('personsTab_advanced') + if name == 'advanced_detailsTab' and not tabAdvanced.isSelected(): + tabAdvanced.setSelected(True) + tabInfo.setSelected(False) + self.swapPanel( infoPanel, advancedPanel) + elif name == 'info_detailsTab' and not tabInfo.isSelected(): + tabAdvanced.setSelected(False) + tabInfo.setSelected(True) + self.swapPanel( advancedPanel, infoPanel) + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardDetails: %s: Unknown tab %s' % (self.mode,name) + return +# print " advanced tab has label:",tabAdvanced.GetLabel() + + #relayout the details panel to accomodate the new panel + self.currentPanel.SetAutoLayout(1) + self.currentPanel.Layout() + self.hSizer.Layout() + + elif self.mode == "profileMode": +# print " try to switch to",name + if name.startswith("bgPanel"): + name = "profileDetails"+name[7:] +# if name == "profileDetails_Overall": +# name = 'panel' +# print " current panel is:",self.item +# if self.item is None: +# self.item = 'panel' + panel1 = self.currentPanel #getGuiObj(self.item) + panel2 = self.getGuiObj(name) + if panel1 is not None and panel2 is not None and panel1 != panel2: +#=============================================================================== +# print " switch from %s[%s] to %s[%s]" % (panel1.GetName(), panel1.GetParent().GetName(), panel2.GetName(), panel2.GetParent().GetName()) +# if isinstance(panel1,tribler_topButton): +# print " set unselected for",panel1.GetName() +# panel1.setSelected(False) +# else: +# print " panel1 ",panel1.GetName()," is of type ",panel1.__class__.__name__ +# if panel2.__class__.__name__.endswith("tribler_topButton"): +# print " set selected for",panel2.GetName() +# panel2.setSelected(True) +# else: +# print " panel2 ",panel2.GetName()," is of type ",panel2.__class__.__name__ +#=============================================================================== + self.swapPanel(panel1, panel2) + #each time the panel changes, update the 'panel' reference in data list + self.data[self.mode]['panel'] = panel2 + #actually, update the currentPanel reference + self.currentPanel = panel2 +# self.item = name +# else: +# print " can't switch, one of the panel is None or the same panel" +# self.currentPanel.Layout() +# self.currentPanel.SetAutoLayout(1) +# self.hSizer.Layout() + 
if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: switch from %s[%s] to %s[%s]" % (panel1.GetName(), panel1.GetParent().GetName(), panel2.GetName(), panel2.GetParent().GetName()) + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardDetails: Tab (%s) for this mode (%s) not yet implemented' % (name,self.mode) + return + + self.setData(self.item) + self.refreshStandardDetailsHeight() + + + def swapPanel(self, oldpanel, newpanel, sizer=None, index=-1): + """replaces in a sizer a panel with another one to simulate tabs""" + if sizer is None: + sizer = oldpanel.GetContainingSizer() + if not sizer: + return #could not swap + #if index not given, use sizer's own replace method + if index == -1: + index = 0 + for panel in sizer.GetChildren(): + if panel.GetWindow() == oldpanel: + break + index = index + 1 + if index == len(sizer.GetChildren()): + return #error: index not found so nothing to change +# sizerItem = sizer.Replace(oldpanel, newpanel) +# print "found index is:",index,"number of children in sizer:",len(sizer.GetChildren()) + # remove info tab panel + sizer.Detach(oldpanel) + oldpanel.Hide() + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: found sizer equal to hSizer?",(sizer==self.hSizer) + # add files tab panel + newpanel.SetAutoLayout(1) + newpanel.Layout() + if not newpanel.IsShown(): + newpanel.Show() + sizer.Insert(index, newpanel, 0, wx.ALL|wx.EXPAND, 0) + sizer.Layout() + + def getAlternativeTabPanel(self, name, parent=None): + "Load a tabPanel that was not loaded as default" + panel = self.getGuiObj(name) + if panel: + return panel + else: + # generate new panel + xrcResource = os.path.join(self.guiUtility.vwxGUI_path, name+'.xrc') + if os.path.exists(xrcResource): + panelName = name + if parent is None: + parent = self.currentPanel + panel = self.loadXRCPanel(xrcResource, panelName, parent=parent) + if panel is not None and self.tabElements.has_key(name): + for element in self.tabElements[name]: + xrcElement = xrc.XRCCTRL(panel, element) + if not xrcElement: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardDetails: Error: Could not identify xrc element: %s for mode %s' % (element, self.mode) + pass + self.data[self.mode][name+'_'+element] = xrcElement + + self.data[self.mode][name] = panel + + return panel + + def mouseAction(self, event): + """ Arno: apparently not used, see GUIUtility.buttonClicked() """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardDetails: mouseAction' + + obj = event.GetEventObject() + #print obj + + if not self.data: + return + if obj == self.downloadButton: + self.download(self.data) + # --tb-- +# if obj == self.optionsButtonLibrary: +# # zelfde menu als rechterMuisKnop +# print "optionsButton" +# self.rightMouseAction(event) + elif obj == self.refreshButton: + #and self.refreshButton.isEnabled(): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: refresh seeders and leechers" + #self.swarmText.SetLabel(self.utility.lang.get('refreshing')+'...') + #self.swarmText.Refresh() + + self.refresh(self.data) + + def rightMouseButton(self, event): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardDetails: --tb-- keydown function(2)' + menu = self.guiUtility.OnRightMouseAction(event) + if menu is not None: + self.PopupMenu(menu, (-1,-1)) + + + def refresh(self, 
torrent): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardDetails: refresh ' + repr(torrent.get('name', 'no_name')) + check = TorrentChecking(torrent['infohash']) + check.start() + + +# def isEnabled(self): +# return self.enabled + + def _download_torrentfile_from_peers(self, torrent, callback): + """ + TORRENT is a dictionary containing torrent information used to + display the entry on the UI. it is NOT the torrent file! + + CALLBACK is called when the torrent is downloaded. When no + torrent can be downloaded the callback is ignored + """ + def success_callback(*args): + # empty the permids list to indicate that we are done + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: _download_torrentfile_from_peers: received .torrent from peer" + if state[0]: + state[0] = False + callback(*args) + + def next_callback(timeout): + """ + TIMEOUT: when TIMEOUT>=0 then will try another peer after TIMEOUT seconds. + """ + if state[0] and state[1]: + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: _download_torrentfile_from_peers: trying to .torrent download from peer.",len(state[1])-1,"other peers to ask" + self.utility.session.download_torrentfile_from_peer(state[1].pop(0), torrent['infohash'], success_callback) + if timeout >= 0: + next_callback_lambda = lambda:next_callback(timeout) + guiserver.add_task(next_callback_lambda, timeout) + + guiserver = GUITaskQueue.getInstance() + state = [True, torrent['query_permids'][:]] + torrent['query_torrent_was_requested'] = True + + # The rules and policies below can be tweaked to increase + # performace. More parallel requests can be made, or the + # timeout to ask more people can be decreased. All at the + # expence of bandwith. + if torrent['torrent_size'] > 50 * 1024: + # this is a big torrent. to preserve bandwidth we will + # request sequentially with a large timeout + next_callback(3) + + elif 0 <= torrent['torrent_size'] <= 10 * 1024: + # this is a small torrent. bandwidth is not an issue so + # download in parallel + next_callback(-1) + next_callback(1) + + else: + # medium and unknown torrent size. + next_callback(1) + + def torrent_is_playable(self, torrent=None, default=True, callback=None): + """ + TORRENT is a dictionary containing torrent information used to + display the entry on the UI. it is NOT the torrent file! + + DEFAULT indicates the default value when we don't know if the + torrent is playable. + + CALLBACK can be given to result the actual 'playable' value + for the torrent after some downloading/processing. The DEFAULT + value is returned in this case. 
Will only be called if + self.item == torrent + """ + if torrent is None: + torrent = self.item + + if 'torrent_file_name' not in torrent or not torrent['torrent_file_name']: + torrent['torrent_file_name'] = get_filename(torrent['infohash']) + torrent_dir = self.utility.session.get_torrent_collecting_dir() + torrent_filename = os.path.join(torrent_dir, torrent['torrent_file_name']) + + if os.path.isfile(torrent_filename): + tdef = TorrentDef.load(torrent_filename) + if tdef.get_files(exts=videoextdefaults): + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "standardDetails:torrent_is_playable is playable" + return True + else: + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "standardDetails:torrent_is_playable is NOT playable" + return False + + elif callback: + # unknown, figure it out and return the information using + # a callback + + if 'query_permids' in torrent and not torrent.get('myDownloadHistory'): + def got_requested_torrent(infohash, metadata, filename): + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "standardDetails:torrent_is_playable Downloaded a torrent" + # test that we are still focussed on the same torrent + if torrent_filename.endswith(filename) and self.item == torrent: + # recursive call + playable = self.torrent_is_playable(torrent, default=default) + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "standardDetails:torrent_is_playable performing callback. is playable", playable + callback(torrent, playable) + self._download_torrentfile_from_peers(torrent, got_requested_torrent) + + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "standardDetails:torrent_is_playable returning default", default + return default + + + def download(self, torrent = None, dest = None, secret = False, force = False, vodmode = False): + if torrent is None: + torrent = self.item + + +# if self.GetName() == 'download': + + force = True + if (torrent is None or torrent.get('myDownloadHistory')) and not force: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: download: Bailout" + return + + #print "**** standdetail: download", `torrent` + + if torrent.get('web2'): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: download: Playing WEB2 video: " + torrent['url'] + self.videoplayer.play_url(torrent['url']) + self.setDownloadbutton(torrent=self.item, item = self.downloadButton2) + return True + + if 'query_permids' in torrent and not torrent.get('myDownloadHistory'): + sesscb_got_requested_torrent_lambda = lambda infohash,metadata,filename:self.sesscb_got_requested_torrent(torrent,infohash,metadata,filename,vodmode) + self._download_torrentfile_from_peers(torrent, sesscb_got_requested_torrent_lambda) + + # Show error if torrent file does not come in + tfdownload_timeout_lambda = lambda:self.guiserv_tfdownload_timeout(torrent) + guiserver = GUITaskQueue.getInstance() + guiserver.add_task(tfdownload_timeout_lambda,20) + + # Show pending colour + self.guiUtility.standardOverview.refreshGridManager() + + #self.setDownloadbutton(torrent=self.item, item = self.downloadButton2) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", torrent, torrent.keys() + return True + + torrent_dir = self.utility.session.get_torrent_collecting_dir() + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'standardDetails: download: got 
torrent to download', 'torrent_file_name' in torrent, torrent_dir, torrent['torrent_file_name'] + + if 'torrent_file_name' not in torrent: + torrent['torrent_file_name'] = get_filename(torrent['infohash']) + torrent_filename = os.path.join(torrent_dir, torrent['torrent_file_name']) + + if torrent.get('name'): + name = torrent['name'] + else: + name = showInfoHash(torrent['infohash']) + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: download: Preparing to start:",`name` + + if os.path.isfile(torrent_filename): + + clicklog={'keywords': self.guiUtility.torrentsearch_manager.searchkeywords[self.mode], + 'reranking_strategy': self.guiUtility.torrentsearch_manager.rerankingStrategy[self.mode].getID()} + if "click_position" in torrent: + clicklog["click_position"] = torrent["click_position"] + + + # Api download + d = self.utility.frame.startDownload(torrent_filename,destdir=dest, + clicklog=clicklog,name=name,vodmode=vodmode) ## remove name=name + if d: + if secret: + self.torrent_db.setSecret(torrent['infohash'], secret) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardDetails: download: download started' + # save start download time. + #self.setDownloadbutton(torrent=self.item, item = self.downloadButton2) + #torrent['download_started'] = time() + #torrent['progress'] = 0.0 + self.setBelongsToMyDowloadHistory(torrent, True) + return True + else: + return False + else: + + # Torrent not found + str = self.utility.lang.get('delete_torrent') % name + dlg = wx.MessageDialog(self, str, self.utility.lang.get('delete_dead_torrent'), + wx.YES_NO|wx.NO_DEFAULT|wx.ICON_INFORMATION) + result = dlg.ShowModal() + dlg.Destroy() + if result == wx.ID_YES: + infohash = torrent['infohash'] + self.torrent_db.deleteTorrent(infohash, delete_file=True, commit = True) + + + + return True + else: + return False + + def sesscb_got_requested_torrent(self,querytorrent,infohash,metadata,filename,vodmode): + """ The torrent file requested from another peer came in. + @param querytorrent The original torrent record as shown on the screen + @param infohash The infohash of the torrent file. + @param metadata The contents of the torrent file (still bencoded) + @param vodmode Whether to download in VOD mode (lambda added) + """ + # Called by SessionCallback thread + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: sesscb_got_requested_torrent:",`infohash` + + # Update the torrent record, and refresh the view afterwards such + # that it shows as a torrent being downloaded. + querytorrent['torrent_file_name'] = filename + self.setBelongsToMyDowloadHistory(querytorrent, True) + + wx.CallAfter(self.download,querytorrent,force=True,vodmode=vodmode) + wx.CallAfter(self.guiUtility.standardOverview.refreshGridManager) + + def setBelongsToMyDowloadHistory(self,torrent, b): + """Set a certain new torrent to be in the download history or not + Should not be changed by updateTorrent calls""" + + # DB registration and buddycast notification is done in LaunchManyCore.add() + # Currently no removal function. 
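[Editor's note] The request policy in _download_torrentfile_from_peers above (sequential asks 3 seconds apart for .torrent files over 50 KiB, two immediate asks for files up to 10 KiB, a single 1-second stagger otherwise) is easier to see in isolation. Below is a minimal sketch of that scheduling logic using a plain peer list and a generic scheduler callable in place of GUITaskQueue and the Session API; every name in it is illustrative, not a Tribler API.

    def schedule_torrentfile_requests(torrent_size, peers, request_from_peer, schedule):
        """Mirror of the size-based policy in _download_torrentfile_from_peers.

        torrent_size      -- size of the .torrent file in bytes (0 if unknown)
        peers             -- candidate peer ids, asked in order
        request_from_peer -- callable(peer_id) that fires one request
        schedule          -- callable(delay_seconds, fn) that runs fn later
        """
        state = {'done': False, 'peers': list(peers)}

        def ask_next(timeout):
            if state['done'] or not state['peers']:
                return
            request_from_peer(state['peers'].pop(0))
            if timeout >= 0:
                # keep asking further peers until a .torrent arrives
                schedule(timeout, lambda: ask_next(timeout))

        if torrent_size > 50 * 1024:
            ask_next(3)      # big file: one request at a time, 3 s apart
        elif 0 <= torrent_size <= 10 * 1024:
            ask_next(-1)     # small file: a second request straight away...
            ask_next(1)      # ...plus the usual 1 s staggered chain
        else:
            ask_next(1)      # medium or unknown size: 1 s stagger

        def mark_done():
            state['done'] = True   # equivalent of flipping state[0] above
        return mark_done

    # toy wiring: record requests instead of hitting the network
    requests, scheduled = [], []
    mark_done = schedule_torrentfile_requests(
        torrent_size=8 * 1024,
        peers=['peer1', 'peer2', 'peer3'],
        request_from_peer=requests.append,
        schedule=lambda delay, fn: scheduled.append((delay, fn)))
    print(requests)        # ['peer1', 'peer2'] - two immediate asks for a small file
    print(len(scheduled))  # 1 - the 1 s follow-up chain is queued

Calling the returned mark_done from the success callback plays the role of the success_callback above: it stops the chain from bothering any further peers.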
+ torrent['myDownloadHistory'] = True + + + def guiserv_tfdownload_timeout(self,torrent): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: tdownload_timeout: Did we receive",`torrent['name']` + dbrecord = self.torrent_db.getTorrent(torrent['infohash']) + d = self.getData() + if d is not None: + selinfohash = d.get('infohash',None) + if dbrecord is None and torrent['infohash'] == selinfohash: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: tdownload_timeout: Couldn't get torrent from peer",`torrent['name']` + wx.CallAfter(self.tfdownload_timeout_error) + + def tfdownload_timeout_error(self): + self.videoplayer.set_player_status("Error starting download. Could not get metadata from remote peer.") + + def setTorrentThumb(self, mode, torrent, thumbPanel, size = 'large'): + + if not thumbPanel: + return + + thumbPanel.setBackground(wx.BLACK) + if mode in ['filesMode', 'libraryMode']: + self.getThumbnailLarge(torrent,thumbPanel, size) + elif mode in ['personsMode', 'friendMode']: + # get thumbimage of person + if False: + pass + else: + default = self.iconsManager.get_default('personsMode','DEFAULT_THUMB') + thumbPanel.setBitmap(default) + + def addAsFriend(self): + # add the current user selected in details panel as a friend or delete + if self.mode in ["personsMode","friendsMode"]: + peer_data = self.item + if peer_data is not None and peer_data.get('permid'): + + self.addasfriendcount += 1 + now = time() + diff = now - self.addasfriendlast + if self.addasfriendcount >= 2 and diff < 1.0: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: addAsFriend: ratelimiter!" + return + if diff > 10.0: + self.addasfriendcount = 0 + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: addAsFriend: stats",self.addasfriendcount,diff + + #self.friend_db.toggleFriend(peer_data['permid']) + fs = peer_data['friend'] + if fs == FS_NOFRIEND or fs == FS_I_DENIED or fs == FS_HE_DENIED: + # Invite him, reinvite him + self.utility.session.send_friendship_message(peer_data['permid'],F_REQUEST_MSG) + elif fs == FS_MUTUAL or fs == FS_I_INVITED: + # Remove friendship + self.utility.session.send_friendship_message(peer_data['permid'],F_RESPONSE_MSG,approved=False) + elif fs == FS_HE_INVITED: + # Confirm friendship + self.utility.session.send_friendship_message(peer_data['permid'],F_RESPONSE_MSG,approved=True) + self.addasfriendlast = time() + + + def refreshUploadStats(self, dslist): + # Update the overrall uploading information + if self.mode in ['libraryMode']: + if self.getGuiObj('upload_detailsTab').isSelected(): + tab = 'uploadTab_details' + t4t_list = self.getGuiObj('t4t_peers', tab = tab) + t4t_list.setData(dslist) + + g2g_list = self.getGuiObj('g2g_peers', tab = tab) + g2g_list.setData(dslist) + + def refreshTorrentStats(self,dslist): + """ Called by GUI thread """ + nactive = 0 + + tl = [] + totaldlspeed = 0.0 + totalulspeed = 0.0 + for ds in dslist: + d = ds.get_download() + progress = ds.get_progress() + + + totaldlspeed += ds.get_current_speed(DOWNLOAD) + totalulspeed += ds.get_current_speed(UPLOAD) + + status = ds.get_status() + if status != DLSTATUS_STOPPED and status != DLSTATUS_STOPPED_ON_ERROR: + nactive += 1 + + # Only show active downloading unfinished torrents + if progress < 1.0: + tl.append([progress,d]) + + + + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: 
stats:",`d.get_def().get_name()`,progress,status + + + + # Reverse sort on percentage done, get top 4 + tl.sort(revtcmp) + ml = min(len(tl),4) + newtl = tl[:ml] + + for i in range(4): + if i < ml: + (progress,d) = newtl[i] + progresstxt = progress2txt(progress) + file = d.get_def().get_name_as_unicode() + else: + progresstxt = '' + file = '' + tname = 'download'+str(i+1) + pname = 'percent'+str(i+1) + tlabel = self.data['status'][tname] + plabel = self.data['status'][pname] + #print "Setting",pname,"to",progresstxt + tlabel.SetLabel(file[:45]) + plabel.SetLabel(progresstxt[:45]) + statdlpanel = self.data['status']['panel'] + + self.refreshTorrentTotalStats(nactive,totaldlspeed,totalulspeed) + + statdlpanel.Refresh() + + + def refreshTorrentTotalStats(self,nactive,totaldlspeed,totalulspeed): + """ Called by GUI thread """ + leftlabel = self.data['status']['Downloading'] + rightlabel = self.data['status']['downSpeed'] + rightlabel2 = self.data['status']['upSpeed'] + + lefttext = self.utility.lang.get('downloading')+' ('+str(nactive)+')' + righttxt = str(int(totaldlspeed))+' KB/s' + righttxt2 = str(int(totalulspeed))+' KB/s' + leftlabel.SetLabel(lefttext) + rightlabel.SetLabel(righttxt) + rightlabel2.SetLabel(righttxt2) + + def addToPlaylist(self, name ='--', add=False): + playListPanel = self.data['status']['playList'] + + if playListPanel.GetChildren() == []: + vSizer = wx.BoxSizer(wx.VERTICAL) + playListPanel.SetSizer(vSizer) + playListPanel.SetAutoLayout(1) + else: + vSizer = playListPanel.GetSizer() + + if not add and len(self.playList) > 0: + playListPanel.DestroyChildren() + self.playList = [] + + if name != '': + self.playList.append(name) + + hSizer = wx.BoxSizer(wx.HORIZONTAL) + text = wx.StaticText(playListPanel,-1,"",wx.Point(0,0)) + text.SetSize((-1, 12)) +# text.SetBackgroundColour(wx.WHITE) + text.SetLabel(name) + hSizer.Add(text, 1, wx.TOP, 2) + + progressBar = ImagePanel(playListPanel, -1, wx.DefaultPosition, wx.Size(30,10), name='progressBar') + hSizer.Add(progressBar, 0, wx.FIXED_MINSIZE|wx.TOP|wx.ALIGN_RIGHT, 4) + + vSizer.Add(hSizer, 0, wx.EXPAND, 0) + + number = len(self.playList) + playListPanel.SetMinSize((-1, (number*15+12))) + playListPanel.Layout() + playListPanel.GetParent().Layout() #Layout of statusDownloads + self.Layout() #Layout of StandardDetails + + def updateLastCheck(self, event=None): + #print 'updateLastCheck' + if self.item and self.item.has_key('last_check_time'): + last_time = self.item.get('last_check_time') + if last_time and type(last_time) == int: + self.getGuiObj('refresh').SetToolTipString('%s: %s' % (self.utility.lang.get('last_checked'), friendly_time(last_time))) + event.Skip() + + """ + def subscrNeedsGUIUpdate(self,todayl,yesterdayl): + update = True + if len(todayl) > 0: + if self.subscrDataCopy_today_top is not None and self.subscrDataCopy_today_top == todayl[0]: + update = False + self.subscrDataCopy_today_top = todayl[0] + + if len(yesterdayl) > 0: + if self.subscrDataCopy_yday_top is not None and self.subscrDataCopy_yday_top == yesterdayl[0]: + update = False + self.subscrDataCopy_yday_top = yesterdayl[0] + return update + """ + + def getThumbnailLarge(self,torrent,thumbPanel, size='large'): + readable = torrent.get('metadata',{}).get('ThumbReadable') + if readable == False: + default = self.iconsManager.getCategoryIcon('filesMode',torrent.get('category'), 'large') + thumbPanel.setBitmap(default) + return + + if 'preview' in torrent: + thumbnailString = torrent['preview'] + else: + # Arno: Read big image on demand + torrent_dir = 
self.utility.session.get_torrent_collecting_dir() + torrent_filename = os.path.join(torrent_dir, torrent['torrent_file_name']) + metadata = loadAzureusMetadataFromTorrent(torrent_filename) + if metadata: + thumbnailString = metadata.get('Thumbnail') + + else: + thumbnailString = None + + + if 'metadata' not in torrent: + # Dump the raw data + if thumbnailString: + del metadata['Thumbnail'] + + torrent['metadata'] = metadata + + if thumbnailString: + img = createThumbImage(thumbnailString) + + #print 'Found thumbnail: %s' % thumbnailString + iw, ih = img.GetSize() + w, h = thumbPanel.GetSize() + if (iw/float(ih)) > (w/float(h)): + nw = w + nh = int(ih * w/float(iw)) + else: + nh = h + nw = int(iw * h/float(ih)) + if nw != iw or nh != ih: + #print 'Rescale from (%d, %d) to (%d, %d)' % (iw, ih, nw, nh) + try: + # if wx >= 2.7, use Bicubic scaling + img.Rescale(nw, nh, quality = wx.IMAGE_QUALITY_HIGH) + except: + img.Rescale(nw, nh) + bmp = wx.BitmapFromImage(img) + + thumbPanel.setBitmap(bmp) + torrent['metadata']['ThumbReadable'] = True + else: + #print 'Torrent: %s' % torrent + torrent['metadata']['ThumbReadable'] = False + + #print "****** torrent", torrent + + default = self.iconsManager.getCategoryIcon('filesMode',torrent.get('category','all'), 'large') + thumbPanel.setBitmap(default) + + def refreshStandardDetailsHeight(self, panel = None): + if not panel: + panel = self.currentPanel + margin = 6 + if self.data.get('status',{}).get('panel'): + statusPanelHeight = self.data['status']['panel'].GetSize()[1] + else: + statusPanelHeight = 0 + + newHeight = panel.GetSize()[1] + statusPanelHeight + margin +# size = (20,newHeight) +# self.SetSize(size) +# self.SetMinSize(size) +# self.SetMaxSize(size) + self.GetContainingSizer().Layout() + # Resize scrollWindow to make scrollbars update to new windowsize + self.guiUtility.scrollWindow.FitInside() + self.Refresh() + + if DEBUG: + print 'StandardDetails: setting height of stand.details to: %s' % str(newHeight) + + def topNListText(self, tab): + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: topNListText ^^^^^^^^^" + + if self.top_stats is None: + return + + top_stats = self.top_stats + + top = top_stats['top'] + #total_up = top_stats['total_up'] + #total_down = top_stats['total_down'] + tribler_up = top_stats['tribler_up'] + tribler_down = top_stats['tribler_down'] + + rank = 1 + topText = '' + for permid, up, down in top: + + # up and down are integers in KB in the database + # (for overhead limitation) + amount_str_up = self.utility.size_format(up) + amount_str_down = self.utility.size_format(down) + + name = self.bartercastdb.getName(permid) + + topText += '%d. %s%s up: %s (down: %s)%s%s' % (rank, name, os.linesep, + amount_str_up, amount_str_down, os.linesep, os.linesep) + rank+=1 + + self.getGuiObj('descriptionField0', tab = tab).SetLabel(topText) + self.getGuiObj('descriptionField0', tab = tab).Refresh() + self.getGuiObj('downloadedNumberT', tab = tab).SetLabel(self.utility.size_format(tribler_down)) + self.getGuiObj('uploadedNumberT', tab = tab).SetLabel(self.utility.size_format(tribler_up)) + + + def seldomReloadData(self): + # Arno: this involves reading a potentially huge db, do only on + # clicks that show overview panel. + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: seldomReloadData!!!!!!!!" 
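[Editor's note] The aspect-ratio arithmetic in getThumbnailLarge above (decide which dimension limits the scaling, then derive the other) can be checked without a GUI. A stand-alone sketch with plain integers in place of wx.Image; the 125x125 panel size in the example is just an assumed value, not something taken from the XRC layouts.

    def fit_inside(img_w, img_h, box_w, box_h):
        """Return (new_w, new_h): the image scaled to fit inside the box
        while keeping its aspect ratio, as in getThumbnailLarge."""
        if img_w / float(img_h) > box_w / float(box_h):
            # image is relatively wider than the box: width is the limit
            new_w = box_w
            new_h = int(img_h * box_w / float(img_w))
        else:
            # image is relatively taller (or equal): height is the limit
            new_h = box_h
            new_w = int(img_w * box_h / float(img_h))
        return new_w, new_h

    # e.g. a 640x480 thumbnail in a 125x125 panel becomes 125x93,
    # and a 300x600 one becomes 62x125
    print(fit_inside(640, 480, 125, 125))   # (125, 93)
    print(fit_inside(300, 600, 125, 125))   # (62, 125)

The original then only calls Rescale when the computed size actually differs from the current one, which avoids needless bitmap work for thumbnails that already fit.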
+ + if not self.bartercastdb: + self.bartercastdb = self.utility.session.open_dbhandler(NTFY_BARTERCAST) + + self.top_stats = self.bartercastdb.getTopNPeers(10) + + + def updateCallback(self, item): + "Update callback handling for this item" + session = self.guiUtility.utility.session + session.remove_observer(self.db_callback) + if item is None: + return + if self.mode in ['filesMode', 'libraryMode']: + session.add_observer(self.db_callback, NTFY_TORRENTS, [NTFY_UPDATE, NTFY_DELETE], item['infohash']) + elif self.mode in ['personsMode', 'friendsMode']: + session.add_observer(self.db_callback, NTFY_PEERS, [NTFY_UPDATE, NTFY_DELETE], item['permid']) + elif self.mode == 'subscriptionsMode': + pass + elif self.mode == 'profileMode': + pass + + def db_callback(self,subject,changeType,objectID,*args): + # called by threadpool thread + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'stdDetails: db_callback: %s %s %s %s' % (subject, changeType, `objectID`, args) + db_handler = self.guiUtility.utility.session.open_dbhandler(subject) + if subject == NTFY_PEERS: + newitem = db_handler.getPeer(objectID) + elif subject in (NTFY_TORRENTS): + newitem = db_handler.getTorrent(objectID) + + + + wx.CallAfter(self.setData, newitem) + +def revtcmp(a,b): + if a[0] < b[0]: + return 1 + elif a[0] == b[0]: + return 0 + else: + return -1 + +def reverse_torrent_insertime_cmp(a,b): + if a['insert_time'] < b['insert_time']: + return 1 + elif a['insert_time'] == b['insert_time']: + return 0 + else: + return -1 + +def getShortTrackerFormat(n): + try: + t = urlparse.urlsplit(n) + short = t[1] + idx = t[1].find(':') + if idx == -1: + short = t[1] + else: + short = t[1][:idx] + if sys.platform == 'linux2': + short = short[:27] + except: + short = n[:27] + return ' '+short + + +def progress2txt(progress): + # Truncate the progress value rather than round down + # (will show 99.9% for incomplete torrents rather than 100.0%) + progress = int(progress * 1000)/10.0 + + return ('%.1f' % progress) + "%" diff --git a/tribler-mod/Tribler/Main/vwxGUI/standardDetails.py.bak b/tribler-mod/Tribler/Main/vwxGUI/standardDetails.py.bak new file mode 100644 index 0000000..8f1c2b0 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/standardDetails.py.bak @@ -0,0 +1,2089 @@ +# Written by Jelle Roozenburg, Maarten ten Brinke, Lucian Musat +# see LICENSE.txt for license information + +import wx +import wx.xrc as xrc +from binascii import hexlify +from time import sleep,time +import math +from traceback import print_exc, print_stack +import cStringIO +import urlparse +from wx.lib.stattext import GenStaticText as StaticText + +import threading + +from Tribler.Core.Overlay.MetadataHandler import get_filename + +from font import * +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility +from Tribler.Main.vwxGUI.IconsManager import IconsManager, data2wxBitmap +from Tribler.Main.vwxGUI.filesItemPanel import loadAzureusMetadataFromTorrent,createThumbImage +from Tribler.Main.Dialogs.GUITaskQueue import GUITaskQueue + +from Tribler.Main.Utility.constants import COL_PROGRESS +from Tribler.TrackerChecking.TorrentChecking import TorrentChecking +from Tribler.Video.VideoPlayer import VideoPlayer +from Tribler.Main.vwxGUI.tribler_List import DLFilesList +from Tribler.Main.vwxGUI.FriendsItemPanel import peer2status + +from Tribler.Core.API import * +from Tribler.Core.Utilities.utilities import * +from Tribler.Main.vwxGUI.TriblerStyles import TriblerStyles +from Tribler.Main.vwxGUI.bgPanel import ImagePanel +from 
Tribler.Core.Utilities.unicode import bin2unicode, dunno2unicode + +# Sort of LAYERVIOLATION. It's a meta DBHandler actually. +from Tribler.Core.CacheDB.CacheDBHandler import GUIDBHandler +from Tribler.Core.CacheDB.EditDist import editDist +from Tribler.Video.utils import videoextdefaults + +DETAILS_MODES = ['filesMode', 'personsMode', 'profileMode', 'libraryMode', 'friendsMode', 'fileDetailsMode','subscriptionsMode', 'messageMode'] + +DEBUG = False + +def showInfoHash(infohash): + if infohash.startswith('torrent'): # for testing + return infohash + try: + n = int(infohash) + return str(n) + except: + pass + return encodestring(infohash).replace("\n","") + +class standardDetails(wx.Panel): + """ + Wrappers around details xrc panels + """ + def __init__(self, *args): + + self.bartercastdb = None + self.top_stats = None + + if len(args) == 0: + pre = wx.PrePanel() + # the Create step is done by XRC. + self.PostCreate(pre) + self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate) + else: + wx.Panel.__init__(self, *args) + self._PostInit() + + def OnCreate(self, event): + self.Unbind(wx.EVT_WINDOW_CREATE) + wx.CallAfter(self._PostInit) + event.Skip() + return True + + def _PostInit(self): + # Do all init here + self.subscr_old_source = None + self.guiUtility = GUIUtility.getInstance() + self.utility = self.guiUtility.utility + self.torrent_db = self.utility.session.open_dbhandler(NTFY_TORRENTS) + self.friend_db = self.utility.session.open_dbhandler(NTFY_FRIENDS) + self.triblerStyles = TriblerStyles.getInstance() + self.peer_db = self.utility.session.open_dbhandler(NTFY_PEERS) + self.superpeer_db = self.utility.session.open_dbhandler(NTFY_SUPERPEERS) + #self.optionsButtonLibraryFunc = rightMouseButton.getInstance() + self.iconsManager = IconsManager.getInstance() + #self.gui_db = GUIDBHandler.getInstance() + self.playList = [] + + + self.mode = None + self.item = None + self.bartercastdb = None + self.lastItemSelected = {} #keeps the last item selected for each mode + self.data = {} #keeps gui elements for each mode + for mode in DETAILS_MODES+['status']: + self.data[mode] = {} #each mode has a dictionary of gui elements with name and reference + self.lastItemSelected[mode] = None + self.currentPanel = None + self.videoplayer = VideoPlayer.getInstance() + + self.addasfriendcount = 0 + self.addasfriendlast = 0 + + + # videodata + self.videodata = None + + + ## self.addComponents() + + #self.Refresh() + self.modeElements = {} + for elem in DETAILS_MODES: + self.modeElements[elem] = [] + self.modeElements['settingsMode'] = ['profileTitle'] + + self.modeElements['filesMode'] = ['titleField', 'simTitlesField', 'popularityField1', 'options', 'popularityField2', 'creationdateField', + 'descriptionField', 'sizeField', 'thumbField', 'up', 'down', 'refresh', + 'download', 'tabs', ('files_detailsTab','tabs'), ('info_detailsTab','tabs'), + 'TasteHeart', 'details', 'peopleWhoField', 'recommendationField'] + self.modeElements['personsMode'] = ['TasteHeart', 'recommendationField','addAsFriend', 'commonFilesField', 'commonFiles', + 'alsoDownloadedField', 'alsoDownloaded', 'info_detailsTab', 'advanced_detailsTab','detailsC', + 'titleField','statusField','thumbField', 'discFilesField', 'discPersonsField'] + self.modeElements['friendsMode'] = ['TasteHeart', 'recommendationField','addAsFriend', 'commonFilesField', 'commonFiles', + 'alsoDownloadedField', 'alsoDownloaded', 'info_detailsTab', 'advanced_detailsTab','detailsC', + 'titleField','statusField','thumbField', 'discFilesField', 'discPersonsField'] + 
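[Editor's note] The updateCallback/db_callback pair shown earlier (at the end of the unmodified standardDetails.py hunk) swaps the session observer whenever the selected item changes and hops back to the GUI thread with wx.CallAfter before touching any widgets. A rough sketch of that pattern with a toy observer registry in place of the real Session/NTFY_* API; all names below are illustrative.

    class ObserverRegistry(object):
        """Toy stand-in for the session observer API used by updateCallback."""
        def __init__(self):
            self._observers = []

        def add_observer(self, fn, subject, object_id):
            self._observers.append((fn, subject, object_id))

        def remove_observer(self, fn):
            self._observers = [o for o in self._observers if o[0] != fn]

        def notify(self, subject, object_id, new_record):
            for fn, subj, oid in list(self._observers):
                if subj == subject and oid == object_id:
                    fn(new_record)

    class DetailsView(object):
        """Re-registers its callback each time a new item is selected,
        mirroring updateCallback/db_callback."""
        def __init__(self, registry, call_after):
            self.registry = registry
            self.call_after = call_after   # wx.CallAfter in the real code
            self.shown = None

        def select(self, subject, item):
            self.registry.remove_observer(self._on_change)   # drop the old subscription
            if item is not None:
                self.registry.add_observer(self._on_change, subject, item['id'])

        def _on_change(self, new_record):
            # called on a worker thread in the real code; hop to the GUI thread
            self.call_after(self.set_data, new_record)

        def set_data(self, record):
            self.shown = record

    registry = ObserverRegistry()
    view = DetailsView(registry, call_after=lambda fn, *a: fn(*a))  # synchronous stand-in
    view.select('torrents', {'id': 'abc'})
    registry.notify('torrents', 'abc', {'id': 'abc', 'name': 'updated'})
    assert view.shown['name'] == 'updated'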
self.modeElements['libraryMode'] = ['titleField', 'simTitlesField', 'popularityField1','options', 'popularityField2', 'creationdateField', + 'descriptionField', 'sizeField', 'thumbField', 'up', 'down', 'refresh', + 'files_detailsTab', 'info_detailsTab', 'details', 'upload_detailsTab', 'uploadTab_details', + 'peopleWhoField'] + self.modeElements['profileMode'] = ['levelPic', 'uploadedNumber', 'downloadedNumber'] + + + self.modeElements['fileDetailsMode'] = ['titleField', 'receivedToday', 'subscrTodayField', 'receivedYesterday', 'subscrYesterdayField'] # 'receivedTotal'] + + self.modeElements['subscriptionsMode'] = ['titleField', 'receivedToday', 'subscrTodayField', 'receivedYesterday', 'subscrYesterdayField'] # 'receivedTotal'] + + self.tabElements = {'filesTab_files': [ 'download', 'includedFiles', 'filesField', 'trackerField'], + 'personsTab_advanced': ['lastExchangeField', 'timesConnectedField','addAsFriend','similarityValueField'], + 'libraryTab_files': [ 'download', 'includedFiles'], + 'profileDetails_Quality': ['descriptionField0','howToImprove','descriptionField1'], + 'profileDetails_Files': ['descriptionField0','howToImprove','descriptionField1','takeMeThere0'], + 'profileDetails_Persons': ['descriptionField0','howToImprove','descriptionField1'], + 'profileDetails_Download': ['descriptionField','Desc0','descriptionField0','howToImprove0','descriptionField1','takeMeThere0','Desc1','descriptionField2','howToImprove1','descriptionField3','takeMeThere1','Desc2','descriptionField4','howToImprove2','descriptionField5','takeMeThere2'], + #'profileDetails_Presence': ['descriptionField','Desc0','descriptionField0','howToImprove0','descriptionField1','Desc1','descriptionField2','howToImprove1','descriptionField3','Desc2','descriptionField4','howToImprove2','descriptionField5','takeMeThere0']} + 'profileDetails_Presence': ['descriptionField','Desc0','descriptionField0','howToImprove0','descriptionField1','Desc2','descriptionField4','howToImprove2','descriptionField5','takeMeThere0'], + 'uploadTab_details': ['t4t_peers', 'g2g_peers']} + + + + self.statdlElements = ['statusHeader','Downloading', 'st28c','down_White','downSpeed','up_White','upSpeed','download1','percent1','download2','percent2','download3','percent3','download4','percent4','playList'] + + self.guiUtility.initStandardDetails(self) + + + + def addComponents(self): + self.SetBackgroundColour(wx.Colour(102,102,102)) +# self.SetBackgroundColour(wx.Colour(255,51,0)) + self.hSizer = wx.BoxSizer(wx.VERTICAL) + self.SetSizer(self.hSizer) + self.SetAutoLayout(1) + self.Layout() + #print "tb" + #print self.GetSize() + + + def setMode(self, mode, item = None): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: setMode called, new mode is",mode,"old",self.mode + + if self.mode != mode: + #change the mode, so save last item selected + self.lastItemSelected[self.mode] = self.item + self.mode = mode + self.checkGraphTabVisible() + ## self.refreshMode() + if item: + self.setData(item) + elif self.lastItemSelected[self.mode]: + self.guiUtility.selectData(self.lastItemSelected[self.mode]) + else: + self.setData(None) + + def getMode(self): + return self.mode + + def refreshMode(self): + # load xrc + self.oldpanel = self.currentPanel + #self.Show(False) + + self.currentPanel = self.loadPanel() + assert self.currentPanel, "Panel could not be loaded" + self.currentPanel.Layout() + self.currentPanel.SetAutoLayout(1) + #self.currentPanel.Enable(True) +# self.currentPanel.SetBackgroundColour("red") + + + 
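[Editor's note] The modeElements/tabElements tables just above feed self.data[mode], and loadPanel stores tab widgets under a composite key so that getGuiObj (defined later in the file) can resolve them with an optional tab prefix. A compact sketch of that naming convention, detached from XRC, with a plain dictionary and a lambda standing in for xrc.XRCCTRL; the helper names are made up for the example.

    def make_widget_registry(mode, mode_elements, tab_elements, lookup):
        """Build the per-mode widget dictionary the way loadPanel does:
        plain names for panel widgets, 'tab_name' keys for tab widgets."""
        registry = {}
        for name in mode_elements.get(mode, []):
            registry[name] = lookup(name)
        for tab, names in tab_elements.items():
            for name in names:
                registry[tab + '_' + name] = lookup(name)
        return registry

    def get_gui_obj(registry, name, tab=None):
        """Equivalent of getGuiObj: optional tab prefix, then a plain get()."""
        if tab:
            name = tab + '_' + name
        return registry.get(name)

    widgets = make_widget_registry(
        'filesMode',
        {'filesMode': ['titleField', 'download']},
        {'filesTab_files': ['trackerField']},
        lookup=lambda name: '<widget %s>' % name)

    print(get_gui_obj(widgets, 'titleField'))                          # <widget titleField>
    print(get_gui_obj(widgets, 'trackerField', tab='filesTab_files'))  # <widget trackerField>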
self.currentPanel.Show(True) +# print self.mode + + + if self.mode == 'filesMode' or self.mode == 'libraryMode' or self.mode=='personsMode' or self.mode == 'friendsMode' or self.mode == 'profileMode' or self.mode == 'subscriptionsMode' or self.mode == 'fileDetailsMode' : +# print 'tb' + self.currentPanel.SetSize((-1,5)) + self.currentPanel.Hide() + +# + if self.oldpanel: + self.hSizer.Detach(self.oldpanel) + self.oldpanel.Hide() + #self.oldpanel.Disable() + + + self.hSizer.Insert(0, self.currentPanel, 0, wx.ALL|wx.EXPAND, 0) + + +# self.currentPanel.Layout() + wx.CallAfter(self.hSizer.Layout) + wx.CallAfter(self.refreshStandardDetailsHeight) +# wx.CallAfter(self.currentPanel.Refresh) + #self.Show(True) + + + def refreshStatusPanel(self, show): + pass + ##if show: + ## statusPanel = self.data['status'].get('panel') + ## if not statusPanel: + ## statusPanel = self.loadStatusPanel() + ## self.data['status']['panel'] = statusPanel + #statusPanel.Enable() + ## statusPanel.Show() + ## self.hSizer.Insert(1, statusPanel, 0, wx.TOP|wx.EXPAND, 6) + ## self.hSizer.Layout() + ##else: + ## # Remove statusPanel if necessary + ## if self.data['status'].get('panel'): + ## statusPanel = self.data['status']['panel'] + ## try: + ## self.hSizer.Detach(statusPanel) + ## statusPanel.Hide() + ## #statusPanel.Disable() + ## except: + ## print_exc() + + def setListAspect2OneColumn(self, list_name): + try: + ofList = self.getGuiObj(list_name) + ofList.ClearAll() + if False: # sys.platform == 'win32': + ofList.SetWindowStyleFlag(wx.LC_REPORT|wx.NO_BORDER|wx.LC_NO_HEADER|wx.LC_SINGLE_SEL) #it doesn't work + else: + #ofList.SetSingleStyle(wx.LC_REPORT) + ofList.SetSingleStyle(wx.LC_NO_HEADER) + ofList.SetSingleStyle(wx.LC_SINGLE_SEL) + ofList.SetSingleStyle(wx.NO_BORDER) + ofList.InsertColumn(0, "Torrent") #essential code + # ofList.SetColumnWidth(0,wx.LIST_AUTOSIZE) + except: + # Arno, 2008-08-21: wxPython 2.8.8.1 doesn't like LC_REPORT anymore, + # for unknown reasons. Our hack around is this exception handler, + # and we MANUALLY added the style parameters to the .xrc files. 
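[Editor's note] The comment above, and the traceback kept below it, document the constraint behind setListAspect2OneColumn: toggling the report mode through SetWindowStyleFlag asserts on some wxPython 2.8 builds, so the list styles end up applied one flag at a time while LC_REPORT comes from the .xrc files. The sketch below expresses the same constraint as a guarded fallback; it is one possible phrasing, not the file's own approach, and it needs a running wx.App and an existing wx.ListCtrl.

    import wx

    def make_single_column_list(list_ctrl, column_label="Torrent"):
        """Configure an existing wx.ListCtrl as a borderless, headerless,
        single-selection, single-column report list."""
        list_ctrl.ClearAll()
        try:
            # one call would be enough where wx allows it...
            list_ctrl.SetWindowStyleFlag(
                wx.LC_REPORT | wx.NO_BORDER | wx.LC_NO_HEADER | wx.LC_SINGLE_SEL)
        except Exception:
            # ...but some wx 2.8 builds assert on mode changes, so fall back
            # to setting the flags one at a time, as the original code does
            list_ctrl.SetSingleStyle(wx.LC_NO_HEADER)
            list_ctrl.SetSingleStyle(wx.LC_SINGLE_SEL)
            list_ctrl.SetSingleStyle(wx.NO_BORDER)
        list_ctrl.InsertColumn(0, column_label)   # the "essential code" step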
+ """ + Traceback (most recent call last): + File "C:\Python252\Lib\site-packages\wx-2.8-msw-unicode\wx\_core.py", line 14555, in + lambda event: event.callable(*event.args, **event.kw) ) + File "C:\build\mainbranch\Tribler\Main\vwxGUI\standardDetails.py", line 131, in _PostInit + self.guiUtility.initStandardDetails(self) + File "C:\build\mainbranch\Tribler\Main\vwxGUI\GuiUtility.py", line 294, in initStandardDetails + self.standardDetails.setMode('filesMode', firstItem) + File "C:\build\mainbranch\Tribler\Main\vwxGUI\standardDetails.py", line 155, in setMode + self.refreshMode() + File "C:\build\mainbranch\Tribler\Main\vwxGUI\standardDetails.py", line 171, in refreshMode + self.currentPanel = self.loadPanel() + File "C:\build\mainbranch\Tribler\Main\vwxGUI\standardDetails.py", line 269, in loadPanel + self.setListAspect2OneColumn("peopleWhoField") + File "C:\build\mainbranch\Tribler\Main\vwxGUI\standardDetails.py", line 220, in setListAspect2OneColumn + ofList.SetWindowStyleFlag(wx.LC_REPORT|wx.NO_BORDER|wx.LC_NO_HEADER|wx.LC_SINGLE_SEL) #it doesn't work + File "C:\Python252\Lib\site-packages\wx-2.8-msw-unicode\wx\_core.py", line 9140, in SetWindowStyleFlag + return _core_.Window_SetWindowStyleFlag(*args, **kwargs) + wx._core.PyAssertionError: C++ assertion "nModes == 1" failed at ..\..\src\msw\listctrl.cpp(380) in wxListCtrl::MSWGetStyle(): wxListCtrl style should have exactly one mode bit set + """ + print_exc() + + + def getVideodata(self): + return self.videodata + + + def setVideodata(self, videodata): + self.videodata = videodata + + + + def loadPanel(self): + currentPanel = self.data[self.mode].get('panel',None) + modeString = self.mode[:-4] + #[11.05.07]: small hack as the friends mode has no details panel, but we still want to know that this is friends mode + if self.mode == "friendsMode": + modeString = "persons" + if not currentPanel: + xrcResource = os.path.join(self.guiUtility.vwxGUI_path, modeString+'Details.xrc') + panelName = modeString+'Details' + currentPanel = self.loadXRCPanel(xrcResource, panelName) + # Save paneldata in self.data + self.data[self.mode]['panel'] = currentPanel + #titlePanel = xrc.XRCCTRL(currentPanel, 'titlePanel') + + if self.modeElements.has_key(self.mode): + for element in self.modeElements[self.mode]: + xrcElement = None + name = None + if type(element) == str: + xrcElement = xrc.XRCCTRL(currentPanel, element) + name = element + elif type(element) == tuple: + name = element[0] + xrcElement = xrc.XRCCTRL(self.getGuiObj(element[1]), name) + if not xrcElement: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardDetails: Error: Could not identify xrc element: %s for mode %s' % (element, self.mode) + pass + if name: + self.data[self.mode][name] = xrcElement + else: + self.modeElements[self.mode] = [] + + # do extra init + if modeString in ['files','library']: + self.getGuiObj('up').setBackground(wx.WHITE) + self.getGuiObj('down').setBackground(wx.WHITE) + refresh = self.getGuiObj('refresh') + refresh.setBackground(wx.WHITE) + refresh.Bind(wx.EVT_ENTER_WINDOW, self.updateLastCheck) + self.setListAspect2OneColumn("peopleWhoField") + self.setListAspect2OneColumn("simTitlesField") + infoTab = self.getGuiObj('info_detailsTab') + infoTab.setSelected(True) + self.getAlternativeTabPanel('filesTab_files', parent=currentPanel).Hide() + + # "upload" tab is added, by Boxun + self.getAlternativeTabPanel('uploadTab_details', parent=currentPanel).Hide() + + if modeString == 'files': + 
self.getGuiObj('TasteHeart').setBackground(wx.WHITE) + """ + if modeString == 'library': + graph_parent = self.getAlternativeTabPanel('Tab_graphs', parent=currentPanel) + graph_parent.Hide() + #swap the dummy Graph panel with the plot panel + dummy_graph_panel = self.getGuiObj('Graph', 'Tab_graphs') + #optionsButton = self.getGuiObj('options') + + emsg = None + try: + from graphs import StatsPanel + graph_panel = StatsPanel(graph_parent) + except ImportError, msg: + graph_panel = None + emsg=msg + if graph_panel is None: + def setData(item): + pass + dummy_graph_panel.setData = setData + def setVisible(isVisible): + pass + dummy_graph_panel.setVisible = setVisible + dummy_graph_panel.vSizer = wx.BoxSizer(wx.VERTICAL) + dummy_graph_panel.title =wx.StaticText(dummy_graph_panel,-1,"",wx.Point(0,0),wx.Size(300,300)) + dummy_graph_panel.title.SetBackgroundColour(wx.WHITE) + dummy_graph_panel.title.SetFont(wx.Font(10,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + dummy_graph_panel.title.SetMinSize((300,300)) + dummy_graph_panel.vSizer.Add(dummy_graph_panel.title, 0, wx.BOTTOM, 3) + dummy_graph_panel.title.SetLabel(str(emsg)) + dummy_graph_panel.SetSizer(dummy_graph_panel.vSizer); + dummy_graph_panel.SetAutoLayout(1); + dummy_graph_panel.Layout(); + dummy_graph_panel.Refresh() + else: + self.swapPanel(dummy_graph_panel, graph_panel) + #also set it as an object of Tab_graphs + self.data[self.mode]['Tab_graphs'+'_'+'Graph'] = graph_panel + graph_panel.SetMinSize(wx.Size(300,300)) + graph_panel.SetSize(wx.Size(300,300)) + """ + + + elif modeString in ['persons','friends']: + self.getGuiObj('TasteHeart').setBackground(wx.WHITE) + self.getGuiObj('info_detailsTab').setSelected(True) + self.getGuiObj('advanced_detailsTab').SetLabel(" advanced") + #get the list in the right mode for viewing + self.setListAspect2OneColumn("alsoDownloadedField") + self.setListAspect2OneColumn("commonFilesField") + self.getAlternativeTabPanel('personsTab_advanced', parent=currentPanel).Hide() + ofList = self.getGuiObj("alsoDownloadedField") + cfList = self.getGuiObj("commonFilesField") + ofList.setOtherList(cfList) + ofList.setFieldsUpdateFunction(self.updateNumFilesInTextFields) + + elif modeString == "profile": + self.data[self.mode]['profileDetails_Overall'] = currentPanel #also add first panel as an named element in the data list +# self.item = "profileDetails_Overall" #the name of the panel that's currently selected + self.getAlternativeTabPanel('profileDetails_Quality', parent=self).Hide() #parent is self because it is not a tab, it replaces the details panel + self.getAlternativeTabPanel('profileDetails_Files', parent=self).Hide() #parent is self because it is not a tab, it replaces the details panel + self.getAlternativeTabPanel('profileDetails_Persons', parent=self).Hide() #parent is self because it is not a tab, it replaces the details panel + self.getAlternativeTabPanel('profileDetails_Download', parent=self).Hide() #parent is self because it is not a tab, it replaces the details panel + self.getAlternativeTabPanel('profileDetails_Presence', parent=self).Hide() #parent is self because it is not a tab, it replaces the details panel + return currentPanel + + def loadStatusPanel(self): + currentPanel = self.loadXRCPanel(os.path.join(self.guiUtility.vwxGUI_path, 'statusDownloads.xrc'), 'statusDownloads') + + mode = 'status' + for element in self.statdlElements: + xrcElement = None + name = None + if type(element) == str: + xrcElement = xrc.XRCCTRL(currentPanel, element) + name = element + elif type(element) == 
tuple: + name = element[0] + xrcElement = xrc.XRCCTRL(self.data[mode][element[1]],name) + if not xrcElement: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardDetails: Error: Could not identify xrc element: %s for mode %s' % (element, mode) + pass + if name: + self.data[mode][name] = xrcElement + + # header styling + self.data['status']['downSpeed'] + self.triblerStyles.titleBar(self.data['status']['statusHeader']) + self.triblerStyles.titleBar(self.data['status']['Downloading']) + self.triblerStyles.titleBar(self.data['status']['down_White']) + self.triblerStyles.titleBar(self.data['status']['downSpeed']) + self.triblerStyles.titleBar(self.data['status']['up_White']) + self.triblerStyles.titleBar(self.data['status']['upSpeed']) + # content styling + self.triblerStyles.setDarkText(self.data['status']['download1']) + self.triblerStyles.setDarkText(self.data['status']['percent1']) + self.triblerStyles.setDarkText(self.data['status']['download2']) + self.triblerStyles.setDarkText(self.data['status']['percent2']) + self.triblerStyles.setDarkText(self.data['status']['download3']) + self.triblerStyles.setDarkText(self.data['status']['percent3']) + self.triblerStyles.setDarkText(self.data['status']['download4']) + self.triblerStyles.setDarkText(self.data['status']['percent4']) + + + +# self.triblerStyles.titleBar(self.data['status']['statusHeader'], text= self.data['status']['statusHeader'].GetName()) + + + return currentPanel + + + + def loadXRCPanel(self, filename, panelName, parent=None): + try: + currentPanel = None + if not os.path.exists(filename): + dummyFile = os.path.join(self.guiUtility.vwxGUI_path, 'dummy.xrc') + filename = dummyFile + panelName = "dummy" + res = xrc.XmlResource(filename) + # create panel + if parent is None: + parent = self + currentPanel = res.LoadPanel(parent, panelName) + if not currentPanel: + raise Exception() + return currentPanel + except: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardDetails: Error: Could not load panel from XRC-file %s' % filename + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardDetails: Tried panel: %s=%s' % (panelName, currentPanel) + print_exc() + return None + + + def getData(self): + return self.item + + def getIdentifier(self): + if not self.item: + return None + try: + if self.mode in ['filesMode','libraryMode']: + return self.item['infohash'] + elif self.mode in ['personsMode','friendsMode']: + return self.item['permid'] + elif self.mode in ['subscriptionsMode']: + return self.item['url'] + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardDetails: Error in getIdentifier for mode %s, item=%s' % (self.mode,self.item) + except: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardDetails: Error in getIdentifier for mode %s, item=%s' % (self.mode,self.item) + + print_exc() + + def setData(self, item): + self.updateCallback(item) # update callback function on changing item + self.item = item + if item is None: + item = {} + if self.mode in ['filesMode', 'libraryMode']: + #check if this is a corresponding item from type point of view + if item.get('infohash') is None: + return #no valid torrent + torrent = item + + ##titleField = self.getGuiObj('titleField') + title = torrent.get('name') + title = title[:77] + ##titleField.SetLabel(title) + ##titleField.Wrap(-1) # doesn't appear to work + +# self.setTorrentThumb(self.mode, torrent, 
self.getGuiObj('thumbField')) + + + + elif self.mode in ['personsMode', 'friendsMode']: + #check if this is a corresponding item from type point of view +# if item.get('permid') is None: +# return #no valid torrent + + titleField = self.getGuiObj('titleField') + titleField.SetLabel(item.get('name') or '') + titleField.Wrap(-1) + + #set the picture + try: + bmp = None + # Check if we have already read the thumbnail and metadata information from this torrent file + if item.get('metadata'): + bmp = item['metadata'].get('ThumbnailBitmap') + elif 'permid' in item: + mime, icondata = self.peer_db.getPeerIcon(item['permid']) + if icondata: + bmp = data2wxBitmap(mime,icondata) + + if not bmp: + superpeers = self.superpeer_db.getSuperPeers() + if 'permid' in item and item['permid'] in superpeers: + bmp = self.iconsManager.get_default('personsMode','SUPERPEER_BITMAP') + else: + bmp = self.iconsManager.get_default('personsMode','DEFAULT_THUMB') + + thumbField = self.getGuiObj("thumbField") + thumbField.setBitmap(bmp) + width, height = thumbField.GetSize() + d = 1 + thumbField.border = [wx.Point(0,d), wx.Point(width-d, d), wx.Point(width-d, height-d), wx.Point(d,height-d), wx.Point(d,0)] + thumbField.Refresh() +# wx.CallAfter(thumbField.Refresh) + + except: + print_exc() + + + if self.getGuiObj('info_detailsTab').isSelected(): + + if item.get('simRank'): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'SimRank of peer: %s' % item['simRank'] + self.setRankToRecommendationField(item['simRank']) + self.getGuiObj('TasteHeart').setRank(item['simRank']) + + + # Peer status = online status + frienstatus + label = peer2status(item) + self.getGuiObj('statusField').SetLabel(label) + + if 'num_peers' in item: + n = unicode(item['num_peers']) + if not n or n=='0': + n = '?' + self.getGuiObj('discPersonsField').SetLabel(n) + if 'num_torrents' in item: + n = unicode(item['num_torrents']) + if not n or n == '0': + n = '?' 
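[Editor's note] The bitmap selection a few lines up falls back through several sources: a ThumbnailBitmap cached in the item's metadata, else the icon stored in the peer database, and finally the superpeer or generic default. A small sketch of that chain with plain callables standing in for peer_db and iconsManager; the parameter names are placeholders.

    def pick_peer_bitmap(item, load_db_icon, is_superpeer, defaults):
        """Fallback chain used when filling thumbField for a peer.

        load_db_icon(permid) -> bitmap or None
        is_superpeer(permid) -> bool
        defaults             -> {'SUPERPEER_BITMAP': ..., 'DEFAULT_THUMB': ...}
        """
        bmp = None
        if item.get('metadata'):
            # the elif mirrors the original: a cached metadata dict
            # short-circuits the DB lookup even if it holds no bitmap
            bmp = item['metadata'].get('ThumbnailBitmap')
        elif 'permid' in item:
            bmp = load_db_icon(item['permid'])
        if not bmp:
            if 'permid' in item and is_superpeer(item['permid']):
                bmp = defaults['SUPERPEER_BITMAP']
            else:
                bmp = defaults['DEFAULT_THUMB']
        return bmp

    # an unknown peer with no stored icon gets the generic thumbnail
    print(pick_peer_bitmap({'permid': 'p1'},
                           load_db_icon=lambda permid: None,
                           is_superpeer=lambda permid: False,
                           defaults={'SUPERPEER_BITMAP': 'SP', 'DEFAULT_THUMB': 'DT'}))  # DT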
+ self.getGuiObj('discFilesField').SetLabel(n) + + if 'friend' in item: + fs = item.get('friend') + if fs == FS_MUTUAL or fs == FS_I_INVITED: + isfriend = self.iconsManager.get_default('personsMode','ISFRIEND_BITMAP') + isfriend_clicked = self.iconsManager.get_default('personsMode','ISFRIEND_CLICKED_BITMAP') + self.getGuiObj('addAsFriend').switchTo(isfriend,isfriend_clicked) + else: + self.getGuiObj('addAsFriend').switchBack() + + self.fillTorrentLists() + + elif self.getGuiObj('advanced_detailsTab').isSelected(): + if item.get('last_connected') is not None: + if item['last_connected'] < 0: + self.getGuiObj('lastExchangeField', tab = 'personsTab_advanced').SetLabel("never seen online") + else: + self.getGuiObj('lastExchangeField', tab = 'personsTab_advanced').SetLabel('%s %s'%(friendly_time(item['last_connected']),'ago')) + else: + self.getGuiObj('lastExchangeField', tab = 'personsTab_advanced').SetLabel('') + if item.get("connected_times") is not None: + self.getGuiObj('timesConnectedField', tab = 'personsTab_advanced').SetLabel(str(item["connected_times"])) + else: + self.getGuiObj('timesConnectedField', tab = 'personsTab_advanced').SetLabel("") + if item.get("similarity") is not None: + self.getGuiObj('similarityValueField', tab = 'personsTab_advanced').SetLabel("%.1f" % item["similarity"]) + else: + self.getGuiObj('similarityValueField', tab = 'personsTab_advanced').SetLabel("") + + addAsFriend = self.getGuiObj('addAsFriend', tab = 'personsTab_advanced') + if addAsFriend.initDone: + if item.get('friend') is not None: + fs = item['friend'] + if fs == FS_MUTUAL or fs == FS_I_INVITED: + isfriend = self.iconsManager.get_default('personsMode','ISFRIEND_BITMAP') + isfriend_clicked = self.iconsManager.get_default('personsMode','ISFRIEND_CLICKED_BITMAP') + addAsFriend.switchTo(isfriend,isfriend_clicked) + else: + addAsFriend.switchBack() + + elif self.mode == 'subscriptionsMode': + if item.get('url') is None: + return #no valid url + subscrip = item + rssurl = subscrip.get('url') + + if self.subscr_old_source is not None and self.subscr_old_source == rssurl: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: setData: subscriptionMode: Not refreshing" + return # no need to refresh + self.subscr_old_source = rssurl + + titleField = self.getGuiObj('titleField') + titleField.SetLabel(rssurl) + titleField.Wrap(-1) + + bcsub = self.utility.lang.get('buddycastsubscription') + if rssurl == bcsub: + rssurl = 'BC' + + # Gather data for views + torrents = self.torrent_db.getTorrentsFromSource(rssurl) + todayl = [] + yesterdayl = [] + now = long(time()) + sotoday = long(math.floor(now / (24*3600.0))*24*3600.0) + soyester = long(sotoday - (24*3600.0)) + for torrent in torrents: + if torrent['insert_time'] > sotoday: + todayl.append(torrent) + elif torrent['insert_time'] > soyester: + yesterdayl.append(torrent) + + todayl.sort(reverse_torrent_insertime_cmp) + yesterdayl.sort(reverse_torrent_insertime_cmp) + + # Update Today view + todayField = self.getGuiObj('receivedToday') + todaystr = " Today ("+str(len(todayl))+")" + todayField.SetLabel(todaystr) + + todayList = self.getGuiObj('subscrTodayField') + if sys.platform == 'win32': + todayList.SetWindowStyleFlag(wx.LC_REPORT|wx.NO_BORDER|wx.LC_NO_HEADER|wx.LC_SINGLE_SEL) #it doesn't work on mac + else: + todayList.SetSingleStyle(wx.NO_BORDER) + todayList.SetSingleStyle(wx.LC_REPORT) + todayList.SetSingleStyle(wx.LC_NO_HEADER) + todayList.SetSingleStyle(wx.LC_SINGLE_SEL) +# 
todayList.SetWindowStyle(wx.LC_REPORT|wx.NO_BORDER|wx.LC_SINGLE_SEL|wx.LC_NO_HEADER) + if todayList.GetColumnCount() == 0: + todayList.InsertColumn(0, "Torrent",wx.LIST_FORMAT_LEFT,280) + todayList.DeleteAllItems() + + today_infohashes = [] + for torrent in todayl: + todayList.Append([torrent['name']]) + today_infohashes.append(torrent['infohash']) + todayList.setInfoHashList(today_infohashes) + + # Update Yesterday view + ydayField = self.getGuiObj('receivedYesterday') + ydaystr = " Yesterday ("+str(len(yesterdayl))+")" + ydayField.SetLabel(ydaystr) + + ydayList = self.getGuiObj('subscrYesterdayField') + if sys.platform == 'win32': + ydayList.SetWindowStyleFlag(wx.LC_REPORT|wx.NO_BORDER|wx.LC_NO_HEADER|wx.LC_SINGLE_SEL) #it doesn't work on mac + else: + ydayList.SetSingleStyle(wx.NO_BORDER) + ydayList.SetSingleStyle(wx.LC_REPORT) + ydayList.SetSingleStyle(wx.LC_NO_HEADER) + ydayList.SetSingleStyle(wx.LC_SINGLE_SEL) + if ydayList.GetColumnCount() == 0: + ydayList.InsertColumn(0, "Torrent",wx.LIST_FORMAT_LEFT,280) + ydayList.DeleteAllItems() + yesterday_infohashes = [] + for torrent in yesterdayl: + ydayList.Append([torrent['name']]) + yesterday_infohashes.append(torrent['infohash']) + ydayList.setInfoHashList(yesterday_infohashes) + + elif self.mode == 'profileMode': + if len(item) == 0: + return + tab = None + + # -------------------------------------------------------------------------------------------------------------------------------------------------------- + ## --- Overall performance !!!! we'll leave it probably out!!! + if self.currentPanel == self.getGuiObj('profileDetails_Overall'): +# self.getGuiObj('descriptionField0').SetLabel(item.get('overall_rank')) + picture = self.getGuiObj("levelPic") + if item.get('overall_rank') == "beginner": + picture.setIndex(0) + if item.get('overall_rank') == "experienced": + picture.setIndex(1) + if item.get('overall_rank') == "top user": + picture.setIndex(2) + if item.get('overall_rank') == "master": + picture.setIndex(3) + + # -------------------------------------------------------------------------------------------------------------------------------------------------------- + # --- Quality of tribler recommendations + elif self.currentPanel == self.getGuiObj('profileDetails_Quality'): + tab = 'profileDetails_Quality' + count = item.get('downloaded_files',0) + text = self.utility.lang.get("profileDetails_Quality_description", giveerror=False) + text1 = self.utility.lang.get("profileDetails_Quality_improve", giveerror=False) + if count < 10: + only = self.utility.lang.get("profileDetails_Quality_description_onlyword", giveerror=False) + else: + only="" + self.getGuiObj('descriptionField0', tab = 'profileDetails_Quality').SetLabel(text % (only,count)) + self.getGuiObj('descriptionField1', tab = 'profileDetails_Quality').SetLabel(text1) + + # -------------------------------------------------------------------------------------------------------------------------------------------------------- + # --- Discovered Files + elif self.currentPanel == self.getGuiObj('profileDetails_Files'): + tab = 'profileDetails_Files' + count = item.get('discovered_files',0) + count2 = self.utility.session.get_torrent_collecting_max_torrents() + text = self.utility.lang.get("profileDetails_Files_description", giveerror=False) + text1 = self.utility.lang.get("profileDetails_Files_improve", giveerror=False) + self.getGuiObj('descriptionField0', tab = 'profileDetails_Files').SetLabel(text % count) + self.getGuiObj('descriptionField1', tab = 
'profileDetails_Files').SetLabel(text1 % count2) + + # -------------------------------------------------------------------------------------------------------------------------------------------------------- + # --- Discovered Persons + elif self.currentPanel == self.getGuiObj('profileDetails_Persons'): + tab = 'profileDetails_Persons' + count = 0 + count = item.get('discovered_persons',0) + text = self.utility.lang.get("profileDetails_Persons_description", giveerror=False) + text1 = self.utility.lang.get("profileDetails_Persons_improve", giveerror=False) + self.getGuiObj('descriptionField0', tab = 'profileDetails_Persons').SetLabel(text % count) + self.getGuiObj('descriptionField1', tab = 'profileDetails_Persons').SetLabel(text1) + + # -------------------------------------------------------------------------------------------------------------------------------------------------------- + ## --- Optimal download speed + elif self.currentPanel == self.getGuiObj('profileDetails_Download'): + tab = 'profileDetails_Download' + text = self.utility.lang.get("profileDetails_Download_info", giveerror=False) + self.getGuiObj('descriptionField', tab = 'profileDetails_Download').SetLabel(text) + + maxuploadrate = self.guiUtility.utility.config.Read('maxuploadrate', 'int') #kB/s + if ( maxuploadrate == 0 ): + text1 = self.utility.lang.get("profileDetails_Download_UpSpeedMax", giveerror=False) + text2 = self.utility.lang.get("profileDetails_Download_UpSpeedMax_improve", giveerror=False) + else: + text1 = self.utility.lang.get("profileDetails_Download_UpSpeed", giveerror=False) + text1 = text1 % maxuploadrate + text2 = self.utility.lang.get("profileDetails_Download_UpSpeed_improve", giveerror=False) + # maxuploadslots = self.guiUtility.utility.config.Read('maxupload', "int") + # if ( maxuploadslots == 0 ): + # text2 = self.utility.lang.get("profileDetails_Download_UpSlotsMax", giveerror=False) + # else: + # text2 = self.utility.lang.get("profileDetails_Download_UpSlots", giveerror=False) + # text2 = text2 % maxuploadslots + # maxdownloadrate = self.guiUtility.utility.config.Read('maxdownloadrate', "int") + # if ( maxdownloadrate == 0 ): + # text3 = self.utility.lang.get("profileDetails_Download_DlSpeedMax", giveerror=False) + # else: + # text3 = self.utility.lang.get("profileDetails_Download_DlSpeed", giveerror=False) + # text3 = text3 % maxdownloadrate + # text = "%s\n%s\n%s" % (text1,text2,text3) + self.getGuiObj('descriptionField0', tab = 'profileDetails_Download').SetLabel( text1) + self.getGuiObj('descriptionField1', tab = 'profileDetails_Download').SetLabel(text2) + + count = item.get('number_friends',0) + text = self.utility.lang.get("profileDetails_Download_Friends", giveerror=False) + self.getGuiObj('descriptionField2', tab = 'profileDetails_Download').SetLabel(text % count) + text = self.utility.lang.get("profileDetails_Download_Friends_improve", giveerror=False) + self.getGuiObj('descriptionField3', tab = 'profileDetails_Download').SetLabel(text) + + nat = item.get('nat_type') + if self.guiUtility.isReachable(): + text1 = self.utility.lang.get("profileDetails_Download_VisibleYes", giveerror=False) + text2 = self.utility.lang.get("profileDetails_Download_VisibleYes_improve", giveerror=False) + self.getGuiObj('descriptionField4', tab = 'profileDetails_Download').SetLabel(text1) + self.getGuiObj('descriptionField5', tab = 'profileDetails_Download').SetLabel(text2) + else: + text1 = self.utility.lang.get("profileDetails_Download_VisibleNo", giveerror=False) + text2 = 
self.utility.lang.get("profileDetails_Download_VisibleNo_improve", giveerror=False) + self.getGuiObj('descriptionField4', tab = 'profileDetails_Download').SetLabel(text1 % nat) + self.getGuiObj('descriptionField5', tab = 'profileDetails_Download').SetLabel(text2) + + # -------------------------------------------------------------------------------------------------------------------------------------------------------- + ## --- Reachability + elif self.currentPanel == self.getGuiObj('profileDetails_Presence'): + tab = 'profileDetails_Presence' + text = self.utility.lang.get("profileDetails_Presence_info", giveerror=False) + self.getGuiObj('descriptionField', tab = 'profileDetails_Presence').SetLabel(text) + + count = item.get('number_friends',0) + # use text that is also used in 'optimal download details + text = self.utility.lang.get("profileDetails_Download_Friends", giveerror=False) + self.getGuiObj('descriptionField0', tab = 'profileDetails_Presence').SetLabel(text % count) + text = self.utility.lang.get("profileDetails_Download_Friends_improve", giveerror=False) + self.getGuiObj('descriptionField1', tab = 'profileDetails_Presence').SetLabel(text) + + current_version = self.utility.getVersion() + text = self.utility.lang.get("profileDetails_Presence_VersionUnknown", giveerror=False) + new_version = item.get('new_version',text) + update_url = 'www.tribler.org' #item.get('update_url','www.tribler.org') + compare_result = item.get('compare_result',-3) + if compare_result == -1: #newer version locally + text1 = self.utility.lang.get("profileDetails_Presence_VersionNewer", giveerror=False) + text1 = text1 % (current_version, new_version) + text2 = self.utility.lang.get("profileDetails_Presence_VersionNewer_improve", giveerror=False) + text2 = text2 % update_url + elif compare_result == 0: #same version + text1 = self.utility.lang.get("profileDetails_Presence_VersionCurrent", giveerror=False) + text1 = text1 % current_version + text2 = self.utility.lang.get("profileDetails_Presence_VersionCurrent_improve", giveerror=False) + text2 = text2 % update_url + elif compare_result == 1: #newer version on website + text1 = self.utility.lang.get("profileDetails_Presence_VersionOlder", giveerror=False) + text1 = text1 % current_version + text2 = self.utility.lang.get("profileDetails_Presence_VersionOlder_improve", giveerror=False) + text2 = text2 % (new_version,update_url) + else: + text1 = self.utility.lang.get("profileDetails_Presence_VersionError", giveerror=False) + text1 = text1 % current_version + text2 = self.utility.lang.get("profileDetails_Presence_VersionError_improve", giveerror=False) + text2 = text2 % update_url + self.getGuiObj('descriptionField4', tab = 'profileDetails_Presence').SetLabel(text1) + self.getGuiObj('descriptionField5', tab = 'profileDetails_Presence').SetLabel(text2) + else: + tab = "error" + if tab != "error": + if self.reHeightToFit(tab): + + #print " do panel ",tab,"relayouting" + self.currentPanel.SetAutoLayout(1) + self.currentPanel.Layout() + self.hSizer.Layout() + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: setData: No entry for mode",self.mode + +# self.currentPanel.Refresh() + + def setRankToRecommendationField(self, rank): + recommField = self.getGuiObj('recommendationField') + assert recommField, "No recommendationField found" + + if rank != -1: + + if rank == 1: + recommField.SetLabel("%d" % rank + "st of top 20") + elif rank == 2: + recommField.SetLabel("%d" % rank + "nd of top 20") + elif rank == 3: + 
recommField.SetLabel("%d" % rank + "rd of top 20") + else: + recommField.SetLabel("%d" % rank + "th of top 20") + else: + recommField.SetLabel("") + + def reHeightToFit(self, tab=None): + """the idea is to iterate through all objects mentioned in the list of + object for current tab and to reposition them on y axis so that all of + them are fully visible -> update, the repositioning should be done automatically by + autolayouting the sizer, all that has to be done is wrap the elements that don't + have the ST_NO_AUTORESIZE flag set + returns true if elements have been repositioned so that the layout be redone""" +# if DEBUG: +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: trying to reheight panel for mode",self.mode,"and tab",tab + bElementMoved = False + VERTICAL_SPACE = 3 + try: + if tab is None: + list = self.modeElements[self.mode] + else: + list = self.tabElements[tab] + #check to see it it's worth trying to reposition elements + if len(list)>0: + prevElement = None + for elementName in list: + currentElement = self.getGuiObj(elementName, tab) + if isinstance(currentElement,wx.StaticText): + style = currentElement.GetWindowStyle() +# if DEBUG: +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: element",elementName,"has style",style + #print 'Style if %s has flag: %s' % (elementName, (style & wx.ST_NO_AUTORESIZE)) + if (style & wx.ST_NO_AUTORESIZE)==0 : + currentElement.Wrap(284) + bElementMoved = True + prevElement = None + if prevElement is not None: + prevPos = prevElement.GetPosition().y + prevHeight = prevElement.GetSize().height + new_pos = prevPos + prevHeight + VERTICAL_SPACE + # print " element",list[index],"is at",currentElement.GetPosition().y,"and has height",currentElement.GetSize().height + if new_pos != currentElement.GetPosition().y: + #reposition element as it overlaps the one above + currentElement.SetPosition(wx.Point(currentElement.GetPosition().x,new_pos)) + prevElement = currentElement + # Set size of standardDetails to size of content + + + except: + print_exc() + return bElementMoved + + def setDownloadbutton(self, torrent, tab = None, item = ''): + if item == '': + self.downloadButton2 = self.getGuiObj('download', tab = tab) + else: + self.downloadButton2 = item + + if self.downloadButton2: + if torrent.get('myDownloadHistory', False): + bitmap, bitmap2 = self.iconsManager.getDownloadButton('library') + elif torrent.get('web2'): + bitmap, bitmap2 = self.iconsManager.getDownloadButton('play') + else: + bitmap, bitmap2 = self.iconsManager.getDownloadButton('download') + + self.downloadButton2.setBitmaps(bitmap, bitmap2) + self.downloadButton2.Refresh() + + def getGuiObj(self, obj_name, tab=None, mode=None): + """handy function to retreive an object based on it's name for the current mode""" + if tab: + obj_name = tab+'_'+obj_name + if not mode: + mode = self.mode + #print 'Available objects: %s' % self.data[mode].keys() + return self.data[mode].get(obj_name) + + def show_loading(self, list_obj): + list_obj.DeleteAllItems() + index = list_obj.InsertStringItem(sys.maxint, "Searching..") + font = list_obj.GetItemFont(index) + font.SetStyle(wx.FONTSTYLE_ITALIC) + list_obj.SetItemFont(index, font) + list_obj.SetItemTextColour(index, "#555555") + + def fillSimLists(self, item): + # show loading.. 
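[Editor's note] The subscriptionsMode branch of setData earlier buckets incoming torrents by comparing insert_time against the start of the current day (now floored to a multiple of 24 hours, effectively a UTC day boundary) and the start of the day before. The arithmetic is compact enough to restate as a stand-alone helper; the function name and the sort-by-key variant are this note's own, the thresholds are the file's.

    import math
    import time

    DAY = 24 * 3600

    def bucket_by_day(torrents, now=None):
        """Split torrent records into 'today' and 'yesterday' lists the way
        the subscriptionsMode branch of setData does; older records drop out."""
        if now is None:
            now = int(time.time())
        start_of_today = int(math.floor(now / (24 * 3600.0)) * 24 * 3600.0)
        start_of_yesterday = start_of_today - DAY
        today, yesterday = [], []
        for t in torrents:
            if t['insert_time'] > start_of_today:
                today.append(t)
            elif t['insert_time'] > start_of_yesterday:
                yesterday.append(t)
        # newest first, as reverse_torrent_insertime_cmp arranges it
        today.sort(key=lambda t: t['insert_time'], reverse=True)
        yesterday.sort(key=lambda t: t['insert_time'], reverse=True)
        return today, yesterday

    now = 1000 * DAY + 5000                       # some moment 5000 s into a day
    torrents = [{'insert_time': now - 100},       # today
                {'insert_time': now - DAY},       # yesterday
                {'insert_time': now - 3 * DAY}]   # too old, dropped
    today, yesterday = bucket_by_day(torrents, now)
    print(len(today), len(yesterday))             # one record in each bucket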
+ self.show_loading(self.getGuiObj('peopleWhoField')) + self.show_loading(self.getGuiObj('simTitlesField')) + + + guiserver = GUITaskQueue.getInstance() + guiserver.add_task(lambda:self.updateSimLists(item), 0, id='fillSimLists') + + def updateSimLists(self, item): + def cmpfunc(x, y): + return int(10000*(editDist(x[1], name) - editDist(y[1], name))) + + infohash = item['infohash'] + name = item['name'] + gui_db = GUIDBHandler.getInstance() # LAYERVIOLATION + + sim_files = None + sim_titles = None + + try: + sim_files = gui_db.getSimItems(infohash, 8) + except: + print_exc() + wx.CallAfter(self.fillSimTorrentsList, sim_files) + + try: + sim_titles = gui_db.getSimilarTitles(name, 30, infohash) # first get a subset of titles + sim_titles.sort(cmpfunc) + except: + print_exc() + wx.CallAfter(self.fillSimTitlesList, sim_titles) + + def fillSimTorrentsList(self, sim_files): + """fills the list of torrents from library or file view with the files that are similar to the currently selected one""" + # jie.done: fill similar torrent list + # future.work: smooth the recommendation, solve the data sparse and cold start problem + + sim_torrent_list = self.getGuiObj('peopleWhoField') + if not sim_torrent_list: # user already switched to another page + return + sim_torrent_list.DeleteAllItems() + if sim_files is None: + self.errorLoadData('peopleWhoField') + return + + try: + #sim_files = self.gui_db.getSimItems(infohash, 8) # [(infohash, title)] + sim_torrent_list.setInfoHashList(None) + + torrent_list = [] + if len(sim_files) > 0: + for infohash, name, status_id, coocurrence in sim_files: + if coocurrence <= 1: # don't show too irrelevant torrents. set it to 0 if you want to show all co-occurent torrents + continue + if status_id == 0: # good + color = "blue" + elif status_id == 1: # unknown + color = "black" + elif status_id == 2: # dead + color = "red" + continue + name = dunno2unicode(name) + index = sim_torrent_list.InsertStringItem(sys.maxint, name) + sim_torrent_list.SetItemTextColour(index, color) + torrent_list.append(infohash) + # TODO: show a tip string on this listitem. SetToolTipString? 
+ sim_torrent_list.setInfoHashList(torrent_list) + + if len(torrent_list) == 0: + index = sim_torrent_list.InsertStringItem(sys.maxint, "No similar files found yet.") + font = sim_torrent_list.GetItemFont(index) + font.SetStyle(wx.FONTSTYLE_ITALIC) + sim_torrent_list.SetItemFont(index, font) + sim_torrent_list.SetItemTextColour(index, "#222222") + + except Exception, e: + print_exc() + sim_torrent_list.setInfoHashList(None) + index = sim_torrent_list.InsertStringItem(0, "Error getting similar files list") + sim_torrent_list.SetItemTextColour(index, "dark red") + + try: + sim_torrent_list.onListResize() #SetColumnWidth(0,wx.LIST_AUTOSIZE) + except: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: could not resize lists in sim_torrent_list panel" + + + def fillSimTitlesList(self, sim_titles): + """fills the list of torrents with similar titles""" + # jie.done: fill sim title list + + sim_torrent_list = self.getGuiObj('simTitlesField') + if not sim_torrent_list: + return + sim_torrent_list.DeleteAllItems() + + if sim_titles is None: + self.errorLoadData('simTitlesField') + return + + try: + sim_torrent_list.setInfoHashList(None) + + torrent_list = [] + if len(sim_titles) > 0: + for infohash, name, status_id in sim_titles: + #if infohash == item['infohash']: + # continue + name = dunno2unicode(name) + index = sim_torrent_list.InsertStringItem(sys.maxint, name) + if status_id == 0: # good + color = "blue" + elif status_id == 1: # unknown + color = "black" + elif status_id == 2: # dead + color = "red" + continue + sim_torrent_list.SetItemTextColour(index, color) + torrent_list.append(infohash) + sim_torrent_list.setInfoHashList(torrent_list) + + if len(torrent_list) == 0: + index = sim_torrent_list.InsertStringItem(sys.maxint, "No similar files found yet.") + font = sim_torrent_list.GetItemFont(index) + font.SetStyle(wx.FONTSTYLE_ITALIC) + sim_torrent_list.SetItemFont(index, font) + sim_torrent_list.SetItemTextColour(index, "#222222") + + except Exception, e: + print_exc() + sim_torrent_list.setInfoHashList(None) + index = sim_torrent_list.InsertStringItem(0, "Error getting similar files list") + sim_torrent_list.SetItemTextColour(index, "dark red") + + try: + sim_torrent_list.onListResize() #SetColumnWidth(0,wx.LIST_AUTOSIZE) + except: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: could not resize lists in sim_torrent_list panel" + + + def fillTorrentLists(self): + """fills the lists of torrents from persons detail view with common + and history files for the selected person""" + + ofList = self.getGuiObj("alsoDownloadedField") + if not ofList: + return + cfList = self.getGuiObj("commonFilesField") + if not cfList: + return + + ofList.setInfoHashList(None) + if ( self.mode != "personsMode" and self.mode != "friendsMode" ) or \ + self.item is None or self.item.get('permid') is None: + return + + self.show_loading(ofList) + self.show_loading(cfList) + + guiserver = GUITaskQueue.getInstance() + permid = self.item.get('permid') + guiserver.add_task(lambda:self.updateTorrentLists(permid), 0, id='fillTorrentLists') + + def updateTorrentLists(self, permid): + common_files = None + other_files = None + gui_db = GUIDBHandler.getInstance() + + try: + common_files = gui_db.getCommonFiles(permid) #[name] + except: + print_exc() + wx.CallAfter(self.fillCommonList, common_files) + + try: + other_files = gui_db.getOtherFiles(permid) #[(infohash,name)] + except: + print_exc() + 
wx.CallAfter(self.fillOtherList, other_files) + + def fillCommonList(self, common_files): + cfList = self.getGuiObj("commonFilesField") + cfList.DeleteAllItems() + + if common_files is None: + self.errorLoadData('commonFilesField') + return + + if len(common_files) == 0: + index = cfList.InsertStringItem(sys.maxint, "No common files with this person.") + font = cfList.GetItemFont(index) + font.SetStyle(wx.FONTSTYLE_ITALIC) + cfList.SetItemFont(index, font) + cfList.SetItemTextColour(index, "#222222") + cfList.isEmpty = True # used by DLFilesList to remove "No common files with this person." + else: + cfList.isEmpty = False + for name in common_files: + cfList.InsertStringItem(sys.maxint, name) + + def fillOtherList(self, other_files): + ofList = self.getGuiObj("alsoDownloadedField") + ofList.DeleteAllItems() + + if other_files is None: + self.errorLoadData('alsoDownloadedField') + return + + if len(other_files) == 0: + index = ofList.InsertStringItem(sys.maxint, "No files advertised by this person.") + font = ofList.GetItemFont(index) + font.SetStyle(wx.FONTSTYLE_ITALIC) + ofList.SetItemFont(index, font) + ofList.SetItemTextColour(index, "#222222") + else: + torrent_list = [] + for infohash, name in other_files: + ofList.InsertStringItem(sys.maxint, name) + torrent_list.append(infohash) + ofList.setInfoHashList(torrent_list) + + def errorLoadData(self, obj_name): + flist = self.getGuiObj(obj_name) + flist.DeleteAllItems() + if isinstance(flist, DLFilesList): + flist.setInfoHashList(None) + index = flist.InsertStringItem(sys.maxint, "Error: cannot load the data") + flist.SetItemTextColour(index, "#222222") + + try: + flist.onListResize() + except: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: could not resize lists in person detail panel" + + def updateNumFilesInTextFields(self, cfList, ofList): + numItems = [cfList.GetItemCount(), ofList.GetItemCount()] + self.getGuiObj('commonFiles').SetLabel(self.utility.lang.get('commonFiles') % numItems[0]) + nprefs = max(self.getData().get('nprefs',0), numItems[1]) + self.getGuiObj('alsoDownloaded').SetLabel(self.utility.lang.get('alsoDownloaded') % (numItems[1], nprefs)) + + def checkGraphTabVisible(self, tab2check='Graph', selectedTab=None): + # just some generic way of making sure that a certain panel is informed when it is or not visible + #the function must be there! 
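# [editor's sketch, not part of the patch] fillTorrentLists()/updateTorrentLists()
# above follow the recurring pattern in this panel: show a placeholder row, run
# the database query on a background task (GUITaskQueue.add_task), then hand the
# result -- or None on error -- back to the GUI thread with wx.CallAfter.  A
# minimal stand-in for that split, using a plain thread and invented names:

import threading

def fetch_in_background(query, deliver_to_gui):
    """Run query() off the GUI thread; pass its result (None on error) on."""
    def worker():
        try:
            result = query()
        except Exception:
            result = None          # None tells the fill method to show an error
        deliver_to_gui(result)     # stands in for wx.CallAfter(fill_method, result)
    threading.Thread(target=worker).start()

# usage sketch:
#   fetch_in_background(lambda: gui_db.getCommonFiles(permid),
#                       lambda files: wx.CallAfter(self.fillCommonList, files))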
+ graph_panel = self.getGuiObj(obj_name='Graph', tab='Tab_graphs', mode='libraryMode') + if graph_panel is None: + return + if self.mode == 'libraryMode': + if selectedTab is None: + #find currently selected tab + tabButtons = { 'files_detailsTab':self.getGuiObj('files_detailsTab'), + 'info_detailsTab':self.getGuiObj('info_detailsTab'), + 'graphs_detailsTab':self.getGuiObj('graphs_detailsTab') } + for key in tabButtons.keys(): + if tabButtons[key].isSelected(): + selectedTab = key + break + if selectedTab == 'graphs_detailsTab': + graph_panel.setVisible(True) + return + graph_panel.setVisible(False) + + def tabClicked(self, name): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardDetails: tabClicked: %s' % name + #self.checkGraphTabVisible(selectedTab=name) + + if self.mode == 'libraryMode': + tabButtons = { 'files_detailsTab':self.getGuiObj('files_detailsTab'), + 'info_detailsTab':self.getGuiObj('info_detailsTab'), + 'upload_detailsTab':self.getGuiObj('upload_detailsTab')} + # 'graphs_detailsTab':self.getGuiObj('graphs_detailsTab') } + tabPanelNames = { 'files_detailsTab':'filesTab_files', + 'info_detailsTab':'details', + 'upload_detailsTab':'uploadTab_details'} + #'graphs_detailsTab':'Tab_graphs'} + #TODO: change from currentPanel to the string name of the current selected details panel + #get the currently selected panel + current_name = 'details' + panel_name = 'details' + for key in tabButtons.keys(): + if name == key: + panel_name = tabPanelNames[key] + if tabButtons[key].isSelected(): + current_name = tabPanelNames[key] + panel1 = self.getGuiObj(current_name) + panel2 = self.getGuiObj(panel_name) + if panel1 is not None and panel2 is not None and panel1 != panel2: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: switching from "+current_name+" to "+panel_name + self.swapPanel(panel1, panel2) + + for key in tabButtons.keys(): + try: + if key == name: + tabButtons[key].setSelected(True) + else: + tabButtons[key].setSelected(False) + except: + print "tab %s has no button??" 
% key + self.currentPanel.SetAutoLayout(1) + self.currentPanel.Layout() + self.hSizer.Layout() + elif self.mode == 'filesMode': + tabFiles = self.getGuiObj('files_detailsTab') + tabInfo = self.getGuiObj('info_detailsTab') + infoPanel = self.getGuiObj('details') + # sizer = infoPanel.GetContainingSizer() + filesPanel = self.getGuiObj('filesTab_files') + + if name == 'files_detailsTab' and not tabFiles.isSelected(): + tabFiles.setSelected(True) + tabInfo.setSelected(False) + self.swapPanel( infoPanel, filesPanel)#, sizer, 3) + + elif name == 'info_detailsTab' and not tabInfo.isSelected(): + tabFiles.setSelected(False) + tabInfo.setSelected(True) + self.swapPanel( filesPanel, infoPanel)#, sizer, 3) + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardDetails: %s: Unknown tab %s' % (self.mode,name) + return +# relayout the details panel to accomodate the new panel + + + self.currentPanel.SetAutoLayout(1) + self.currentPanel.Layout() + self.hSizer.Layout() + + elif self.mode in ["personsMode","friendsMode"]: + tabAdvanced = self.getGuiObj('advanced_detailsTab') + tabInfo = self.getGuiObj('info_detailsTab') + infoPanel = self.getGuiObj('detailsC') + advancedPanel = self.getGuiObj('personsTab_advanced') + if name == 'advanced_detailsTab' and not tabAdvanced.isSelected(): + tabAdvanced.setSelected(True) + tabInfo.setSelected(False) + self.swapPanel( infoPanel, advancedPanel) + elif name == 'info_detailsTab' and not tabInfo.isSelected(): + tabAdvanced.setSelected(False) + tabInfo.setSelected(True) + self.swapPanel( advancedPanel, infoPanel) + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardDetails: %s: Unknown tab %s' % (self.mode,name) + return +# print " advanced tab has label:",tabAdvanced.GetLabel() + + #relayout the details panel to accomodate the new panel + self.currentPanel.SetAutoLayout(1) + self.currentPanel.Layout() + self.hSizer.Layout() + + elif self.mode == "profileMode": +# print " try to switch to",name + if name.startswith("bgPanel"): + name = "profileDetails"+name[7:] +# if name == "profileDetails_Overall": +# name = 'panel' +# print " current panel is:",self.item +# if self.item is None: +# self.item = 'panel' + panel1 = self.currentPanel #getGuiObj(self.item) + panel2 = self.getGuiObj(name) + if panel1 is not None and panel2 is not None and panel1 != panel2: +#=============================================================================== +# print " switch from %s[%s] to %s[%s]" % (panel1.GetName(), panel1.GetParent().GetName(), panel2.GetName(), panel2.GetParent().GetName()) +# if isinstance(panel1,tribler_topButton): +# print " set unselected for",panel1.GetName() +# panel1.setSelected(False) +# else: +# print " panel1 ",panel1.GetName()," is of type ",panel1.__class__.__name__ +# if panel2.__class__.__name__.endswith("tribler_topButton"): +# print " set selected for",panel2.GetName() +# panel2.setSelected(True) +# else: +# print " panel2 ",panel2.GetName()," is of type ",panel2.__class__.__name__ +#=============================================================================== + self.swapPanel(panel1, panel2) + #each time the panel changes, update the 'panel' reference in data list + self.data[self.mode]['panel'] = panel2 + #actually, update the currentPanel reference + self.currentPanel = panel2 +# self.item = name +# else: +# print " can't switch, one of the panel is None or the same panel" +# self.currentPanel.Layout() +# self.currentPanel.SetAutoLayout(1) +# self.hSizer.Layout() + 
if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: switch from %s[%s] to %s[%s]" % (panel1.GetName(), panel1.GetParent().GetName(), panel2.GetName(), panel2.GetParent().GetName()) + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardDetails: Tab (%s) for this mode (%s) not yet implemented' % (name,self.mode) + return + + self.setData(self.item) + self.refreshStandardDetailsHeight() + + + def swapPanel(self, oldpanel, newpanel, sizer=None, index=-1): + """replaces in a sizer a panel with another one to simulate tabs""" + if sizer is None: + sizer = oldpanel.GetContainingSizer() + if not sizer: + return #could not swap + #if index not given, use sizer's own replace method + if index == -1: + index = 0 + for panel in sizer.GetChildren(): + if panel.GetWindow() == oldpanel: + break + index = index + 1 + if index == len(sizer.GetChildren()): + return #error: index not found so nothing to change +# sizerItem = sizer.Replace(oldpanel, newpanel) +# print "found index is:",index,"number of children in sizer:",len(sizer.GetChildren()) + # remove info tab panel + sizer.Detach(oldpanel) + oldpanel.Hide() + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: found sizer equal to hSizer?",(sizer==self.hSizer) + # add files tab panel + newpanel.SetAutoLayout(1) + newpanel.Layout() + if not newpanel.IsShown(): + newpanel.Show() + sizer.Insert(index, newpanel, 0, wx.ALL|wx.EXPAND, 0) + sizer.Layout() + + def getAlternativeTabPanel(self, name, parent=None): + "Load a tabPanel that was not loaded as default" + panel = self.getGuiObj(name) + if panel: + return panel + else: + # generate new panel + xrcResource = os.path.join(self.guiUtility.vwxGUI_path, name+'.xrc') + if os.path.exists(xrcResource): + panelName = name + if parent is None: + parent = self.currentPanel + panel = self.loadXRCPanel(xrcResource, panelName, parent=parent) + if panel is not None and self.tabElements.has_key(name): + for element in self.tabElements[name]: + xrcElement = xrc.XRCCTRL(panel, element) + if not xrcElement: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardDetails: Error: Could not identify xrc element: %s for mode %s' % (element, self.mode) + pass + self.data[self.mode][name+'_'+element] = xrcElement + + self.data[self.mode][name] = panel + + return panel + + def mouseAction(self, event): + """ Arno: apparently not used, see GUIUtility.buttonClicked() """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardDetails: mouseAction' + + obj = event.GetEventObject() + #print obj + + if not self.data: + return + if obj == self.downloadButton: + self.download(self.data) + # --tb-- +# if obj == self.optionsButtonLibrary: +# # zelfde menu als rechterMuisKnop +# print "optionsButton" +# self.rightMouseAction(event) + elif obj == self.refreshButton: + #and self.refreshButton.isEnabled(): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: refresh seeders and leechers" + #self.swarmText.SetLabel(self.utility.lang.get('refreshing')+'...') + #self.swarmText.Refresh() + + self.refresh(self.data) + + def rightMouseButton(self, event): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardDetails: --tb-- keydown function(2)' + menu = self.guiUtility.OnRightMouseAction(event) + if menu is not None: + self.PopupMenu(menu, (-1,-1)) + + + def refresh(self, 
torrent): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardDetails: refresh ' + repr(torrent.get('name', 'no_name')) + check = TorrentChecking(torrent['infohash']) + check.start() + + +# def isEnabled(self): +# return self.enabled + + def _download_torrentfile_from_peers(self, torrent, callback): + """ + TORRENT is a dictionary containing torrent information used to + display the entry on the UI. it is NOT the torrent file! + + CALLBACK is called when the torrent is downloaded. When no + torrent can be downloaded the callback is ignored + """ + def success_callback(*args): + # empty the permids list to indicate that we are done + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: _download_torrentfile_from_peers: received .torrent from peer" + if state[0]: + state[0] = False + callback(*args) + + def next_callback(timeout): + """ + TIMEOUT: when TIMEOUT>=0 then will try another peer after TIMEOUT seconds. + """ + if state[0] and state[1]: + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: _download_torrentfile_from_peers: trying to .torrent download from peer.",len(state[1])-1,"other peers to ask" + self.utility.session.download_torrentfile_from_peer(state[1].pop(0), torrent['infohash'], success_callback) + if timeout >= 0: + next_callback_lambda = lambda:next_callback(timeout) + guiserver.add_task(next_callback_lambda, timeout) + + guiserver = GUITaskQueue.getInstance() + state = [True, torrent['query_permids'][:]] + torrent['query_torrent_was_requested'] = True + + # The rules and policies below can be tweaked to increase + # performace. More parallel requests can be made, or the + # timeout to ask more people can be decreased. All at the + # expence of bandwith. + if torrent['torrent_size'] > 50 * 1024: + # this is a big torrent. to preserve bandwidth we will + # request sequentially with a large timeout + next_callback(3) + + elif 0 <= torrent['torrent_size'] <= 10 * 1024: + # this is a small torrent. bandwidth is not an issue so + # download in parallel + next_callback(-1) + next_callback(1) + + else: + # medium and unknown torrent size. + next_callback(1) + + def torrent_is_playable(self, torrent=None, default=True, callback=None): + """ + TORRENT is a dictionary containing torrent information used to + display the entry on the UI. it is NOT the torrent file! + + DEFAULT indicates the default value when we don't know if the + torrent is playable. + + CALLBACK can be given to result the actual 'playable' value + for the torrent after some downloading/processing. The DEFAULT + value is returned in this case. 
Will only be called if + self.item == torrent + """ + if torrent is None: + torrent = self.item + + if 'torrent_file_name' not in torrent or not torrent['torrent_file_name']: + torrent['torrent_file_name'] = get_filename(torrent['infohash']) + torrent_dir = self.utility.session.get_torrent_collecting_dir() + torrent_filename = os.path.join(torrent_dir, torrent['torrent_file_name']) + + if os.path.isfile(torrent_filename): + tdef = TorrentDef.load(torrent_filename) + if tdef.get_files(exts=videoextdefaults): + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "standardDetails:torrent_is_playable is playable" + return True + else: + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "standardDetails:torrent_is_playable is NOT playable" + return False + + elif callback: + # unknown, figure it out and return the information using + # a callback + + if 'query_permids' in torrent and not torrent.get('myDownloadHistory'): + def got_requested_torrent(infohash, metadata, filename): + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "standardDetails:torrent_is_playable Downloaded a torrent" + # test that we are still focussed on the same torrent + if torrent_filename.endswith(filename) and self.item == torrent: + # recursive call + playable = self.torrent_is_playable(torrent, default=default) + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "standardDetails:torrent_is_playable performing callback. is playable", playable + callback(torrent, playable) + self._download_torrentfile_from_peers(torrent, got_requested_torrent) + + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "standardDetails:torrent_is_playable returning default", default + return default + + + def download(self, torrent = None, dest = None, secret = False, force = False, vodmode = False): + if torrent is None: + torrent = self.item + + +# if self.GetName() == 'download': + + force = True + if (torrent is None or torrent.get('myDownloadHistory')) and not force: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: download: Bailout" + return + + #print "**** standdetail: download", `torrent` + + if torrent.get('web2'): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: download: Playing WEB2 video: " + torrent['url'] + self.videoplayer.play_url(torrent['url']) + self.setDownloadbutton(torrent=self.item, item = self.downloadButton2) + return True + + if 'query_permids' in torrent and not torrent.get('myDownloadHistory'): + sesscb_got_requested_torrent_lambda = lambda infohash,metadata,filename:self.sesscb_got_requested_torrent(torrent,infohash,metadata,filename,vodmode) + self._download_torrentfile_from_peers(torrent, sesscb_got_requested_torrent_lambda) + + # Show error if torrent file does not come in + tfdownload_timeout_lambda = lambda:self.guiserv_tfdownload_timeout(torrent) + guiserver = GUITaskQueue.getInstance() + guiserver.add_task(tfdownload_timeout_lambda,20) + + # Show pending colour + self.guiUtility.standardOverview.refreshGridManager() + + #self.setDownloadbutton(torrent=self.item, item = self.downloadButton2) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", torrent, torrent.keys() + return True + + torrent_dir = self.utility.session.get_torrent_collecting_dir() + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'standardDetails: download: got 
torrent to download', 'torrent_file_name' in torrent, torrent_dir, torrent['torrent_file_name'] + + if 'torrent_file_name' not in torrent: + torrent['torrent_file_name'] = get_filename(torrent['infohash']) + torrent_filename = os.path.join(torrent_dir, torrent['torrent_file_name']) + + if torrent.get('name'): + name = torrent['name'] + else: + name = showInfoHash(torrent['infohash']) + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: download: Preparing to start:",`name` + + if os.path.isfile(torrent_filename): + + clicklog={'keywords': self.guiUtility.torrentsearch_manager.searchkeywords[self.mode], + 'reranking_strategy': self.guiUtility.torrentsearch_manager.rerankingStrategy[self.mode].getID()} + if "click_position" in torrent: + clicklog["click_position"] = torrent["click_position"] + + + # Api download + d = self.utility.frame.startDownload(torrent_filename,destdir=dest, + clicklog=clicklog,name=name,vodmode=vodmode) ## remove name=name + if d: + if secret: + self.torrent_db.setSecret(torrent['infohash'], secret) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardDetails: download: download started' + # save start download time. + #self.setDownloadbutton(torrent=self.item, item = self.downloadButton2) + #torrent['download_started'] = time() + #torrent['progress'] = 0.0 + self.setBelongsToMyDowloadHistory(torrent, True) + return True + else: + return False + else: + + # Torrent not found + str = self.utility.lang.get('delete_torrent') % name + dlg = wx.MessageDialog(self, str, self.utility.lang.get('delete_dead_torrent'), + wx.YES_NO|wx.NO_DEFAULT|wx.ICON_INFORMATION) + result = dlg.ShowModal() + dlg.Destroy() + if result == wx.ID_YES: + infohash = torrent['infohash'] + self.torrent_db.deleteTorrent(infohash, delete_file=True, commit = True) + + + + return True + else: + return False + + def sesscb_got_requested_torrent(self,querytorrent,infohash,metadata,filename,vodmode): + """ The torrent file requested from another peer came in. + @param querytorrent The original torrent record as shown on the screen + @param infohash The infohash of the torrent file. + @param metadata The contents of the torrent file (still bencoded) + @param vodmode Whether to download in VOD mode (lambda added) + """ + # Called by SessionCallback thread + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: sesscb_got_requested_torrent:",`infohash` + + # Update the torrent record, and refresh the view afterwards such + # that it shows as a torrent being downloaded. + querytorrent['torrent_file_name'] = filename + self.setBelongsToMyDowloadHistory(querytorrent, True) + + wx.CallAfter(self.download,querytorrent,force=True,vodmode=vodmode) + wx.CallAfter(self.guiUtility.standardOverview.refreshGridManager) + + def setBelongsToMyDowloadHistory(self,torrent, b): + """Set a certain new torrent to be in the download history or not + Should not be changed by updateTorrent calls""" + + # DB registration and buddycast notification is done in LaunchManyCore.add() + # Currently no removal function. 
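# [editor's sketch, simplified and with invented names -- not part of the patch]
# The _download_torrentfile_from_peers()/sesscb_got_requested_torrent() pair
# above asks the peers that answered the search one by one for the .torrent
# file and lets only the first reply through; the real code paces retries with
# GUITaskQueue timers, which this stand-alone stand-in leaves out.

def make_torrentfile_requester(peer_ids, request_from_peer, on_first_reply):
    """Return ask_next(); each call asks one more pending peer."""
    state = {'waiting': True, 'pending': list(peer_ids)}

    def success(*reply):
        if state['waiting']:          # ignore every reply after the first one
            state['waiting'] = False
            on_first_reply(*reply)

    def ask_next():
        if state['waiting'] and state['pending']:
            request_from_peer(state['pending'].pop(0), success)

    return ask_next

# usage sketch (two immediate requests, as done above for small torrents):
#   ask = make_torrentfile_requester(torrent['query_permids'],
#             lambda permid, cb: session.download_torrentfile_from_peer(
#                 permid, torrent['infohash'], cb),
#             got_requested_torrent)
#   ask(); ask()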
+ torrent['myDownloadHistory'] = True + + + def guiserv_tfdownload_timeout(self,torrent): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: tdownload_timeout: Did we receive",`torrent['name']` + dbrecord = self.torrent_db.getTorrent(torrent['infohash']) + d = self.getData() + if d is not None: + selinfohash = d.get('infohash',None) + if dbrecord is None and torrent['infohash'] == selinfohash: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: tdownload_timeout: Couldn't get torrent from peer",`torrent['name']` + wx.CallAfter(self.tfdownload_timeout_error) + + def tfdownload_timeout_error(self): + self.videoplayer.set_player_status("Error starting download. Could not get metadata from remote peer.") + + def setTorrentThumb(self, mode, torrent, thumbPanel, size = 'large'): + + if not thumbPanel: + return + + thumbPanel.setBackground(wx.BLACK) + if mode in ['filesMode', 'libraryMode']: + self.getThumbnailLarge(torrent,thumbPanel, size) + elif mode in ['personsMode', 'friendMode']: + # get thumbimage of person + if False: + pass + else: + default = self.iconsManager.get_default('personsMode','DEFAULT_THUMB') + thumbPanel.setBitmap(default) + + def addAsFriend(self): + # add the current user selected in details panel as a friend or delete + if self.mode in ["personsMode","friendsMode"]: + peer_data = self.item + if peer_data is not None and peer_data.get('permid'): + + self.addasfriendcount += 1 + now = time() + diff = now - self.addasfriendlast + if self.addasfriendcount >= 2 and diff < 1.0: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: addAsFriend: ratelimiter!" + return + if diff > 10.0: + self.addasfriendcount = 0 + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: addAsFriend: stats",self.addasfriendcount,diff + + #self.friend_db.toggleFriend(peer_data['permid']) + fs = peer_data['friend'] + if fs == FS_NOFRIEND or fs == FS_I_DENIED or fs == FS_HE_DENIED: + # Invite him, reinvite him + self.utility.session.send_friendship_message(peer_data['permid'],F_REQUEST_MSG) + elif fs == FS_MUTUAL or fs == FS_I_INVITED: + # Remove friendship + self.utility.session.send_friendship_message(peer_data['permid'],F_RESPONSE_MSG,approved=False) + elif fs == FS_HE_INVITED: + # Confirm friendship + self.utility.session.send_friendship_message(peer_data['permid'],F_RESPONSE_MSG,approved=True) + self.addasfriendlast = time() + + + def refreshUploadStats(self, dslist): + # Update the overrall uploading information + if self.mode in ['libraryMode']: + if self.getGuiObj('upload_detailsTab').isSelected(): + tab = 'uploadTab_details' + t4t_list = self.getGuiObj('t4t_peers', tab = tab) + t4t_list.setData(dslist) + + g2g_list = self.getGuiObj('g2g_peers', tab = tab) + g2g_list.setData(dslist) + + def refreshTorrentStats(self,dslist): + """ Called by GUI thread """ + nactive = 0 + + tl = [] + totaldlspeed = 0.0 + totalulspeed = 0.0 + for ds in dslist: + d = ds.get_download() + progress = ds.get_progress() + + + totaldlspeed += ds.get_current_speed(DOWNLOAD) + totalulspeed += ds.get_current_speed(UPLOAD) + + status = ds.get_status() + if status != DLSTATUS_STOPPED and status != DLSTATUS_STOPPED_ON_ERROR: + nactive += 1 + + # Only show active downloading unfinished torrents + if progress < 1.0: + tl.append([progress,d]) + + + + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: 
stats:",`d.get_def().get_name()`,progress,status + + + + # Reverse sort on percentage done, get top 4 + tl.sort(revtcmp) + ml = min(len(tl),4) + newtl = tl[:ml] + + for i in range(4): + if i < ml: + (progress,d) = newtl[i] + progresstxt = progress2txt(progress) + file = d.get_def().get_name_as_unicode() + else: + progresstxt = '' + file = '' + tname = 'download'+str(i+1) + pname = 'percent'+str(i+1) + tlabel = self.data['status'][tname] + plabel = self.data['status'][pname] + #print "Setting",pname,"to",progresstxt + tlabel.SetLabel(file[:45]) + plabel.SetLabel(progresstxt[:45]) + statdlpanel = self.data['status']['panel'] + + self.refreshTorrentTotalStats(nactive,totaldlspeed,totalulspeed) + + statdlpanel.Refresh() + + + def refreshTorrentTotalStats(self,nactive,totaldlspeed,totalulspeed): + """ Called by GUI thread """ + leftlabel = self.data['status']['Downloading'] + rightlabel = self.data['status']['downSpeed'] + rightlabel2 = self.data['status']['upSpeed'] + + lefttext = self.utility.lang.get('downloading')+' ('+str(nactive)+')' + righttxt = str(int(totaldlspeed))+' KB/s' + righttxt2 = str(int(totalulspeed))+' KB/s' + leftlabel.SetLabel(lefttext) + rightlabel.SetLabel(righttxt) + rightlabel2.SetLabel(righttxt2) + + def addToPlaylist(self, name ='--', add=False): + playListPanel = self.data['status']['playList'] + + if playListPanel.GetChildren() == []: + vSizer = wx.BoxSizer(wx.VERTICAL) + playListPanel.SetSizer(vSizer) + playListPanel.SetAutoLayout(1) + else: + vSizer = playListPanel.GetSizer() + + if not add and len(self.playList) > 0: + playListPanel.DestroyChildren() + self.playList = [] + + if name != '': + self.playList.append(name) + + hSizer = wx.BoxSizer(wx.HORIZONTAL) + text = wx.StaticText(playListPanel,-1,"",wx.Point(0,0)) + text.SetSize((-1, 12)) +# text.SetBackgroundColour(wx.WHITE) + text.SetLabel(name) + hSizer.Add(text, 1, wx.TOP, 2) + + progressBar = ImagePanel(playListPanel, -1, wx.DefaultPosition, wx.Size(30,10), name='progressBar') + hSizer.Add(progressBar, 0, wx.FIXED_MINSIZE|wx.TOP|wx.ALIGN_RIGHT, 4) + + vSizer.Add(hSizer, 0, wx.EXPAND, 0) + + number = len(self.playList) + playListPanel.SetMinSize((-1, (number*15+12))) + playListPanel.Layout() + playListPanel.GetParent().Layout() #Layout of statusDownloads + self.Layout() #Layout of StandardDetails + + def updateLastCheck(self, event=None): + #print 'updateLastCheck' + if self.item and self.item.has_key('last_check_time'): + last_time = self.item.get('last_check_time') + if last_time and type(last_time) == int: + self.getGuiObj('refresh').SetToolTipString('%s: %s' % (self.utility.lang.get('last_checked'), friendly_time(last_time))) + event.Skip() + + """ + def subscrNeedsGUIUpdate(self,todayl,yesterdayl): + update = True + if len(todayl) > 0: + if self.subscrDataCopy_today_top is not None and self.subscrDataCopy_today_top == todayl[0]: + update = False + self.subscrDataCopy_today_top = todayl[0] + + if len(yesterdayl) > 0: + if self.subscrDataCopy_yday_top is not None and self.subscrDataCopy_yday_top == yesterdayl[0]: + update = False + self.subscrDataCopy_yday_top = yesterdayl[0] + return update + """ + + def getThumbnailLarge(self,torrent,thumbPanel, size='large'): + readable = torrent.get('metadata',{}).get('ThumbReadable') + if readable == False: + default = self.iconsManager.getCategoryIcon('filesMode',torrent.get('category'), 'large') + thumbPanel.setBitmap(default) + return + + if 'preview' in torrent: + thumbnailString = torrent['preview'] + else: + # Arno: Read big image on demand + torrent_dir = 
self.utility.session.get_torrent_collecting_dir() + torrent_filename = os.path.join(torrent_dir, torrent['torrent_file_name']) + metadata = loadAzureusMetadataFromTorrent(torrent_filename) + if metadata: + thumbnailString = metadata.get('Thumbnail') + + else: + thumbnailString = None + + + if 'metadata' not in torrent: + # Dump the raw data + if thumbnailString: + del metadata['Thumbnail'] + + torrent['metadata'] = metadata + + if thumbnailString: + img = createThumbImage(thumbnailString) + + #print 'Found thumbnail: %s' % thumbnailString + iw, ih = img.GetSize() + w, h = thumbPanel.GetSize() + if (iw/float(ih)) > (w/float(h)): + nw = w + nh = int(ih * w/float(iw)) + else: + nh = h + nw = int(iw * h/float(ih)) + if nw != iw or nh != ih: + #print 'Rescale from (%d, %d) to (%d, %d)' % (iw, ih, nw, nh) + try: + # if wx >= 2.7, use Bicubic scaling + img.Rescale(nw, nh, quality = wx.IMAGE_QUALITY_HIGH) + except: + img.Rescale(nw, nh) + bmp = wx.BitmapFromImage(img) + + thumbPanel.setBitmap(bmp) + torrent['metadata']['ThumbReadable'] = True + else: + #print 'Torrent: %s' % torrent + torrent['metadata']['ThumbReadable'] = False + + #print "****** torrent", torrent + + default = self.iconsManager.getCategoryIcon('filesMode',torrent.get('category','all'), 'large') + thumbPanel.setBitmap(default) + + def refreshStandardDetailsHeight(self, panel = None): + if not panel: + panel = self.currentPanel + margin = 6 + if self.data.get('status',{}).get('panel'): + statusPanelHeight = self.data['status']['panel'].GetSize()[1] + else: + statusPanelHeight = 0 + + newHeight = panel.GetSize()[1] + statusPanelHeight + margin +# size = (20,newHeight) +# self.SetSize(size) +# self.SetMinSize(size) +# self.SetMaxSize(size) + self.GetContainingSizer().Layout() + # Resize scrollWindow to make scrollbars update to new windowsize + self.guiUtility.scrollWindow.FitInside() + self.Refresh() + + if DEBUG: + print 'StandardDetails: setting height of stand.details to: %s' % str(newHeight) + + def topNListText(self, tab): + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: topNListText ^^^^^^^^^" + + if self.top_stats is None: + return + + top_stats = self.top_stats + + top = top_stats['top'] + #total_up = top_stats['total_up'] + #total_down = top_stats['total_down'] + tribler_up = top_stats['tribler_up'] + tribler_down = top_stats['tribler_down'] + + rank = 1 + topText = '' + for permid, up, down in top: + + # up and down are integers in KB in the database + # (for overhead limitation) + amount_str_up = self.utility.size_format(up) + amount_str_down = self.utility.size_format(down) + + name = self.bartercastdb.getName(permid) + + topText += '%d. %s%s up: %s (down: %s)%s%s' % (rank, name, os.linesep, + amount_str_up, amount_str_down, os.linesep, os.linesep) + rank+=1 + + self.getGuiObj('descriptionField0', tab = tab).SetLabel(topText) + self.getGuiObj('descriptionField0', tab = tab).Refresh() + self.getGuiObj('downloadedNumberT', tab = tab).SetLabel(self.utility.size_format(tribler_down)) + self.getGuiObj('uploadedNumberT', tab = tab).SetLabel(self.utility.size_format(tribler_up)) + + + def seldomReloadData(self): + # Arno: this involves reading a potentially huge db, do only on + # clicks that show overview panel. + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardDetails: seldomReloadData!!!!!!!!" 
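# [editor's sketch, not part of the patch] The rescaling in getThumbnailLarge()
# above fits the thumbnail inside the panel while preserving aspect ratio: a
# relatively wider image pins the width, otherwise the height is pinned.  The
# same arithmetic in isolation:

def fit_inside(iw, ih, w, h):
    """Return (nw, nh): image (iw, ih) scaled to fit panel (w, h), aspect kept."""
    if iw / float(ih) > w / float(h):  # image is wider (relatively) than the panel
        nw, nh = w, int(ih * w / float(iw))
    else:                              # image is taller (relatively), or equal
        nh, nw = h, int(iw * h / float(ih))
    return nw, nh

assert fit_inside(400, 200, 100, 100) == (100, 50)   # wide image: width pinned
assert fit_inside(200, 400, 100, 100) == (50, 100)   # tall image: height pinned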
+ + if not self.bartercastdb: + self.bartercastdb = self.utility.session.open_dbhandler(NTFY_BARTERCAST) + + self.top_stats = self.bartercastdb.getTopNPeers(10) + + + def updateCallback(self, item): + "Update callback handling for this item" + session = self.guiUtility.utility.session + session.remove_observer(self.db_callback) + if item is None: + return + if self.mode in ['filesMode', 'libraryMode']: + session.add_observer(self.db_callback, NTFY_TORRENTS, [NTFY_UPDATE, NTFY_DELETE], item['infohash']) + elif self.mode in ['personsMode', 'friendsMode']: + session.add_observer(self.db_callback, NTFY_PEERS, [NTFY_UPDATE, NTFY_DELETE], item['permid']) + elif self.mode == 'subscriptionsMode': + pass + elif self.mode == 'profileMode': + pass + + def db_callback(self,subject,changeType,objectID,*args): + # called by threadpool thread + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'stdDetails: db_callback: %s %s %s %s' % (subject, changeType, `objectID`, args) + db_handler = self.guiUtility.utility.session.open_dbhandler(subject) + if subject == NTFY_PEERS: + newitem = db_handler.getPeer(objectID) + elif subject in (NTFY_TORRENTS): + newitem = db_handler.getTorrent(objectID) + + + + wx.CallAfter(self.setData, newitem) + +def revtcmp(a,b): + if a[0] < b[0]: + return 1 + elif a[0] == b[0]: + return 0 + else: + return -1 + +def reverse_torrent_insertime_cmp(a,b): + if a['insert_time'] < b['insert_time']: + return 1 + elif a['insert_time'] == b['insert_time']: + return 0 + else: + return -1 + +def getShortTrackerFormat(n): + try: + t = urlparse.urlsplit(n) + short = t[1] + idx = t[1].find(':') + if idx == -1: + short = t[1] + else: + short = t[1][:idx] + if sys.platform == 'linux2': + short = short[:27] + except: + short = n[:27] + return ' '+short + + +def progress2txt(progress): + # Truncate the progress value rather than round down + # (will show 99.9% for incomplete torrents rather than 100.0%) + progress = int(progress * 1000)/10.0 + + return ('%.1f' % progress) + "%" diff --git a/tribler-mod/Tribler/Main/vwxGUI/standardFilter.py b/tribler-mod/Tribler/Main/vwxGUI/standardFilter.py new file mode 100644 index 0000000..dbb04ed --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/standardFilter.py @@ -0,0 +1,161 @@ +from time import localtime, strftime +# Written by Jelle Roozenburg, Maarten ten Brinke +# see LICENSE.txt for license information +import wx, sys + +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility +from traceback import print_exc +from Tribler.Category.Category import Category +from Tribler.Main.vwxGUI.GridState import GridState +from font import * + +DEBUG = False + +class standardFilter(wx.Panel): + """ + Panel with automatic backgroundimage control. + """ + def __init__(self, filterData = []): + self.filterData = filterData + self.filterState = {} + self.filters = {} + self.guiUtility = GUIUtility.getInstance() + self.utility = self.guiUtility.utility + self.state = None + pre = wx.PrePanel() + # the Create step is done by XRC. 
+ self.PostCreate(pre) + self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate) + + + def OnCreate(self, event): + self.Unbind(wx.EVT_WINDOW_CREATE) + wx.CallAfter(self._PostInit) + event.Skip() + return True + + def _PostInit(self): + # Do all init here + self.SetBackgroundColour(wx.Colour(153,153,153)) + self.parent = None + self.detailPanel = None + self.Show(False) + self.addComponents() + self.Show() + self.initReady = True + + self.Refresh(True) + self.Update() + + + def addComponents(self): + + #self.SetBackgroundColour(wx.BLUE) + self.hSizer = wx.BoxSizer(wx.HORIZONTAL) + # Add Sizer + self.hSizer.Add([20,10],0,wx.EXPAND|wx.FIXED_MINSIZE,0) + # filter 1 is making a selection + for name, pullDownData in self.filterData: + titles = [item[1] for item in pullDownData] + try: + #if self.filterState is None: + # self.filterState = {} + self.filterState[name] = pullDownData[0][0] + except: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardFilter: Error getting default filterState, data: %s' % pullDownData + raise + filter = wx.ComboBox(self,-1,titles[0], wx.Point(8,3),wx.Size(160,21),titles, wx.CB_DROPDOWN|wx.CB_READONLY) + #filter = wx.Choice(self,-1, wx.Point(8,3),wx.Size(180,21),titles) + filter.SetFont(wx.Font(10,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) +# filter.SetBackgroundColour(wx.WHITE) + filter.Bind(wx.EVT_COMBOBOX, self.mouseAction) + self.filters[name] = filter + self.hSizer.Add(filter, 0, wx.TOP|wx.LEFT|wx.RIGHT|wx.EXPAND|wx.FIXED_MINSIZE,3) + + self.hSizer.Add([8,10],0,wx.EXPAND|wx.FIXED_MINSIZE,2) + + + self.SetSizer(self.hSizer); + self.SetAutoLayout(1); + self.Layout(); + self.Refresh(True) + self.Update() + wx.CallAfter(self.mouseAction,[None]) + + def mouseAction(self, event = None): + filterState = {} + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardFilter: mouseAction: event is",event + for name, filter in self.filters.iteritems(): + idx = filter.GetSelection() + if idx == -1: + idx = 0 + values= [a[1] for a in self.filterData if a[0] == name][0] + filterState[name] = values[idx][0] + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardFilter: filterState is",filterState,"old",self.filterState + if filterState != self.filterState or self.state is None: + self.filterChanged(filterState) + self.filterState = filterState + + def filterChanged(self, dict_state): + try: + self.state = GridState(self.mode, + dict_state.get('category'), + None) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardFilter: %s returns %s' % (self.__class__.__name__, self.state) + self.guiUtility.standardOverview.filterChanged(self.state) + except: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardFilter: Error could not call standardOverview.filterChanged()' + print_exc() + +# def setSelectionToFilter(self,filterState): +# try: +# for j in range(len(filterState)): +# for i in range(len(self.filterData[j])): +# if filterState[j] == self.filterData[j][i][0]: +# self.filters[j].SetSelection(i) +# break +# except: +# pass +# self.filterState = filterState + + def getState(self): + # Arno, 2008-6-20: The state that mouseAction computers for libraryMode is not valid + #if not self.state: + # self.mouseAction() + return self.state + + + +class filesFilter(standardFilter): + def __init__(self): + nametuples = [('all', 'All')] + nametuples += Category.getInstance().getCategoryNames() + nametuples.append(('other', 
'Other')) + #nametuples.append(('search', 'Search Results')) + + filterData = [['category', nametuples]] + + standardFilter.__init__(self, filterData = filterData) + self.mode = 'filesMode' + + def refresh(self): + nametuples = [('all', 'All')] + nametuples += Category.getInstance().getCategoryNames() + nametuples.append(('other', 'Other')) + self.filterData = [['category', nametuples]] + #self._PostInit() + self.addComponents() + self.Show() + self.filterChanged(self.filterState) + + +class libraryFilter(filesFilter): + def __init__(self): + filesFilter.__init__(self) + self.mode = 'libraryMode' + diff --git a/tribler-mod/Tribler/Main/vwxGUI/standardFilter.py.bak b/tribler-mod/Tribler/Main/vwxGUI/standardFilter.py.bak new file mode 100644 index 0000000..92774ad --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/standardFilter.py.bak @@ -0,0 +1,160 @@ +# Written by Jelle Roozenburg, Maarten ten Brinke +# see LICENSE.txt for license information +import wx, sys + +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility +from traceback import print_exc +from Tribler.Category.Category import Category +from Tribler.Main.vwxGUI.GridState import GridState +from font import * + +DEBUG = False + +class standardFilter(wx.Panel): + """ + Panel with automatic backgroundimage control. + """ + def __init__(self, filterData = []): + self.filterData = filterData + self.filterState = {} + self.filters = {} + self.guiUtility = GUIUtility.getInstance() + self.utility = self.guiUtility.utility + self.state = None + pre = wx.PrePanel() + # the Create step is done by XRC. + self.PostCreate(pre) + self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate) + + + def OnCreate(self, event): + self.Unbind(wx.EVT_WINDOW_CREATE) + wx.CallAfter(self._PostInit) + event.Skip() + return True + + def _PostInit(self): + # Do all init here + self.SetBackgroundColour(wx.Colour(153,153,153)) + self.parent = None + self.detailPanel = None + self.Show(False) + self.addComponents() + self.Show() + self.initReady = True + + self.Refresh(True) + self.Update() + + + def addComponents(self): + + #self.SetBackgroundColour(wx.BLUE) + self.hSizer = wx.BoxSizer(wx.HORIZONTAL) + # Add Sizer + self.hSizer.Add([20,10],0,wx.EXPAND|wx.FIXED_MINSIZE,0) + # filter 1 is making a selection + for name, pullDownData in self.filterData: + titles = [item[1] for item in pullDownData] + try: + #if self.filterState is None: + # self.filterState = {} + self.filterState[name] = pullDownData[0][0] + except: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardFilter: Error getting default filterState, data: %s' % pullDownData + raise + filter = wx.ComboBox(self,-1,titles[0], wx.Point(8,3),wx.Size(160,21),titles, wx.CB_DROPDOWN|wx.CB_READONLY) + #filter = wx.Choice(self,-1, wx.Point(8,3),wx.Size(180,21),titles) + filter.SetFont(wx.Font(10,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) +# filter.SetBackgroundColour(wx.WHITE) + filter.Bind(wx.EVT_COMBOBOX, self.mouseAction) + self.filters[name] = filter + self.hSizer.Add(filter, 0, wx.TOP|wx.LEFT|wx.RIGHT|wx.EXPAND|wx.FIXED_MINSIZE,3) + + self.hSizer.Add([8,10],0,wx.EXPAND|wx.FIXED_MINSIZE,2) + + + self.SetSizer(self.hSizer); + self.SetAutoLayout(1); + self.Layout(); + self.Refresh(True) + self.Update() + wx.CallAfter(self.mouseAction,[None]) + + def mouseAction(self, event = None): + filterState = {} + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardFilter: mouseAction: event is",event + for name, filter in self.filters.iteritems(): + idx = 
filter.GetSelection() + if idx == -1: + idx = 0 + values= [a[1] for a in self.filterData if a[0] == name][0] + filterState[name] = values[idx][0] + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardFilter: filterState is",filterState,"old",self.filterState + if filterState != self.filterState or self.state is None: + self.filterChanged(filterState) + self.filterState = filterState + + def filterChanged(self, dict_state): + try: + self.state = GridState(self.mode, + dict_state.get('category'), + None) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardFilter: %s returns %s' % (self.__class__.__name__, self.state) + self.guiUtility.standardOverview.filterChanged(self.state) + except: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardFilter: Error could not call standardOverview.filterChanged()' + print_exc() + +# def setSelectionToFilter(self,filterState): +# try: +# for j in range(len(filterState)): +# for i in range(len(self.filterData[j])): +# if filterState[j] == self.filterData[j][i][0]: +# self.filters[j].SetSelection(i) +# break +# except: +# pass +# self.filterState = filterState + + def getState(self): + # Arno, 2008-6-20: The state that mouseAction computers for libraryMode is not valid + #if not self.state: + # self.mouseAction() + return self.state + + + +class filesFilter(standardFilter): + def __init__(self): + nametuples = [('all', 'All')] + nametuples += Category.getInstance().getCategoryNames() + nametuples.append(('other', 'Other')) + #nametuples.append(('search', 'Search Results')) + + filterData = [['category', nametuples]] + + standardFilter.__init__(self, filterData = filterData) + self.mode = 'filesMode' + + def refresh(self): + nametuples = [('all', 'All')] + nametuples += Category.getInstance().getCategoryNames() + nametuples.append(('other', 'Other')) + self.filterData = [['category', nametuples]] + #self._PostInit() + self.addComponents() + self.Show() + self.filterChanged(self.filterState) + + +class libraryFilter(filesFilter): + def __init__(self): + filesFilter.__init__(self) + self.mode = 'libraryMode' + diff --git a/tribler-mod/Tribler/Main/vwxGUI/standardGrid.py b/tribler-mod/Tribler/Main/vwxGUI/standardGrid.py new file mode 100644 index 0000000..0405922 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/standardGrid.py @@ -0,0 +1,1000 @@ +from time import localtime, strftime +# Written by Jelle Roozenburg, Maarten ten Brinke, Lucan Musat +# see LICENSE.txt for license information + +import sys, wx +from traceback import print_exc +from time import time + +from Tribler.Core.simpledefs import * +from Tribler.Core.Utilities.utilities import * + +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility +from Tribler.Main.vwxGUI.filesItemPanel import FilesItemPanel +from Tribler.Main.vwxGUI.LibraryItemPanel import LibraryItemPanel +from Tribler.Main.vwxGUI.ColumnHeader import ColumnHeaderBar +from Tribler.Main.vwxGUI.SearchGridManager import SEARCHMODE_NONE, SEARCHMODE_SEARCHING +from Tribler.Main.Dialogs.GUITaskQueue import GUITaskQueue +#from Tribler.Subscriptions.rss_client import TorrentFeedThread +from Tribler.Category.Category import Category + +DEBUG = False + + +class GridManager(object): + """ Grid manager handles: + - handling of notifies in grid + - retrieval of data from db on paging events + - retrieval of data from db on state changes from GUI + + """ + def __init__(self, grid, utility): + self.session = utility.session + + 
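# [editor's sketch, not part of the patch] Per the class docstring above,
# GridManager re-reads the database on paging events; the page-to-row-range
# arithmetic it uses further below in _getData() (page size = the number of
# item panels currently visible in the grid) is simply:

def page_range(page, items_per_page):
    """Rows [start, stop) fetched for a given zero-based page."""
    return (page * items_per_page, (page + 1) * items_per_page)

assert page_range(0, 25) == (0, 25)
assert page_range(2, 25) == (50, 75)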
self.peer_db = self.session.open_dbhandler(NTFY_PEERS) + self.torrent_db = self.session.open_dbhandler(NTFY_TORRENTS) + self.friend_db = self.session.open_dbhandler(NTFY_FRIENDS) + self.pref_db = self.session.open_dbhandler(NTFY_PREFERENCES) + self.mypref_db = self.session.open_dbhandler(NTFY_MYPREFERENCES) + self.search_db = self.session.open_dbhandler(NTFY_SEARCH) + + self.state = None + self.total_items = 0 + self.page = 0 + self.grid = grid + self.data = [] + self.callbacks_disabled = False + self.download_states_callback_set = False + self.dslist = [] + + self.torrentsearch_manager = utility.guiUtility.torrentsearch_manager + self.torrentsearch_manager.register(self.torrent_db, self.pref_db, self.mypref_db, self.search_db) + + self.peersearch_manager = utility.guiUtility.peersearch_manager + self.peersearch_manager.register(self.peer_db,self.friend_db) + self.guiserver = GUITaskQueue.getInstance() + + # Jie's hacks to avoid DB concurrency, REMOVE ASAP!!!!!!!!!!!! + # ARNOCOMMENT + #self.refresh_rate = 1.5 # how often to refresh the GUI in seconds + + self.cache_numbers = {} + self.cache_ntorrent_interval = 1 + self.cache_npeer_interval = 1 + + def set_state(self, state, reset_page = False): + self.state = state + if reset_page or self.inSearchMode(state): + self.page = 0 + self.refresh(update_observer = True) + + def refresh(self, update_observer = False): + """ + Refresh the data of the grid + """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardGrid: refresh",self.grid.initReady + #print_stack() + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardGrid: refresh",update_observer,"ready",self.grid.initReady,"state",self.state + + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '**********==============********* refresh', self.grid.initReady + #if not self.grid.initReady: + # standardgrid_refresh_lambda = lambda:self.refresh(update_observer=update_observer) + # wx.CallAfter(standardgrid_refresh_lambda) + # return + + if self.state is None: + return + + if update_observer: + self.setObserver() + + self.data, self.total_items = self._getData(self.state) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'GridManager: Data length: %d/%d' % (len(self.data), self.total_items) + self.grid.setData(self.data) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'GridManager: state: %s' % (self.state) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'GridManager: state: gave %d results, out of total %d' % (len(self.data), self.total_items) + #for torrent in self.data: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","GridManager: elem name is",`torrent['name']` + + + def set_page(self, page): + if page != self.page: + self.page = page + self.refresh() + + def get_total_items(self): + return self.total_items + + def get_number_torrents(self, state): + # cache the numbers to avoid loading db, which is a heavy operation + category_name = state.category + + library = (state.db == 'libraryMode') + key = (category_name, library) + now = time() + + if (key not in self.cache_numbers or + now - self.cache_numbers[key][1] > self.cache_ntorrent_interval): + + ntorrents = self.torrent_db.getNumberTorrents(category_name = category_name, library = library) + self.cache_numbers[key] = [ntorrents, now] + #if ntorrents > 1000: + # self.cache_ntorrent_interval = 120 + #elif ntorrents > 100 and self.cache_ntorrent_interval < 30: + # 
self.cache_ntorrent_interval = 30 + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '***** update get_number_torrents', ntorrents, self.cache_ntorrent_interval, time()-now + + return self.cache_numbers[key][0] + + def get_number_peers(self, state): + # cache the numbers to avoid loading db, which is a heavy operation + category_name = state.category + library = 'peer' + key = (category_name, library) + + if (key not in self.cache_numbers or + time() - self.cache_numbers[key][1] > self.cache_npeer_interval): + + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '*********** get_number_peers', key, self.cache_numbers[key], now - self.last_npeer_cache, self.cache_npeer_interval, self.grid.items + npeers = self.peer_db.getNumberPeers(category_name = category_name) + self.cache_numbers[key] = [npeers, time()] + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '***** update get_number_peers', npeers, self.cache_npeer_interval, time()-now + + return self.cache_numbers[key][0] + + def _getData(self, state): + #import threading + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'threading>>','****'*10, threading.currentThread().getName() + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardGrid: _getData: state is",state + + range = (self.page * self.grid.items, (self.page+1)*self.grid.items) + if state.db in ('filesMode', 'libraryMode'): + + # Arno: state.db should be NTFY_ according to GridState... + if state.db == 'libraryMode': + + total_items = self.get_number_torrents(state) # read from cache + sortcol = state.sort + if sortcol == "rameezmetric": + sortcol = "name" + data = self.torrent_db.getTorrents(category_name = state.category, + sort = sortcol, + range = range, + library = (state.db == 'libraryMode'), + reverse = state.reverse) + + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "GridManager: _getData: DB returned",len(data) + + # Arno, 2009-03-10: Not removing a torrent from MyPref when + # deleting from the Library means it keeps showing up there, + # even after removal :-( + # Now if the destdir is empty we don't show it. 
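# [editor's sketch, invented helper names -- not part of the patch]
# get_number_torrents()/get_number_peers() above memoise the expensive count
# queries per (category, library) key and only recompute once the cached value
# is older than a small interval.  The idea in isolation:

from time import time as _now

def cached_count(cache, key, compute, max_age_seconds):
    """Return a cached count, recomputing it when older than max_age_seconds."""
    now = _now()
    if key not in cache or now - cache[key][1] > max_age_seconds:
        cache[key] = (compute(), now)
    return cache[key][0]

# usage sketch:
#   counts = {}
#   n = cached_count(counts, ('Video', False),
#           lambda: torrent_db.getNumberTorrents(category_name='Video',
#                                                library=False), 1)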
+ # + def myDownloadHistoryFilter(torrent): + return torrent.get('myDownloadHistory', False) and torrent.get('destdir',"") != "" + + data = filter(myDownloadHistoryFilter,data) + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "GridManager: _getData: filter returned",len(data) + + else: + [total_items,data] = self.torrentsearch_manager.getHitsInCategory(state.db,state.category,range,state.sort,state.reverse) + + #if state.db == 'libraryMode': + if data is not None: + data = self.addDownloadStates(data) + elif state.db in ('personsMode', 'friendsMode'): + if state.db == 'friendsMode': + state.category = 'friend' + + if self.peersearch_manager.getSearchMode(state.db) == SEARCHMODE_NONE: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","GET GUI PEERS #################################################################################" + total_items = self.get_number_peers(state) + data = self.peer_db.getGUIPeers(category_name = state.category, + sort = state.sort, + reverse = state.reverse, + range = range, + get_online = True) + else: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SEARCH GUI PEERS $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$" + try: + [total_items,data] = self.peersearch_manager.getHits(state.db,range) + except: + print_exc() + + if state.db == 'friendsMode': + data = self.addCoopDLStatus(data) + + else: + raise Exception('Unknown data db in GridManager: %s' % state.db) + + return data, total_items + + def _last_page(self): + return self.total_items == 0 or (0 < len(self.data) < self.grid.items) + + def setObserver(self): + if self.state.db == 'libraryMode' or self.state.db == 'filesMode': + if not self.download_states_callback_set: + self.download_states_callback_set = True + + def reactivate(self): + # After a grid has been hidden by the standardOverview, network/db callbacks + # are not handled anymore. 
This function is called if a resize event is caught + # if callbacks were disabled, they are enabled again + + if self.callbacks_disabled: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", ('*' * 50 + '\n')*3 + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'Reactivating grid', self.grid.__class__.__name__ + self.callbacks_disabled = False + self.refresh(update_observer = True) + else: + # also refresh normally on resize (otherwise new rows cannot be added + self.refresh() + +# def download_state_network_callback(self, *args): +# """ Called by SessionThread from ABCApp """ +# if self.download_states_callback_set: +# if self.grid.isShowByOverview(): +# wx.CallAfter(self.download_state_gui_callback, *args) +# else: +# self.callbacks_disabled = True +# self.download_states_callback_set = False + + def item_network_callback(self, *args): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardGrid: item_network_callback",`args` + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '***** searchmode: ', self.torrentsearch_manager.getSearchMode(self.state.db) + + # only handle network callbacks when grid is shown + if not self.grid.isShowByOverview(): + self.callbacks_disabled = True + self.session.remove_observer(self.item_network_callback) #unsubscribe this function + else: + # 15/07/08 Boudewijn: only change grid when still searching + #if self.torrentsearch_manager.inSearchMode(self.state.db): # 25/07/08 Jie: it causes GUI never updated when not in search mode + self.itemChanged(*args) + + + def itemChanged(self,subject,changeType,objectID,*args): + "called by GuiThread" + if changeType == NTFY_INSERT: + self.itemAdded(subject, objectID, args) + elif changeType in (NTFY_UPDATE, NTFY_CONNECTION): + self.itemUpdated(subject, objectID, args) + elif changeType == NTFY_DELETE: + self.itemDeleted(subject, objectID, args) + else: + raise Exception('Unknown notify.changeType') + + def itemAdded(self,subject, objectID, args): + #if self._last_page(): # This doesn't work as the pager is not updated if page becomes full + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '******* standard Grid: itemAdded:', objectID, args, 'search?', self.torrentsearch_manager.inSearchMode(self.state.db) + if self.torrentsearch_manager.getSearchMode(self.state.db) == SEARCHMODE_SEARCHING: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'Grid refresh because search item added!!!=============================' + wx.CallAfter(self.refresh) + elif self.isRelevantItem(subject, objectID): + ##task_id = str(subject) + str(int(time()/self.refresh_rate)) + ##self.guiserver.add_task(lambda:wx.CallAfter(self.refresh), self.refresh_rate, id=task_id) + # that's important to add the task 3 seconds later, to ensure the task will be executed at proper time + self.refresh() + + def itemUpdated(self,subject, objectID, args): + # Both in torrent grid and peergrid, changed items can make new items appear on the screen + # Peers: when first buddycast + # Friends: if just became new friend + # Torrent: when status changes to 'good' + # So we have to alway refresh here + + #if (self._objectOnPage(subject, objectID) + if self.torrentsearch_manager.getSearchMode(self.state.db) == SEARCHMODE_NONE: + ##task_id = str(subject) + str(int(time()/self.refresh_rate)) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardGrid: itemUpdated",subject,`objectID`,`args` + 
##self.guiserver.add_task(lambda:wx.CallAfter(self.refresh), self.refresh_rate, id=task_id) + self.refresh() + + def itemDeleted(self,subject, objectID, args): + if self._objectOnPage(subject, objectID): + ##task_id = str(subject) + str(int(time()/self.refresh_rate)) + ##self.guiserver.add_task(lambda:wx.CallAfter(self.refresh), self.refresh_rate, id=task_id) + self.refresh() + + def download_state_gui_callback(self, dslist): + """ + Called by GUIThread + """ + self.dslist = dslist + if self.state.db == 'libraryMode': + for infohash in [ds.get_download().get_def().get_infohash() for ds in dslist]: + if self._objectOnPage(NTFY_TORRENTS, infohash): + self.refresh() + break + else: + # friendsMode + # self.refresh() + # We don't refresh on filesMode, but do need to add DownloadStates + # for the Play button to work. + self.addDownloadStates(self.data) + + def _objectOnPage(self, subject, objectID): + if subject == NTFY_PEERS: + id_name = 'permid' + elif subject in (NTFY_TORRENTS, NTFY_MYPREFERENCES, NTFY_SUPERPEERS): + id_name = 'infohash' + elif subject in (NTFY_YOUTUBE): + raise Exception('Not yet implemented') + + return objectID in [a[id_name] for a in self.data] + + def isRelevantItem(self, subject, objectID): + return True #Jie: let DB decide if the notifier should be sent + + db_handler = self.session.open_dbhandler(subject) + if subject == NTFY_PEERS: + peer = db_handler.getPeer(objectID) + ok = peer and (peer['last_connected']>0 or peer['friend']) + #if not ok: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'Gridmanager: Peer is not relevant: %s' % peer + return ok + elif subject in (NTFY_TORRENTS): + id_name = 'infohash' + torrent = db_handler.getTorrent(objectID) + ok = torrent is not None and torrent['status'] == 'good' and Category.getInstance().hasActiveCategory(torrent) + #if not ok: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'Gridmanager: Torrent is not relevant: %s' % torrent + return ok + elif subject == NTFY_MYPREFERENCES: + return True + + raise Exception('not yet implemented') + + def addDownloadStates(self, liblist): + # Add downloadstate data to list of torrent dicts + for ds in self.dslist: + infohash = ds.get_download().get_def().get_infohash() + for torrent in liblist: + if torrent['infohash'] == infohash: + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardGrid: addDownloadStates: adding ds for",`ds.get_download().get_def().get_name()` + torrent['ds'] = ds + break + return liblist + + + def addCoopDLStatus(self, liblist): + # Add downloadstate data to list of friend dicts + for ds in self.dslist: + helpers = ds.get_coopdl_helpers() + coordinator = ds.get_coopdl_coordinator() + + for friend in liblist: + if friend['permid'] in helpers: + # Friend is helping us + friend['coopdlstatus'] = u'Helping you with '+ds.get_download().get_def().get_name_as_unicode() + elif friend['permid'] == coordinator: + # Friend is getting help from us + friend['coopdlstatus'] = u'You help with '+ds.get_download().get_def().get_name_as_unicode() + #else: + # friend['coopdlstatus'] = u'Sleeping' + + return liblist + + def inSearchMode(self, state): + if state.db in ('filesMode', 'libraryMode'): + return self.torrentsearch_manager.getSearchMode(state.db) == SEARCHMODE_NONE + elif state.db in ('personsMode', 'friendsMode'): + return self.peersearch_manager.getSearchMode(state.db) == SEARCHMODE_NONE + else: + return False + + + def get_dslist(self): + return self.dslist + +class 
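
addDownloadStates above joins the periodic DownloadState list onto the torrent dicts by infohash so the item panels can show live status. A stand-alone sketch of that join, using an index instead of the patch's nested scan; add_download_states and infohash_of are names invented for this example:

    def add_download_states(torrents, dslist, infohash_of):
        """Attach a 'ds' entry to each torrent dict with a matching infohash.

        infohash_of stands in for the accessor chain the patch uses:
        ds.get_download().get_def().get_infohash().
        """
        by_hash = {infohash_of(ds): ds for ds in dslist}
        for torrent in torrents:
            ds = by_hash.get(torrent['infohash'])
            if ds is not None:
                torrent['ds'] = ds
        return torrents

The behaviour is the same as the nested loop in the patch; building the index once just avoids rescanning dslist for every torrent on large pages.
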
standardGrid(wx.Panel): + """ + Panel which shows a grid with static number of columns and dynamic number + of rows + """ + def __init__(self, cols, subPanelHeight, orientation='horizontal', viewmode = 'list', parent = None, name="standardGrid"): ## + self.data = None + self.detailPanel = None + self.orientation = orientation + self.subPanelClass = None + self.items = 0 #number of items that are currently visible + self.currentRows = 0 + self.sizeMode = 'auto' + self.columnHeader = None + self.topMargin = 5 + self.lastSize = None + self.panels = [] + self.viewmode = viewmode + self.guiUtility = GUIUtility.getInstance() + + self.guiUtility.standardGrid = self + + self.utility = self.guiUtility.utility + self.gridManager = GridManager(self, self.utility) + if not parent: + self.initReady = False + pre = wx.PrePanel() + # the Create step is done by XRC. + self.PostCreate(pre) + self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate) + else: + self.initReady = True + wx.Panel.__init__(self,parent,-1,name=name) + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardGrid: __init__: viewmode is",self.viewmode + + if type(cols) == int: + self.cols = cols + self.columnTypes = None + self.subPanelHeight = subPanelHeight + else: + self.columnTypes = cols + self.subPanelHeights = subPanelHeight + if self.viewmode == 'thumbnails': + self.cols = cols[0] + self.subPanelHeight = self.subPanelHeights[0] + elif self.viewmode == 'list': + self.cols = cols[1] + self.subPanelHeight = self.subPanelHeights[1] + else: + raise Exception('unknown viewmode: %s' % self.viewmode) + + self.superpeer_db = self.utility.session.open_dbhandler(NTFY_SUPERPEERS) + #self.torrentfeed = TorrentFeedThread.getInstance() + self.guiserver = GUITaskQueue.getInstance() + + if parent: + self.SetSize((675,500)) + self._PostInit() + #self.Show() + + def OnCreate(self, event): + self.Unbind(wx.EVT_WINDOW_CREATE) + wx.CallAfter(self._PostInit) + event.Skip() + return True + + + def _PostInit(self): + # Do all init here + + self.SetBackgroundColour(wx.WHITE) + + #self.cols = 5 + + self.Bind(wx.EVT_SIZE, self.onResize) + + self.addComponents() + self.calculateRows() + + if self.viewmode == 'list': + self.toggleColumnHeaders(True) + self.Show() + self.Layout() + self.Refresh() + + self.initReady = True + if self.data: + self.setData(self.data) + + def addComponents(self): + self.Show(False) + + self.SetBackgroundColour(wx.WHITE) + self.vSizer = wx.BoxSizer(wx.VERTICAL) + self.columnHeaderSizer = wx.BoxSizer(wx.HORIZONTAL) + self.columnHeaderSizer.Add((0,self.topMargin)) + self.vSizer.Add(self.columnHeaderSizer, 0, wx.ALL|wx.EXPAND, 0) + self.SetSizer(self.vSizer); + self.SetAutoLayout(1); + #self.Layout(); + #self.Refresh(True) + #self.Update() + #print "vSizer: %s, Panel: %s"% (self.vSizer.GetSize(), self.GetSize()) + + + #def Show(self, s): + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '%s is show(%s)' % (self, s) + #wx.Panel.Show(self, s) + + def onViewModeChange(self, event=None, mode = None): + if not self.initReady: + wx.CallAfter(self.onViewModeChange, event, mode) + return + + if not mode: + if type(event.GetEventObject()) == wx.Choice: + mode = event.GetEventObject().GetStringSelection() + + if self.viewmode != mode: + self.viewmode = mode + #oldcols = self.cols + self.updatePanel(self.currentRows, 0) + if mode == 'thumbnails': + self.cols = self.columnTypes[0] + self.subPanelHeight = self.subPanelHeights[0] + elif mode == 'list': + self.cols = self.columnTypes[1] + self.subPanelHeight = 
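
standardGrid.__init__ above accepts either a single column count or a (thumbnails, list) pair and picks one entry based on viewmode; onViewModeChange later re-picks. A small, wx-free sketch of that selection rule (pick_layout is an invented name, the tuple values come from filesGrid/libraryGrid):

    def pick_layout(cols, sub_panel_height, viewmode):
        """Return (columns, panel_height) for the requested view mode.

        cols / sub_panel_height may be plain ints or (thumbnails, list) pairs,
        matching the two call styles used by filesGrid and libraryGrid.
        """
        if isinstance(cols, int):
            return cols, sub_panel_height
        index = {'thumbnails': 0, 'list': 1}.get(viewmode)
        if index is None:
            raise ValueError('unknown viewmode: %s' % viewmode)
        return cols[index], sub_panel_height[index]

    print(pick_layout((5, 1), (5 * 22, 22), 'list'))        # -> (1, 22)
    print(pick_layout((5, 1), (5 * 22, 22), 'thumbnails'))  # -> (5, 110)
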
self.subPanelHeights[1] + self.currentRows = 0 + + #self.updatePanel(0, self.currentRows) + self.calculateRows() + #self.updateCols(oldcols, self.cols) + self.gridManager.refresh() + self.toggleColumnHeaders(mode == 'list') + + def onSizeChange(self, event=None): + if type(event.GetEventObject()) == wx.Choice: + value = event.GetEventObject().GetStringSelection() + else: + value = event.GetEventObject().GetValue() + + self.sizeMode = value + if value == 'auto': + self.guiUtility.updateSizeOfStandardOverview() + self.SetMinSize((-1, 20)) + else: + try: + wantedRows = int(value) / self.cols + self.SetSize((-1, wantedRows * self.subPanelHeight)) + self.SetMinSize((-1, wantedRows * self.subPanelHeight)) + self.guiUtility.standardOverview.growWithGrid() + self.guiUtility.standardOverview.Refresh() + except: + #print 'Exception!' + + raise + + + def refreshData(self): + self.setData(self.data) + + + def getData(self): + return self.data + + + def setData(self, dataList): + + #if dataList is None: + #datalength = 0 + #else: + #datalength = len(dataList) + + if type(dataList) == list or dataList is None: + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'grid.setData: list' + self.data = dataList + + if not self.initReady: + return + + self.refreshPanels() + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardGrid: start columns:',\ + # self.cols,'rows:',self.currentRows,'items:',self.items + + self.Layout() + + def updateItem(self, item, delete = False, onlyupdate = False): + "Add or update an item in the grid" + + if not item: + return + + # Get key to compare this item to others + key = None + for tempkey in ['infohash', 'permid', 'content_name']: + if item.has_key(tempkey): + key = tempkey + break + if not key: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardGrid: Error, could not find key to compare item: %s' % item + return + #get the current data source + if len(self.data)>0 and self.data[0].has_key("permid"): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","\n*****************************************************\n\ +* big problem *\n\ +* in torrentGrid, working on peer data!!!!! 
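
updateItem below identifies an item by whichever of infohash / permid / content_name it carries and then replaces, removes, or appends it in self.data. A self-contained sketch of that update-or-insert step; find_content_in_dictlist from Tribler.Core.Utilities.utilities is replaced here by an inline search, and upsert_item is an invented name:

    ID_KEYS = ('infohash', 'permid', 'content_name')

    def upsert_item(data, item, delete=False, only_update=False):
        key = next((k for k in ID_KEYS if k in item), None)
        if key is None:
            return data                      # nothing to compare on, as in the patch
        for i, existing in enumerate(data):
            if existing.get(key) == item[key]:
                if delete:
                    del data[i]
                else:
                    data[i] = item           # replace the matching entry in place
                return data
        if not (delete or only_update):
            data.append(item)                # brand-new item
        return data
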
*\n\ +* *\n\ +*****************************************************\n" + i = find_content_in_dictlist(self.data, item, key) + if i != -1: + if not delete: + self.data[i] = item + else: + self.data.remove(item) + elif not onlyupdate: + self.data.append(item) + self.refreshData() + + def refreshPanels(self): + "Refresh TorrentPanels with correct data and refresh pagerPanel" + if self.getStandardPager(): + self.standardPager.refresh() + + if self.data is None: + self.clearAllData() + else: + for i in xrange(0, self.items): + if i < len(self.data): + self.setDataOfPanel(i, self.data[i]) + else: + self.setDataOfPanel(i, None) + + self.updateSelection() + + def gridResized(self, rows): + self.items = self.cols * rows + self.refreshPanels() + + + + def getStandardPager(self): + try: + if self.standardPager: + return True + except: + return False + + def setPager(self, pager): + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardGrid: setPager called: %s' % pager + self.standardPager = pager + + def getSubPanel(self, keyfun=None): + raise NotImplementedError('Method getSubPanel should be subclassed') + + def setDataOfPanel(self, panelNumber, data): + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'Set data of panel %d with data: %s' % (panelNumber, data) + try: + if self.orientation == 'vertical': + hSizer = self.vSizer.GetItem(panelNumber%self.currentRows+1).GetSizer() + panel = hSizer.GetItem(panelNumber/ self.currentRows).GetWindow() + else: + hSizer = self.vSizer.GetItem(panelNumber/self.cols+1).GetSizer() + panel = hSizer.GetItem(panelNumber % self.cols).GetWindow() + + panel.setData(data) + except: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardGrid: Error: Could not set data in panel number %d, with %d cols" % (panelNumber, self.cols) + print_exc() + + def clearAllData(self): + for i in range(0, self.items): + self.setDataOfPanel(i, None) + + def onResize(self, event=None): + if self.GetSize() == self.lastSize: + return + self.lastSize = self.GetSize() + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "standardGrid: resize event: %s" % self.GetSize() + self.calculateRows(event) + self.gridManager.reactivate() + if event: + event.Skip() + + + + def calculateRows(self, event=None): + + size = self.GetSize() + oldRows = self.currentRows + if self.columnHeader: + columnHeaderHeight = self.columnHeader.GetSize()[1] + else: + columnHeaderHeight = self.topMargin + + if size[1] < 50 or self.subPanelHeight == 0: + self.currentRows = 0 + self.items = 0 + else: + self.currentRows = (size[1] - columnHeaderHeight - 79) / self.subPanelHeight + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'standardGrid: Height: %d, single panel is %d, so %d rows' % (size[1], self.subPanelHeight, self.currentRows) + self.items = self.cols * self.currentRows + + if oldRows != self.currentRows: #changed + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardGrid: Size updated to %d rows and %d columns, oldrows: %d'% (self.currentRows, self.cols, oldRows) + + self.updatePanel(oldRows, self.currentRows) + self.gridResized(self.currentRows) + + + def updateCols(self, oldCols, newCols): + + self.items = newCols * self.currentRows + if newCols > oldCols: + numNew = newCols - oldCols + for row in xrange(len(self.panels)): + hSizer = self.vSizer.GetItem(row).GetSizer() + for i in xrange(numNew): + dataPanel = 
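
calculateRows below converts the panel's pixel height into a row count, with the 79-pixel constant accounting for fixed chrome around the grid. The same arithmetic as a pure function; the patch relies on Python 2 integer division, written explicitly as // here, and the extra clamp to zero is added only for safety in this sketch:

    def rows_for_height(height_px, col_header_px, sub_panel_px, cols,
                        fixed_overhead_px=79, min_height_px=50):
        # Degenerate sizes give an empty grid, exactly as in calculateRows().
        if height_px < min_height_px or sub_panel_px == 0:
            return 0, 0
        rows = (height_px - col_header_px - fixed_overhead_px) // sub_panel_px
        rows = max(rows, 0)
        return rows, cols * rows            # (rows, visible items)

    print(rows_for_height(500, 20, 22, cols=1))   # -> (18, 18)
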
self.getSubPanel(self.keyTypedOnGridItem) + self.subPanelClass = dataPanel.__class__ + self.panels[row].append(dataPanel) + hSizer.Add(dataPanel, 1, wx.ALIGN_CENTER|wx.ALL|wx.GROW, 0) + elif newCols < oldCols: + numDelete = oldCols - newCols + for row in self.panels: + for i in xrange(numDelete): + panel = row[newCols] + panel.Destroy() + del row[newCols] + + + + def updatePanel(self, oldRows, newRows): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'Grid: updating from %d to %d rows' % (oldRows, newRows) + # put torrent items in grid + + if newRows > oldRows: + for i in range(oldRows, newRows): + hSizer = wx.BoxSizer(wx.HORIZONTAL) + self.panels.append([]) + + for panel in range(0, self.cols): + dataPanel = self.getSubPanel(self.keyTypedOnGridItem) + self.subPanelClass = dataPanel.__class__ + # add keylistener for arrow selection + #dataPanel.Bind(wx.EVT_KEY_UP, self.keyTypedOnGridItem) + self.panels[i].append(dataPanel) + #dataPanel.SetSize((-1, self.subPanelHeight)) + hSizer.Add(dataPanel, 1, wx.ALIGN_CENTER|wx.ALL|wx.GROW, 0) + self.vSizer.Add(hSizer, 0, wx.GROW|wx.ALIGN_CENTER_VERTICAL|wx.ALL, 0) + + elif newRows < oldRows: + #print "Destroying row %d up to %d" % (newRows, oldRows-1) + for row in range(oldRows-1, newRows-1, -1): + # Destroy old panels + for col in range(self.cols-1, -1, -1): #destroy panels right to left + panel = self.panels[row][col] + wx.CallAfter(panel.Destroy) + del self.panels[row][col] + + assert self.panels[row] == [], 'We deleted all panels, still the row is %s' % self.panels[row] + del self.panels[row] + self.vSizer.Detach(row+1) # detach hSizer of the row + # +1 compensated for columnheaders + + + + + + def updateSelection(self): + """Deselect all torrentPanels, but the one selected in detailPanel + If no torrent is selected in detailPanel, let first in grid be selected + """ + + try: + #print 'standardGrid: update selection' + if not self.hasDetailPanel(): + return + +# title = None + + id = self.detailPanel.getIdentifier() + + #print "standardGrid: updateSelection: detailsPanel has id",id,self.detailPanel + + number = 0 + rowIndex = 0 + for row in self.panels: + colIndex = 0 + for pan in row: + try: + panel_id = pan.getIdentifier() + #print "standardGrid: updateSelection: panel has id",`panel_id` + except: + panel_id = None + + if panel_id is None or repr(panel_id) != repr(id): + #print 'item deselected2' + pan.deselect(rowIndex,colIndex)#number = number) + else: + #pan.select(rowIndex,colIndex) + pan.select(rowIndex, + colIndex, + self.standardPager.currentPage, + self.cols, + self.currentRows) + number += 1 + colIndex += 1 + rowIndex += 1 + + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardGrid: QQQQQQQQQQQQQQQQQQQQQQQQQQQ QUERY RESULTS DISPLAYED" + self.Layout() + except: + # I sometimes get UnicodeErrors here somewhere + print_exc() + + + def deselectAll(self): + """Deselect all torrentPanels""" + + try: + #print 'standardGrid: update selection' + if not self.hasDetailPanel(): + return + +# title = None + + id = self.detailPanel.getIdentifier() + + #print "standardGrid: updateSelection: detailsPanel has id",id,self.detailPanel + + number = 0 + rowIndex = 0 + for row in self.panels: + colIndex = 0 + for pan in row: + try: + panel_id = pan.getIdentifier() + #print "standardGrid: updateSelection: panel has id",`panel_id` + except: + panel_id = None + + #if panel_id is None or repr(panel_id) != repr(id): + print >> sys.stderr , 'item deselected2' + pan.deselect(rowIndex,colIndex)#number = 
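
updateCols/updatePanel above keep self.panels (a row-major list of lists) in step with the sizers when the grid grows or shrinks. The list bookkeeping on its own, free of wx, with make_panel standing in for getSubPanel and destroy_panel for wx Destroy; the function name is invented for this sketch:

    def resize_grid(panels, new_rows, cols, make_panel, destroy_panel):
        old_rows = len(panels)
        if new_rows > old_rows:
            for _ in range(old_rows, new_rows):
                panels.append([make_panel() for _ in range(cols)])
        elif new_rows < old_rows:
            for row in range(old_rows - 1, new_rows - 1, -1):   # shrink from the bottom
                for panel in reversed(panels[row]):
                    destroy_panel(panel)
                del panels[row]
        return panels

    grid = resize_grid([], 3, 2, make_panel=dict, destroy_panel=lambda p: None)
    print('%d rows x %d cols' % (len(grid), len(grid[0])))   # 3 rows x 2 cols
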
number) + number += 1 + colIndex += 1 + rowIndex += 1 + self.Layout() + except: + # I sometimes get UnicodeErrors here somewhere + print_exc() + + + + + + + def hasDetailPanel(self): + if self.detailPanel: + return True + try: + self.detailPanel = self.guiUtility.standardDetails + except: + pass + return self.detailPanel is not None + + def keyTypedOnGridItem(self, event): + obj = event.GetEventObject() + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardGrid: keyTyped: in %s' % obj.__class__.__name__ + while obj.__class__ != self.subPanelClass: + obj = obj.GetParent() + + # Jelle: Turn of key navigation under windows. Windows already has a focus traversal policy and changes + # the focus of panel. + if sys.platform == 'win32': + return + + if not obj.selected and sys.platform != 'win32': + return + + keyCode = event.GetKeyCode() + # Get coord of keytyped panel + rowIndex = 0 + xpan = ypan = None + for row in self.panels: + colIndex = 0 + for pan in row: + if obj == pan: + (xpan, ypan) = colIndex, rowIndex + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardGrid: keyTyped: found: %d, %d' % (colIndex, rowIndex) + break + colIndex += 1 + rowIndex += 1 + if xpan == None: + raise Exception('Could not find selected panel') + xpanold = xpan + ypanold = ypan + if sys.platform != 'win32': + if keyCode == wx.WXK_UP: + ypan = max(0, ypan-1) + elif keyCode == wx.WXK_DOWN: + ypan = min(self.currentRows-1, ypan+1) + elif keyCode == wx.WXK_LEFT: + xpan = max(0, xpan -1) + elif keyCode == wx.WXK_RIGHT: + xpan = min(self.cols-1, xpan+1) + else: + if keyCode == wx.WXK_UP: + if xpan == self.cols-1: + xpan = 0 + else: + xpan+=1 + ypan = max(0, ypan-1) + elif keyCode == wx.WXK_DOWN: + if xpan == 0: + xpan = self.cols-1 + else: + xpan = xpan -1 + ypan = min(self.currentRows-1, ypan+1) + # Get data of new panel + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardGrid: Old: %s, New: %s' % ((xpanold, ypanold), (xpan, ypan)) + if xpanold != xpan or ypanold != ypan or sys.platform =='win32': + newpanel = self.panels[ypan][xpan] + if newpanel.data != None: + # select new panel + #newpanel.SetFocus() + self.guiUtility.selectData(newpanel.data) + event.Skip() + + def getFirstPanel(self): + try: + hSizer = self.vSizer.GetItem(1).GetSizer() + panel = hSizer.GetItem(0).GetWindow() + return panel + except: + return None + + def toggleColumnHeaders(self, show): + # show or hide columnheaders + if bool(self.columnHeader) == show: + return + if show: + panel = self.getFirstPanel() + if panel: + self.columnHeader = ColumnHeaderBar(self, panel) + self.columnHeaderSizer.Detach(0) + self.columnHeaderSizer.Add(self.columnHeader, 1, wx.EXPAND, 0) + self.columnHeaderSizer.Layout() + else: + self.columnHeaderSizer.Detach(0) + self.columnHeader.Destroy() + self.columnHeader = None + self.columnHeaderSizer.AddSpacer(5) + self.columnHeaderSizer.Layout() + self.vSizer.Layout() + + + def isShowByOverview(self): + name = self.__class__.__name__ + mode = self.guiUtility.standardOverview.mode + index = name.find('Grid') + isshown = name[:index] == mode[:index] + return isshown + + def getGridManager(self): + return self.gridManager + + +class filesGrid(standardGrid): + def __init__(self,parent=None): +# columns = 5 +# self.subPanelHeight = 108 # This will be update after first refresh + columns = (5, 1) + subPanelHeight = (5*22, 22) + standardGrid.__init__(self, columns, subPanelHeight, 
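
keyTypedOnGridItem above turns an arrow key into a new (column, row) selection clamped to the grid bounds. The coordinate step in isolation, with the wx key codes replaced by plain strings; since the win32 branch in the patch is short-circuited by the early return above it, only the clamped variant is sketched:

    def move_selection(col, row, key, cols, rows):
        if key == 'up':
            row = max(0, row - 1)
        elif key == 'down':
            row = min(rows - 1, row + 1)
        elif key == 'left':
            col = max(0, col - 1)
        elif key == 'right':
            col = min(cols - 1, col + 1)
        return col, row

    print(move_selection(0, 0, 'left', cols=5, rows=3))   # stays at (0, 0)
    print(move_selection(4, 2, 'down', cols=5, rows=3))   # stays at (4, 2)
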
orientation='vertical',parent=parent,name="filesGrid") + + def getSubPanel(self, keyfun): + return FilesItemPanel(self, keyfun) + +class libraryGrid(standardGrid): + def __init__(self,parent=None): + columns = (1,1) + subPanelHeight = (22, 22) # This will be update after first refresh + standardGrid.__init__(self, columns, subPanelHeight, orientation='vertical', viewmode='list',parent=parent,name="libraryGrid") + + def getSubPanel(self, keyfun): + return LibraryItemPanel(self, keyfun) + diff --git a/tribler-mod/Tribler/Main/vwxGUI/standardGrid.py.bak b/tribler-mod/Tribler/Main/vwxGUI/standardGrid.py.bak new file mode 100644 index 0000000..7d2217d --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/standardGrid.py.bak @@ -0,0 +1,999 @@ +# Written by Jelle Roozenburg, Maarten ten Brinke, Lucan Musat +# see LICENSE.txt for license information + +import sys, wx +from traceback import print_exc +from time import time + +from Tribler.Core.simpledefs import * +from Tribler.Core.Utilities.utilities import * + +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility +from Tribler.Main.vwxGUI.filesItemPanel import FilesItemPanel +from Tribler.Main.vwxGUI.LibraryItemPanel import LibraryItemPanel +from Tribler.Main.vwxGUI.ColumnHeader import ColumnHeaderBar +from Tribler.Main.vwxGUI.SearchGridManager import SEARCHMODE_NONE, SEARCHMODE_SEARCHING +from Tribler.Main.Dialogs.GUITaskQueue import GUITaskQueue +#from Tribler.Subscriptions.rss_client import TorrentFeedThread +from Tribler.Category.Category import Category + +DEBUG = False + + +class GridManager(object): + """ Grid manager handles: + - handling of notifies in grid + - retrieval of data from db on paging events + - retrieval of data from db on state changes from GUI + + """ + def __init__(self, grid, utility): + self.session = utility.session + + self.peer_db = self.session.open_dbhandler(NTFY_PEERS) + self.torrent_db = self.session.open_dbhandler(NTFY_TORRENTS) + self.friend_db = self.session.open_dbhandler(NTFY_FRIENDS) + self.pref_db = self.session.open_dbhandler(NTFY_PREFERENCES) + self.mypref_db = self.session.open_dbhandler(NTFY_MYPREFERENCES) + self.search_db = self.session.open_dbhandler(NTFY_SEARCH) + + self.state = None + self.total_items = 0 + self.page = 0 + self.grid = grid + self.data = [] + self.callbacks_disabled = False + self.download_states_callback_set = False + self.dslist = [] + + self.torrentsearch_manager = utility.guiUtility.torrentsearch_manager + self.torrentsearch_manager.register(self.torrent_db, self.pref_db, self.mypref_db, self.search_db) + + self.peersearch_manager = utility.guiUtility.peersearch_manager + self.peersearch_manager.register(self.peer_db,self.friend_db) + self.guiserver = GUITaskQueue.getInstance() + + # Jie's hacks to avoid DB concurrency, REMOVE ASAP!!!!!!!!!!!! 
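
filesGrid and libraryGrid above differ only in their column/height tuples and in which item panel getSubPanel returns; the base class treats getSubPanel as an abstract factory. A compact sketch of that pattern with string stand-ins for the wx item panels (class and method names here are illustrative):

    class BaseGrid(object):
        def get_sub_panel(self, keyfun=None):
            raise NotImplementedError('get_sub_panel must be overridden')

        def build_row(self, cols):
            # the base class only knows it needs `cols` panels, not which kind
            return [self.get_sub_panel() for _ in range(cols)]

    class FilesGridSketch(BaseGrid):
        def get_sub_panel(self, keyfun=None):
            return 'FilesItemPanel'      # stand-in for the real wx panel

    class LibraryGridSketch(BaseGrid):
        def get_sub_panel(self, keyfun=None):
            return 'LibraryItemPanel'

    print(FilesGridSketch().build_row(3))   # three FilesItemPanel stand-ins
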
+ # ARNOCOMMENT + #self.refresh_rate = 1.5 # how often to refresh the GUI in seconds + + self.cache_numbers = {} + self.cache_ntorrent_interval = 1 + self.cache_npeer_interval = 1 + + def set_state(self, state, reset_page = False): + self.state = state + if reset_page or self.inSearchMode(state): + self.page = 0 + self.refresh(update_observer = True) + + def refresh(self, update_observer = False): + """ + Refresh the data of the grid + """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardGrid: refresh",self.grid.initReady + #print_stack() + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardGrid: refresh",update_observer,"ready",self.grid.initReady,"state",self.state + + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '**********==============********* refresh', self.grid.initReady + #if not self.grid.initReady: + # standardgrid_refresh_lambda = lambda:self.refresh(update_observer=update_observer) + # wx.CallAfter(standardgrid_refresh_lambda) + # return + + if self.state is None: + return + + if update_observer: + self.setObserver() + + self.data, self.total_items = self._getData(self.state) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'GridManager: Data length: %d/%d' % (len(self.data), self.total_items) + self.grid.setData(self.data) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'GridManager: state: %s' % (self.state) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'GridManager: state: gave %d results, out of total %d' % (len(self.data), self.total_items) + #for torrent in self.data: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","GridManager: elem name is",`torrent['name']` + + + def set_page(self, page): + if page != self.page: + self.page = page + self.refresh() + + def get_total_items(self): + return self.total_items + + def get_number_torrents(self, state): + # cache the numbers to avoid loading db, which is a heavy operation + category_name = state.category + + library = (state.db == 'libraryMode') + key = (category_name, library) + now = time() + + if (key not in self.cache_numbers or + now - self.cache_numbers[key][1] > self.cache_ntorrent_interval): + + ntorrents = self.torrent_db.getNumberTorrents(category_name = category_name, library = library) + self.cache_numbers[key] = [ntorrents, now] + #if ntorrents > 1000: + # self.cache_ntorrent_interval = 120 + #elif ntorrents > 100 and self.cache_ntorrent_interval < 30: + # self.cache_ntorrent_interval = 30 + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '***** update get_number_torrents', ntorrents, self.cache_ntorrent_interval, time()-now + + return self.cache_numbers[key][0] + + def get_number_peers(self, state): + # cache the numbers to avoid loading db, which is a heavy operation + category_name = state.category + library = 'peer' + key = (category_name, library) + + if (key not in self.cache_numbers or + time() - self.cache_numbers[key][1] > self.cache_npeer_interval): + + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '*********** get_number_peers', key, self.cache_numbers[key], now - self.last_npeer_cache, self.cache_npeer_interval, self.grid.items + npeers = self.peer_db.getNumberPeers(category_name = category_name) + self.cache_numbers[key] = [npeers, time()] + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '***** update get_number_peers', npeers, 
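
get_number_torrents/get_number_peers below memoise the expensive COUNT queries with a per-key timestamp so the database is hit at most once per interval. The caching rule as a small helper, with the DB call abstracted into a callback; CountCache and count_fn are names invented for this sketch:

    from time import time

    class CountCache(object):
        def __init__(self, interval=1.0):
            self.interval = interval
            self._cache = {}            # key -> [value, timestamp]

        def get(self, key, count_fn):
            entry = self._cache.get(key)
            now = time()
            if entry is None or now - entry[1] > self.interval:
                entry = [count_fn(), now]   # refresh: call the (slow) counter
                self._cache[key] = entry
            return entry[0]

    cache = CountCache(interval=1.0)
    # first call runs the callback; calls within the interval reuse the value
    ntorrents = cache.get(('Video', False), lambda: 42)
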
self.cache_npeer_interval, time()-now + + return self.cache_numbers[key][0] + + def _getData(self, state): + #import threading + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'threading>>','****'*10, threading.currentThread().getName() + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardGrid: _getData: state is",state + + range = (self.page * self.grid.items, (self.page+1)*self.grid.items) + if state.db in ('filesMode', 'libraryMode'): + + # Arno: state.db should be NTFY_ according to GridState... + if state.db == 'libraryMode': + + total_items = self.get_number_torrents(state) # read from cache + sortcol = state.sort + if sortcol == "rameezmetric": + sortcol = "name" + data = self.torrent_db.getTorrents(category_name = state.category, + sort = sortcol, + range = range, + library = (state.db == 'libraryMode'), + reverse = state.reverse) + + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "GridManager: _getData: DB returned",len(data) + + # Arno, 2009-03-10: Not removing a torrent from MyPref when + # deleting from the Library means it keeps showing up there, + # even after removal :-( + # Now if the destdir is empty we don't show it. + # + def myDownloadHistoryFilter(torrent): + return torrent.get('myDownloadHistory', False) and torrent.get('destdir',"") != "" + + data = filter(myDownloadHistoryFilter,data) + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "GridManager: _getData: filter returned",len(data) + + else: + [total_items,data] = self.torrentsearch_manager.getHitsInCategory(state.db,state.category,range,state.sort,state.reverse) + + #if state.db == 'libraryMode': + if data is not None: + data = self.addDownloadStates(data) + elif state.db in ('personsMode', 'friendsMode'): + if state.db == 'friendsMode': + state.category = 'friend' + + if self.peersearch_manager.getSearchMode(state.db) == SEARCHMODE_NONE: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","GET GUI PEERS #################################################################################" + total_items = self.get_number_peers(state) + data = self.peer_db.getGUIPeers(category_name = state.category, + sort = state.sort, + reverse = state.reverse, + range = range, + get_online = True) + else: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SEARCH GUI PEERS $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$" + try: + [total_items,data] = self.peersearch_manager.getHits(state.db,range) + except: + print_exc() + + if state.db == 'friendsMode': + data = self.addCoopDLStatus(data) + + else: + raise Exception('Unknown data db in GridManager: %s' % state.db) + + return data, total_items + + def _last_page(self): + return self.total_items == 0 or (0 < len(self.data) < self.grid.items) + + def setObserver(self): + if self.state.db == 'libraryMode' or self.state.db == 'filesMode': + if not self.download_states_callback_set: + self.download_states_callback_set = True + + def reactivate(self): + # After a grid has been hidden by the standardOverview, network/db callbacks + # are not handled anymore. 
This function is called if a resize event is caught + # if callbacks were disabled, they are enabled again + + if self.callbacks_disabled: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", ('*' * 50 + '\n')*3 + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'Reactivating grid', self.grid.__class__.__name__ + self.callbacks_disabled = False + self.refresh(update_observer = True) + else: + # also refresh normally on resize (otherwise new rows cannot be added + self.refresh() + +# def download_state_network_callback(self, *args): +# """ Called by SessionThread from ABCApp """ +# if self.download_states_callback_set: +# if self.grid.isShowByOverview(): +# wx.CallAfter(self.download_state_gui_callback, *args) +# else: +# self.callbacks_disabled = True +# self.download_states_callback_set = False + + def item_network_callback(self, *args): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardGrid: item_network_callback",`args` + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '***** searchmode: ', self.torrentsearch_manager.getSearchMode(self.state.db) + + # only handle network callbacks when grid is shown + if not self.grid.isShowByOverview(): + self.callbacks_disabled = True + self.session.remove_observer(self.item_network_callback) #unsubscribe this function + else: + # 15/07/08 Boudewijn: only change grid when still searching + #if self.torrentsearch_manager.inSearchMode(self.state.db): # 25/07/08 Jie: it causes GUI never updated when not in search mode + self.itemChanged(*args) + + + def itemChanged(self,subject,changeType,objectID,*args): + "called by GuiThread" + if changeType == NTFY_INSERT: + self.itemAdded(subject, objectID, args) + elif changeType in (NTFY_UPDATE, NTFY_CONNECTION): + self.itemUpdated(subject, objectID, args) + elif changeType == NTFY_DELETE: + self.itemDeleted(subject, objectID, args) + else: + raise Exception('Unknown notify.changeType') + + def itemAdded(self,subject, objectID, args): + #if self._last_page(): # This doesn't work as the pager is not updated if page becomes full + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '******* standard Grid: itemAdded:', objectID, args, 'search?', self.torrentsearch_manager.inSearchMode(self.state.db) + if self.torrentsearch_manager.getSearchMode(self.state.db) == SEARCHMODE_SEARCHING: + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'Grid refresh because search item added!!!=============================' + wx.CallAfter(self.refresh) + elif self.isRelevantItem(subject, objectID): + ##task_id = str(subject) + str(int(time()/self.refresh_rate)) + ##self.guiserver.add_task(lambda:wx.CallAfter(self.refresh), self.refresh_rate, id=task_id) + # that's important to add the task 3 seconds later, to ensure the task will be executed at proper time + self.refresh() + + def itemUpdated(self,subject, objectID, args): + # Both in torrent grid and peergrid, changed items can make new items appear on the screen + # Peers: when first buddycast + # Friends: if just became new friend + # Torrent: when status changes to 'good' + # So we have to alway refresh here + + #if (self._objectOnPage(subject, objectID) + if self.torrentsearch_manager.getSearchMode(self.state.db) == SEARCHMODE_NONE: + ##task_id = str(subject) + str(int(time()/self.refresh_rate)) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardGrid: itemUpdated",subject,`objectID`,`args` + 
##self.guiserver.add_task(lambda:wx.CallAfter(self.refresh), self.refresh_rate, id=task_id) + self.refresh() + + def itemDeleted(self,subject, objectID, args): + if self._objectOnPage(subject, objectID): + ##task_id = str(subject) + str(int(time()/self.refresh_rate)) + ##self.guiserver.add_task(lambda:wx.CallAfter(self.refresh), self.refresh_rate, id=task_id) + self.refresh() + + def download_state_gui_callback(self, dslist): + """ + Called by GUIThread + """ + self.dslist = dslist + if self.state.db == 'libraryMode': + for infohash in [ds.get_download().get_def().get_infohash() for ds in dslist]: + if self._objectOnPage(NTFY_TORRENTS, infohash): + self.refresh() + break + else: + # friendsMode + # self.refresh() + # We don't refresh on filesMode, but do need to add DownloadStates + # for the Play button to work. + self.addDownloadStates(self.data) + + def _objectOnPage(self, subject, objectID): + if subject == NTFY_PEERS: + id_name = 'permid' + elif subject in (NTFY_TORRENTS, NTFY_MYPREFERENCES, NTFY_SUPERPEERS): + id_name = 'infohash' + elif subject in (NTFY_YOUTUBE): + raise Exception('Not yet implemented') + + return objectID in [a[id_name] for a in self.data] + + def isRelevantItem(self, subject, objectID): + return True #Jie: let DB decide if the notifier should be sent + + db_handler = self.session.open_dbhandler(subject) + if subject == NTFY_PEERS: + peer = db_handler.getPeer(objectID) + ok = peer and (peer['last_connected']>0 or peer['friend']) + #if not ok: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'Gridmanager: Peer is not relevant: %s' % peer + return ok + elif subject in (NTFY_TORRENTS): + id_name = 'infohash' + torrent = db_handler.getTorrent(objectID) + ok = torrent is not None and torrent['status'] == 'good' and Category.getInstance().hasActiveCategory(torrent) + #if not ok: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'Gridmanager: Torrent is not relevant: %s' % torrent + return ok + elif subject == NTFY_MYPREFERENCES: + return True + + raise Exception('not yet implemented') + + def addDownloadStates(self, liblist): + # Add downloadstate data to list of torrent dicts + for ds in self.dslist: + infohash = ds.get_download().get_def().get_infohash() + for torrent in liblist: + if torrent['infohash'] == infohash: + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardGrid: addDownloadStates: adding ds for",`ds.get_download().get_def().get_name()` + torrent['ds'] = ds + break + return liblist + + + def addCoopDLStatus(self, liblist): + # Add downloadstate data to list of friend dicts + for ds in self.dslist: + helpers = ds.get_coopdl_helpers() + coordinator = ds.get_coopdl_coordinator() + + for friend in liblist: + if friend['permid'] in helpers: + # Friend is helping us + friend['coopdlstatus'] = u'Helping you with '+ds.get_download().get_def().get_name_as_unicode() + elif friend['permid'] == coordinator: + # Friend is getting help from us + friend['coopdlstatus'] = u'You help with '+ds.get_download().get_def().get_name_as_unicode() + #else: + # friend['coopdlstatus'] = u'Sleeping' + + return liblist + + def inSearchMode(self, state): + if state.db in ('filesMode', 'libraryMode'): + return self.torrentsearch_manager.getSearchMode(state.db) == SEARCHMODE_NONE + elif state.db in ('personsMode', 'friendsMode'): + return self.peersearch_manager.getSearchMode(state.db) == SEARCHMODE_NONE + else: + return False + + + def get_dslist(self): + return self.dslist + +class 
standardGrid(wx.Panel): + """ + Panel which shows a grid with static number of columns and dynamic number + of rows + """ + def __init__(self, cols, subPanelHeight, orientation='horizontal', viewmode = 'list', parent = None, name="standardGrid"): ## + self.data = None + self.detailPanel = None + self.orientation = orientation + self.subPanelClass = None + self.items = 0 #number of items that are currently visible + self.currentRows = 0 + self.sizeMode = 'auto' + self.columnHeader = None + self.topMargin = 5 + self.lastSize = None + self.panels = [] + self.viewmode = viewmode + self.guiUtility = GUIUtility.getInstance() + + self.guiUtility.standardGrid = self + + self.utility = self.guiUtility.utility + self.gridManager = GridManager(self, self.utility) + if not parent: + self.initReady = False + pre = wx.PrePanel() + # the Create step is done by XRC. + self.PostCreate(pre) + self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate) + else: + self.initReady = True + wx.Panel.__init__(self,parent,-1,name=name) + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardGrid: __init__: viewmode is",self.viewmode + + if type(cols) == int: + self.cols = cols + self.columnTypes = None + self.subPanelHeight = subPanelHeight + else: + self.columnTypes = cols + self.subPanelHeights = subPanelHeight + if self.viewmode == 'thumbnails': + self.cols = cols[0] + self.subPanelHeight = self.subPanelHeights[0] + elif self.viewmode == 'list': + self.cols = cols[1] + self.subPanelHeight = self.subPanelHeights[1] + else: + raise Exception('unknown viewmode: %s' % self.viewmode) + + self.superpeer_db = self.utility.session.open_dbhandler(NTFY_SUPERPEERS) + #self.torrentfeed = TorrentFeedThread.getInstance() + self.guiserver = GUITaskQueue.getInstance() + + if parent: + self.SetSize((675,500)) + self._PostInit() + #self.Show() + + def OnCreate(self, event): + self.Unbind(wx.EVT_WINDOW_CREATE) + wx.CallAfter(self._PostInit) + event.Skip() + return True + + + def _PostInit(self): + # Do all init here + + self.SetBackgroundColour(wx.WHITE) + + #self.cols = 5 + + self.Bind(wx.EVT_SIZE, self.onResize) + + self.addComponents() + self.calculateRows() + + if self.viewmode == 'list': + self.toggleColumnHeaders(True) + self.Show() + self.Layout() + self.Refresh() + + self.initReady = True + if self.data: + self.setData(self.data) + + def addComponents(self): + self.Show(False) + + self.SetBackgroundColour(wx.WHITE) + self.vSizer = wx.BoxSizer(wx.VERTICAL) + self.columnHeaderSizer = wx.BoxSizer(wx.HORIZONTAL) + self.columnHeaderSizer.Add((0,self.topMargin)) + self.vSizer.Add(self.columnHeaderSizer, 0, wx.ALL|wx.EXPAND, 0) + self.SetSizer(self.vSizer); + self.SetAutoLayout(1); + #self.Layout(); + #self.Refresh(True) + #self.Update() + #print "vSizer: %s, Panel: %s"% (self.vSizer.GetSize(), self.GetSize()) + + + #def Show(self, s): + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '%s is show(%s)' % (self, s) + #wx.Panel.Show(self, s) + + def onViewModeChange(self, event=None, mode = None): + if not self.initReady: + wx.CallAfter(self.onViewModeChange, event, mode) + return + + if not mode: + if type(event.GetEventObject()) == wx.Choice: + mode = event.GetEventObject().GetStringSelection() + + if self.viewmode != mode: + self.viewmode = mode + #oldcols = self.cols + self.updatePanel(self.currentRows, 0) + if mode == 'thumbnails': + self.cols = self.columnTypes[0] + self.subPanelHeight = self.subPanelHeights[0] + elif mode == 'list': + self.cols = self.columnTypes[1] + self.subPanelHeight = 
self.subPanelHeights[1] + self.currentRows = 0 + + #self.updatePanel(0, self.currentRows) + self.calculateRows() + #self.updateCols(oldcols, self.cols) + self.gridManager.refresh() + self.toggleColumnHeaders(mode == 'list') + + def onSizeChange(self, event=None): + if type(event.GetEventObject()) == wx.Choice: + value = event.GetEventObject().GetStringSelection() + else: + value = event.GetEventObject().GetValue() + + self.sizeMode = value + if value == 'auto': + self.guiUtility.updateSizeOfStandardOverview() + self.SetMinSize((-1, 20)) + else: + try: + wantedRows = int(value) / self.cols + self.SetSize((-1, wantedRows * self.subPanelHeight)) + self.SetMinSize((-1, wantedRows * self.subPanelHeight)) + self.guiUtility.standardOverview.growWithGrid() + self.guiUtility.standardOverview.Refresh() + except: + #print 'Exception!' + + raise + + + def refreshData(self): + self.setData(self.data) + + + def getData(self): + return self.data + + + def setData(self, dataList): + + #if dataList is None: + #datalength = 0 + #else: + #datalength = len(dataList) + + if type(dataList) == list or dataList is None: + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'grid.setData: list' + self.data = dataList + + if not self.initReady: + return + + self.refreshPanels() + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardGrid: start columns:',\ + # self.cols,'rows:',self.currentRows,'items:',self.items + + self.Layout() + + def updateItem(self, item, delete = False, onlyupdate = False): + "Add or update an item in the grid" + + if not item: + return + + # Get key to compare this item to others + key = None + for tempkey in ['infohash', 'permid', 'content_name']: + if item.has_key(tempkey): + key = tempkey + break + if not key: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardGrid: Error, could not find key to compare item: %s' % item + return + #get the current data source + if len(self.data)>0 and self.data[0].has_key("permid"): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","\n*****************************************************\n\ +* big problem *\n\ +* in torrentGrid, working on peer data!!!!! 
*\n\ +* *\n\ +*****************************************************\n" + i = find_content_in_dictlist(self.data, item, key) + if i != -1: + if not delete: + self.data[i] = item + else: + self.data.remove(item) + elif not onlyupdate: + self.data.append(item) + self.refreshData() + + def refreshPanels(self): + "Refresh TorrentPanels with correct data and refresh pagerPanel" + if self.getStandardPager(): + self.standardPager.refresh() + + if self.data is None: + self.clearAllData() + else: + for i in xrange(0, self.items): + if i < len(self.data): + self.setDataOfPanel(i, self.data[i]) + else: + self.setDataOfPanel(i, None) + + self.updateSelection() + + def gridResized(self, rows): + self.items = self.cols * rows + self.refreshPanels() + + + + def getStandardPager(self): + try: + if self.standardPager: + return True + except: + return False + + def setPager(self, pager): + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardGrid: setPager called: %s' % pager + self.standardPager = pager + + def getSubPanel(self, keyfun=None): + raise NotImplementedError('Method getSubPanel should be subclassed') + + def setDataOfPanel(self, panelNumber, data): + #if DEBUG: + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'Set data of panel %d with data: %s' % (panelNumber, data) + try: + if self.orientation == 'vertical': + hSizer = self.vSizer.GetItem(panelNumber%self.currentRows+1).GetSizer() + panel = hSizer.GetItem(panelNumber/ self.currentRows).GetWindow() + else: + hSizer = self.vSizer.GetItem(panelNumber/self.cols+1).GetSizer() + panel = hSizer.GetItem(panelNumber % self.cols).GetWindow() + + panel.setData(data) + except: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardGrid: Error: Could not set data in panel number %d, with %d cols" % (panelNumber, self.cols) + print_exc() + + def clearAllData(self): + for i in range(0, self.items): + self.setDataOfPanel(i, None) + + def onResize(self, event=None): + if self.GetSize() == self.lastSize: + return + self.lastSize = self.GetSize() + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "standardGrid: resize event: %s" % self.GetSize() + self.calculateRows(event) + self.gridManager.reactivate() + if event: + event.Skip() + + + + def calculateRows(self, event=None): + + size = self.GetSize() + oldRows = self.currentRows + if self.columnHeader: + columnHeaderHeight = self.columnHeader.GetSize()[1] + else: + columnHeaderHeight = self.topMargin + + if size[1] < 50 or self.subPanelHeight == 0: + self.currentRows = 0 + self.items = 0 + else: + self.currentRows = (size[1] - columnHeaderHeight - 79) / self.subPanelHeight + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'standardGrid: Height: %d, single panel is %d, so %d rows' % (size[1], self.subPanelHeight, self.currentRows) + self.items = self.cols * self.currentRows + + if oldRows != self.currentRows: #changed + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardGrid: Size updated to %d rows and %d columns, oldrows: %d'% (self.currentRows, self.cols, oldRows) + + self.updatePanel(oldRows, self.currentRows) + self.gridResized(self.currentRows) + + + def updateCols(self, oldCols, newCols): + + self.items = newCols * self.currentRows + if newCols > oldCols: + numNew = newCols - oldCols + for row in xrange(len(self.panels)): + hSizer = self.vSizer.GetItem(row).GetSizer() + for i in xrange(numNew): + dataPanel = 
self.getSubPanel(self.keyTypedOnGridItem) + self.subPanelClass = dataPanel.__class__ + self.panels[row].append(dataPanel) + hSizer.Add(dataPanel, 1, wx.ALIGN_CENTER|wx.ALL|wx.GROW, 0) + elif newCols < oldCols: + numDelete = oldCols - newCols + for row in self.panels: + for i in xrange(numDelete): + panel = row[newCols] + panel.Destroy() + del row[newCols] + + + + def updatePanel(self, oldRows, newRows): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'Grid: updating from %d to %d rows' % (oldRows, newRows) + # put torrent items in grid + + if newRows > oldRows: + for i in range(oldRows, newRows): + hSizer = wx.BoxSizer(wx.HORIZONTAL) + self.panels.append([]) + + for panel in range(0, self.cols): + dataPanel = self.getSubPanel(self.keyTypedOnGridItem) + self.subPanelClass = dataPanel.__class__ + # add keylistener for arrow selection + #dataPanel.Bind(wx.EVT_KEY_UP, self.keyTypedOnGridItem) + self.panels[i].append(dataPanel) + #dataPanel.SetSize((-1, self.subPanelHeight)) + hSizer.Add(dataPanel, 1, wx.ALIGN_CENTER|wx.ALL|wx.GROW, 0) + self.vSizer.Add(hSizer, 0, wx.GROW|wx.ALIGN_CENTER_VERTICAL|wx.ALL, 0) + + elif newRows < oldRows: + #print "Destroying row %d up to %d" % (newRows, oldRows-1) + for row in range(oldRows-1, newRows-1, -1): + # Destroy old panels + for col in range(self.cols-1, -1, -1): #destroy panels right to left + panel = self.panels[row][col] + wx.CallAfter(panel.Destroy) + del self.panels[row][col] + + assert self.panels[row] == [], 'We deleted all panels, still the row is %s' % self.panels[row] + del self.panels[row] + self.vSizer.Detach(row+1) # detach hSizer of the row + # +1 compensated for columnheaders + + + + + + def updateSelection(self): + """Deselect all torrentPanels, but the one selected in detailPanel + If no torrent is selected in detailPanel, let first in grid be selected + """ + + try: + #print 'standardGrid: update selection' + if not self.hasDetailPanel(): + return + +# title = None + + id = self.detailPanel.getIdentifier() + + #print "standardGrid: updateSelection: detailsPanel has id",id,self.detailPanel + + number = 0 + rowIndex = 0 + for row in self.panels: + colIndex = 0 + for pan in row: + try: + panel_id = pan.getIdentifier() + #print "standardGrid: updateSelection: panel has id",`panel_id` + except: + panel_id = None + + if panel_id is None or repr(panel_id) != repr(id): + #print 'item deselected2' + pan.deselect(rowIndex,colIndex)#number = number) + else: + #pan.select(rowIndex,colIndex) + pan.select(rowIndex, + colIndex, + self.standardPager.currentPage, + self.cols, + self.currentRows) + number += 1 + colIndex += 1 + rowIndex += 1 + + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardGrid: QQQQQQQQQQQQQQQQQQQQQQQQQQQ QUERY RESULTS DISPLAYED" + self.Layout() + except: + # I sometimes get UnicodeErrors here somewhere + print_exc() + + + def deselectAll(self): + """Deselect all torrentPanels""" + + try: + #print 'standardGrid: update selection' + if not self.hasDetailPanel(): + return + +# title = None + + id = self.detailPanel.getIdentifier() + + #print "standardGrid: updateSelection: detailsPanel has id",id,self.detailPanel + + number = 0 + rowIndex = 0 + for row in self.panels: + colIndex = 0 + for pan in row: + try: + panel_id = pan.getIdentifier() + #print "standardGrid: updateSelection: panel has id",`panel_id` + except: + panel_id = None + + #if panel_id is None or repr(panel_id) != repr(id): + print >> sys.stderr , 'item deselected2' + pan.deselect(rowIndex,colIndex)#number = 
number) + number += 1 + colIndex += 1 + rowIndex += 1 + self.Layout() + except: + # I sometimes get UnicodeErrors here somewhere + print_exc() + + + + + + + def hasDetailPanel(self): + if self.detailPanel: + return True + try: + self.detailPanel = self.guiUtility.standardDetails + except: + pass + return self.detailPanel is not None + + def keyTypedOnGridItem(self, event): + obj = event.GetEventObject() + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardGrid: keyTyped: in %s' % obj.__class__.__name__ + while obj.__class__ != self.subPanelClass: + obj = obj.GetParent() + + # Jelle: Turn of key navigation under windows. Windows already has a focus traversal policy and changes + # the focus of panel. + if sys.platform == 'win32': + return + + if not obj.selected and sys.platform != 'win32': + return + + keyCode = event.GetKeyCode() + # Get coord of keytyped panel + rowIndex = 0 + xpan = ypan = None + for row in self.panels: + colIndex = 0 + for pan in row: + if obj == pan: + (xpan, ypan) = colIndex, rowIndex + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardGrid: keyTyped: found: %d, %d' % (colIndex, rowIndex) + break + colIndex += 1 + rowIndex += 1 + if xpan == None: + raise Exception('Could not find selected panel') + xpanold = xpan + ypanold = ypan + if sys.platform != 'win32': + if keyCode == wx.WXK_UP: + ypan = max(0, ypan-1) + elif keyCode == wx.WXK_DOWN: + ypan = min(self.currentRows-1, ypan+1) + elif keyCode == wx.WXK_LEFT: + xpan = max(0, xpan -1) + elif keyCode == wx.WXK_RIGHT: + xpan = min(self.cols-1, xpan+1) + else: + if keyCode == wx.WXK_UP: + if xpan == self.cols-1: + xpan = 0 + else: + xpan+=1 + ypan = max(0, ypan-1) + elif keyCode == wx.WXK_DOWN: + if xpan == 0: + xpan = self.cols-1 + else: + xpan = xpan -1 + ypan = min(self.currentRows-1, ypan+1) + # Get data of new panel + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardGrid: Old: %s, New: %s' % ((xpanold, ypanold), (xpan, ypan)) + if xpanold != xpan or ypanold != ypan or sys.platform =='win32': + newpanel = self.panels[ypan][xpan] + if newpanel.data != None: + # select new panel + #newpanel.SetFocus() + self.guiUtility.selectData(newpanel.data) + event.Skip() + + def getFirstPanel(self): + try: + hSizer = self.vSizer.GetItem(1).GetSizer() + panel = hSizer.GetItem(0).GetWindow() + return panel + except: + return None + + def toggleColumnHeaders(self, show): + # show or hide columnheaders + if bool(self.columnHeader) == show: + return + if show: + panel = self.getFirstPanel() + if panel: + self.columnHeader = ColumnHeaderBar(self, panel) + self.columnHeaderSizer.Detach(0) + self.columnHeaderSizer.Add(self.columnHeader, 1, wx.EXPAND, 0) + self.columnHeaderSizer.Layout() + else: + self.columnHeaderSizer.Detach(0) + self.columnHeader.Destroy() + self.columnHeader = None + self.columnHeaderSizer.AddSpacer(5) + self.columnHeaderSizer.Layout() + self.vSizer.Layout() + + + def isShowByOverview(self): + name = self.__class__.__name__ + mode = self.guiUtility.standardOverview.mode + index = name.find('Grid') + isshown = name[:index] == mode[:index] + return isshown + + def getGridManager(self): + return self.gridManager + + +class filesGrid(standardGrid): + def __init__(self,parent=None): +# columns = 5 +# self.subPanelHeight = 108 # This will be update after first refresh + columns = (5, 1) + subPanelHeight = (5*22, 22) + standardGrid.__init__(self, columns, subPanelHeight, 
orientation='vertical',parent=parent,name="filesGrid") + + def getSubPanel(self, keyfun): + return FilesItemPanel(self, keyfun) + +class libraryGrid(standardGrid): + def __init__(self,parent=None): + columns = (1,1) + subPanelHeight = (22, 22) # This will be update after first refresh + standardGrid.__init__(self, columns, subPanelHeight, orientation='vertical', viewmode='list',parent=parent,name="libraryGrid") + + def getSubPanel(self, keyfun): + return LibraryItemPanel(self, keyfun) + diff --git a/tribler-mod/Tribler/Main/vwxGUI/standardOverview.py b/tribler-mod/Tribler/Main/vwxGUI/standardOverview.py new file mode 100644 index 0000000..b748251 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/standardOverview.py @@ -0,0 +1,775 @@ +from time import localtime, strftime +# Written by Jelle Roozenburg, Maarten ten Brinke +# see LICENSE.txt for license information + +import wx, os, sys, os.path +import wx.xrc as xrc +from traceback import print_exc + +from Tribler.Core.simpledefs import * +from Tribler.Core.Utilities.unicode import * +from Tribler.Core.Utilities.utilities import * +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility + +from Tribler.Main.vwxGUI.SearchDetails import SearchDetailsPanel +from Tribler.Main.vwxGUI.LoadingDetails import LoadingDetailsPanel +from Tribler.Main.vwxGUI.standardGrid import filesGrid,libraryGrid +from Tribler.Main.Utility.constants import * +#from Tribler.Subscriptions.rss_client import TorrentFeedThread +from Tribler.Main.Dialogs.GUITaskQueue import GUITaskQueue + +from Tribler.Main.vwxGUI.TriblerStyles import TriblerStyles + + +from Tribler.Core.Utilities.unicode import * + +from time import time + +from font import * + + +OVERVIEW_MODES = ['startpageMode','basicMode', 'statsMode', 'resultsMode', 'filesMode', 'settingsMode', 'personsMode', 'profileMode', 'friendsMode', 'subscriptionsMode', + 'messageMode', 'libraryMode', 'itemdetailsMode', 'fileDetailsMode','playlistMode', 'personDetailsMode', 'playlistMode'] +# font sizes +if sys.platform == 'darwin': + FS_FILETITLE = 10 + FS_SIMILARITY = 10 + FS_HEARTRANK = 8 +elif sys.platform == 'linux2': + FS_FILETITLE = 8 + FS_SIMILARITY = 7 + FS_HEARTRANK = 7 +else: + FS_FILETITLE = 8 + FS_SIMILARITY = 10 + FS_HEARTRANK = 7 + +DEBUG = False + +class standardOverview(wx.Panel): + """ + Panel that shows one of the overview panels + """ + def __init__(self, *args): + self.firewallStatus = None + + if len(args) == 0: + pre = wx.PrePanel() + # the Create step is done by XRC. 
+ self.PostCreate(pre) + self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate) + else: + wx.Panel.__init__(self, *args) + self._PostInit() + + def OnCreate(self, event): +# print 'standardOverview' + self.Unbind(wx.EVT_WINDOW_CREATE) + wx.CallAfter(self._PostInit) + event.Skip() + return True + + def _PostInit(self): + # Do all init here + self.guiUtility = GUIUtility.getInstance() + self.utility = self.guiUtility.utility + self.categorykey = None + + self.triblerStyles = TriblerStyles.getInstance() + + self.search_results = self.guiUtility.frame.top_bg.search_results + self.results = {} + +# self.SetBackgroundColour((255,255,90)) + +# self.Bind(wx.EVT_SIZE, self.standardOverviewResize) + self.mode = None + self.selectedTorrent = None + self.selectedPeer = None + self.data = {} #keeps gui elements for each mode + for mode in OVERVIEW_MODES: + self.data[mode] = {} #each mode has a dictionary of gui elements with name and reference + self.currentPanel = None + self.addComponents() + #self.Refresh() + +# self.guiUtility.frame.Bind(wx.EVT_SIZE, self.standardOverviewResize()) +# self.Bind(wx.EVT_SIZE, self.standardOverviewResize) + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardOverview: __init__: Setting GUIUtil" + self.guiUtility.initStandardOverview(self) # show file panel + #self.toggleLoadingDetailsPanel(True) + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '[StartUpDebug]----------- standardOverview is in postinit ----------', currentThread().getName(), '\n\n' + + + def addComponents(self): + self.hSizer = wx.BoxSizer(wx.VERTICAL) + self.SetSizer(self.hSizer) + self.SetAutoLayout(1) + self.Layout() + + + def standardOverviewResize(self, event=None): +# self.SetAutoLayout(0) +# self.SetSize((-1,(self.guiUtility.frame.GetSize()[1]-200))) +# self.SetWindowStyleFlag(wx) +# self.Layout() +# self.currentPanel.SetSize((-1, (self.GetSize()[1]-250))) +# print 'tb > standardOverviewResize Resize' +# print self.currentPanel.GetSize() +# self.SetSize((-1, 1000)) +# + +# print self.GetSize() + if event: + event.Skip() + + self.SetAutoLayout(1) + self.Layout() + + def setMode(self, mode, refreshGrid=True): + # switch to another view, + # mode is one of the [filesMode, personsMode, friendsMode, profileMode, libraryMode, subscriptionsMode] + if self.mode != mode or mode == 'fileDetailsMode' or mode == 'playlistMode': + #self.stopWeb2Search() + self.mode = mode + self.refreshMode(refreshGrid=refreshGrid) + + def getMode(self): + return self.mode + + self.guiUtility.filterStandard.SetData(self.mode) + + def refreshMode(self,refreshGrid=True): + # load xrc + self.oldpanel = self.currentPanel + + self.currentPanel = self.loadPanel() + + #print >> sys.stderr , 'standardOverview: self.oldpanel' , self.oldpanel + #print >> sys.stderr , 'standardOverview: self.currentPanel' , self.currentPanel + + + assert self.currentPanel, "standardOverview: Panel could not be loaded" + #self.currentPanel.GetSizer().Layout() + #self.currentPanel.Enable(True) + self.currentPanel.Show(True) + if self.data[self.mode].get('grid') and refreshGrid: + self.data[self.mode]['grid'].gridManager.reactivate() + + if self.oldpanel and self.oldpanel != self.currentPanel: + self.hSizer.Detach(self.oldpanel) + self.oldpanel.Hide() + #self.oldpanel.Disable() + + assert len(self.hSizer.GetChildren()) == 0, 'Error: standardOverview self.hSizer has old-panel and gets new panel added (2 panel bug). 
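
setMode/refreshMode above keep one panel/grid/pager record per overview mode in self.data and rebuild only when the mode actually changes (detail modes are always refreshed). The switching rule in isolation, with load_panel standing in for the XRC loading done in loadPanel; the class name is invented for this sketch:

    class OverviewSwitcher(object):
        def __init__(self, load_panel):
            self.load_panel = load_panel
            self.mode = None
            self.data = {}               # mode -> {'panel': ..., 'grid': ..., 'pager': ...}

        def set_mode(self, mode):
            # fileDetailsMode / playlistMode are rebuilt on every visit, as in setMode()
            if self.mode == mode and mode not in ('fileDetailsMode', 'playlistMode'):
                return self.data[mode]['panel']
            self.mode = mode
            record = self.data.setdefault(mode, {})
            if 'panel' not in record:
                record['panel'] = self.load_panel(mode)   # built once, then cached
            return record['panel']
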
Old panels are: %s' % self.hSizer.GetChildren() + + #if self.oldpanel != self.currentPanel: + # self.hSizer.Add(self.currentPanel, 1, wx.ALL|wx.EXPAND, 0) + + nameCP = self.currentPanel.GetName() + if nameCP == 'profileOverview': + sizeCP = self.currentPanel.GetSize() + sizeFrame = self.Parent.GetSize() + + heightCP = max(sizeCP[1], sizeFrame[1]) +# print 'heightCP = %s' % heightCP + self.SetSize((-1, heightCP)) + self.SetMinSize((500,sizeCP[1])) + elif nameCP == 'settingsOverview': + self.SetMinSize((900,500)) + elif nameCP == 'libraryOverview': + self.SetMinSize((600,490)) # 480 + else: # filesOverview + self.SetMinSize((600,490)) # 476 + + self.hSizer.Layout() + + + wx.CallAfter(self.Parent.Layout) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'standardOverview: refreshMode: %s' % self.currentPanel.__class__.__name__ + wx.CallAfter(self.hSizer.Layout) + wx.CallAfter(self.currentPanel.Layout) + wx.CallAfter(self.currentPanel.Refresh) + + wx.CallAfter(self.guiUtility.scrollWindow.FitInside) +# self.guiUtility.scrollWindow.FitInside() + + def setPager(self, pager): ## added + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardOverview: setPager called: %s' % pager + self.standardPager = pager + + + def onReachable(self,event=None): + """ Called by GUI thread """ + if self.firewallStatus is not None and self.firewallStatusText.GetLabel() != 'Restart Tribler': + self.firewallStatus.setSelected(2) + self.firewallStatusText.SetLabel('Port is working') + tt = self.firewallStatus.GetToolTip() + if tt is not None: + tt.SetTip(self.utility.lang.get('reachable_tooltip')) + + + # change port number in settings panel + def OnPortChange(self, event): + keycode = event.GetKeyCode() + + if keycode == wx.WXK_RETURN: + self.utility.config.Write('minport', self.portValue.GetValue()) + self.utility.config.Flush() + self.guiUtility.set_port_number(self.portValue.GetValue()) + self.guiUtility.set_firewall_restart(True) + self.guiserver = GUITaskQueue.getInstance() + self.guiserver.add_task(lambda:wx.CallAfter(self.show_message), 0.0) + self.firewallStatus.setSelected(1) + self.firewallStatusText.SetLabel('Restart Tribler') + tt = self.firewallStatus.GetToolTip() + if tt is not None: + tt.SetTip(self.utility.lang.get('restart_tooltip')) + + + self.updateSaveIcon() + + else: + event.Skip() + + + + def updateFirewall(self): + if self.firewallStatus is not None: + if self.guiUtility.firewall_restart: + self.firewallStatus.setSelected(1) + self.firewallStatusText.SetLabel('Restart Tribler') + elif self.guiUtility.isReachable(): + self.firewallStatus.setSelected(2) + self.firewallStatusText.SetLabel('Port is working') + else: + self.firewallStatus.setSelected(1) + self.firewallStatusText.SetLabel('Connecting ...') + + + + + def show_message(self): + self.portChange.SetLabel('Your changes will occur \nthe next time you restart \nTribler.') + self.guiserver.add_task(lambda:wx.CallAfter(self.hide_message), 3.0) + + + def hide_message(self): + self.portChange.SetLabel('') + + + + + def updateSaveIcon(self): + self.guiserver = GUITaskQueue.getInstance() + self.guiserver.add_task(lambda:wx.CallAfter(self.showSaveIcon), 0.0) + + + def showSaveIcon(self): + wx.CallAfter(self.iconSaved.Show(True)) + sizer = self.iconSaved.GetContainingSizer() + sizer.Layout() + self.guiserver.add_task(lambda:wx.CallAfter(self.hideSaveIcon), 3.0) + + + def hideSaveIcon(self): + self.iconSaved.Show(False) + + + + + def loadPanel(self): + currentPanel = 
self.data[self.mode].get('panel',None) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'standardOverview: currentPanel' , currentPanel + modeString = self.mode[:-4] + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'standardOverview: modestring' , modeString + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardOverview: loadPanel: modeString='+modeString,'currentPanel:',currentPanel + + pager = xrc.XRCCTRL(self.guiUtility.frame, 'standardPager') # Jie:not really used for profile, rss and library? + if modeString == "startpage": + # If we don't set size to 0,0, it will show on Linux + currentPanel = wx.Panel(self,-1,size=(0,0)) + pager = None + grid = currentPanel + elif modeString == "files": # AKA search results page + currentPanel = filesGrid(parent=self) + grid = currentPanel + elif modeString == "library": + currentPanel = libraryGrid(parent=self) + grid = currentPanel + elif modeString == "settings": + xrcResource = os.path.join(self.guiUtility.vwxGUI_path, modeString+'Overview.xrc') + panelName = modeString+'Overview' + res = xrc.XmlResource(xrcResource) + currentPanel = res.LoadPanel(self, panelName) + grid = xrc.XRCCTRL(currentPanel, modeString+'Grid') + + self.data[self.mode]['panel'] = currentPanel + if modeString != "startpage": + self.data[self.mode]['grid'] = grid + self.data[self.mode]['pager'] = pager + + if pager is not None: + pager.setGrid(grid) + + if self.mode == 'settingsMode': + self.firewallStatus = xrc.XRCCTRL(currentPanel,'firewallStatus') + self.firewallStatusText = xrc.XRCCTRL(currentPanel,'firewallStatusText') + self.portValue = xrc.XRCCTRL(currentPanel,'firewallValue') + # self.portValue.Bind(wx.EVT_KEY_DOWN,self.OnPortChange) + self.portChange = xrc.XRCCTRL(currentPanel, 'portChange') + self.iconSaved = xrc.XRCCTRL(currentPanel, 'iconSaved') + wx.CallAfter(self.updateFirewall) + + + + + ## if self.guiUtility.isReachable(): + ## self.firewallStatus.setToggled(True) + ## self.firewallStatus.Refresh() + ## print >> sys.stderr , "OK" + ## else: + ## self.firewallStatus.setToggled(False) + ## self.Refresh() + + + + # create the panel for the first click. panel could be one of the [file,person,friend,library,profile,rss] + if not currentPanel: + #xrcResource = os.path.join(self.guiUtility.vwxGUI_path, modeString+'Overview.xrc') + #panelName = modeString+'Overview' + try: + #currentPanel = grid = pager = None + #res = xrc.XmlResource(xrcResource) + # create panel + #currentPanel = res.LoadPanel(self, panelName) + #grid = xrc.XRCCTRL(currentPanel, modeString+'Grid') + #pager = xrc.XRCCTRL(self.guiUtility.frame, 'standardPager') # Jie:not really used for profile, rss and library? 
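+                # Modes without a dedicated panel class above fall back to the generic
+                # dummyOverview.xrc layout that is loaded further down in this block.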
+ search = xrc.XRCCTRL(currentPanel, 'searchField') + filter = xrc.XRCCTRL(currentPanel, modeString+'Filter') + if not currentPanel: + raise Exception('standardOverview: Could not find panel, grid or pager') + #load dummy panel + dummyFile = os.path.join(self.guiUtility.vwxGUI_path, 'dummyOverview.xrc') + dummy_res = xrc.XmlResource(dummyFile) + currentPanel = dummy_res.LoadPanel(self, 'dummyOverview') + grid = xrc.XRCCTRL(currentPanel, 'dummyGrid') + pager = xrc.XRCCTRL(currentPanel, 'standardPager') + if not currentPanel: # or not grid or not pager: + raise Exception('standardOverview: Could not find panel, grid or pager') + + # Save paneldata in self.data + self.data[self.mode]['panel'] = currentPanel + #self.data[self.mode]['grid'] = grid + #self.data[self.mode]['pager'] = pager + self.data[self.mode]['search'] = search + self.data[self.mode]['filter'] = filter + + #search.Bind(wx.EVT_COMMAND_TEXT_ENTER, self.OnSearchKeyDown) + if search is not None: + search.Bind(wx.EVT_KEY_DOWN, self.guiUtility.OnSearchKeyDown) + if modeString == "files": + web2on = self.utility.config.Read('enableweb2search',"boolean") + if web2on: + txt = self.utility.lang.get('filesdefaultsearchweb2txt') + else: + txt = self.utility.lang.get('filesdefaultsearchtxt') + search.SetValue(txt) + search.Bind(wx.EVT_MOUSE_EVENTS, self.guiUtility.OnSearchMouseAction) + + if pager is not None: + pager.setGrid(grid) + + if self.mode in ['filesMode', 'personsMode']: + print '' + +# print 'self.mode = %s' % self.mode +# print currentPanel +## self.standardOverview.data['filesMode'].get('grid') +## currentViewMode = currentPanel.grid.viewmode +## currentPanel.viewModeSelect = xrc.XRCCTRL(currentPanel, 'modeItems') +### overviewSizeSelect = xrc.XRCCTRL(currentPanel, 'numberItems') + # set default values + +# self.mode.viewModeSelect = viewModeSelect + +# currentPanel.viewModeSelect.Select(1) #SetValue('thumbnails') + ##overviewSizeSelect.Select(0) #SetValue('auto') + #viewModeSelect.Bind(wx.EVT_COMBOBOX, grid.onViewModeChange) +# currentPanel.viewModeSelect.Bind(wx.EVT_CHOICE, grid.onViewModeChange(mode = 'filesMode')) + #overviewSizeSelect.Bind(wx.EVT_COMBOBOX, grid.onSizeChange) + ##overviewSizeSelect.Bind(wx.EVT_CHOICE, grid.onSizeChange) + + + + + if self.mode == 'subscriptionsMode': + rssurlctrl = xrc.XRCCTRL(currentPanel,'pasteUrl') + rssurlctrl.Bind(wx.EVT_KEY_DOWN, self.guiUtility.OnSubscribeKeyDown) + rssurlctrl.Bind(wx.EVT_LEFT_UP, self.guiUtility.OnSubscribeMouseAction) + txt = self.utility.lang.get('rssurldefaulttxt') + rssurlctrl.SetValue(txt) + self.data[self.mode]['rssurlctrl'] = rssurlctrl + + + except: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardOverview: Error: Could not load panel, grid and pager for mode %s' % self.mode + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardOverview: Tried panel: %s=%s, grid: %s=%s, pager: %s=%s' % (panelName, currentPanel, modeString+'Grid', grid, 'standardPager', pager) + print_exc() + + + if self.mode in ['filesMode', 'personsMode']: + grid = self.data[self.mode].get('grid') + if self.guiUtility.gridViewMode != grid.viewmode : + grid.onViewModeChange(mode=self.guiUtility.gridViewMode) + + + + if self.mode == 'fileDetailsMode': + print 'tb > fileDetailsMode' + self.data[self.mode]['panel'].setData(self.selectedTorrent) + + if self.mode == 'playlistMode': + print 'tb > playlistMode' + self.data[self.mode]['panel'].setData(self.selectedTorrent) + + if self.mode == 'personDetailsMode': + 
self.data[self.mode]['panel'].setData(self.selectedPeer) + + return currentPanel + + def refreshData(self): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardOverview: refreshData" + #print_stack() + + grid = self.data[self.mode].get('grid') + if grid: + + if DEBUG: + data = self.data[self.mode].get('data') + if type(data) == list: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardOverview: refreshData: refreshing",len(data) + + # load and show the data in the grid + grid.setData(self.data[self.mode].get('data')) + + def refreshGridManager(self): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardOverview: refreshGridManager" + #print_stack() + + try: + grid = self.data[self.mode].get('grid') + if grid: + gridmgr = grid.getGridManager().refresh() + except: + print_exc() + + def updateSelection(self): + grid = self.data[self.mode].get('grid') + if grid: + grid.updateSelection() + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardOverview: Could not update selection: No grid' + + + def getFirstItem(self): + data = self.data[self.mode].get('data') + if data and len(data) > 0: + return data[0] + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardOverview: Error, could not return firstItem, data=%s' % data + return None + + + + def filterChanged(self, filterState): + """ filterState is GridState object """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardOverview: filterChanged",filterState,self.mode#,self.data[self.mode] + + assert filterState is None or 'GridState' in str(type(filterState)), 'filterState is %s' % str(filterState) + oldFilterState = self.data[self.mode].get('filterState') + +# print 'tb >FILTERCHANGED!!!!!' 
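+        # Fill in whatever the new GridState leaves unset from the previous state,
+        # then hand it to the grid manager (or reload the subscription list) once valid.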
+ if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardOverview: filterChanged: from",oldFilterState,"to",filterState + + if filterState: + filterState.setDefault(oldFilterState) + + #if filterState.db == 'libraryMode': + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'standardOverview: ********************** VALID LIBRARY Filterstate:', filterState + + if filterState and filterState.isValid(): + if self.mode in ('filesMode', 'personsMode', 'libraryMode', 'friendsMode','settingsMode'): + #self.loadTorrentData(filterState[0], filterState[1]) + self.data[filterState.db]['grid'].gridManager.set_state(filterState) + elif self.mode in ('subscriptionsMode'): + self.loadSubscriptionData() + self.refreshData() +# if DEBUG: +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'standardOverview: filterstate: %s' % filterState + + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardOverview: Filters not yet implemented in this mode' + return + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardOverview: before refreshData" + + + + #self.refreshData() + self.data[self.mode]['filterState'] = filterState + + + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'standardOverview: Invalid Filterstate:', filterState + #print_stack() + + """ + def loadSubscriptionData(self): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'load subscription data' + + torrentfeed = TorrentFeedThread.getInstance() + urls = torrentfeed.getURLs() + + bcsub = self.utility.lang.get('buddycastsubscription') + web2sub = self.utility.lang.get('web2subscription') + + bcactive = self.utility.session.get_buddycast() and self.utility.session.get_start_recommender() + bcstatus = 'inactive' + if bcactive: + bcstatus = 'active' + web2active = self.utility.config.Read('enableweb2search', "boolean") + web2status = 'inactive' + if web2active: + web2status = 'active' + + reclist = [] + record = {'url':bcsub,'status':bcstatus,'persistent':'BC'} + reclist.append(record) + record = {'url':web2sub,'status':web2status,'persistent':'Web2.0'} + reclist.append(record) + for url in urls: + record = {} + record['url'] = url + record['status'] = urls[url] + reclist.append(record) + self.data[self.mode]['data'] = reclist + self.data[self.mode]['grid'].setData(reclist) + """ + + + + def getSearchField(self,mode=None): + if mode is None: + mode = self.mode + return self.data[mode]['search'] + + def getGrid(self): + return self.data.get(self.mode, {}).get('grid') + + + def getSorting(self): + fs = self.data[self.mode].get('filterState') + if fs: + return fs.sort + else: + return None + + def getFilter(self): + return self.data[self.mode]['filter'] + + def getPager(self): + return self.data[self.mode]['pager'] + + def getRSSUrlCtrl(self): + return self.data[self.mode]['rssurlctrl'] + + def gridIsAutoResizing(self): + return self.getGrid().sizeMode == 'auto' + + def setSearchFeedback(self,*args,**kwargs): + """ May be called by web2.0 thread """ + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardOverview: setSearchFeedback',args,kwargs + setSearchFeedback_lambda = lambda:self._setSearchFeedback(*args,**kwargs) + wx.CallAfter(setSearchFeedback_lambda) + + def getSearchBusy(self): + searchDetailsPanel = self.data[self.mode].get('searchDetailsPanel') + if searchDetailsPanel: + return searchDetailsPanel.searchBusy + else: 
+ return False + def _setSearchFeedback(self, type, finished, num, keywords = [], searchresults = None): + #print 'standardOverview: _setSearchFeedback called by', currentThread().getName() + + self.setMessage(type, finished, num, keywords) + + + ##searchDetailsPanel = self.data[self.mode].get('searchDetailsPanel') + ##if searchDetailsPanel: + ## searchDetailsPanel.setMessage(type, finished, num, searchresults, keywords) + + def setMessage(self, stype, finished, num, keywords = []): + if stype: + self.results[stype] = num # FIXME different remote search overwrite eachother + + total = sum([v for v in self.results.values() if v != -1]) + + wx.CallAfter(self.guiUtility.frame.standardPager.Show,(total > 0)) + self.guiUtility.frame.pagerPanel.Refresh() + if keywords: + if type(keywords) == list: + self.keywords = " ".join(keywords) + else: + self.keywords = keywords + + if finished: + msg = self.guiUtility.utility.lang.get('finished_search') % (self.keywords, total) + self.guiUtility.stopSearch() + #self.searchFinished(set_message=False) + else: + msg = self.guiUtility.utility.lang.get('going_search') % (total) + + #self.search_results.SetFont(wx.Font(FS_FILETITLE,FONTFAMILY,FONTWEIGHT,wx.NORMAL,True,FONTFACE)) + if self.mode not in ['filesMode']: + self.search_results.SetForegroundColour(wx.RED) + else: + self.search_results.SetForegroundColour(wx.BLACK) + + if self.mode in ['filesMode']: + if sys.platform == 'win32': + self.search_results.SetText(msg) + self.guiUtility.frame.top_bg.Refresh() + else: + #self.search_results.Refresh(eraseBackground=True) + self.search_results.SetLabel(msg) + else: + if sys.platform == 'win32': + self.search_results.SetText('Return to Results') + else: + self.search_results.SetLabel('Return to Results') + + def growWithGrid(self): + gridHeight = self.data[self.mode]['grid'].GetSize()[1] + pagerHeight = 29 + filterHeight = 21 + 8+ self.data[self.mode]['filter'].GetSize()[1] + + newSize = (-1, gridHeight + pagerHeight + filterHeight) + self.SetSize(newSize) + self.SetMinSize(newSize) + self.GetSizer().Layout() + self.GetContainingSizer().Layout() + self.guiUtility.scrollWindow.FitInside() + self.guiUtility.refreshOnResize() + + def removeTorrentFromLibrary(self, torrent): + infohash = torrent['infohash'] + + # Johan, 2009-03-05: we need long download histories for good + # semantic clustering. + + mypreference_db = self.utility.session.open_dbhandler(NTFY_MYPREFERENCES) + # Arno, 2009-03-10: Not removing it from MyPref means it keeps showing + # up in the Library, even after removal :-( H4x0r this. + #mypreference_db.deletePreference(infohash) + mypreference_db.updateDestDir(infohash,"") + + # BuddyCast is now notified of this removal from our + # preferences via the Notifier mechanism. 
See BC.sesscb_ntfy_myprefs() + + grid = self.getGrid() + if grid is not None: + gridmgr = grid.getGridManager() + if gridmgr is not None: + gridmgr.refresh() + + + def toggleLoadingDetailsPanel(self, visible): + loadingDetails = self.data[self.mode].get('loadingDetailsPanel') + sizer = self.data[self.mode]['grid'].GetContainingSizer() + if visible: + if not loadingDetails: + loadingDetails = LoadingDetailsPanel(self.data[self.mode]['panel']) + + sizer.Insert(3,loadingDetails, 0, wx.ALL|wx.EXPAND, 0) + self.data[self.mode]['loadingDetailsPanel'] = loadingDetails + loadingDetails.Show() + else: + loadingDetails.startSearch() + loadingDetails.Show() + + else: + if loadingDetails: + #print 'standardOverview: removing loading details' + sizer.Detach(loadingDetails) + loadingDetails.Destroy() + self.data[self.mode]['loadingDetailsPanel'] = None + sizer.Layout() + self.data[self.mode]['panel'].Refresh() + self.hSizer.Layout() + + def setLoadingCount(self,count): + loadingDetails = self.data[self.mode].get('loadingDetailsPanel') + if not loadingDetails: + return + loadingDetails.setMessage('loaded '+str(count)+' more files from database (not yet shown)') + + + def toggleSearchDetailsPanel(self, visible): + searchDetails = self.data[self.mode].get('searchDetailsPanel') + sizer = self.data[self.mode]['grid'].GetContainingSizer() + #print 'standardOverview: Sizer: %s' % sizer + #print 'standardOverview: SearchDetails: %s' % searchDetails + #if searchDetails: + # print 'standardOverview: %s, %s' % (str(searchDetails.GetSize()), str(searchDetails.GetMinSize())) + + if visible: + if not searchDetails: + searchDetails = SearchDetailsPanel(self.data[self.mode]['panel']) + + #print 'standardOverview: Inserting search details' + sizer.Insert(3,searchDetails, 0, wx.ALL|wx.EXPAND, 0) + #sizer.Layout() + #self.data[self.mode]['panel'].Refresh() +# print 'Size: %s' % str(self.searchDetails.GetSize()) +# print 'Parent: %s' % str(self.searchDetails.GetParent().GetName()) +# print 'GParent: %s' % str(self.searchDetails.GetParent().GetParent().GetName()) + self.data[self.mode]['searchDetailsPanel'] = searchDetails + searchDetails.Show() + else: + searchDetails.startSearch() + searchDetails.Show() + + else: + if searchDetails: + #print 'standardOverview: removing search details' + sizer.Detach(searchDetails) + searchDetails.Destroy() + self.data[self.mode]['searchDetailsPanel'] = None + sizer.Layout() + self.data[self.mode]['panel'].Refresh() + self.hSizer.Layout() + + + def stopWeb2Search(self): + grid = self.getGrid() + if grid: + grid.stopWeb2Search() + + + + diff --git a/tribler-mod/Tribler/Main/vwxGUI/standardOverview.py.bak b/tribler-mod/Tribler/Main/vwxGUI/standardOverview.py.bak new file mode 100644 index 0000000..0831bc4 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/standardOverview.py.bak @@ -0,0 +1,774 @@ +# Written by Jelle Roozenburg, Maarten ten Brinke +# see LICENSE.txt for license information + +import wx, os, sys, os.path +import wx.xrc as xrc +from traceback import print_exc + +from Tribler.Core.simpledefs import * +from Tribler.Core.Utilities.unicode import * +from Tribler.Core.Utilities.utilities import * +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility + +from Tribler.Main.vwxGUI.SearchDetails import SearchDetailsPanel +from Tribler.Main.vwxGUI.LoadingDetails import LoadingDetailsPanel +from Tribler.Main.vwxGUI.standardGrid import filesGrid,libraryGrid +from Tribler.Main.Utility.constants import * +#from Tribler.Subscriptions.rss_client import TorrentFeedThread +from 
Tribler.Main.Dialogs.GUITaskQueue import GUITaskQueue + +from Tribler.Main.vwxGUI.TriblerStyles import TriblerStyles + + +from Tribler.Core.Utilities.unicode import * + +from time import time + +from font import * + + +OVERVIEW_MODES = ['startpageMode','basicMode', 'statsMode', 'resultsMode', 'filesMode', 'settingsMode', 'personsMode', 'profileMode', 'friendsMode', 'subscriptionsMode', + 'messageMode', 'libraryMode', 'itemdetailsMode', 'fileDetailsMode','playlistMode', 'personDetailsMode', 'playlistMode'] +# font sizes +if sys.platform == 'darwin': + FS_FILETITLE = 10 + FS_SIMILARITY = 10 + FS_HEARTRANK = 8 +elif sys.platform == 'linux2': + FS_FILETITLE = 8 + FS_SIMILARITY = 7 + FS_HEARTRANK = 7 +else: + FS_FILETITLE = 8 + FS_SIMILARITY = 10 + FS_HEARTRANK = 7 + +DEBUG = False + +class standardOverview(wx.Panel): + """ + Panel that shows one of the overview panels + """ + def __init__(self, *args): + self.firewallStatus = None + + if len(args) == 0: + pre = wx.PrePanel() + # the Create step is done by XRC. + self.PostCreate(pre) + self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate) + else: + wx.Panel.__init__(self, *args) + self._PostInit() + + def OnCreate(self, event): +# print 'standardOverview' + self.Unbind(wx.EVT_WINDOW_CREATE) + wx.CallAfter(self._PostInit) + event.Skip() + return True + + def _PostInit(self): + # Do all init here + self.guiUtility = GUIUtility.getInstance() + self.utility = self.guiUtility.utility + self.categorykey = None + + self.triblerStyles = TriblerStyles.getInstance() + + self.search_results = self.guiUtility.frame.top_bg.search_results + self.results = {} + +# self.SetBackgroundColour((255,255,90)) + +# self.Bind(wx.EVT_SIZE, self.standardOverviewResize) + self.mode = None + self.selectedTorrent = None + self.selectedPeer = None + self.data = {} #keeps gui elements for each mode + for mode in OVERVIEW_MODES: + self.data[mode] = {} #each mode has a dictionary of gui elements with name and reference + self.currentPanel = None + self.addComponents() + #self.Refresh() + +# self.guiUtility.frame.Bind(wx.EVT_SIZE, self.standardOverviewResize()) +# self.Bind(wx.EVT_SIZE, self.standardOverviewResize) + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardOverview: __init__: Setting GUIUtil" + self.guiUtility.initStandardOverview(self) # show file panel + #self.toggleLoadingDetailsPanel(True) + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", '[StartUpDebug]----------- standardOverview is in postinit ----------', currentThread().getName(), '\n\n' + + + def addComponents(self): + self.hSizer = wx.BoxSizer(wx.VERTICAL) + self.SetSizer(self.hSizer) + self.SetAutoLayout(1) + self.Layout() + + + def standardOverviewResize(self, event=None): +# self.SetAutoLayout(0) +# self.SetSize((-1,(self.guiUtility.frame.GetSize()[1]-200))) +# self.SetWindowStyleFlag(wx) +# self.Layout() +# self.currentPanel.SetSize((-1, (self.GetSize()[1]-250))) +# print 'tb > standardOverviewResize Resize' +# print self.currentPanel.GetSize() +# self.SetSize((-1, 1000)) +# + +# print self.GetSize() + if event: + event.Skip() + + self.SetAutoLayout(1) + self.Layout() + + def setMode(self, mode, refreshGrid=True): + # switch to another view, + # mode is one of the [filesMode, personsMode, friendsMode, profileMode, libraryMode, subscriptionsMode] + if self.mode != mode or mode == 'fileDetailsMode' or mode == 'playlistMode': + #self.stopWeb2Search() + self.mode = mode + self.refreshMode(refreshGrid=refreshGrid) + + def getMode(self): + return self.mode + + 
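+        # NOTE: the SetData call below sits after the return above and is never reached.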
self.guiUtility.filterStandard.SetData(self.mode) + + def refreshMode(self,refreshGrid=True): + # load xrc + self.oldpanel = self.currentPanel + + self.currentPanel = self.loadPanel() + + #print >> sys.stderr , 'standardOverview: self.oldpanel' , self.oldpanel + #print >> sys.stderr , 'standardOverview: self.currentPanel' , self.currentPanel + + + assert self.currentPanel, "standardOverview: Panel could not be loaded" + #self.currentPanel.GetSizer().Layout() + #self.currentPanel.Enable(True) + self.currentPanel.Show(True) + if self.data[self.mode].get('grid') and refreshGrid: + self.data[self.mode]['grid'].gridManager.reactivate() + + if self.oldpanel and self.oldpanel != self.currentPanel: + self.hSizer.Detach(self.oldpanel) + self.oldpanel.Hide() + #self.oldpanel.Disable() + + assert len(self.hSizer.GetChildren()) == 0, 'Error: standardOverview self.hSizer has old-panel and gets new panel added (2 panel bug). Old panels are: %s' % self.hSizer.GetChildren() + + #if self.oldpanel != self.currentPanel: + # self.hSizer.Add(self.currentPanel, 1, wx.ALL|wx.EXPAND, 0) + + nameCP = self.currentPanel.GetName() + if nameCP == 'profileOverview': + sizeCP = self.currentPanel.GetSize() + sizeFrame = self.Parent.GetSize() + + heightCP = max(sizeCP[1], sizeFrame[1]) +# print 'heightCP = %s' % heightCP + self.SetSize((-1, heightCP)) + self.SetMinSize((500,sizeCP[1])) + elif nameCP == 'settingsOverview': + self.SetMinSize((900,500)) + elif nameCP == 'libraryOverview': + self.SetMinSize((600,490)) # 480 + else: # filesOverview + self.SetMinSize((600,490)) # 476 + + self.hSizer.Layout() + + + wx.CallAfter(self.Parent.Layout) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'standardOverview: refreshMode: %s' % self.currentPanel.__class__.__name__ + wx.CallAfter(self.hSizer.Layout) + wx.CallAfter(self.currentPanel.Layout) + wx.CallAfter(self.currentPanel.Refresh) + + wx.CallAfter(self.guiUtility.scrollWindow.FitInside) +# self.guiUtility.scrollWindow.FitInside() + + def setPager(self, pager): ## added + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardOverview: setPager called: %s' % pager + self.standardPager = pager + + + def onReachable(self,event=None): + """ Called by GUI thread """ + if self.firewallStatus is not None and self.firewallStatusText.GetLabel() != 'Restart Tribler': + self.firewallStatus.setSelected(2) + self.firewallStatusText.SetLabel('Port is working') + tt = self.firewallStatus.GetToolTip() + if tt is not None: + tt.SetTip(self.utility.lang.get('reachable_tooltip')) + + + # change port number in settings panel + def OnPortChange(self, event): + keycode = event.GetKeyCode() + + if keycode == wx.WXK_RETURN: + self.utility.config.Write('minport', self.portValue.GetValue()) + self.utility.config.Flush() + self.guiUtility.set_port_number(self.portValue.GetValue()) + self.guiUtility.set_firewall_restart(True) + self.guiserver = GUITaskQueue.getInstance() + self.guiserver.add_task(lambda:wx.CallAfter(self.show_message), 0.0) + self.firewallStatus.setSelected(1) + self.firewallStatusText.SetLabel('Restart Tribler') + tt = self.firewallStatus.GetToolTip() + if tt is not None: + tt.SetTip(self.utility.lang.get('restart_tooltip')) + + + self.updateSaveIcon() + + else: + event.Skip() + + + + def updateFirewall(self): + if self.firewallStatus is not None: + if self.guiUtility.firewall_restart: + self.firewallStatus.setSelected(1) + self.firewallStatusText.SetLabel('Restart Tribler') + elif self.guiUtility.isReachable(): + 
self.firewallStatus.setSelected(2) + self.firewallStatusText.SetLabel('Port is working') + else: + self.firewallStatus.setSelected(1) + self.firewallStatusText.SetLabel('Connecting ...') + + + + + def show_message(self): + self.portChange.SetLabel('Your changes will occur \nthe next time you restart \nTribler.') + self.guiserver.add_task(lambda:wx.CallAfter(self.hide_message), 3.0) + + + def hide_message(self): + self.portChange.SetLabel('') + + + + + def updateSaveIcon(self): + self.guiserver = GUITaskQueue.getInstance() + self.guiserver.add_task(lambda:wx.CallAfter(self.showSaveIcon), 0.0) + + + def showSaveIcon(self): + wx.CallAfter(self.iconSaved.Show(True)) + sizer = self.iconSaved.GetContainingSizer() + sizer.Layout() + self.guiserver.add_task(lambda:wx.CallAfter(self.hideSaveIcon), 3.0) + + + def hideSaveIcon(self): + self.iconSaved.Show(False) + + + + + def loadPanel(self): + currentPanel = self.data[self.mode].get('panel',None) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'standardOverview: currentPanel' , currentPanel + modeString = self.mode[:-4] + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'standardOverview: modestring' , modeString + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardOverview: loadPanel: modeString='+modeString,'currentPanel:',currentPanel + + pager = xrc.XRCCTRL(self.guiUtility.frame, 'standardPager') # Jie:not really used for profile, rss and library? + if modeString == "startpage": + # If we don't set size to 0,0, it will show on Linux + currentPanel = wx.Panel(self,-1,size=(0,0)) + pager = None + grid = currentPanel + elif modeString == "files": # AKA search results page + currentPanel = filesGrid(parent=self) + grid = currentPanel + elif modeString == "library": + currentPanel = libraryGrid(parent=self) + grid = currentPanel + elif modeString == "settings": + xrcResource = os.path.join(self.guiUtility.vwxGUI_path, modeString+'Overview.xrc') + panelName = modeString+'Overview' + res = xrc.XmlResource(xrcResource) + currentPanel = res.LoadPanel(self, panelName) + grid = xrc.XRCCTRL(currentPanel, modeString+'Grid') + + self.data[self.mode]['panel'] = currentPanel + if modeString != "startpage": + self.data[self.mode]['grid'] = grid + self.data[self.mode]['pager'] = pager + + if pager is not None: + pager.setGrid(grid) + + if self.mode == 'settingsMode': + self.firewallStatus = xrc.XRCCTRL(currentPanel,'firewallStatus') + self.firewallStatusText = xrc.XRCCTRL(currentPanel,'firewallStatusText') + self.portValue = xrc.XRCCTRL(currentPanel,'firewallValue') + # self.portValue.Bind(wx.EVT_KEY_DOWN,self.OnPortChange) + self.portChange = xrc.XRCCTRL(currentPanel, 'portChange') + self.iconSaved = xrc.XRCCTRL(currentPanel, 'iconSaved') + wx.CallAfter(self.updateFirewall) + + + + + ## if self.guiUtility.isReachable(): + ## self.firewallStatus.setToggled(True) + ## self.firewallStatus.Refresh() + ## print >> sys.stderr , "OK" + ## else: + ## self.firewallStatus.setToggled(False) + ## self.Refresh() + + + + # create the panel for the first click. 
panel could be one of the [file,person,friend,library,profile,rss] + if not currentPanel: + #xrcResource = os.path.join(self.guiUtility.vwxGUI_path, modeString+'Overview.xrc') + #panelName = modeString+'Overview' + try: + #currentPanel = grid = pager = None + #res = xrc.XmlResource(xrcResource) + # create panel + #currentPanel = res.LoadPanel(self, panelName) + #grid = xrc.XRCCTRL(currentPanel, modeString+'Grid') + #pager = xrc.XRCCTRL(self.guiUtility.frame, 'standardPager') # Jie:not really used for profile, rss and library? + search = xrc.XRCCTRL(currentPanel, 'searchField') + filter = xrc.XRCCTRL(currentPanel, modeString+'Filter') + if not currentPanel: + raise Exception('standardOverview: Could not find panel, grid or pager') + #load dummy panel + dummyFile = os.path.join(self.guiUtility.vwxGUI_path, 'dummyOverview.xrc') + dummy_res = xrc.XmlResource(dummyFile) + currentPanel = dummy_res.LoadPanel(self, 'dummyOverview') + grid = xrc.XRCCTRL(currentPanel, 'dummyGrid') + pager = xrc.XRCCTRL(currentPanel, 'standardPager') + if not currentPanel: # or not grid or not pager: + raise Exception('standardOverview: Could not find panel, grid or pager') + + # Save paneldata in self.data + self.data[self.mode]['panel'] = currentPanel + #self.data[self.mode]['grid'] = grid + #self.data[self.mode]['pager'] = pager + self.data[self.mode]['search'] = search + self.data[self.mode]['filter'] = filter + + #search.Bind(wx.EVT_COMMAND_TEXT_ENTER, self.OnSearchKeyDown) + if search is not None: + search.Bind(wx.EVT_KEY_DOWN, self.guiUtility.OnSearchKeyDown) + if modeString == "files": + web2on = self.utility.config.Read('enableweb2search',"boolean") + if web2on: + txt = self.utility.lang.get('filesdefaultsearchweb2txt') + else: + txt = self.utility.lang.get('filesdefaultsearchtxt') + search.SetValue(txt) + search.Bind(wx.EVT_MOUSE_EVENTS, self.guiUtility.OnSearchMouseAction) + + if pager is not None: + pager.setGrid(grid) + + if self.mode in ['filesMode', 'personsMode']: + print '' + +# print 'self.mode = %s' % self.mode +# print currentPanel +## self.standardOverview.data['filesMode'].get('grid') +## currentViewMode = currentPanel.grid.viewmode +## currentPanel.viewModeSelect = xrc.XRCCTRL(currentPanel, 'modeItems') +### overviewSizeSelect = xrc.XRCCTRL(currentPanel, 'numberItems') + # set default values + +# self.mode.viewModeSelect = viewModeSelect + +# currentPanel.viewModeSelect.Select(1) #SetValue('thumbnails') + ##overviewSizeSelect.Select(0) #SetValue('auto') + #viewModeSelect.Bind(wx.EVT_COMBOBOX, grid.onViewModeChange) +# currentPanel.viewModeSelect.Bind(wx.EVT_CHOICE, grid.onViewModeChange(mode = 'filesMode')) + #overviewSizeSelect.Bind(wx.EVT_COMBOBOX, grid.onSizeChange) + ##overviewSizeSelect.Bind(wx.EVT_CHOICE, grid.onSizeChange) + + + + + if self.mode == 'subscriptionsMode': + rssurlctrl = xrc.XRCCTRL(currentPanel,'pasteUrl') + rssurlctrl.Bind(wx.EVT_KEY_DOWN, self.guiUtility.OnSubscribeKeyDown) + rssurlctrl.Bind(wx.EVT_LEFT_UP, self.guiUtility.OnSubscribeMouseAction) + txt = self.utility.lang.get('rssurldefaulttxt') + rssurlctrl.SetValue(txt) + self.data[self.mode]['rssurlctrl'] = rssurlctrl + + + except: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardOverview: Error: Could not load panel, grid and pager for mode %s' % self.mode + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardOverview: Tried panel: %s=%s, grid: %s=%s, pager: %s=%s' % (panelName, currentPanel, modeString+'Grid', grid, 'standardPager', pager) + print_exc() 
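+        # Before returning, sync the grid's view mode with guiUtility.gridViewMode so
+        # the files/persons grid keeps the previously chosen view.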
+ + + if self.mode in ['filesMode', 'personsMode']: + grid = self.data[self.mode].get('grid') + if self.guiUtility.gridViewMode != grid.viewmode : + grid.onViewModeChange(mode=self.guiUtility.gridViewMode) + + + + if self.mode == 'fileDetailsMode': + print 'tb > fileDetailsMode' + self.data[self.mode]['panel'].setData(self.selectedTorrent) + + if self.mode == 'playlistMode': + print 'tb > playlistMode' + self.data[self.mode]['panel'].setData(self.selectedTorrent) + + if self.mode == 'personDetailsMode': + self.data[self.mode]['panel'].setData(self.selectedPeer) + + return currentPanel + + def refreshData(self): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardOverview: refreshData" + #print_stack() + + grid = self.data[self.mode].get('grid') + if grid: + + if DEBUG: + data = self.data[self.mode].get('data') + if type(data) == list: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardOverview: refreshData: refreshing",len(data) + + # load and show the data in the grid + grid.setData(self.data[self.mode].get('data')) + + def refreshGridManager(self): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardOverview: refreshGridManager" + #print_stack() + + try: + grid = self.data[self.mode].get('grid') + if grid: + gridmgr = grid.getGridManager().refresh() + except: + print_exc() + + def updateSelection(self): + grid = self.data[self.mode].get('grid') + if grid: + grid.updateSelection() + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardOverview: Could not update selection: No grid' + + + def getFirstItem(self): + data = self.data[self.mode].get('data') + if data and len(data) > 0: + return data[0] + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardOverview: Error, could not return firstItem, data=%s' % data + return None + + + + def filterChanged(self, filterState): + """ filterState is GridState object """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardOverview: filterChanged",filterState,self.mode#,self.data[self.mode] + + assert filterState is None or 'GridState' in str(type(filterState)), 'filterState is %s' % str(filterState) + oldFilterState = self.data[self.mode].get('filterState') + +# print 'tb >FILTERCHANGED!!!!!' 
+ if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardOverview: filterChanged: from",oldFilterState,"to",filterState + + if filterState: + filterState.setDefault(oldFilterState) + + #if filterState.db == 'libraryMode': + # print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'standardOverview: ********************** VALID LIBRARY Filterstate:', filterState + + if filterState and filterState.isValid(): + if self.mode in ('filesMode', 'personsMode', 'libraryMode', 'friendsMode','settingsMode'): + #self.loadTorrentData(filterState[0], filterState[1]) + self.data[filterState.db]['grid'].gridManager.set_state(filterState) + elif self.mode in ('subscriptionsMode'): + self.loadSubscriptionData() + self.refreshData() +# if DEBUG: +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'standardOverview: filterstate: %s' % filterState + + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardOverview: Filters not yet implemented in this mode' + return + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","standardOverview: before refreshData" + + + + #self.refreshData() + self.data[self.mode]['filterState'] = filterState + + + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'standardOverview: Invalid Filterstate:', filterState + #print_stack() + + """ + def loadSubscriptionData(self): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'load subscription data' + + torrentfeed = TorrentFeedThread.getInstance() + urls = torrentfeed.getURLs() + + bcsub = self.utility.lang.get('buddycastsubscription') + web2sub = self.utility.lang.get('web2subscription') + + bcactive = self.utility.session.get_buddycast() and self.utility.session.get_start_recommender() + bcstatus = 'inactive' + if bcactive: + bcstatus = 'active' + web2active = self.utility.config.Read('enableweb2search', "boolean") + web2status = 'inactive' + if web2active: + web2status = 'active' + + reclist = [] + record = {'url':bcsub,'status':bcstatus,'persistent':'BC'} + reclist.append(record) + record = {'url':web2sub,'status':web2status,'persistent':'Web2.0'} + reclist.append(record) + for url in urls: + record = {} + record['url'] = url + record['status'] = urls[url] + reclist.append(record) + self.data[self.mode]['data'] = reclist + self.data[self.mode]['grid'].setData(reclist) + """ + + + + def getSearchField(self,mode=None): + if mode is None: + mode = self.mode + return self.data[mode]['search'] + + def getGrid(self): + return self.data.get(self.mode, {}).get('grid') + + + def getSorting(self): + fs = self.data[self.mode].get('filterState') + if fs: + return fs.sort + else: + return None + + def getFilter(self): + return self.data[self.mode]['filter'] + + def getPager(self): + return self.data[self.mode]['pager'] + + def getRSSUrlCtrl(self): + return self.data[self.mode]['rssurlctrl'] + + def gridIsAutoResizing(self): + return self.getGrid().sizeMode == 'auto' + + def setSearchFeedback(self,*args,**kwargs): + """ May be called by web2.0 thread """ + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardOverview: setSearchFeedback',args,kwargs + setSearchFeedback_lambda = lambda:self._setSearchFeedback(*args,**kwargs) + wx.CallAfter(setSearchFeedback_lambda) + + def getSearchBusy(self): + searchDetailsPanel = self.data[self.mode].get('searchDetailsPanel') + if searchDetailsPanel: + return searchDetailsPanel.searchBusy + else: 
+ return False + def _setSearchFeedback(self, type, finished, num, keywords = [], searchresults = None): + #print 'standardOverview: _setSearchFeedback called by', currentThread().getName() + + self.setMessage(type, finished, num, keywords) + + + ##searchDetailsPanel = self.data[self.mode].get('searchDetailsPanel') + ##if searchDetailsPanel: + ## searchDetailsPanel.setMessage(type, finished, num, searchresults, keywords) + + def setMessage(self, stype, finished, num, keywords = []): + if stype: + self.results[stype] = num # FIXME different remote search overwrite eachother + + total = sum([v for v in self.results.values() if v != -1]) + + wx.CallAfter(self.guiUtility.frame.standardPager.Show,(total > 0)) + self.guiUtility.frame.pagerPanel.Refresh() + if keywords: + if type(keywords) == list: + self.keywords = " ".join(keywords) + else: + self.keywords = keywords + + if finished: + msg = self.guiUtility.utility.lang.get('finished_search') % (self.keywords, total) + self.guiUtility.stopSearch() + #self.searchFinished(set_message=False) + else: + msg = self.guiUtility.utility.lang.get('going_search') % (total) + + #self.search_results.SetFont(wx.Font(FS_FILETITLE,FONTFAMILY,FONTWEIGHT,wx.NORMAL,True,FONTFACE)) + if self.mode not in ['filesMode']: + self.search_results.SetForegroundColour(wx.RED) + else: + self.search_results.SetForegroundColour(wx.BLACK) + + if self.mode in ['filesMode']: + if sys.platform == 'win32': + self.search_results.SetText(msg) + self.guiUtility.frame.top_bg.Refresh() + else: + #self.search_results.Refresh(eraseBackground=True) + self.search_results.SetLabel(msg) + else: + if sys.platform == 'win32': + self.search_results.SetText('Return to Results') + else: + self.search_results.SetLabel('Return to Results') + + def growWithGrid(self): + gridHeight = self.data[self.mode]['grid'].GetSize()[1] + pagerHeight = 29 + filterHeight = 21 + 8+ self.data[self.mode]['filter'].GetSize()[1] + + newSize = (-1, gridHeight + pagerHeight + filterHeight) + self.SetSize(newSize) + self.SetMinSize(newSize) + self.GetSizer().Layout() + self.GetContainingSizer().Layout() + self.guiUtility.scrollWindow.FitInside() + self.guiUtility.refreshOnResize() + + def removeTorrentFromLibrary(self, torrent): + infohash = torrent['infohash'] + + # Johan, 2009-03-05: we need long download histories for good + # semantic clustering. + + mypreference_db = self.utility.session.open_dbhandler(NTFY_MYPREFERENCES) + # Arno, 2009-03-10: Not removing it from MyPref means it keeps showing + # up in the Library, even after removal :-( H4x0r this. + #mypreference_db.deletePreference(infohash) + mypreference_db.updateDestDir(infohash,"") + + # BuddyCast is now notified of this removal from our + # preferences via the Notifier mechanism. 
See BC.sesscb_ntfy_myprefs() + + grid = self.getGrid() + if grid is not None: + gridmgr = grid.getGridManager() + if gridmgr is not None: + gridmgr.refresh() + + + def toggleLoadingDetailsPanel(self, visible): + loadingDetails = self.data[self.mode].get('loadingDetailsPanel') + sizer = self.data[self.mode]['grid'].GetContainingSizer() + if visible: + if not loadingDetails: + loadingDetails = LoadingDetailsPanel(self.data[self.mode]['panel']) + + sizer.Insert(3,loadingDetails, 0, wx.ALL|wx.EXPAND, 0) + self.data[self.mode]['loadingDetailsPanel'] = loadingDetails + loadingDetails.Show() + else: + loadingDetails.startSearch() + loadingDetails.Show() + + else: + if loadingDetails: + #print 'standardOverview: removing loading details' + sizer.Detach(loadingDetails) + loadingDetails.Destroy() + self.data[self.mode]['loadingDetailsPanel'] = None + sizer.Layout() + self.data[self.mode]['panel'].Refresh() + self.hSizer.Layout() + + def setLoadingCount(self,count): + loadingDetails = self.data[self.mode].get('loadingDetailsPanel') + if not loadingDetails: + return + loadingDetails.setMessage('loaded '+str(count)+' more files from database (not yet shown)') + + + def toggleSearchDetailsPanel(self, visible): + searchDetails = self.data[self.mode].get('searchDetailsPanel') + sizer = self.data[self.mode]['grid'].GetContainingSizer() + #print 'standardOverview: Sizer: %s' % sizer + #print 'standardOverview: SearchDetails: %s' % searchDetails + #if searchDetails: + # print 'standardOverview: %s, %s' % (str(searchDetails.GetSize()), str(searchDetails.GetMinSize())) + + if visible: + if not searchDetails: + searchDetails = SearchDetailsPanel(self.data[self.mode]['panel']) + + #print 'standardOverview: Inserting search details' + sizer.Insert(3,searchDetails, 0, wx.ALL|wx.EXPAND, 0) + #sizer.Layout() + #self.data[self.mode]['panel'].Refresh() +# print 'Size: %s' % str(self.searchDetails.GetSize()) +# print 'Parent: %s' % str(self.searchDetails.GetParent().GetName()) +# print 'GParent: %s' % str(self.searchDetails.GetParent().GetParent().GetName()) + self.data[self.mode]['searchDetailsPanel'] = searchDetails + searchDetails.Show() + else: + searchDetails.startSearch() + searchDetails.Show() + + else: + if searchDetails: + #print 'standardOverview: removing search details' + sizer.Detach(searchDetails) + searchDetails.Destroy() + self.data[self.mode]['searchDetailsPanel'] = None + sizer.Layout() + self.data[self.mode]['panel'].Refresh() + self.hSizer.Layout() + + + def stopWeb2Search(self): + grid = self.getGrid() + if grid: + grid.stopWeb2Search() + + + + diff --git a/tribler-mod/Tribler/Main/vwxGUI/standardPager.py b/tribler-mod/Tribler/Main/vwxGUI/standardPager.py new file mode 100644 index 0000000..f987ff2 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/standardPager.py @@ -0,0 +1,287 @@ +from time import localtime, strftime +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility +from Tribler.Main.vwxGUI.tribler_topButton import SwitchButton +from wx.lib.stattext import GenStaticText as StaticText +from font import * + +import wx, sys, math + +DEBUG = False + +# font sizes +if sys.platform == 'darwin': + FS_ITEMNORMAL = 11 + FS_ITEMBOLD = 13 +else: + FS_ITEMNORMAL = 8 + FS_ITEMBOLD = 10 + +class standardPager(wx.Panel): + """ + Panel with automatic backgroundimage control. + """ + def __init__(self, *args): + if len(args) == 0: + self.initReady = False + pre = wx.PrePanel() + # the Create step is done by XRC. 
+ self.PostCreate(pre) + self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate) + else: + wx.Panel.__init__(self, args[0], args[1], args[2], args[3]) + self._PostInit() + + def OnCreate(self, event): + self.Unbind(wx.EVT_WINDOW_CREATE) + wx.CallAfter(self._PostInit) + event.Skip() + return True + + def _PostInit(self): + # Do all init here + self.guiUtility = GUIUtility.getInstance() + self.initPager() + self.Refresh(True) + self.Update() + + + + def initPager(self, numPages=10): + + + self.beginPage = 0 + self.currentPage = 0 + self.left = None + self.right = None + self.numPages = numPages + self.totalPages = 0 + self.totalItems = 0 + self.itemsPerPage = 0 + self.currentDots = [None, None] + self.pagerColour = wx.Colour(230,230,230) + + self.pageNumbers = [] + self.utility = self.guiUtility.utility + self.addComponents() + self.initReady = True + self.refresh() + + def addComponents(self): + self.Show(False) + self.SetBackgroundColour(self.pagerColour) + self.normalFont = wx.Font(FS_ITEMNORMAL,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE) # was Arial + self.boldFont = wx.Font(FS_ITEMNORMAL,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE) # was Arial + self.hSizer = wx.BoxSizer(wx.HORIZONTAL) + + + self.left = SwitchButton(self, name='prevpage') + self.left.Bind(wx.EVT_LEFT_UP, self.mouseAction) + self.hSizer.Add(self.left, 0, wx.TOP, 0) + + #page numbers + self.refreshPageNumbers() + + + self.right = SwitchButton(self, name='nextpage') + self.right.Bind(wx.EVT_LEFT_UP, self.mouseAction) + self.hSizer.AddSpacer(wx.Size(20)) + self.hSizer.AddSpacer(wx.Size(20)) + self.hSizer.Add(self.right, 0, wx.TOP, 0) + + + self.SetSizer(self.hSizer) + self.SetAutoLayout(1) + self.Layout() + self.Refresh() + self.Show() + + + + def refreshPageNumbers(self): + + # Update beginPage (first page number on screen) + if self.currentPage >= self.beginPage+self.numPages: + self.beginPage +=1 + + self.beginPage = max(0, min(self.totalPages-self.numPages, self.beginPage)) + if self.currentPage <= self.beginPage-1: + self.beginPage = max(0, self.beginPage - 1) + + rightDots = self.beginPage+self.numPages < self.totalPages + leftDots = self.beginPage != 0 + self.setPageNumbers(self.beginPage, min(self.numPages,self.totalPages) , self.currentPage, leftDots, rightDots) + if self.hasGrid(): + self.grid.gridManager.set_page(self.currentPage) + + + def setPageNumbers(self, begin, number, current, leftDots, rightDots): + """ + Put the right numbers in the pagefield. If necessary, create new statictexts. 
+ Highlight current page number + """ + + #print 'Pagenumbers: Begin %d, number %d, current %d' % (begin, number, current) + + refresh = False + # Guarantee right amount of statictexts + currentPageNumber = len(self.pageNumbers) + if number > currentPageNumber: + while (len(self.pageNumbers) < number): + text = self.getDefaultTextField() + text.Bind(wx.EVT_LEFT_UP, self.mouseAction) + self.pageNumbers.append(text) + self.hSizer.Insert(len(self.pageNumbers)+1, text, 0, wx.TOP|wx.LEFT|wx.RIGHT, 2) + + refresh = True + elif number < currentPageNumber: + for i in range(number, currentPageNumber): + self.hSizer.Detach(self.pageNumbers[i]) + self.pageNumbers[i].Destroy() + #self.pageNumbers[i].Show(False) + self.pageNumbers = self.pageNumbers[:number] + refresh = True + + # Manage dots before and after page numbers + #if rightDots and not self.currentDots[1]: + # dots = self.getDefaultTextField('...') + # extra = int(bool(self.currentDots[0])) + + # self.hSizer.Insert(len(self.pageNumbers)+2+extra, dots, 0, wx.LEFT|wx.RIGHT, 2) + # self.currentDots[1] = dots + # refresh = True + + #if not rightDots and self.currentDots[1]: + # self.hSizer.Detach(self.currentDots[1]) + # self.currentDots[1].Destroy() + # self.currentDots[1] = None + # refresh = True + + #if leftDots and not self.currentDots[0]: + # dots = self.getDefaultTextField('...') + + # self.hSizer.Insert(2, dots, 0, wx.LEFT|wx.RIGHT, 2) + # self.currentDots[0] = dots + # refresh = True + + #if not leftDots and self.currentDots[0]: + # self.hSizer.Detach(self.currentDots[0]) + # self.currentDots[0].Destroy() + # self.currentDots[0] = None + # refresh = True + + + #print '%d statictexts' % (len(self.pageNumbers)) + # Put right numbers in statictexts + page = begin + for panel in self.pageNumbers: + panel.SetLabel(' ' + str(page+1) + ' ') + if page == current: + + panel.SetFont(self.boldFont) + panel.SetForegroundColour(wx.WHITE) ## 0,110,149 + panel.SetBackgroundColour((0,110,149)) + + else: + panel.SetFont(self.normalFont) + panel.SetForegroundColour((255,51,0)) + panel.SetBackgroundColour(self.pagerColour) + + page+=1 + + if refresh: + self.hSizer.Layout() + self.Refresh() + self.Show(True) + + if self.right is not None: + self.right.setToggled(self.currentPage != (self.totalPages - 1)) + if self.left is not None: + self.left.setToggled(self.currentPage != 0) + + + + + + def getDefaultTextField(self, t=""): + text = StaticText(self, -1, t) + text.SetForegroundColour((255,51,0)) + text.SetBackgroundColour(self.pagerColour) + return text + + + + + + def refresh(self): + "Called by Grid if size or data changes" + + if not self.hasGrid() or not self.initReady: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardPager: no refresh, not ready yet or no grid' + return + + + grid = self.grid + try: + self.totalItems = grid.gridManager.get_total_items() + self.itemsPerPage = grid.items + except: + self.totalItems = 0 + self.itemsPerPage = 0 + + + # if dummy item "Searching for content is shown, do not count it as content + if self.totalItems == 1 and len(grid.data) > 0 and grid.data[0].get('content_name','no_name') == self.utility.lang.get('searching_content'): + self.totalItems = 0 + + + if self.itemsPerPage == 0: + self.totalPages = 0 + else: + self.totalPages = int(math.ceil(self.totalItems/float(self.itemsPerPage))) + + #self.number.SetLabel('%d %s%s / %d %s%s' % (self.totalItems, self.utility.lang.get('item'), getPlural(self.totalItems), self.totalPages, self.utility.lang.get('page'), 
getPlural(self.totalPages))) + + if self.currentPage >= self.totalPages: + self.currentPage = max(self.totalPages -1, 0) + self.refreshPageNumbers() + +# def imageClicked(self, event): +# obj = event.GetEventObject() +# self.mouseAction(obj, event) +# + def mouseAction(self, event): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardPager: mouseaction' + obj = event.GetEventObject() + + old = self.currentPage + #print '%s did mouse' % obj + if obj == self.left: + self.currentPage = max(0, self.currentPage-1) + elif obj == self.right: + self.currentPage = min(self.totalPages-1, self.currentPage+1) + elif obj in self.pageNumbers: + index = self.pageNumbers.index(obj) + self.currentPage = self.beginPage+index + else: + event.Skip() + return + + self.refreshPageNumbers() + + def hasGrid(self): + try: + if self.grid: + #print 'pager has grid' + return True + except: + #print 'pager has no grid' + return False + + def setGrid(self, grid): + #print 'setGrid called: %s' % grid + self.grid = grid + if grid: + self.grid.setPager(self) + diff --git a/tribler-mod/Tribler/Main/vwxGUI/standardPager.py.bak b/tribler-mod/Tribler/Main/vwxGUI/standardPager.py.bak new file mode 100644 index 0000000..42c0d2a --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/standardPager.py.bak @@ -0,0 +1,286 @@ +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility +from Tribler.Main.vwxGUI.tribler_topButton import SwitchButton +from wx.lib.stattext import GenStaticText as StaticText +from font import * + +import wx, sys, math + +DEBUG = False + +# font sizes +if sys.platform == 'darwin': + FS_ITEMNORMAL = 11 + FS_ITEMBOLD = 13 +else: + FS_ITEMNORMAL = 8 + FS_ITEMBOLD = 10 + +class standardPager(wx.Panel): + """ + Panel with automatic backgroundimage control. + """ + def __init__(self, *args): + if len(args) == 0: + self.initReady = False + pre = wx.PrePanel() + # the Create step is done by XRC. 
+ self.PostCreate(pre) + self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate) + else: + wx.Panel.__init__(self, args[0], args[1], args[2], args[3]) + self._PostInit() + + def OnCreate(self, event): + self.Unbind(wx.EVT_WINDOW_CREATE) + wx.CallAfter(self._PostInit) + event.Skip() + return True + + def _PostInit(self): + # Do all init here + self.guiUtility = GUIUtility.getInstance() + self.initPager() + self.Refresh(True) + self.Update() + + + + def initPager(self, numPages=10): + + + self.beginPage = 0 + self.currentPage = 0 + self.left = None + self.right = None + self.numPages = numPages + self.totalPages = 0 + self.totalItems = 0 + self.itemsPerPage = 0 + self.currentDots = [None, None] + self.pagerColour = wx.Colour(230,230,230) + + self.pageNumbers = [] + self.utility = self.guiUtility.utility + self.addComponents() + self.initReady = True + self.refresh() + + def addComponents(self): + self.Show(False) + self.SetBackgroundColour(self.pagerColour) + self.normalFont = wx.Font(FS_ITEMNORMAL,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE) # was Arial + self.boldFont = wx.Font(FS_ITEMNORMAL,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE) # was Arial + self.hSizer = wx.BoxSizer(wx.HORIZONTAL) + + + self.left = SwitchButton(self, name='prevpage') + self.left.Bind(wx.EVT_LEFT_UP, self.mouseAction) + self.hSizer.Add(self.left, 0, wx.TOP, 0) + + #page numbers + self.refreshPageNumbers() + + + self.right = SwitchButton(self, name='nextpage') + self.right.Bind(wx.EVT_LEFT_UP, self.mouseAction) + self.hSizer.AddSpacer(wx.Size(20)) + self.hSizer.AddSpacer(wx.Size(20)) + self.hSizer.Add(self.right, 0, wx.TOP, 0) + + + self.SetSizer(self.hSizer) + self.SetAutoLayout(1) + self.Layout() + self.Refresh() + self.Show() + + + + def refreshPageNumbers(self): + + # Update beginPage (first page number on screen) + if self.currentPage >= self.beginPage+self.numPages: + self.beginPage +=1 + + self.beginPage = max(0, min(self.totalPages-self.numPages, self.beginPage)) + if self.currentPage <= self.beginPage-1: + self.beginPage = max(0, self.beginPage - 1) + + rightDots = self.beginPage+self.numPages < self.totalPages + leftDots = self.beginPage != 0 + self.setPageNumbers(self.beginPage, min(self.numPages,self.totalPages) , self.currentPage, leftDots, rightDots) + if self.hasGrid(): + self.grid.gridManager.set_page(self.currentPage) + + + def setPageNumbers(self, begin, number, current, leftDots, rightDots): + """ + Put the right numbers in the pagefield. If necessary, create new statictexts. 
+ Highlight current page number + """ + + #print 'Pagenumbers: Begin %d, number %d, current %d' % (begin, number, current) + + refresh = False + # Guarantee right amount of statictexts + currentPageNumber = len(self.pageNumbers) + if number > currentPageNumber: + while (len(self.pageNumbers) < number): + text = self.getDefaultTextField() + text.Bind(wx.EVT_LEFT_UP, self.mouseAction) + self.pageNumbers.append(text) + self.hSizer.Insert(len(self.pageNumbers)+1, text, 0, wx.TOP|wx.LEFT|wx.RIGHT, 2) + + refresh = True + elif number < currentPageNumber: + for i in range(number, currentPageNumber): + self.hSizer.Detach(self.pageNumbers[i]) + self.pageNumbers[i].Destroy() + #self.pageNumbers[i].Show(False) + self.pageNumbers = self.pageNumbers[:number] + refresh = True + + # Manage dots before and after page numbers + #if rightDots and not self.currentDots[1]: + # dots = self.getDefaultTextField('...') + # extra = int(bool(self.currentDots[0])) + + # self.hSizer.Insert(len(self.pageNumbers)+2+extra, dots, 0, wx.LEFT|wx.RIGHT, 2) + # self.currentDots[1] = dots + # refresh = True + + #if not rightDots and self.currentDots[1]: + # self.hSizer.Detach(self.currentDots[1]) + # self.currentDots[1].Destroy() + # self.currentDots[1] = None + # refresh = True + + #if leftDots and not self.currentDots[0]: + # dots = self.getDefaultTextField('...') + + # self.hSizer.Insert(2, dots, 0, wx.LEFT|wx.RIGHT, 2) + # self.currentDots[0] = dots + # refresh = True + + #if not leftDots and self.currentDots[0]: + # self.hSizer.Detach(self.currentDots[0]) + # self.currentDots[0].Destroy() + # self.currentDots[0] = None + # refresh = True + + + #print '%d statictexts' % (len(self.pageNumbers)) + # Put right numbers in statictexts + page = begin + for panel in self.pageNumbers: + panel.SetLabel(' ' + str(page+1) + ' ') + if page == current: + + panel.SetFont(self.boldFont) + panel.SetForegroundColour(wx.WHITE) ## 0,110,149 + panel.SetBackgroundColour((0,110,149)) + + else: + panel.SetFont(self.normalFont) + panel.SetForegroundColour((255,51,0)) + panel.SetBackgroundColour(self.pagerColour) + + page+=1 + + if refresh: + self.hSizer.Layout() + self.Refresh() + self.Show(True) + + if self.right is not None: + self.right.setToggled(self.currentPage != (self.totalPages - 1)) + if self.left is not None: + self.left.setToggled(self.currentPage != 0) + + + + + + def getDefaultTextField(self, t=""): + text = StaticText(self, -1, t) + text.SetForegroundColour((255,51,0)) + text.SetBackgroundColour(self.pagerColour) + return text + + + + + + def refresh(self): + "Called by Grid if size or data changes" + + if not self.hasGrid() or not self.initReady: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardPager: no refresh, not ready yet or no grid' + return + + + grid = self.grid + try: + self.totalItems = grid.gridManager.get_total_items() + self.itemsPerPage = grid.items + except: + self.totalItems = 0 + self.itemsPerPage = 0 + + + # if dummy item "Searching for content is shown, do not count it as content + if self.totalItems == 1 and len(grid.data) > 0 and grid.data[0].get('content_name','no_name') == self.utility.lang.get('searching_content'): + self.totalItems = 0 + + + if self.itemsPerPage == 0: + self.totalPages = 0 + else: + self.totalPages = int(math.ceil(self.totalItems/float(self.itemsPerPage))) + + #self.number.SetLabel('%d %s%s / %d %s%s' % (self.totalItems, self.utility.lang.get('item'), getPlural(self.totalItems), self.totalPages, self.utility.lang.get('page'), 
getPlural(self.totalPages))) + + if self.currentPage >= self.totalPages: + self.currentPage = max(self.totalPages -1, 0) + self.refreshPageNumbers() + +# def imageClicked(self, event): +# obj = event.GetEventObject() +# self.mouseAction(obj, event) +# + def mouseAction(self, event): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'standardPager: mouseaction' + obj = event.GetEventObject() + + old = self.currentPage + #print '%s did mouse' % obj + if obj == self.left: + self.currentPage = max(0, self.currentPage-1) + elif obj == self.right: + self.currentPage = min(self.totalPages-1, self.currentPage+1) + elif obj in self.pageNumbers: + index = self.pageNumbers.index(obj) + self.currentPage = self.beginPage+index + else: + event.Skip() + return + + self.refreshPageNumbers() + + def hasGrid(self): + try: + if self.grid: + #print 'pager has grid' + return True + except: + #print 'pager has no grid' + return False + + def setGrid(self, grid): + #print 'setGrid called: %s' % grid + self.grid = grid + if grid: + self.grid.setPager(self) + diff --git a/tribler-mod/Tribler/Main/vwxGUI/statusDownloads.xrc b/tribler-mod/Tribler/Main/vwxGUI/statusDownloads.xrc new file mode 100644 index 0000000..4c94920 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/statusDownloads.xrc @@ -0,0 +1,237 @@ + + + + 0,0 + 260,425 + + wxVERTICAL + + wxTOP|wxLEFT|wxBOTTOM|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 3 + + + wxVERTICAL + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + 3,3 + 246,55 + + + + + + wxTOP|wxEXPAND|wxFIXED_MINSIZE + 9 + + + wxHORIZONTAL + + wxEXPAND|wxFIXED_MINSIZE + 10 + + + images/triblerpanel_topcenter.png + 0,237 + 238,20 + # 0 + #ffffff + + wxHORIZONTAL + + wxTOP|wxLEFT|wxEXPAND|wxFIXED_MINSIZE + 3 + + + 1 + + + 3,3 + 89,15 + # 0 + #ffffff + + + + wxTOP|wxLEFT|wxBOTTOM|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 2 + + + 116,2 + 16,16 + + + + wxTOP|wxEXPAND|wxALIGN_RIGHT|wxFIXED_MINSIZE + 3 + + + + + 20,3 + 69,15 + # 0 + #ffffff + + + + wxTOP|wxLEFT|wxBOTTOM|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 2 + + + 277,3 + 16,16 + + + + wxTOP|wxEXPAND|wxALIGN_RIGHT|wxFIXED_MINSIZE + 3 + + + + + 201,3 + 94,15 + # 0 + #ffffff + + + + + + + + + wxLEFT|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 3 + + + wxHORIZONTAL + + wxTOP|wxRIGHT|wxFIXED_MINSIZE + 3 + + + + + 3,32 + 170,18 + # 0 + + + + wxTOP|wxFIXED_MINSIZE + 3 + + + + + 246,23 + 42,18 + # 0 + + + + + + wxLEFT|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 3 + + + wxHORIZONTAL + + wxRIGHT|wxFIXED_MINSIZE + 3 + + + + + 3,41 + 170,18 + # 0 + + + + wxFIXED_MINSIZE + 10 + + + + + 246,41 + 42,18 + # 0 + + + + + + wxLEFT|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 3 + + + wxHORIZONTAL + + wxRIGHT|wxFIXED_MINSIZE + 3 + + + + + 10,59 + 170,18 + # 0 + + + + wxFIXED_MINSIZE + 10 + + + + + 246,59 + 42,18 + # 0 + + + + + + wxLEFT|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 3 + + + wxHORIZONTAL + + wxRIGHT|wxFIXED_MINSIZE + 3 + + + + + 10,77 + 170,18 + # 0 + + + + wxFIXED_MINSIZE + 10 + + + + + 246,77 + 42,18 + # 0 + + + + + + + \ No newline at end of file diff --git a/tribler-mod/Tribler/Main/vwxGUI/subscriptionsDetails.xrc b/tribler-mod/Tribler/Main/vwxGUI/subscriptionsDetails.xrc new file mode 100644 index 0000000..4aaa872 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/subscriptionsDetails.xrc @@ -0,0 +1,200 @@ + + + + 0,0 + 300,600 + # 0 + + wxVERTICAL + + wxEXPAND|wxFIXED_MINSIZE + 0 + + + wxHORIZONTAL + + wxEXPAND|wxFIXED_MINSIZE + 0 + + + 3,3 + 10,21 + + + + wxFIXED_MINSIZE + 3 + + + images/triblerpanel_topcenter.png + 13,3 + 280,21 + # 0 + #ffffff + + wxHORIZONTAL + + 
wxTOP|wxEXPAND|wxFIXED_MINSIZE + 5 + + + + + 0,5 + 280,16 + # 0 + #ffffff + + + + + + + wxEXPAND|wxFIXED_MINSIZE + 0 + + + 275,3 + 10,21 + + + + + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + wxHORIZONTAL + + wxFIXED_MINSIZE + 3 + + + 1 + 1 + 0,21 + 62,16 + #ffffff + # 0 + + + + 18,3 + wxEXPAND|wxFIXED_MINSIZE + 3 + + + + + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + 0,21 + 298,238 + #ffffff + + wxVERTICAL + + wxTOP|wxEXPAND|wxFIXED_MINSIZE + 10 + + + + + 0,0 + 296,25 + #ffffff + # 0 + + + + wxTOP|wxEXPAND|wxFIXED_MINSIZE + 3 + + + 0,38 + 294,15 + #cbcbcb + + wxHORIZONTAL + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + + + 0,0 + 296,15 + #cbcbcb + # 0 + + + + + + + wxTOP|wxLEFT|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 6 + + + 6,59 + 20,155 + + + + + wxTOP|wxEXPAND|wxFIXED_MINSIZE + 3 + + + 0,401 + 294,15 + #cbcbcb + + wxHORIZONTAL + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + + + 0,0 + 296,15 + #cbcbcb + # 0 + + + + + + + wxTOP|wxLEFT|wxRIGHT|wxEXPAND|wxFIXED_MINSIZE + 6 + + + 6,238 + 20,160 + + + + + + + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + 3,275 + 300,5 + + + + + diff --git a/tribler-mod/Tribler/Main/vwxGUI/tribler_List.py b/tribler-mod/Tribler/Main/vwxGUI/tribler_List.py new file mode 100644 index 0000000..ce28925 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/tribler_List.py @@ -0,0 +1,324 @@ +from time import localtime, strftime +# Written by Jelle Roozenburg, Maarten ten Brinke +# see LICENSE.txt for license information + +import wx, os, sys +from traceback import print_exc +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility +from Tribler.Core.Utilities.unicode import * +from Tribler.Core.API import * + +DEBUG = False + +class tribler_List(wx.ListCtrl): + + def __init__(self, *args, **kw): + # self.SetWindowStyle(wx.LC_REPORT|wx.NO_BORDER|wx.LC_NO_HEADER|wx.LC_SINGLE_SEL) + + self.guiUtility = GUIUtility.getInstance() + self.utility = self.guiUtility.utility + self.backgroundColor = wx.Colour(102,102,102) + #self.ForegroundColour = wx.Colour(0,0,0) + self.isEmpty = True # used for DLFilesList.onListDClick + self.updateFunc = None + pre = wx.PreListCtrl() + # the Create step is done by XRC. 
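# The two-phase construction used by this class (and the other XRC-created
# panels in this module), reduced to a minimal sketch. It assumes classic
# wxPython 2.8, which the original code targets; the class name and the
# column added in _PostInit are illustrative.
import wx

class XrcList(wx.ListCtrl):
    def __init__(self):
        # Phase 1: build an uninitialised placeholder; XRC performs Create().
        pre = wx.PreListCtrl()
        self.PostCreate(pre)
        self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate)

    def OnCreate(self, event):
        # Phase 2: the native window exists now; defer the real setup so it
        # runs outside the creation event, as the classes above do.
        self.Unbind(wx.EVT_WINDOW_CREATE)
        wx.CallAfter(self._PostInit)
        event.Skip()

    def _PostInit(self):
        self.InsertColumn(0, 'name')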
+ + self.PostCreate(pre) + self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate) + + def OnCreate(self, event): + self.Unbind(wx.EVT_WINDOW_CREATE) + wx.CallAfter(self._PostInit) + event.Skip() + return True + + def _PostInit(self): + # Do all init here + self.Bind(wx.EVT_SIZE, self.onListResize) + # Turn on labeltips in list control + + + def onListResize(self, event=None): + if event!=None: + event.Skip() + if not self.InReportView() or self.GetColumnCount()==0: + return + size = self.GetClientSize() + self.SetColumnWidth( 0, size.width - wx.SystemSettings.GetMetric(wx.SYS_VSCROLL_X)) #vertical scrollbar width + self.ScrollList(-100, 0) # Removes HSCROLLBAR + +class FilesList(tribler_List): + def __init__(self): + self.initReady = False + tribler_List.__init__(self) + + def _PostInit(self): + if not self.initReady: + try: + self.SetWindowStyle(wx.LC_REPORT|wx.NO_BORDER|wx.LC_SINGLE_SEL) + except: + print_exc() + self.InsertColumn(0, self.utility.lang.get('file')) + self.InsertColumn(1, self.utility.lang.get('size')) + self.Bind(wx.EVT_SIZE, self.onListResize) + self.initReady = True + + def setData(self, torrent): + # Get the file(s)data for this torrent + if not self.initReady: + self._PostInit() + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'tribler_List: setData of FilesTabPanel called' + try: + + if torrent.get('web2') or 'query_permids' in torrent: # web2 or remote query result + self.filelist = [] + self.DeleteAllItems() + self.onListResize(None) + return {} + + torrent_dir = self.utility.session.get_torrent_collecting_dir() + torrent_filename = os.path.join(torrent_dir, torrent['torrent_file_name']) + + if not os.path.exists(torrent_filename): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tribler_List: Torrent: %s does not exist" % torrent_filename + return {} + + metadata = self.utility.getMetainfo(torrent_filename) + if not metadata: + return {} + info = metadata.get('info') + if not info: + return {} + + #print metadata.get('comment', 'no comment') + + + filedata = info.get('files') + if not filedata: + filelist = [(dunno2unicode(info.get('name')),self.utility.size_format(info.get('length')))] + else: + filelist = [] + for f in filedata: + filelist.append((dunno2unicode('/'.join(f.get('path'))), self.utility.size_format(f.get('length')) )) + filelist.sort() + + + # Add the filelist to the fileListComponent + self.filelist = filelist + self.DeleteAllItems() + for f in filelist: + index = self.InsertStringItem(sys.maxint, f[0]) + self.SetStringItem(index, 1, f[1]) + self.onListResize(None) + + except: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'tribler_List: error getting list of files in torrent' + print_exc() + return {} + + def getNumFiles(self): + try: + return len(self.filelist) + except: + return 0 + + def onListResize(self, event): + size = self.GetClientSize() + if size[0] > 50 and size[1] > 50: + self.SetColumnWidth(1, wx.LIST_AUTOSIZE) + self.SetColumnWidth(0, self.GetClientSize()[0]-self.GetColumnWidth(1)-15) + self.ScrollList(-100, 0) # Removes HSCROLLBAR + if event: + event.Skip() + +class DLFilesList(tribler_List): + """ File List with downloadable items """ + def __init__(self): + self.infohash_List = None #list of infohashes for current items in the gui list + self.other_List = None #the other list that should received the downloaded item + tribler_List.__init__(self) + + def _PostInit(self): + tribler_List._PostInit(self) + self.Bind(wx.EVT_LEFT_DCLICK, 
self.onListDClick) + + def setInfoHashList(self, alist): + self.infohash_List = alist + + def setOtherList(self, olist): + """the other list that should received the downloaded item""" + self.other_List = olist + + def setFieldsUpdateFunction(self, func): + self.updateFunc = func + + def onListDClick(self, event): + if self.infohash_List: + item = self.GetFirstSelected() + if item != -1 and item < len(self.infohash_List): + infohash = self.infohash_List[item] + # jie.done: click to download. fix it by replacing by db + torrent_db = self.utility.session.open_dbhandler(NTFY_TORRENTS) + torrent = torrent_db.getTorrent(infohash) + torrent['infohash'] = infohash + ret = self.guiUtility.standardDetails.download(torrent) + if ret: + self.infohash_List.pop(item) + self.DeleteItem(item) + if self.other_List is not None: + # only used to move items to common item list in peer view + if self.other_List.isEmpty: + self.other_List.DeleteAllItems() + self.other_List.InsertStringItem(sys.maxint, torrent['name']) + self.other_List.isEmpty = False + event.Skip() + if self.updateFunc: + self.updateFunc(self.other_List, self) + +class T4TSeedingList(tribler_List): + def __init__(self): + self.initReady = False + tribler_List.__init__(self) + + def setData(self, dslist): + # Get the file(s)data for this torrent + if not self.initReady: + self._PostInit() + + try: + # refresh the lists + self.DeleteAllItems() + + for ds in dslist: + peer_list = ds.get_peerlist() + + if not peer_list: + # fault tolerance + peer_list = [] + else: + peer_list.sort(dspeer_uprate_cmp,reverse=True) + + for p in peer_list: + uprate, utotal = p['uprate'], p['utotal'] + if uprate == 0 and utotal == 0: + continue + if not p['g2g']: + index = self.InsertStringItem(sys.maxint, p['ip']) + self.SetStringItem(index, 1, self.utility.speed_format(uprate, 1, "KB")) + self.SetStringItem(index, 2, self.utility.size_format(utotal, 1, "MB")) + + self.onListResize(None) + + except: + print_exc() + return {} + + def _PostInit(self): + if not self.initReady: + try: + self.SetWindowStyle(wx.LC_REPORT|wx.NO_BORDER|wx.LC_SINGLE_SEL) + except: + print_exc() + self.InsertColumn(0, self.utility.lang.get('peer_ip')) + self.InsertColumn(1, self.utility.lang.get('curr_ul_rate')) + self.InsertColumn(2, self.utility.lang.get('ul_amount')) + + self.SetColumnWidth(0, 140) + self.SetColumnWidth(1, 75) + self.SetColumnWidth(2, 75) + + self.Bind(wx.EVT_SIZE, self.onListResize) + self.initReady = True + + def onListResize(self, event): + size = self.GetClientSize() + if size[0] > 50 and size[1] > 50: + self.ScrollList(-100, 0) # Removes HSCROLLBAR + if event: + event.Skip() + +class G2GSeedingList(tribler_List): + def __init__(self): + self.initReady = False + tribler_List.__init__(self) + + def setData(self, dslist): + # Get the file(s)data for this torrent + if not self.initReady: + self._PostInit() + + try: + peer_db = self.utility.session.open_dbhandler(NTFY_PEERS) + + # refresh the lists + self.DeleteAllItems() + + for ds in dslist: + peer_list = ds.get_peerlist() + + if not peer_list: + peer_list = [] + else: + peer_list.sort(dspeer_uprate_cmp,reverse=True) + + for p in peer_list: + uprate, utotal = p['uprate'], p['utotal'] + if uprate == 0 and utotal == 0: + continue + + if p['g2g']: + tribler_id = peer_db.getPeerID(p['id']) + + if not tribler_id: + tribler_id = p['ip'] + + index = self.InsertStringItem(sys.maxint, str(tribler_id)) + self.SetStringItem(index, 1, self.utility.speed_format(uprate, 1, "KB")) + self.SetStringItem(index, 2, 
self.utility.size_format(utotal, 1, "MB")) + + self.onListResize(None) + + except: + print_exc() + return {} + + def _PostInit(self): + if not self.initReady: + try: + self.SetWindowStyle(wx.LC_REPORT|wx.NO_BORDER|wx.LC_SINGLE_SEL) + except: + print_exc() + self.InsertColumn(0, self.utility.lang.get('tribler_name')) + self.InsertColumn(1, self.utility.lang.get('curr_ul_rate')) + self.InsertColumn(2, self.utility.lang.get('ul_amount')) + + self.SetColumnWidth(0, 140) + self.SetColumnWidth(1, 75) + self.SetColumnWidth(2, 75) + + self.Bind(wx.EVT_SIZE, self.onListResize) + self.initReady = True + + def onListResize(self, event): + size = self.GetClientSize() + if size[0] > 50 and size[1] > 50: + self.ScrollList(-100, 0) # Removes HSCROLLBAR + if event: + event.Skip() + +def dspeer_uprate_cmp(a,b): + """ Sort on upload rate """ + ar = a['uprate'] + br = b['uprate'] + if ar < br: + return -1 + elif ar == br: + return 0 + else: + return 1 + diff --git a/tribler-mod/Tribler/Main/vwxGUI/tribler_List.py.bak b/tribler-mod/Tribler/Main/vwxGUI/tribler_List.py.bak new file mode 100644 index 0000000..9a87ac0 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/tribler_List.py.bak @@ -0,0 +1,323 @@ +# Written by Jelle Roozenburg, Maarten ten Brinke +# see LICENSE.txt for license information + +import wx, os, sys +from traceback import print_exc +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility +from Tribler.Core.Utilities.unicode import * +from Tribler.Core.API import * + +DEBUG = False + +class tribler_List(wx.ListCtrl): + + def __init__(self, *args, **kw): + # self.SetWindowStyle(wx.LC_REPORT|wx.NO_BORDER|wx.LC_NO_HEADER|wx.LC_SINGLE_SEL) + + self.guiUtility = GUIUtility.getInstance() + self.utility = self.guiUtility.utility + self.backgroundColor = wx.Colour(102,102,102) + #self.ForegroundColour = wx.Colour(0,0,0) + self.isEmpty = True # used for DLFilesList.onListDClick + self.updateFunc = None + pre = wx.PreListCtrl() + # the Create step is done by XRC. 
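# A GUI-free sketch of how the seeding lists above split and order peers:
# sort by upload rate (descending), skip peers we are not uploading to, and
# route g2g peers to one list and plain BitTorrent peers to the other.
# Dict keys follow the peer records used above; function name is illustrative.
def split_seeding_peers(peer_list):
    peers = sorted(peer_list, key=lambda p: p['uprate'], reverse=True)
    t4t, g2g = [], []
    for p in peers:
        if p['uprate'] == 0 and p['utotal'] == 0:
            continue  # idle peer, not shown in either list
        (g2g if p['g2g'] else t4t).append(p)
    return t4t, g2g

peers = [{'ip': '10.0.0.1', 'uprate': 5, 'utotal': 100, 'g2g': False},
         {'ip': '10.0.0.2', 'uprate': 9, 'utotal': 20,  'g2g': True},
         {'ip': '10.0.0.3', 'uprate': 0, 'utotal': 0,   'g2g': False}]
t4t, g2g = split_seeding_peers(peers)
assert [p['ip'] for p in t4t] == ['10.0.0.1']
assert [p['ip'] for p in g2g] == ['10.0.0.2']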
+ + self.PostCreate(pre) + self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate) + + def OnCreate(self, event): + self.Unbind(wx.EVT_WINDOW_CREATE) + wx.CallAfter(self._PostInit) + event.Skip() + return True + + def _PostInit(self): + # Do all init here + self.Bind(wx.EVT_SIZE, self.onListResize) + # Turn on labeltips in list control + + + def onListResize(self, event=None): + if event!=None: + event.Skip() + if not self.InReportView() or self.GetColumnCount()==0: + return + size = self.GetClientSize() + self.SetColumnWidth( 0, size.width - wx.SystemSettings.GetMetric(wx.SYS_VSCROLL_X)) #vertical scrollbar width + self.ScrollList(-100, 0) # Removes HSCROLLBAR + +class FilesList(tribler_List): + def __init__(self): + self.initReady = False + tribler_List.__init__(self) + + def _PostInit(self): + if not self.initReady: + try: + self.SetWindowStyle(wx.LC_REPORT|wx.NO_BORDER|wx.LC_SINGLE_SEL) + except: + print_exc() + self.InsertColumn(0, self.utility.lang.get('file')) + self.InsertColumn(1, self.utility.lang.get('size')) + self.Bind(wx.EVT_SIZE, self.onListResize) + self.initReady = True + + def setData(self, torrent): + # Get the file(s)data for this torrent + if not self.initReady: + self._PostInit() + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'tribler_List: setData of FilesTabPanel called' + try: + + if torrent.get('web2') or 'query_permids' in torrent: # web2 or remote query result + self.filelist = [] + self.DeleteAllItems() + self.onListResize(None) + return {} + + torrent_dir = self.utility.session.get_torrent_collecting_dir() + torrent_filename = os.path.join(torrent_dir, torrent['torrent_file_name']) + + if not os.path.exists(torrent_filename): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tribler_List: Torrent: %s does not exist" % torrent_filename + return {} + + metadata = self.utility.getMetainfo(torrent_filename) + if not metadata: + return {} + info = metadata.get('info') + if not info: + return {} + + #print metadata.get('comment', 'no comment') + + + filedata = info.get('files') + if not filedata: + filelist = [(dunno2unicode(info.get('name')),self.utility.size_format(info.get('length')))] + else: + filelist = [] + for f in filedata: + filelist.append((dunno2unicode('/'.join(f.get('path'))), self.utility.size_format(f.get('length')) )) + filelist.sort() + + + # Add the filelist to the fileListComponent + self.filelist = filelist + self.DeleteAllItems() + for f in filelist: + index = self.InsertStringItem(sys.maxint, f[0]) + self.SetStringItem(index, 1, f[1]) + self.onListResize(None) + + except: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'tribler_List: error getting list of files in torrent' + print_exc() + return {} + + def getNumFiles(self): + try: + return len(self.filelist) + except: + return 0 + + def onListResize(self, event): + size = self.GetClientSize() + if size[0] > 50 and size[1] > 50: + self.SetColumnWidth(1, wx.LIST_AUTOSIZE) + self.SetColumnWidth(0, self.GetClientSize()[0]-self.GetColumnWidth(1)-15) + self.ScrollList(-100, 0) # Removes HSCROLLBAR + if event: + event.Skip() + +class DLFilesList(tribler_List): + """ File List with downloadable items """ + def __init__(self): + self.infohash_List = None #list of infohashes for current items in the gui list + self.other_List = None #the other list that should received the downloaded item + tribler_List.__init__(self) + + def _PostInit(self): + tribler_List._PostInit(self) + self.Bind(wx.EVT_LEFT_DCLICK, 
self.onListDClick) + + def setInfoHashList(self, alist): + self.infohash_List = alist + + def setOtherList(self, olist): + """the other list that should received the downloaded item""" + self.other_List = olist + + def setFieldsUpdateFunction(self, func): + self.updateFunc = func + + def onListDClick(self, event): + if self.infohash_List: + item = self.GetFirstSelected() + if item != -1 and item < len(self.infohash_List): + infohash = self.infohash_List[item] + # jie.done: click to download. fix it by replacing by db + torrent_db = self.utility.session.open_dbhandler(NTFY_TORRENTS) + torrent = torrent_db.getTorrent(infohash) + torrent['infohash'] = infohash + ret = self.guiUtility.standardDetails.download(torrent) + if ret: + self.infohash_List.pop(item) + self.DeleteItem(item) + if self.other_List is not None: + # only used to move items to common item list in peer view + if self.other_List.isEmpty: + self.other_List.DeleteAllItems() + self.other_List.InsertStringItem(sys.maxint, torrent['name']) + self.other_List.isEmpty = False + event.Skip() + if self.updateFunc: + self.updateFunc(self.other_List, self) + +class T4TSeedingList(tribler_List): + def __init__(self): + self.initReady = False + tribler_List.__init__(self) + + def setData(self, dslist): + # Get the file(s)data for this torrent + if not self.initReady: + self._PostInit() + + try: + # refresh the lists + self.DeleteAllItems() + + for ds in dslist: + peer_list = ds.get_peerlist() + + if not peer_list: + # fault tolerance + peer_list = [] + else: + peer_list.sort(dspeer_uprate_cmp,reverse=True) + + for p in peer_list: + uprate, utotal = p['uprate'], p['utotal'] + if uprate == 0 and utotal == 0: + continue + if not p['g2g']: + index = self.InsertStringItem(sys.maxint, p['ip']) + self.SetStringItem(index, 1, self.utility.speed_format(uprate, 1, "KB")) + self.SetStringItem(index, 2, self.utility.size_format(utotal, 1, "MB")) + + self.onListResize(None) + + except: + print_exc() + return {} + + def _PostInit(self): + if not self.initReady: + try: + self.SetWindowStyle(wx.LC_REPORT|wx.NO_BORDER|wx.LC_SINGLE_SEL) + except: + print_exc() + self.InsertColumn(0, self.utility.lang.get('peer_ip')) + self.InsertColumn(1, self.utility.lang.get('curr_ul_rate')) + self.InsertColumn(2, self.utility.lang.get('ul_amount')) + + self.SetColumnWidth(0, 140) + self.SetColumnWidth(1, 75) + self.SetColumnWidth(2, 75) + + self.Bind(wx.EVT_SIZE, self.onListResize) + self.initReady = True + + def onListResize(self, event): + size = self.GetClientSize() + if size[0] > 50 and size[1] > 50: + self.ScrollList(-100, 0) # Removes HSCROLLBAR + if event: + event.Skip() + +class G2GSeedingList(tribler_List): + def __init__(self): + self.initReady = False + tribler_List.__init__(self) + + def setData(self, dslist): + # Get the file(s)data for this torrent + if not self.initReady: + self._PostInit() + + try: + peer_db = self.utility.session.open_dbhandler(NTFY_PEERS) + + # refresh the lists + self.DeleteAllItems() + + for ds in dslist: + peer_list = ds.get_peerlist() + + if not peer_list: + peer_list = [] + else: + peer_list.sort(dspeer_uprate_cmp,reverse=True) + + for p in peer_list: + uprate, utotal = p['uprate'], p['utotal'] + if uprate == 0 and utotal == 0: + continue + + if p['g2g']: + tribler_id = peer_db.getPeerID(p['id']) + + if not tribler_id: + tribler_id = p['ip'] + + index = self.InsertStringItem(sys.maxint, str(tribler_id)) + self.SetStringItem(index, 1, self.utility.speed_format(uprate, 1, "KB")) + self.SetStringItem(index, 2, 
self.utility.size_format(utotal, 1, "MB")) + + self.onListResize(None) + + except: + print_exc() + return {} + + def _PostInit(self): + if not self.initReady: + try: + self.SetWindowStyle(wx.LC_REPORT|wx.NO_BORDER|wx.LC_SINGLE_SEL) + except: + print_exc() + self.InsertColumn(0, self.utility.lang.get('tribler_name')) + self.InsertColumn(1, self.utility.lang.get('curr_ul_rate')) + self.InsertColumn(2, self.utility.lang.get('ul_amount')) + + self.SetColumnWidth(0, 140) + self.SetColumnWidth(1, 75) + self.SetColumnWidth(2, 75) + + self.Bind(wx.EVT_SIZE, self.onListResize) + self.initReady = True + + def onListResize(self, event): + size = self.GetClientSize() + if size[0] > 50 and size[1] > 50: + self.ScrollList(-100, 0) # Removes HSCROLLBAR + if event: + event.Skip() + +def dspeer_uprate_cmp(a,b): + """ Sort on upload rate """ + ar = a['uprate'] + br = b['uprate'] + if ar < br: + return -1 + elif ar == br: + return 0 + else: + return 1 + diff --git a/tribler-mod/Tribler/Main/vwxGUI/tribler_topButton.py b/tribler-mod/Tribler/Main/vwxGUI/tribler_topButton.py new file mode 100644 index 0000000..c01675c --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/tribler_topButton.py @@ -0,0 +1,907 @@ +from time import localtime, strftime +# Written by Jelle Roozenburg, Maarten ten Brinke +# see LICENSE.txt for license information +import wx, os, sys +from traceback import print_exc + +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility + +DEBUG = True + +class tribler_topButton(wx.Panel): + """ + Button that changes the image shown if you move your mouse over it. + It redraws the background of the parent Panel, if this is an imagepanel with + a variable self.bitmap. + """ + + __bitmapCache = {} + + def __init__(self, *args, **kw): +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," "," tribler_topButton in init" + self.initDone = False + self.enabled = True + if len(args) == 0: + self.backgroundColor = wx.WHITE + pre = wx.PrePanel() + # the Create step is done by XRC. 
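# A minimal sketch of the per-class bitmap cache the buttons in this module
# use: each image file is loaded once and then shared, keyed by its path.
# A button named "foo" looks for foo.png and foo_clicked.png in the skin
# image directory. load_bitmap stands in for wx.Bitmap and is illustrative.
import os

_bitmap_cache = {}

def button_bitmaps(imagedir, name, load_bitmap):
    paths = [os.path.join(imagedir, name + '.png'),
             os.path.join(imagedir, name + '_clicked.png')]
    bitmaps = []
    for path in paths:
        if path not in _bitmap_cache:
            _bitmap_cache[path] = load_bitmap(path) if os.path.isfile(path) else None
        bitmaps.append(_bitmap_cache[path])
    return bitmaps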
+ self.PostCreate(pre) + self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate) + else: + self.backgroundColor = ((230,230,230)) + wx.Panel.__init__(self, *args, **kw) + self._PostInit() + + def OnCreate(self, event): +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," "," tribler_topButton in OnCreate" + self.Unbind(wx.EVT_WINDOW_CREATE) + wx.CallAfter(self._PostInit) + event.Skip() + return True + + def _PostInit(self): +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," "," tribler_topButton in _PostInit" + # Do all init here + self.guiUtility = GUIUtility.getInstance() + self.utility = self.guiUtility.utility + self.Bind(wx.EVT_MOUSE_EVENTS, self.mouseAction) + self.Bind(wx.EVT_LEFT_UP, self.ClickedButton) + self.selected = False + self.tooltip = None + self.old_bitmaps = None #bitmaps that were initially loaded on the button with searchBitmaps function, and now have been changed to some provisory ones using switchTo + self.searchBitmaps() + self.createBackgroundImage() + + # on mac, the button doesn't get a size + #if self.bitmaps[0] and self.GetSize()==(0,0): + if self.bitmaps[0]: + self.SetSize(self.bitmaps[0].GetSize()) +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", self.Name +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'size' +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", self.Size + + + self.initDone = True + self.Refresh(True) + self.Update() + + + def searchBitmaps(self): + self.bitmaps = [None, None] + self.parentBitmap = None + self.mouseOver = False + + # get the image directory + self.imagedir = os.path.join(self.guiUtility.vwxGUI_path, 'images') + + # find a file with same name as this panel + self.bitmapPath = [os.path.join(self.imagedir, self.GetName()+'.png'), + os.path.join(self.imagedir, self.GetName()+'_clicked.png')] + + i = 0 + for img in self.bitmapPath: + if not os.path.isfile(img): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TopButton: Could not find image:",img + try: + if img in tribler_topButton.__bitmapCache: + self.bitmaps[i] = tribler_topButton.__bitmapCache[img] + else: + self.bitmaps[i] = wx.Bitmap(img, wx.BITMAP_TYPE_ANY) + tribler_topButton.__bitmapCache[img] = self.bitmaps[i] + except: + print_exc() + i+=1 + + def setBitmaps(self, normalBitmap, selectedBitmap=None): + # This function does not protect you as switch* do. + self.bitmaps=[normalBitmap,selectedBitmap] + self.Refresh() + + def switchTo(self, normalBitmap, selectedBitmap=None): + if self.old_bitmaps is not None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tribler_TopButton: First should switchBack..." + else: + #save the initial bitmaps + self.old_bitmaps = self.bitmaps + self.bitmaps=[normalBitmap,selectedBitmap] + #should Refresh? + self.Refresh() + + def switchBack(self): + if self.old_bitmaps!=None: + self.bitmaps = self.old_bitmaps + self.old_bitmaps=None + self.Refresh() + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TopButton: Nothing to switch back to..." 
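# switchTo()/switchBack() above implement a simple swap-and-restore: a new
# switch is refused while an earlier one is still pending, and switching back
# is a no-op when nothing was saved. A GUI-free sketch of that state machine
# (class and method names are illustrative):
class SwappableBitmaps(object):
    def __init__(self, bitmaps):
        self.bitmaps = bitmaps
        self.old_bitmaps = None

    def switch_to(self, bitmaps):
        if self.old_bitmaps is not None:
            return False  # must switch back before switching again
        self.old_bitmaps = self.bitmaps
        self.bitmaps = bitmaps
        return True

    def switch_back(self):
        if self.old_bitmaps is None:
            return False  # nothing to restore
        self.bitmaps, self.old_bitmaps = self.old_bitmaps, None
        return True

s = SwappableBitmaps(['normal.png', 'clicked.png'])
assert s.switch_to(['busy.png', None]) and not s.switch_to(['x.png', None])
assert s.switch_back() and s.bitmaps == ['normal.png', 'clicked.png']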
+ + + def createBackgroundImage(self): + if self.bitmaps[0]: + wx.EVT_PAINT(self, self.OnPaint) + self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnErase) + + def OnErase(self, event): + pass + #event.Skip() + + def setSelected(self, sel): + self.selected = sel + self.Refresh() + + def isSelected(self): + return self.selected + + def mouseAction(self, event): + event.Skip() + if event.Entering(): + #print 'enter' + self.mouseOver = True + self.Refresh() + elif event.Leaving(): + self.mouseOver = False + #print 'leave' + self.Refresh() + + + def ClickedButton(self, event): + + event.Skip() + if self.enabled: + self.guiUtility.buttonClicked(event) + + def getParentBitmap(self): + try: + parent = self.GetParent() + bitmap = parent.bitmap + #print bitmap + except: + return None + + if bitmap: + location = self.GetPosition() + #location[0] -= parent.GetPosition()[0] + #location[1] -= parent.GetPosition()[1] + #if DEBUG: + # print '(button %s) Mypos: %s, Parentpos: %s' % (self.GetName(), self.GetPosition(), parent.GetPosition()) + rect = [location[0], location[1], self.GetClientSize()[0], self.GetClientSize()[1]] + #if DEBUG: + # print '(button %s) Slicing rect(%d,%d) size(%s) from parent image size(%s)' % (self.GetName(), location[0], location[1], str(self.GetClientSize()), str(bitmap.GetSize())) + bitmap = self.getBitmapSlice(bitmap, rect) + return bitmap + else: + return None + + def joinImage(self, im1,im2,offsetx=0,offsety=0): + "Draw im2 on im1" + stopx = im2.GetWidth() + if stopx > (im1.GetWidth()-offsetx): + stopx = im1.GetWidth()-offsetx + stopy = im2.GetHeight() + if stopy > (im1.GetHeight()-offsety): + stopy = im1.GetHeight()-offsety + if stopx>0 and stopy>0: + for x in range(0,stopx): + for y in range(0,stopy): + rgb2 = (im2.GetRed(x,y),im2.GetGreen(x,y),im2.GetBlue(x,y)) + if rgb2 !=(255,0,255): + im1.SetRGB(x+offsetx,y+offsety,rgb2[0],rgb2[1],rgb2[2]) + return im1 + + def getBitmapSlice(self, bitmap, rect): + try: + #print rect + bitmapSize = bitmap.GetSize() + rect[0] %= bitmapSize[0] + rect[1] %= bitmapSize[1] + rects = [rect] + if rect[0]+rect[2] > bitmapSize[0]: + rect1 = (rect[0], rect[1], bitmapSize[0]-rect[0], rect[3]) + rect2 = (0, rect[1], rect[0]+rect[2] - bitmapSize[0], rect[3]) + rects = [rect1, rect2] + if rect[1]+ rect[3] > bitmapSize[1]: + rects2 = [] + for r in rects: + r1 = (r[0], r[1], r[2], bitmapSize[1] - r[3]) + r2 = (r[0], 0, r[2], r[1]+r[3] - bitmapSize[1]) + rects2.append(r1) + rects2.append(r2) + rects = rects2 + images = [] + if len(rects) > 1: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TopButton: (button %s) Result: %s" % (self.GetName(), rects) + image = wx.EmptyImage(rect[2], rect[3]) + for r in rects: + rect = wx.Rect(r[0], r[1], r[2], r[3]) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'TopButton: (button %s) Trying to get rect: %s from bitmap: %s' % (self.GetName(), rect, bitmap.GetSize()) + subBitmap = bitmap.GetSubBitmap(rect) + subImage = subBitmap.ConvertToImage() + if len(rects) == 2: + if r == rects[0]: + place = (0,0) + elif r == rects[1]: + place = (rects[0][2], 0) + elif len(rects) == 4: + if r == rects[0]: + place = (0,0) + elif r == rects[1]: + place = (0, rects[0][3]) + elif r == rects[2]: + place = (rects[0][2],0) + elif r == rects[3]: + place = (rects[0][2], rects[0][3]) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TopButton: (button %s) Place subbitmap: %s" % (self.GetName(), str(place)) + self.joinImage(image, subImage, place[0], 
place[1]) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'TopButton: (button %s) Result img size: %s' % (self.GetName(), str(image.GetSize())) + return image.ConvertToBitmap() + else: + return bitmap.GetSubBitmap(wx.Rect(rect[0], rect[1], rect[2], rect[3])) + except: + if DEBUG: + print_exc() + return None + + def setEnabled(self, e): + self.enabled = e + if not e: + self.SetToolTipString('') +# else: +# if self.tooltip: +# self.SetToolTipString(self.tooltip) + self.Refresh() + + def isEnabled(self): + return self.enabled + + def setBackground(self, wxColor): + self.backgroundColor = wxColor + self.Refresh() + + def OnPaint(self, evt): + dc = wx.BufferedPaintDC(self) + dc.SetBackground(wx.Brush(self.backgroundColor)) + dc.Clear() + + if self.parentBitmap: + dc.DrawBitmap(self.parentBitmap, 0,0, True) + else: + self.parentBitmap = self.getParentBitmap() + if self.parentBitmap: + dc.DrawBitmap(self.parentBitmap, 0,0, True) + + if not self.enabled: + return + + if self.bitmaps[0]: + dc.DrawBitmap(self.bitmaps[0], 0,0, True) + if (self.mouseOver or self.selected) and self.bitmaps[1]: + dc.DrawBitmap(self.bitmaps[1], 0,0, True) + +class settingsButton(tribler_topButton): + """ + Button with three states in the settings overview + """ + + __bitmapCache = {} + + + def _PostInit(self): +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," "," tribler_topButton in _PostInit" + # Do all init here + self.guiUtility = GUIUtility.getInstance() + self.utility = self.guiUtility.utility + self.Bind(wx.EVT_MOUSE_EVENTS, self.mouseAction) + self.Bind(wx.EVT_LEFT_UP, self.ClickedButton) + self.selected = 1 + self.tooltip = None + self.old_bitmaps = None #bitmaps that were initially loaded on the button with searchBitmaps function, and now have been changed to some provisory ones using switchTo + self.searchBitmaps() + self.createBackgroundImage() + + # on mac, the button doesn't get a size + #if self.bitmaps[0] and self.GetSize()==(0,0): + if self.bitmaps[0]: + self.SetSize(self.bitmaps[0].GetSize()) +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", self.Name +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'size' +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", self.Size + + + self.initDone = True + self.Refresh(True) + self.Update() + + + def searchBitmaps(self): + self.bitmaps = [None, None, None] + self.parentBitmap = None + self.mouseOver = False + + # get the image directory + self.imagedir = os.path.join(self.guiUtility.vwxGUI_path, 'images') + + # find a file with same name as this panel + self.bitmapPath = [os.path.join(self.imagedir, self.GetName()+'_state1.png'), + os.path.join(self.imagedir, self.GetName()+'_state2.png'), + os.path.join(self.imagedir, self.GetName()+'_state3.png')] + + i = 0 + for img in self.bitmapPath: + if not os.path.isfile(img): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TopButton: Could not find image:",img + try: + if img in settingsButton.__bitmapCache: + self.bitmaps[i] = settingsButton.__bitmapCache[img] + else: + self.bitmaps[i] = wx.Bitmap(img, wx.BITMAP_TYPE_ANY) + settingsButton.__bitmapCache[img] = self.bitmaps[i] + except: + print_exc() + i+=1 + + + def setSelected(self, sel): + self.selected = sel + self.Refresh() + + def getSelected(self): + return self.selected + + def mouseAction(self, event): + pass + + + def getParentBitmap(self): + try: + parent = self.GetParent() + bitmap = parent.bitmap + #print bitmap + except: + 
return None + + if bitmap: + location = self.GetPosition() + #location[0] -= parent.GetPosition()[0] + #location[1] -= parent.GetPosition()[1] + #if DEBUG: + # print '(button %s) Mypos: %s, Parentpos: %s' % (self.GetName(), self.GetPosition(), parent.GetPosition()) + rect = [location[0], location[1], self.GetClientSize()[0], self.GetClientSize()[1]] + #if DEBUG: + # print '(button %s) Slicing rect(%d,%d) size(%s) from parent image size(%s)' % (self.GetName(), location[0], location[1], str(self.GetClientSize()), str(bitmap.GetSize())) + bitmap = self.getBitmapSlice(bitmap, rect) + return bitmap + else: + return None + + def joinImage(self, im1,im2,offsetx=0,offsety=0): + "Draw im2 on im1" + stopx = im2.GetWidth() + if stopx > (im1.GetWidth()-offsetx): + stopx = im1.GetWidth()-offsetx + stopy = im2.GetHeight() + if stopy > (im1.GetHeight()-offsety): + stopy = im1.GetHeight()-offsety + if stopx>0 and stopy>0: + for x in range(0,stopx): + for y in range(0,stopy): + rgb2 = (im2.GetRed(x,y),im2.GetGreen(x,y),im2.GetBlue(x,y)) + if rgb2 !=(255,0,255): + im1.SetRGB(x+offsetx,y+offsety,rgb2[0],rgb2[1],rgb2[2]) + return im1 + + def getBitmapSlice(self, bitmap, rect): + try: + #print rect + bitmapSize = bitmap.GetSize() + rect[0] %= bitmapSize[0] + rect[1] %= bitmapSize[1] + rects = [rect] + if rect[0]+rect[2] > bitmapSize[0]: + rect1 = (rect[0], rect[1], bitmapSize[0]-rect[0], rect[3]) + rect2 = (0, rect[1], rect[0]+rect[2] - bitmapSize[0], rect[3]) + rects = [rect1, rect2] + if rect[1]+ rect[3] > bitmapSize[1]: + rects2 = [] + for r in rects: + r1 = (r[0], r[1], r[2], bitmapSize[1] - r[3]) + r2 = (r[0], 0, r[2], r[1]+r[3] - bitmapSize[1]) + rects2.append(r1) + rects2.append(r2) + rects = rects2 + images = [] + if len(rects) > 1: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TopButton: (button %s) Result: %s" % (self.GetName(), rects) + image = wx.EmptyImage(rect[2], rect[3]) + for r in rects: + rect = wx.Rect(r[0], r[1], r[2], r[3]) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'TopButton: (button %s) Trying to get rect: %s from bitmap: %s' % (self.GetName(), rect, bitmap.GetSize()) + subBitmap = bitmap.GetSubBitmap(rect) + subImage = subBitmap.ConvertToImage() + if len(rects) == 2: + if r == rects[0]: + place = (0,0) + elif r == rects[1]: + place = (rects[0][2], 0) + elif len(rects) == 4: + if r == rects[0]: + place = (0,0) + elif r == rects[1]: + place = (0, rects[0][3]) + elif r == rects[2]: + place = (rects[0][2],0) + elif r == rects[3]: + place = (rects[0][2], rects[0][3]) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TopButton: (button %s) Place subbitmap: %s" % (self.GetName(), str(place)) + self.joinImage(image, subImage, place[0], place[1]) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'TopButton: (button %s) Result img size: %s' % (self.GetName(), str(image.GetSize())) + return image.ConvertToBitmap() + else: + return bitmap.GetSubBitmap(wx.Rect(rect[0], rect[1], rect[2], rect[3])) + except: + if DEBUG: + print_exc() + return None + + + def OnPaint(self, evt): + dc = wx.BufferedPaintDC(self) + dc.SetBackground(wx.Brush(self.backgroundColor)) + dc.Clear() + + if self.parentBitmap: + dc.DrawBitmap(self.parentBitmap, 0,0, True) + else: + self.parentBitmap = self.getParentBitmap() + if self.parentBitmap: + dc.DrawBitmap(self.parentBitmap, 0,0, True) + + if not self.enabled: + return + + dc.DrawBitmap(self.bitmaps[self.selected], 0,0, True) + + +class 
TestButton(tribler_topButton): + + # Somehow can't inherit these + __bitmapCache = {} + + + def OnPaint(self, evt): + dc = wx.BufferedPaintDC(self) + dc.SetBackground(wx.Brush(self.backgroundColor)) + dc.Clear() + + if self.parentBitmap: + dc.DrawBitmap(self.parentBitmap, 0,0, True) + else: + self.parentBitmap = self.getParentBitmap() + if self.parentBitmap: + dc.DrawBitmap(self.parentBitmap, 0,0, True) + + + if self.bitmaps[1]: + dc.DrawBitmap(self.bitmaps[1], 0,0, True) + if self.enabled and self.bitmaps[0]: + dc.DrawBitmap(self.bitmaps[0], 0,0, True) + + + def toggleState(self): + if self.enabled == False: + self.enabled = True + else: + self.enabled = False + self.Refresh() + + + def setState(self,state): + self.enabled = state + self.Refresh() + +## def mouseAction(self, event): +## pass + + + + def ClickedButton(self, event): + + event.Skip() + if self.enabled: + self.enabled=False + self.Refresh() + self.guiUtility.buttonClicked(event) + + + +class SwitchButton(tribler_topButton): + + # Somehow can't inherit these + __bitmapCache = {} + + + def searchBitmaps(self): + self.toggled = False + self.allBitmaps = [None, None, None, None] + self.parentBitmap = None + self.mouseOver = False + + # get the image directory + self.imagedir = os.path.join(self.guiUtility.vwxGUI_path, 'images') + + + + # find a file with same name as this panel + self.bitmapPath = [os.path.join(self.imagedir, self.GetName()+'.png'), + os.path.join(self.imagedir, self.GetName()+'_clicked.png'), + os.path.join(self.imagedir, self.GetName()+'Enabled.png'), + os.path.join(self.imagedir, self.GetName()+'Enabled_clicked.png') + ] + + i = 0 + for img in self.bitmapPath: + if not os.path.isfile(img): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SwitchButton: Could not find image:",img + try: + if img in SwitchButton.__bitmapCache: + self.allBitmaps[i] = SwitchButton.__bitmapCache[img] + else: + self.allBitmaps[i] = wx.Bitmap(img, wx.BITMAP_TYPE_ANY) + SwitchButton.__bitmapCache[img] = self.allBitmaps[i] + except: + print_exc() + i+=1 + + if self.toggled: + self.bitmaps = self.allBitmaps[2:] + else: + self.bitmaps = self.allBitmaps[:2] + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'Switchbutton (%s) bitmaps: %s' % (self.Name, self.allBitmaps) + + def setToggled(self, b, tooltip = { "enabled": "", "disabled": ""}): ## b = None + ## if b is None: + ## b = not self.toggled + self.toggled = b + + if not self.initDone: + return + + if b: + self.bitmaps=self.allBitmaps[2:] + if self.enabled: + self.SetToolTipString(tooltip["enabled"]) + else: + self.bitmaps=self.allBitmaps[:2] + if self.enabled: + self.SetToolTipString(tooltip["disabled"]) + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'Bitmaps is now: %s' % self.bitmaps + #should Refresh? 
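# setToggled() above simply selects which half of the four-image list is
# active: [normal, clicked] when off, [enabled, enabled_clicked] when on,
# with the matching tooltip. A one-line sketch of that selection (the helper
# name is illustrative):
def toggled_bitmaps(all_bitmaps, toggled):
    # all_bitmaps = [normal, clicked, enabled, enabled_clicked]
    return all_bitmaps[2:] if toggled else all_bitmaps[:2]

assert toggled_bitmaps(['n', 'c', 'e', 'ec'], False) == ['n', 'c']
assert toggled_bitmaps(['n', 'c', 'e', 'ec'], True) == ['e', 'ec']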
+ self.Refresh() + + def isToggled(self): + return self.toggled + + + + def OnPaint(self, evt): # override + dc = wx.BufferedPaintDC(self) + dc.SetBackground(wx.Brush(self.backgroundColor)) + dc.Clear() + + if self.parentBitmap: + dc.DrawBitmap(self.parentBitmap, 0,0, True) + else: + self.parentBitmap = self.getParentBitmap() + if self.parentBitmap: + dc.DrawBitmap(self.parentBitmap, 0,0, True) + + if not self.enabled: + return + + + if self.bitmaps[0]: + dc.DrawBitmap(self.bitmaps[0], 0,0, True) + if self.mouseOver: + if self.GetName() == 'prevpage': + if self.GetParent().currentPage == 0: + return + elif self.bitmaps[1]: + dc.DrawBitmap(self.bitmaps[1], 0,0, True) + elif self.GetName() == 'nextpage': + if self.GetParent().currentPage == self.GetParent().totalPages-1: + return + elif self.bitmaps[1]: + dc.DrawBitmap(self.bitmaps[1], 0,0, True) + elif self.bitmaps[1]: + dc.DrawBitmap(self.bitmaps[1], 0,0, True) + + +class ClickButton(tribler_topButton): + + # Somehow can't inherit these + __bitmapCache = {} + + def __init__(self, *args, **kw): + self.initDone = False + self.enabled = True + self.blank = False + if len(args) == 0: + self.backgroundColor = wx.WHITE + pre = wx.PrePanel() + # the Create step is done by XRC. + self.PostCreate(pre) + self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate) + else: + self.backgroundColor = ((230,230,230)) + wx.Panel.__init__(self, *args, **kw) + self._PostInit() + + + + def searchBitmaps(self): + self.toggled = False + self.allBitmaps = [None, None, None] + self.parentBitmap = None + self.mouseOver = False + + # get the image directory + self.imagedir = os.path.join(self.guiUtility.vwxGUI_path, 'images') + + self.Bind(wx.EVT_LEFT_UP, self.ClickedButton) + + # find a file with same name as this panel + self.bitmapPath = [os.path.join(self.imagedir, self.GetName()+'.png'), + os.path.join(self.imagedir, self.GetName()+'Enabled.png'), + os.path.join(self.imagedir, self.GetName()+'Blank.png') + ] + + i = 0 + for img in self.bitmapPath: + if not os.path.isfile(img): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","ClickButton: Could not find image:",img + try: + if img in ClickButton.__bitmapCache: + self.allBitmaps[i] = ClickButton.__bitmapCache[img] + else: + self.allBitmaps[i] = wx.Bitmap(img, wx.BITMAP_TYPE_ANY) + ClickButton.__bitmapCache[img] = self.allBitmaps[i] + except: + print_exc() + i+=1 + self.bitmaps = self.allBitmaps + + + def setToggled(self, b, tooltip = { "enabled": "", "disabled": ""}): + self.toggled = b + + if not self.initDone: + return + self.Refresh() + + def isToggled(self): + return self.toggled + + def isBlank(self): + return self.blank + + def setBlank(self, b): + self.blank = b + self.Refresh() + + + def OnPaint(self, evt): + dc = wx.BufferedPaintDC(self) + dc.SetBackground(wx.Brush(self.backgroundColor)) + dc.Clear() + + if self.parentBitmap: + dc.DrawBitmap(self.parentBitmap, 0,0, True) + else: + self.parentBitmap = self.getParentBitmap() + if self.parentBitmap: + dc.DrawBitmap(self.parentBitmap, 0,0, True) + + if not self.enabled: + return + + if self.isBlank() and self.bitmaps[2]: + dc.DrawBitmap(self.bitmaps[2], 0,0, True) + elif self.isToggled() and self.bitmaps[1]: + dc.DrawBitmap(self.bitmaps[1], 0,0, True) + elif self.mouseOver and self.bitmaps[1]: + dc.DrawBitmap(self.bitmaps[1], 0,0, True) + else: + dc.DrawBitmap(self.bitmaps[0], 0,0, True) + + + +class SharingButton(tribler_topButton): # used under windows only + + # Somehow can't inherit these + __bitmapCache = {} + + def __init__(self, *args, **kw): 
+ self.initDone = False + self.enabled = True + self.state = 1 + if len(args) == 0: + self.backgroundColor = wx.WHITE + pre = wx.PrePanel() + # the Create step is done by XRC. + self.PostCreate(pre) + self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate) + else: + self.backgroundColor = ((230,230,230)) + wx.Panel.__init__(self, *args, **kw) + self._PostInit() + + + + def searchBitmaps(self): + self.toggled = False + self.allBitmaps = [None, None, None] + self.parentBitmap = None + self.mouseOver = False + + # get the image directory + self.imagedir = os.path.join(self.guiUtility.vwxGUI_path, 'images') + + self.Bind(wx.EVT_LEFT_UP, self.ClickedButton) + + # find a file with same name as this panel + self.bitmapPath = [os.path.join(self.imagedir, self.GetName()+'_poor.png'), + os.path.join(self.imagedir, self.GetName()+'_average.png'), + os.path.join(self.imagedir, self.GetName()+'_good.png') + ] + + i = 0 + for img in self.bitmapPath: + if not os.path.isfile(img): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SharingButton: Could not find image:",img + try: + if img in SharingButton.__bitmapCache: + self.allBitmaps[i] = SharingButton.__bitmapCache[img] + else: + self.allBitmaps[i] = wx.Bitmap(img, wx.BITMAP_TYPE_ANY) + SharingButton.__bitmapCache[img] = self.allBitmaps[i] + except: + print_exc() + i+=1 + self.bitmaps = self.allBitmaps + + + def setState(self, b): + self.state = b + + if not self.initDone: + return + self.Refresh() + + + def getState(self): + return self.state + + + + def OnPaint(self, evt): + dc = wx.BufferedPaintDC(self) + dc.SetBackground(wx.Brush(self.backgroundColor)) + dc.Clear() + + if self.parentBitmap: + dc.DrawBitmap(self.parentBitmap, 0,0, True) + else: + self.parentBitmap = self.getParentBitmap() + if self.parentBitmap: + dc.DrawBitmap(self.parentBitmap, 0,0, True) + + if not self.enabled: + return + + dc.DrawBitmap(self.bitmaps[self.state], 0,0, True) + + + + + + + + + + +class PlayerSwitchButton(tribler_topButton): + + # Somehow can't inherit these + __bitmapCache = {} + + def __init__(self, imagedir, filename): + self.initDone = False + self.enabled = True + self.backgroundColor = wx.Colour(wx.WHITE) + wx.Panel.__init__(self, *args, **kw) + self.selected = False + self.tooltip = None + self.old_bitmaps = None #bitmaps that were initially loaded on the button with searchBitmaps function, and now have been changed to some provisory ones using switchTo + self.searchBitmaps() + self.createBackgroundImage() + self.imagedir = path + self.filename = filename + + # on mac, the button doesn't get a size + #if self.bitmaps[0] and self.GetSize()==(0,0): + if self.bitmaps[0]: + self.SetSize(self.bitmaps[0].GetSize()) +# print self.Name +# print 'size' +# print self.Size + + + self.initDone = True + self.Refresh(True) + self.Update() + + def searchBitmaps(self): + self.toggled = False + self.allBitmaps = [None, None, None, None] + self.parentBitmap = None + self.mouseOver = False + + # get the image directory + self.imagedir = os.path.join(self.guiUtility.vwxGUI_path, 'images') + + # find a file with same name as this panel + self.bitmapPath = [os.path.join(self.imagedir, self.filename+'.png'), + os.path.join(self.imagedir, self.filename+'_clicked.png'), + os.path.join(self.imagedir, self.filename+'Enabled.png'), + os.path.join(self.imagedir, self.filename+'Enabled_clicked.png') + ] + + i = 0 + for img in self.bitmapPath: + try: + if img in PlayerSwitchButton.__bitmapCache: + self.allBitmaps[i] = PlayerSwitchButton.__bitmapCache[img] + else: + 
self.allBitmaps[i] = wx.Bitmap(img, wx.BITMAP_TYPE_ANY) + PlayerSwitchButton.__bitmapCache[img] = self.allBitmaps[i] + i+=1 + except: + print_exc() + + if self.toggled: + self.bitmaps = self.allBitmaps[2:] + else: + self.bitmaps = self.allBitmaps[:2] + + def setToggled(self, b, tooltip = { "enabled": "", "disabled": ""}): + self.toggled = b + + if not self.initDone: + return + + if b: + self.bitmaps=self.allBitmaps[2:] + if self.enabled: + self.SetToolTipString(tooltip["enabled"]) + else: + self.bitmaps=self.allBitmaps[:2] + if self.enabled: + self.SetToolTipString(tooltip["disabled"]) + + #print 'Bitmaps is now: %s' % self.bitmaps + #should Refresh? + self.Refresh() + + def isToggled(self): + return self.toggled + diff --git a/tribler-mod/Tribler/Main/vwxGUI/tribler_topButton.py.bak b/tribler-mod/Tribler/Main/vwxGUI/tribler_topButton.py.bak new file mode 100644 index 0000000..abe3b7f --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/tribler_topButton.py.bak @@ -0,0 +1,906 @@ +# Written by Jelle Roozenburg, Maarten ten Brinke +# see LICENSE.txt for license information +import wx, os, sys +from traceback import print_exc + +from Tribler.Main.vwxGUI.GuiUtility import GUIUtility + +DEBUG = True + +class tribler_topButton(wx.Panel): + """ + Button that changes the image shown if you move your mouse over it. + It redraws the background of the parent Panel, if this is an imagepanel with + a variable self.bitmap. + """ + + __bitmapCache = {} + + def __init__(self, *args, **kw): +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," "," tribler_topButton in init" + self.initDone = False + self.enabled = True + if len(args) == 0: + self.backgroundColor = wx.WHITE + pre = wx.PrePanel() + # the Create step is done by XRC. + self.PostCreate(pre) + self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate) + else: + self.backgroundColor = ((230,230,230)) + wx.Panel.__init__(self, *args, **kw) + self._PostInit() + + def OnCreate(self, event): +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," "," tribler_topButton in OnCreate" + self.Unbind(wx.EVT_WINDOW_CREATE) + wx.CallAfter(self._PostInit) + event.Skip() + return True + + def _PostInit(self): +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," "," tribler_topButton in _PostInit" + # Do all init here + self.guiUtility = GUIUtility.getInstance() + self.utility = self.guiUtility.utility + self.Bind(wx.EVT_MOUSE_EVENTS, self.mouseAction) + self.Bind(wx.EVT_LEFT_UP, self.ClickedButton) + self.selected = False + self.tooltip = None + self.old_bitmaps = None #bitmaps that were initially loaded on the button with searchBitmaps function, and now have been changed to some provisory ones using switchTo + self.searchBitmaps() + self.createBackgroundImage() + + # on mac, the button doesn't get a size + #if self.bitmaps[0] and self.GetSize()==(0,0): + if self.bitmaps[0]: + self.SetSize(self.bitmaps[0].GetSize()) +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", self.Name +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'size' +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", self.Size + + + self.initDone = True + self.Refresh(True) + self.Update() + + + def searchBitmaps(self): + self.bitmaps = [None, None] + self.parentBitmap = None + self.mouseOver = False + + # get the image directory + self.imagedir = os.path.join(self.guiUtility.vwxGUI_path, 'images') + + # find a file with same name as this panel + self.bitmapPath = [os.path.join(self.imagedir, 
self.GetName()+'.png'), + os.path.join(self.imagedir, self.GetName()+'_clicked.png')] + + i = 0 + for img in self.bitmapPath: + if not os.path.isfile(img): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TopButton: Could not find image:",img + try: + if img in tribler_topButton.__bitmapCache: + self.bitmaps[i] = tribler_topButton.__bitmapCache[img] + else: + self.bitmaps[i] = wx.Bitmap(img, wx.BITMAP_TYPE_ANY) + tribler_topButton.__bitmapCache[img] = self.bitmaps[i] + except: + print_exc() + i+=1 + + def setBitmaps(self, normalBitmap, selectedBitmap=None): + # This function does not protect you as switch* do. + self.bitmaps=[normalBitmap,selectedBitmap] + self.Refresh() + + def switchTo(self, normalBitmap, selectedBitmap=None): + if self.old_bitmaps is not None: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","tribler_TopButton: First should switchBack..." + else: + #save the initial bitmaps + self.old_bitmaps = self.bitmaps + self.bitmaps=[normalBitmap,selectedBitmap] + #should Refresh? + self.Refresh() + + def switchBack(self): + if self.old_bitmaps!=None: + self.bitmaps = self.old_bitmaps + self.old_bitmaps=None + self.Refresh() + else: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TopButton: Nothing to switch back to..." + + + def createBackgroundImage(self): + if self.bitmaps[0]: + wx.EVT_PAINT(self, self.OnPaint) + self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnErase) + + def OnErase(self, event): + pass + #event.Skip() + + def setSelected(self, sel): + self.selected = sel + self.Refresh() + + def isSelected(self): + return self.selected + + def mouseAction(self, event): + event.Skip() + if event.Entering(): + #print 'enter' + self.mouseOver = True + self.Refresh() + elif event.Leaving(): + self.mouseOver = False + #print 'leave' + self.Refresh() + + + def ClickedButton(self, event): + + event.Skip() + if self.enabled: + self.guiUtility.buttonClicked(event) + + def getParentBitmap(self): + try: + parent = self.GetParent() + bitmap = parent.bitmap + #print bitmap + except: + return None + + if bitmap: + location = self.GetPosition() + #location[0] -= parent.GetPosition()[0] + #location[1] -= parent.GetPosition()[1] + #if DEBUG: + # print '(button %s) Mypos: %s, Parentpos: %s' % (self.GetName(), self.GetPosition(), parent.GetPosition()) + rect = [location[0], location[1], self.GetClientSize()[0], self.GetClientSize()[1]] + #if DEBUG: + # print '(button %s) Slicing rect(%d,%d) size(%s) from parent image size(%s)' % (self.GetName(), location[0], location[1], str(self.GetClientSize()), str(bitmap.GetSize())) + bitmap = self.getBitmapSlice(bitmap, rect) + return bitmap + else: + return None + + def joinImage(self, im1,im2,offsetx=0,offsety=0): + "Draw im2 on im1" + stopx = im2.GetWidth() + if stopx > (im1.GetWidth()-offsetx): + stopx = im1.GetWidth()-offsetx + stopy = im2.GetHeight() + if stopy > (im1.GetHeight()-offsety): + stopy = im1.GetHeight()-offsety + if stopx>0 and stopy>0: + for x in range(0,stopx): + for y in range(0,stopy): + rgb2 = (im2.GetRed(x,y),im2.GetGreen(x,y),im2.GetBlue(x,y)) + if rgb2 !=(255,0,255): + im1.SetRGB(x+offsetx,y+offsety,rgb2[0],rgb2[1],rgb2[2]) + return im1 + + def getBitmapSlice(self, bitmap, rect): + try: + #print rect + bitmapSize = bitmap.GetSize() + rect[0] %= bitmapSize[0] + rect[1] %= bitmapSize[1] + rects = [rect] + if rect[0]+rect[2] > bitmapSize[0]: + rect1 = (rect[0], rect[1], bitmapSize[0]-rect[0], rect[3]) + rect2 = (0, rect[1], rect[0]+rect[2] - 
bitmapSize[0], rect[3]) + rects = [rect1, rect2] + if rect[1]+ rect[3] > bitmapSize[1]: + rects2 = [] + for r in rects: + r1 = (r[0], r[1], r[2], bitmapSize[1] - r[3]) + r2 = (r[0], 0, r[2], r[1]+r[3] - bitmapSize[1]) + rects2.append(r1) + rects2.append(r2) + rects = rects2 + images = [] + if len(rects) > 1: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TopButton: (button %s) Result: %s" % (self.GetName(), rects) + image = wx.EmptyImage(rect[2], rect[3]) + for r in rects: + rect = wx.Rect(r[0], r[1], r[2], r[3]) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'TopButton: (button %s) Trying to get rect: %s from bitmap: %s' % (self.GetName(), rect, bitmap.GetSize()) + subBitmap = bitmap.GetSubBitmap(rect) + subImage = subBitmap.ConvertToImage() + if len(rects) == 2: + if r == rects[0]: + place = (0,0) + elif r == rects[1]: + place = (rects[0][2], 0) + elif len(rects) == 4: + if r == rects[0]: + place = (0,0) + elif r == rects[1]: + place = (0, rects[0][3]) + elif r == rects[2]: + place = (rects[0][2],0) + elif r == rects[3]: + place = (rects[0][2], rects[0][3]) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TopButton: (button %s) Place subbitmap: %s" % (self.GetName(), str(place)) + self.joinImage(image, subImage, place[0], place[1]) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'TopButton: (button %s) Result img size: %s' % (self.GetName(), str(image.GetSize())) + return image.ConvertToBitmap() + else: + return bitmap.GetSubBitmap(wx.Rect(rect[0], rect[1], rect[2], rect[3])) + except: + if DEBUG: + print_exc() + return None + + def setEnabled(self, e): + self.enabled = e + if not e: + self.SetToolTipString('') +# else: +# if self.tooltip: +# self.SetToolTipString(self.tooltip) + self.Refresh() + + def isEnabled(self): + return self.enabled + + def setBackground(self, wxColor): + self.backgroundColor = wxColor + self.Refresh() + + def OnPaint(self, evt): + dc = wx.BufferedPaintDC(self) + dc.SetBackground(wx.Brush(self.backgroundColor)) + dc.Clear() + + if self.parentBitmap: + dc.DrawBitmap(self.parentBitmap, 0,0, True) + else: + self.parentBitmap = self.getParentBitmap() + if self.parentBitmap: + dc.DrawBitmap(self.parentBitmap, 0,0, True) + + if not self.enabled: + return + + if self.bitmaps[0]: + dc.DrawBitmap(self.bitmaps[0], 0,0, True) + if (self.mouseOver or self.selected) and self.bitmaps[1]: + dc.DrawBitmap(self.bitmaps[1], 0,0, True) + +class settingsButton(tribler_topButton): + """ + Button with three states in the settings overview + """ + + __bitmapCache = {} + + + def _PostInit(self): +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," "," tribler_topButton in _PostInit" + # Do all init here + self.guiUtility = GUIUtility.getInstance() + self.utility = self.guiUtility.utility + self.Bind(wx.EVT_MOUSE_EVENTS, self.mouseAction) + self.Bind(wx.EVT_LEFT_UP, self.ClickedButton) + self.selected = 1 + self.tooltip = None + self.old_bitmaps = None #bitmaps that were initially loaded on the button with searchBitmaps function, and now have been changed to some provisory ones using switchTo + self.searchBitmaps() + self.createBackgroundImage() + + # on mac, the button doesn't get a size + #if self.bitmaps[0] and self.GetSize()==(0,0): + if self.bitmaps[0]: + self.SetSize(self.bitmaps[0].GetSize()) +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", self.Name +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", 
localtime())," ", 'size' +# print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", self.Size + + + self.initDone = True + self.Refresh(True) + self.Update() + + + def searchBitmaps(self): + self.bitmaps = [None, None, None] + self.parentBitmap = None + self.mouseOver = False + + # get the image directory + self.imagedir = os.path.join(self.guiUtility.vwxGUI_path, 'images') + + # find a file with same name as this panel + self.bitmapPath = [os.path.join(self.imagedir, self.GetName()+'_state1.png'), + os.path.join(self.imagedir, self.GetName()+'_state2.png'), + os.path.join(self.imagedir, self.GetName()+'_state3.png')] + + i = 0 + for img in self.bitmapPath: + if not os.path.isfile(img): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TopButton: Could not find image:",img + try: + if img in settingsButton.__bitmapCache: + self.bitmaps[i] = settingsButton.__bitmapCache[img] + else: + self.bitmaps[i] = wx.Bitmap(img, wx.BITMAP_TYPE_ANY) + settingsButton.__bitmapCache[img] = self.bitmaps[i] + except: + print_exc() + i+=1 + + + def setSelected(self, sel): + self.selected = sel + self.Refresh() + + def getSelected(self): + return self.selected + + def mouseAction(self, event): + pass + + + def getParentBitmap(self): + try: + parent = self.GetParent() + bitmap = parent.bitmap + #print bitmap + except: + return None + + if bitmap: + location = self.GetPosition() + #location[0] -= parent.GetPosition()[0] + #location[1] -= parent.GetPosition()[1] + #if DEBUG: + # print '(button %s) Mypos: %s, Parentpos: %s' % (self.GetName(), self.GetPosition(), parent.GetPosition()) + rect = [location[0], location[1], self.GetClientSize()[0], self.GetClientSize()[1]] + #if DEBUG: + # print '(button %s) Slicing rect(%d,%d) size(%s) from parent image size(%s)' % (self.GetName(), location[0], location[1], str(self.GetClientSize()), str(bitmap.GetSize())) + bitmap = self.getBitmapSlice(bitmap, rect) + return bitmap + else: + return None + + def joinImage(self, im1,im2,offsetx=0,offsety=0): + "Draw im2 on im1" + stopx = im2.GetWidth() + if stopx > (im1.GetWidth()-offsetx): + stopx = im1.GetWidth()-offsetx + stopy = im2.GetHeight() + if stopy > (im1.GetHeight()-offsety): + stopy = im1.GetHeight()-offsety + if stopx>0 and stopy>0: + for x in range(0,stopx): + for y in range(0,stopy): + rgb2 = (im2.GetRed(x,y),im2.GetGreen(x,y),im2.GetBlue(x,y)) + if rgb2 !=(255,0,255): + im1.SetRGB(x+offsetx,y+offsety,rgb2[0],rgb2[1],rgb2[2]) + return im1 + + def getBitmapSlice(self, bitmap, rect): + try: + #print rect + bitmapSize = bitmap.GetSize() + rect[0] %= bitmapSize[0] + rect[1] %= bitmapSize[1] + rects = [rect] + if rect[0]+rect[2] > bitmapSize[0]: + rect1 = (rect[0], rect[1], bitmapSize[0]-rect[0], rect[3]) + rect2 = (0, rect[1], rect[0]+rect[2] - bitmapSize[0], rect[3]) + rects = [rect1, rect2] + if rect[1]+ rect[3] > bitmapSize[1]: + rects2 = [] + for r in rects: + r1 = (r[0], r[1], r[2], bitmapSize[1] - r[3]) + r2 = (r[0], 0, r[2], r[1]+r[3] - bitmapSize[1]) + rects2.append(r1) + rects2.append(r2) + rects = rects2 + images = [] + if len(rects) > 1: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TopButton: (button %s) Result: %s" % (self.GetName(), rects) + image = wx.EmptyImage(rect[2], rect[3]) + for r in rects: + rect = wx.Rect(r[0], r[1], r[2], r[3]) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'TopButton: (button %s) Trying to get rect: %s from bitmap: %s' % (self.GetName(), rect, bitmap.GetSize()) + subBitmap = 
bitmap.GetSubBitmap(rect) + subImage = subBitmap.ConvertToImage() + if len(rects) == 2: + if r == rects[0]: + place = (0,0) + elif r == rects[1]: + place = (rects[0][2], 0) + elif len(rects) == 4: + if r == rects[0]: + place = (0,0) + elif r == rects[1]: + place = (0, rects[0][3]) + elif r == rects[2]: + place = (rects[0][2],0) + elif r == rects[3]: + place = (rects[0][2], rects[0][3]) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","TopButton: (button %s) Place subbitmap: %s" % (self.GetName(), str(place)) + self.joinImage(image, subImage, place[0], place[1]) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'TopButton: (button %s) Result img size: %s' % (self.GetName(), str(image.GetSize())) + return image.ConvertToBitmap() + else: + return bitmap.GetSubBitmap(wx.Rect(rect[0], rect[1], rect[2], rect[3])) + except: + if DEBUG: + print_exc() + return None + + + def OnPaint(self, evt): + dc = wx.BufferedPaintDC(self) + dc.SetBackground(wx.Brush(self.backgroundColor)) + dc.Clear() + + if self.parentBitmap: + dc.DrawBitmap(self.parentBitmap, 0,0, True) + else: + self.parentBitmap = self.getParentBitmap() + if self.parentBitmap: + dc.DrawBitmap(self.parentBitmap, 0,0, True) + + if not self.enabled: + return + + dc.DrawBitmap(self.bitmaps[self.selected], 0,0, True) + + +class TestButton(tribler_topButton): + + # Somehow can't inherit these + __bitmapCache = {} + + + def OnPaint(self, evt): + dc = wx.BufferedPaintDC(self) + dc.SetBackground(wx.Brush(self.backgroundColor)) + dc.Clear() + + if self.parentBitmap: + dc.DrawBitmap(self.parentBitmap, 0,0, True) + else: + self.parentBitmap = self.getParentBitmap() + if self.parentBitmap: + dc.DrawBitmap(self.parentBitmap, 0,0, True) + + + if self.bitmaps[1]: + dc.DrawBitmap(self.bitmaps[1], 0,0, True) + if self.enabled and self.bitmaps[0]: + dc.DrawBitmap(self.bitmaps[0], 0,0, True) + + + def toggleState(self): + if self.enabled == False: + self.enabled = True + else: + self.enabled = False + self.Refresh() + + + def setState(self,state): + self.enabled = state + self.Refresh() + +## def mouseAction(self, event): +## pass + + + + def ClickedButton(self, event): + + event.Skip() + if self.enabled: + self.enabled=False + self.Refresh() + self.guiUtility.buttonClicked(event) + + + +class SwitchButton(tribler_topButton): + + # Somehow can't inherit these + __bitmapCache = {} + + + def searchBitmaps(self): + self.toggled = False + self.allBitmaps = [None, None, None, None] + self.parentBitmap = None + self.mouseOver = False + + # get the image directory + self.imagedir = os.path.join(self.guiUtility.vwxGUI_path, 'images') + + + + # find a file with same name as this panel + self.bitmapPath = [os.path.join(self.imagedir, self.GetName()+'.png'), + os.path.join(self.imagedir, self.GetName()+'_clicked.png'), + os.path.join(self.imagedir, self.GetName()+'Enabled.png'), + os.path.join(self.imagedir, self.GetName()+'Enabled_clicked.png') + ] + + i = 0 + for img in self.bitmapPath: + if not os.path.isfile(img): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SwitchButton: Could not find image:",img + try: + if img in SwitchButton.__bitmapCache: + self.allBitmaps[i] = SwitchButton.__bitmapCache[img] + else: + self.allBitmaps[i] = wx.Bitmap(img, wx.BITMAP_TYPE_ANY) + SwitchButton.__bitmapCache[img] = self.allBitmaps[i] + except: + print_exc() + i+=1 + + if self.toggled: + self.bitmaps = self.allBitmaps[2:] + else: + self.bitmaps = self.allBitmaps[:2] + #print >>sys.stderr, 
strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'Switchbutton (%s) bitmaps: %s' % (self.Name, self.allBitmaps) + + def setToggled(self, b, tooltip = { "enabled": "", "disabled": ""}): ## b = None + ## if b is None: + ## b = not self.toggled + self.toggled = b + + if not self.initDone: + return + + if b: + self.bitmaps=self.allBitmaps[2:] + if self.enabled: + self.SetToolTipString(tooltip["enabled"]) + else: + self.bitmaps=self.allBitmaps[:2] + if self.enabled: + self.SetToolTipString(tooltip["disabled"]) + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'Bitmaps is now: %s' % self.bitmaps + #should Refresh? + self.Refresh() + + def isToggled(self): + return self.toggled + + + + def OnPaint(self, evt): # override + dc = wx.BufferedPaintDC(self) + dc.SetBackground(wx.Brush(self.backgroundColor)) + dc.Clear() + + if self.parentBitmap: + dc.DrawBitmap(self.parentBitmap, 0,0, True) + else: + self.parentBitmap = self.getParentBitmap() + if self.parentBitmap: + dc.DrawBitmap(self.parentBitmap, 0,0, True) + + if not self.enabled: + return + + + if self.bitmaps[0]: + dc.DrawBitmap(self.bitmaps[0], 0,0, True) + if self.mouseOver: + if self.GetName() == 'prevpage': + if self.GetParent().currentPage == 0: + return + elif self.bitmaps[1]: + dc.DrawBitmap(self.bitmaps[1], 0,0, True) + elif self.GetName() == 'nextpage': + if self.GetParent().currentPage == self.GetParent().totalPages-1: + return + elif self.bitmaps[1]: + dc.DrawBitmap(self.bitmaps[1], 0,0, True) + elif self.bitmaps[1]: + dc.DrawBitmap(self.bitmaps[1], 0,0, True) + + +class ClickButton(tribler_topButton): + + # Somehow can't inherit these + __bitmapCache = {} + + def __init__(self, *args, **kw): + self.initDone = False + self.enabled = True + self.blank = False + if len(args) == 0: + self.backgroundColor = wx.WHITE + pre = wx.PrePanel() + # the Create step is done by XRC. 
+ self.PostCreate(pre) + self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate) + else: + self.backgroundColor = ((230,230,230)) + wx.Panel.__init__(self, *args, **kw) + self._PostInit() + + + + def searchBitmaps(self): + self.toggled = False + self.allBitmaps = [None, None, None] + self.parentBitmap = None + self.mouseOver = False + + # get the image directory + self.imagedir = os.path.join(self.guiUtility.vwxGUI_path, 'images') + + self.Bind(wx.EVT_LEFT_UP, self.ClickedButton) + + # find a file with same name as this panel + self.bitmapPath = [os.path.join(self.imagedir, self.GetName()+'.png'), + os.path.join(self.imagedir, self.GetName()+'Enabled.png'), + os.path.join(self.imagedir, self.GetName()+'Blank.png') + ] + + i = 0 + for img in self.bitmapPath: + if not os.path.isfile(img): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","ClickButton: Could not find image:",img + try: + if img in ClickButton.__bitmapCache: + self.allBitmaps[i] = ClickButton.__bitmapCache[img] + else: + self.allBitmaps[i] = wx.Bitmap(img, wx.BITMAP_TYPE_ANY) + ClickButton.__bitmapCache[img] = self.allBitmaps[i] + except: + print_exc() + i+=1 + self.bitmaps = self.allBitmaps + + + def setToggled(self, b, tooltip = { "enabled": "", "disabled": ""}): + self.toggled = b + + if not self.initDone: + return + self.Refresh() + + def isToggled(self): + return self.toggled + + def isBlank(self): + return self.blank + + def setBlank(self, b): + self.blank = b + self.Refresh() + + + def OnPaint(self, evt): + dc = wx.BufferedPaintDC(self) + dc.SetBackground(wx.Brush(self.backgroundColor)) + dc.Clear() + + if self.parentBitmap: + dc.DrawBitmap(self.parentBitmap, 0,0, True) + else: + self.parentBitmap = self.getParentBitmap() + if self.parentBitmap: + dc.DrawBitmap(self.parentBitmap, 0,0, True) + + if not self.enabled: + return + + if self.isBlank() and self.bitmaps[2]: + dc.DrawBitmap(self.bitmaps[2], 0,0, True) + elif self.isToggled() and self.bitmaps[1]: + dc.DrawBitmap(self.bitmaps[1], 0,0, True) + elif self.mouseOver and self.bitmaps[1]: + dc.DrawBitmap(self.bitmaps[1], 0,0, True) + else: + dc.DrawBitmap(self.bitmaps[0], 0,0, True) + + + +class SharingButton(tribler_topButton): # used under windows only + + # Somehow can't inherit these + __bitmapCache = {} + + def __init__(self, *args, **kw): + self.initDone = False + self.enabled = True + self.state = 1 + if len(args) == 0: + self.backgroundColor = wx.WHITE + pre = wx.PrePanel() + # the Create step is done by XRC. 
+ self.PostCreate(pre) + self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate) + else: + self.backgroundColor = ((230,230,230)) + wx.Panel.__init__(self, *args, **kw) + self._PostInit() + + + + def searchBitmaps(self): + self.toggled = False + self.allBitmaps = [None, None, None] + self.parentBitmap = None + self.mouseOver = False + + # get the image directory + self.imagedir = os.path.join(self.guiUtility.vwxGUI_path, 'images') + + self.Bind(wx.EVT_LEFT_UP, self.ClickedButton) + + # find a file with same name as this panel + self.bitmapPath = [os.path.join(self.imagedir, self.GetName()+'_poor.png'), + os.path.join(self.imagedir, self.GetName()+'_average.png'), + os.path.join(self.imagedir, self.GetName()+'_good.png') + ] + + i = 0 + for img in self.bitmapPath: + if not os.path.isfile(img): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SharingButton: Could not find image:",img + try: + if img in SharingButton.__bitmapCache: + self.allBitmaps[i] = SharingButton.__bitmapCache[img] + else: + self.allBitmaps[i] = wx.Bitmap(img, wx.BITMAP_TYPE_ANY) + SharingButton.__bitmapCache[img] = self.allBitmaps[i] + except: + print_exc() + i+=1 + self.bitmaps = self.allBitmaps + + + def setState(self, b): + self.state = b + + if not self.initDone: + return + self.Refresh() + + + def getState(self): + return self.state + + + + def OnPaint(self, evt): + dc = wx.BufferedPaintDC(self) + dc.SetBackground(wx.Brush(self.backgroundColor)) + dc.Clear() + + if self.parentBitmap: + dc.DrawBitmap(self.parentBitmap, 0,0, True) + else: + self.parentBitmap = self.getParentBitmap() + if self.parentBitmap: + dc.DrawBitmap(self.parentBitmap, 0,0, True) + + if not self.enabled: + return + + dc.DrawBitmap(self.bitmaps[self.state], 0,0, True) + + + + + + + + + + +class PlayerSwitchButton(tribler_topButton): + + # Somehow can't inherit these + __bitmapCache = {} + + def __init__(self, imagedir, filename): + self.initDone = False + self.enabled = True + self.backgroundColor = wx.Colour(wx.WHITE) + wx.Panel.__init__(self, *args, **kw) + self.selected = False + self.tooltip = None + self.old_bitmaps = None #bitmaps that were initially loaded on the button with searchBitmaps function, and now have been changed to some provisory ones using switchTo + self.searchBitmaps() + self.createBackgroundImage() + self.imagedir = path + self.filename = filename + + # on mac, the button doesn't get a size + #if self.bitmaps[0] and self.GetSize()==(0,0): + if self.bitmaps[0]: + self.SetSize(self.bitmaps[0].GetSize()) +# print self.Name +# print 'size' +# print self.Size + + + self.initDone = True + self.Refresh(True) + self.Update() + + def searchBitmaps(self): + self.toggled = False + self.allBitmaps = [None, None, None, None] + self.parentBitmap = None + self.mouseOver = False + + # get the image directory + self.imagedir = os.path.join(self.guiUtility.vwxGUI_path, 'images') + + # find a file with same name as this panel + self.bitmapPath = [os.path.join(self.imagedir, self.filename+'.png'), + os.path.join(self.imagedir, self.filename+'_clicked.png'), + os.path.join(self.imagedir, self.filename+'Enabled.png'), + os.path.join(self.imagedir, self.filename+'Enabled_clicked.png') + ] + + i = 0 + for img in self.bitmapPath: + try: + if img in PlayerSwitchButton.__bitmapCache: + self.allBitmaps[i] = PlayerSwitchButton.__bitmapCache[img] + else: + self.allBitmaps[i] = wx.Bitmap(img, wx.BITMAP_TYPE_ANY) + PlayerSwitchButton.__bitmapCache[img] = self.allBitmaps[i] + i+=1 + except: + print_exc() + + if self.toggled: + self.bitmaps = 
self.allBitmaps[2:] + else: + self.bitmaps = self.allBitmaps[:2] + + def setToggled(self, b, tooltip = { "enabled": "", "disabled": ""}): + self.toggled = b + + if not self.initDone: + return + + if b: + self.bitmaps=self.allBitmaps[2:] + if self.enabled: + self.SetToolTipString(tooltip["enabled"]) + else: + self.bitmaps=self.allBitmaps[:2] + if self.enabled: + self.SetToolTipString(tooltip["disabled"]) + + #print 'Bitmaps is now: %s' % self.bitmaps + #should Refresh? + self.Refresh() + + def isToggled(self): + return self.toggled + diff --git a/tribler-mod/Tribler/Main/vwxGUI/uploadTab_details.xrc b/tribler-mod/Tribler/Main/vwxGUI/uploadTab_details.xrc new file mode 100644 index 0000000..14c9aba --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/uploadTab_details.xrc @@ -0,0 +1,66 @@ + + + + 0,0 + 300,348 + #ffffff + + wxVERTICAL + + + + wxVERTICAL + + + + + 8,8 + 129,18 + + + + + wxTOP|wxLEFT|wxFIXED_MINSIZE + 5 + + + + 3,64 + 292,155 + + + + wxVERTICAL + + + wxEXPAND|wxFIXED_MINSIZE + 3 + + + + + + 8,8 + 129,18 + + + + + wxTOP|wxLEFT|wxFIXED_MINSIZE + 5 + + + + 3,64 + 292,155 + + + + wxVERTICAL + + + + + + + \ No newline at end of file diff --git a/tribler-mod/Tribler/Main/vwxGUI/web2.py b/tribler-mod/Tribler/Main/vwxGUI/web2.py new file mode 100644 index 0000000..edd523b --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/web2.py @@ -0,0 +1,173 @@ +from time import localtime, strftime +# Written by Fabian van der Werf, Jelle Roozenburg, Maarten ten Brinke +# see LICENSE.txt for license information + +import sys +import copy, threading +from traceback import print_stack,print_exc + +from Tribler import Web2 +from Tribler.Web2.util.observer import Observer + +DEBUG = False + +class DataOnDemand: + + def __init__(self, sort=lambda x:x): + self.data = [] + self.datalock = threading.RLock() + self.updateFuns = [] + self.requested = 0 + self.sort = sort + self.filters = [] + + def isDod(self): + return True + + def register(self, updateFun): + if self.updateFuns.count(updateFun) == 0: + self.updateFuns.append(updateFun) + + def unregister(self, updateFun): + try: + self.updateFuns.remove(updateFun) + except: + print 'web2.unregister() unnecessary' + + def notify(self, item=None): + for fun in self.updateFuns: + fun(item) #???? + + def _addItem(self, item): + for filter in self.filters: + if not filter(item): + print 'item got filtered' + return False + + self.data.append(item) + #print "WEB2: datachanged: " + str(self.data) + return True + + def addItem(self, item): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","web2.addItem" + self.datalock.acquire() + if self._addItem(item): + self.data = self.sort(self.data) + #print 'web2.addItem: notify' + self.notify(item) + self.datalock.release() + + def addItems(self, items): + #print "web2.addItems" + self.datalock.acquire() +# for item in items: +# self._addItem(item) +# self.data = self.sort(self.data) +# self.notify() + # Quickfix: do not sort anymore. 
Otherwise it conflicts with incoming remotesearch + for item in items: + self.addItem(item) + self.datalock.release() + + def numRequested(self): + return self.requested + + def getData(self): + return copy.copy(self.data) + + def getDataSafe(self): + try: + self.datalock.acquire() + return copy.copy(self.data) + finally: + self.datalock.release() + + def setSort(self, sort): + self.datalock.acquire() + self.sort = sort + self.data = self.sort(self.data) + self.notify() + self.datalock.release() + + def addFilter(self, filter): + self.filters.append(filter) + + def remFilter(self, filter): + self.filters.remove(filter) + + def filterData(self): + pass + + def clear(self): + self.datalock.acquire() + self.data = [] + self.datalock.release() + + +class DataOnDemandWeb2(DataOnDemand, Observer): + + def __init__(self, query, type='video', guiutil=None, sort=lambda x:x): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","DataOnDemandWeb2: query is",query + DataOnDemand.__init__(self, sort) + Observer.__init__(self) + self.web2querylock = threading.RLock() + self.web2query = Web2.web2query(query, type, guiutil) + self.web2query.attach(self) + self.web2query.start() + self.end = False + + def request(self, num): + if self.end: + return + + self.web2querylock.acquire() + + if self.requested >= num: + self.web2querylock.release() # Arno: forgot to unlock? + return + + more = num - self.requested + self.requestMore(more) + + self.web2querylock.release() + + + def requestMore(self, num): + if self.end or not self.web2query: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","web2: dod: requestMore: return",self.end,"web2q",self.web2query + return + + self.web2querylock.acquire() + + self.web2query.getMore(num) + self.requested += num + + #print 'WEB2.0: requested:', num + + self.web2querylock.release() + + + def update(self, subject, m): + #print "WEB2.0: new item received" + if m == None: + print 'web2: item was none' + self.end = True + else: + self.addItem(m) + + def stop(self): + self.web2querylock.acquire() + if self.web2query: + self.web2query.detach(self) + self.web2query.quit() + self.web2query = None + self.end = True + self.web2querylock.release() + + def getNumRequested(self): + try: + self.web2querylock.acquire() + return self.requested + finally: + self.web2querylock.release() diff --git a/tribler-mod/Tribler/Main/vwxGUI/web2.py.bak b/tribler-mod/Tribler/Main/vwxGUI/web2.py.bak new file mode 100644 index 0000000..2f2e908 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/web2.py.bak @@ -0,0 +1,172 @@ +# Written by Fabian van der Werf, Jelle Roozenburg, Maarten ten Brinke +# see LICENSE.txt for license information + +import sys +import copy, threading +from traceback import print_stack,print_exc + +from Tribler import Web2 +from Tribler.Web2.util.observer import Observer + +DEBUG = False + +class DataOnDemand: + + def __init__(self, sort=lambda x:x): + self.data = [] + self.datalock = threading.RLock() + self.updateFuns = [] + self.requested = 0 + self.sort = sort + self.filters = [] + + def isDod(self): + return True + + def register(self, updateFun): + if self.updateFuns.count(updateFun) == 0: + self.updateFuns.append(updateFun) + + def unregister(self, updateFun): + try: + self.updateFuns.remove(updateFun) + except: + print 'web2.unregister() unnecessary' + + def notify(self, item=None): + for fun in self.updateFuns: + fun(item) #???? 
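DataOnDemand is essentially a thread-safe observer list: callers register() a callback, addItem() appends the item under self.datalock, and notify() invokes every registered callback while that lock is still held. A minimal standalone sketch of the same pattern follows; the Feed class and the on_item callback are illustrative names only, not part of Tribler.

import threading

class Feed(object):
    # Minimal stand-in for the register()/addItem()/notify() pattern above.
    def __init__(self):
        self.data = []
        self.lock = threading.RLock()
        self.callbacks = []

    def register(self, callback):
        if callback not in self.callbacks:
            self.callbacks.append(callback)

    def add_item(self, item):
        # Mutate and notify under the same lock, as DataOnDemand.addItem does,
        # so observers never see a half-updated list.
        with self.lock:
            self.data.append(item)
            for callback in self.callbacks:
                callback(item)

def on_item(item):
    print("new item: %s" % item)

feed = Feed()
feed.register(on_item)
for n in range(3):
    threading.Thread(target=feed.add_item, args=(n,)).start()

Because the callbacks run while the lock is held, anything registered this way should return quickly and avoid taking further locks.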
+ + def _addItem(self, item): + for filter in self.filters: + if not filter(item): + print 'item got filtered' + return False + + self.data.append(item) + #print "WEB2: datachanged: " + str(self.data) + return True + + def addItem(self, item): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","web2.addItem" + self.datalock.acquire() + if self._addItem(item): + self.data = self.sort(self.data) + #print 'web2.addItem: notify' + self.notify(item) + self.datalock.release() + + def addItems(self, items): + #print "web2.addItems" + self.datalock.acquire() +# for item in items: +# self._addItem(item) +# self.data = self.sort(self.data) +# self.notify() + # Quickfix: do not sort anymore. Otherwise it conflicts with incoming remotesearch + for item in items: + self.addItem(item) + self.datalock.release() + + def numRequested(self): + return self.requested + + def getData(self): + return copy.copy(self.data) + + def getDataSafe(self): + try: + self.datalock.acquire() + return copy.copy(self.data) + finally: + self.datalock.release() + + def setSort(self, sort): + self.datalock.acquire() + self.sort = sort + self.data = self.sort(self.data) + self.notify() + self.datalock.release() + + def addFilter(self, filter): + self.filters.append(filter) + + def remFilter(self, filter): + self.filters.remove(filter) + + def filterData(self): + pass + + def clear(self): + self.datalock.acquire() + self.data = [] + self.datalock.release() + + +class DataOnDemandWeb2(DataOnDemand, Observer): + + def __init__(self, query, type='video', guiutil=None, sort=lambda x:x): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","DataOnDemandWeb2: query is",query + DataOnDemand.__init__(self, sort) + Observer.__init__(self) + self.web2querylock = threading.RLock() + self.web2query = Web2.web2query(query, type, guiutil) + self.web2query.attach(self) + self.web2query.start() + self.end = False + + def request(self, num): + if self.end: + return + + self.web2querylock.acquire() + + if self.requested >= num: + self.web2querylock.release() # Arno: forgot to unlock? 
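+ # Releasing before this early return keeps web2querylock from leaking;
+ # a try/finally around the whole method (as getDataSafe and
+ # getNumRequested already do) would also cover an exception raised by
+ # requestMore() below.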
+ return + + more = num - self.requested + self.requestMore(more) + + self.web2querylock.release() + + + def requestMore(self, num): + if self.end or not self.web2query: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","web2: dod: requestMore: return",self.end,"web2q",self.web2query + return + + self.web2querylock.acquire() + + self.web2query.getMore(num) + self.requested += num + + #print 'WEB2.0: requested:', num + + self.web2querylock.release() + + + def update(self, subject, m): + #print "WEB2.0: new item received" + if m == None: + print 'web2: item was none' + self.end = True + else: + self.addItem(m) + + def stop(self): + self.web2querylock.acquire() + if self.web2query: + self.web2query.detach(self) + self.web2query.quit() + self.web2query = None + self.end = True + self.web2querylock.release() + + def getNumRequested(self): + try: + self.web2querylock.acquire() + return self.requested + finally: + self.web2querylock.release() diff --git a/tribler-mod/Tribler/Main/vwxGUI/zudeo_torrent_description.txt b/tribler-mod/Tribler/Main/vwxGUI/zudeo_torrent_description.txt new file mode 100644 index 0000000..bfa33a5 --- /dev/null +++ b/tribler-mod/Tribler/Main/vwxGUI/zudeo_torrent_description.txt @@ -0,0 +1,54 @@ +Vuze/Zudeo Torrent fields: + +comment:provided by GetAzureus.com +comment.utf-8:provided by GetAzureus.com +azureus_properties: + {'Content': {'Publisher': 'DEPECHE', + 'QOS Class': 2, + 'Description': 'progressive/trance mix set', + 'Progressive': 0, + 'Speed Bps': 139045, + 'Title': 'Erotic Dreams pt 1 Mixed by DJ Mode', + 'Creation Date': 1174262097027L, + 'Content Hash': '6LWTS25XFP5IA2BIWYMXQ6GNKO2PQJ2Q', + 'Revision Date': 1174262097027L, + 'Content Type' = 'Movie', + 'DRM' = -1, + 'Ad Enabled' = 0, + 'Quality' = 'SD', + 'cdn_properties': {'torrent_type':6, 'torrent_owner':'XXXX'} + 'Thumbnail': 
[binary JPEG thumbnail data omitted]'}
+ 'File MetaData' = {'files': {'0': {'stream_info': [
+ [250, 98, 12], [244, 99, 12], [237, 100, 13],
+ [231, 101, 13], [225, 102, 13], [219, 103, 13],
+ [212, 103, 13], [206, 104, 13], [200, 105, 13],
+ [194, 106, 13], [187, 107, 13], [181, 108, 13],
+ [175, 108, 14], [169, 109, 14], [162, 110, 14],
+ [156, 111, 14], [150, 136, 17], [144, 208, 25],
+ [137, 402, 47], [131, 728, 232], [125, 1064, 789],
+ [119, 1892, 1740], [112, 2945, 2837],
+ [106, 4060, 3959], [100, 5202, 5107],
+ [94, 6344, 6256], [87, 7495, 7413],
+ [81, 8649, 8573], [75, 9804, 9733],
+ [69, 10958, 10893], [62, 12112, 12053],
+ [56, 13266, 13213], [50, 14420, 14374],
+ [44, 15577, 15536], [37, 16736, 16701],
+ [31, 17894, 17865], [25, 19053, 19030],
+ [19, 20212, 20195], [12, 21371, 21361]],
+ 'header_size': 7572}}}
+ }
+encoding:UTF-8
+creation date:1174261903
+announce-list:[['http://tracker02.azureusplatform.com:10000/announce']]
+info:{
+ 'length': 172917863,
+ 'piece length': 131072,
+ 'pieces': [binary piece-hash data omitted]
\xdd{\x87.$0\x97\xb1a\xdd\x7f\xd1\xda\xe3\x03\xb9W%s\xab`r<\'\x88\x1ft0l\xbar~Dl%\xc0\xee.\x95\xbda\xe0SA\xdb\xdb\x7f(t+\x02\x81\x1e\xfcP4\t\xdb4\xa6\xa9\xec\xf838\x87\xf5 \xd1X\xb0\xbd\xa72\x93\xb7c\xeb\xe4I\xc5\xafL\xd6\x98\xdb\xe8\xbb\xd2F\x0f\xe3\x1b\xda\xf5\xe0\xc8G}q\xf2\xd7\xfe\xa7\x91l\x15\xce\x8e\xed\x0e\xce.\xf9\x8fIH\xe9\xc8\x8b\xcd^i\x07\xef`\xa4\xe2\x07\xc6F\x99s\xa2\x11\x93\xe0/\xae|M\xc9\xdf ^\xb4\xb68\x1c\xbc\x9ao\xf3\xc6\xe55\xb0\x8c\xea]G\x0cr\xacm\x90\x0b\x19*\x80\xa9\xe1\x9eB6\x8b\xf7\xa6%\xeb\xe8.\x83\xeas\xa9\xca\xbeHF\x98\x9a\xa7\xd5}\x12u:X\xc7\xe4\x1a|\x1e\xe7\x19\xa4N\x83\x85\xbc\xa3\xdf\x1d\xd5\xc8k\xbb\x8b\xe7Ik\xe3\x0c;\x16\x81\x1a\xbc\xc9S\x8a\x90\x96\xfeq\x89\x13\x00\xc4c\x9e\xa3\xaeT\xe3\xbd\xadd\x02\xf3b3\xaf\xd4!\xc9P\xee_M2in0en\x13\x16q\x7f\xa4\x85\x99\xed\xd4\xbd\xaeL\xf5\xcd\xc7\xce\xcb`\xf1\xbe\x1b\x81b\x84\xf4\xec\xca\x13\xa7\x1c\x99:\x95-\xb3\xe8b\xee\xf6o\x80\x85\x9dP\xebw\xa1u$\x8c\xb3,\xcc \xbfS\x02\xab\xef\x06\xa9\x0c\xcb\x83\xa5\xc9\xca2X\x87\xfbw\xebT\xb9\x8el\xd6\xb1\xefh\xfb\xc7@\x1f\xfe\x99sQ\xd7\xa7\xa7\x1f\x1a\xdeY\xbe\xdee\\zV\x95\x10\xa5\x1aV}\xf3;\xcc\xdc|\xb0\xc47\xe1\xd6{\x98\xa4\x07\xf5\xe7\xc9f\x04L\xae_\x97[\xe1~pXO\x8e\xaf\xd2:\x1eU\xbf-\xe6\'\xa0\xd6\xcaEQ\xc3\xe8\xe1\xe2\xb2\xcfKp*(!\x9b\xaa\x8aUkd\x027C\xdd\xd6g\xe1&TY\x9d\xb8\x99hc\xc0\xae\x8b/\xc1\xbbY\x83\x14\x0b\xf8\xb1;\xcce\xbc\x95\xde?(t.\xf3!\xd4ta\x9e]\xdfb\x168\x95\x8d1\xaa_6K\'\x80f%\x89\x1eZ\xf0\x9f\x7f}\'\xb0\x9fsn+\xd4\x8d\xe7\x0b\x8d\xb9\xccJ6\x99\x8d\x8f\xe8Y^\x85\xcc0\xba\x1d-L\x7f\x83\xb3Ft\xe8l\xbc\xe1\x9c\xae\xa6Sv\x82/\xa7\x12F\xba\xaf\xadz\xe0\xf9\x1eJ\xc3,fg\x8d\xbe\x1c\x00\x0eM\x04C_\xfa/\x0c\x1e\x94\x0c6\x06\x8eavn\xf5\x1b+\x11]v\xa2\x14\xaaT\xfb9\xfb\xd2So\x01]\xc5LM=E\xa6\xa9Y\xf4\xb5\xfd\xee\xc0\x12`\x12\x19\xea\xa5\xee,\xaf\x00|s6\x90Hu\xc9\xb8\xcc\x99\xd3\xd8A+7[\xbfu\xc6\x8a*\td\x18\xc5n\x08*9h\x0f\x14\x1e8\xda*>\xfe\x18\xe4\xde\x07\x98\xc8\\\x13\xdc.1\xe4\xd6u\x0f|f\xd9\xae_\x8e\xfeI\x03\x1e\xb1\xee\xc9\xdbf\xafF\xdfS<\x0c\x07\x84e\xab\x18\x9euI"\x10\x80\xe1\xdc\xe9\xc3\xb3\x080}^\xe38\x83\xa6\x87\x03W\xbaE4\xdbq\x19\x1c\xbd 
\xa9\x03\xb9\x1d\xf9\xc0AK\x83\xfds~\xcez\x86R\xfe\x94\xff\xb6@\x14\x99\x88:e\xaa\xe5@\xf5I.2\x1a26\xb0\xbd\x91\x86o*\xd0\x83\x05\xa2\xa5<<{\x8e\x18\xc1]\x96:Y\xfaT\xb8\x03m\x01\xe0\xa6\xd5I\x0c\x95ti\xb7h\xc8\xd9s\xf4\xbe\x07\x19~k@|[\xe2\x14\x1c\xf8\x14\'\xec\x99\x85\xa6\x05@\xe6\xcc\x9b\xccZ\xed7\xf3\xc5V\xf1\xe6t&&Id$ID\x90/O\x99\xfd\xa0*m\xca\x15c\x08\xd2|\xd7\xcf\xecC\xf4\rT\x94#4\x18\x92h\x94V\xc2\r\x95j\x93(\x1b\xef\x03L\xb7SX}\xc7#Y\x8f\xa5&j\xe8\xca\xd1\xde\x19H1\x96q\xc8\x96N\xcf\xabz\xadA`\xfeZ<\x11\x0e\x0b\xd5\xfa1\x89\xb2\x1d\xbe9H\xe5l-d\xec\xb3;\xa6\xe3\xa0(}R\x16\xae\xb7\x0f\x07\x9f{\xf7\xc8\xafJ8\xec,^\xd7`sW\x8bGO\x0b\xf5\xa6\xba\xa6\x84=`\x8e\xcf\xa2\xd4\x8a\x9e\xd0\x18\xe5\xc1\xd6\xcd\x17\x9a\xdb\xa0&V\xcb\xf1\xdbJYXR\xeem\x90AU\xa8\xe0\x82V\xca-\xa3\x07a2\x14\x90p\x91V\x92\xb7\xdap\xbe\xder\xa5e\xcd\xb0cy\x10Y0\xcc/\x8c%O>f\x13\xd7,\xc8\x8fY\xd9\x05\xf2\xec\xc22C\x80\x8c\xb7I\xdfL\xfcf*\xee\xef\xedi\xa7\xd1\x84.\xbe\x00\xc6\x1cO\xde\xb4\xc7e\x00\x84\xbe0\\O\xc9\x94\xf8\xd0\x81\xb5\xa0\x9d\x10\xcc\xd6\x1b\xd4\xff\xf8\x11nW\xb5\xf8\xc4\x97\x0f\xbd\x06\x0b{\x87\x7f\xe6\xc5A,T7\x94\xa7cp\x81LS\x8a\xdaL\xa7\x8c\x1bS\xc5\x1a\xc0\xcf\x8e\xe9E|\xd6\x18\xael\x06\xc9\r\xa9si\xbf1\x9as\x90\xaaM\x00\x03\x84r\x10\\\x04\xaa(\x8e(\x1f\x8e\x13"\xe3h5l*\xd78\x11\x85\x82ICl\xb5\x1c>7\x8f\xe4Y\xecIle\x85\x9c\xcdg\xee\xbc\xa1\x94\xcb\x81}\x8f\x03\xdb\x95\x94\xf8\xcbY\xd8#+kk\x97,:w\x1b\x04\xbe^\xa3\x03Qn\xa0\xcb\xcf\xc0\xf6\x8f\xb4\x9c\xc2|T\x86{haj(k6/\xb6\xb5\xab]\x82Y\xff\xab\xad\xbcB\xfa%\xd8\xeb\xa8\xc0\xec\x93\xe9LD\x8d\xc2\xd4c\x83t$&\x92!\xb4.\xf9\x0b\xb12\rh\x19H\xc6\xa6\xc2\x97\x14F\xee\xf2&\x93\xad+\x08\x899?\x15|\xfb}\x06Y=\xc5\xda\x93s\xe6\x91\xe6G\xfc\xa3\xb5\xd2\xcf\xc1bc45\xf7]W\x8c]7\xf4\xb3\x82\xf8x\xf3\xb3\xa46|\x82\xf9\x9e\xdd\x1fy\xd2\x13\x8a*\x0c<\x05\x1cF<\xdc\xce\xc9\xfe\xd3\xa5v\x1ac\xe3>\xa3c\x05\x86\x193\x81\x1b\x9e\xa9\xc6p&\x11\xbdT\xb8\x0e&\x87\x02\x0fE-wx@\xb6\x03\x15\xd2\x7f\xa0\xc0\x9c!\x98\xdb\xb0\xd3|\xcfH\xa5\xe1\xa9z\xe2\x05\xef\x9f\x9bC\xeb*(\xd0\xd5\x85\x9e\x8e\xd9h\xb4\xfftI\x9d?\x8f\xe8\xa1%\xf3\xfaT{\xed/\xe6v\xd0t{\xf4;\x8f\xf6\x16\x8b\x86\x05y\xc8\x14\xe1\x8b\x11\xb5\x89z\x1a\xffB\xb9\xc0\x88\xd8-t\x01\xb4~\x14\xec\xf6z\xc27\xba\x97\xd0\xb0\x01B~/\xac\x95\xdcT\xeb\x1f\xf3\x0b<.\xfc\x95\x1a;Q\x8a\x1d\x180$\x0f\x9f\xeaQ\x9b\xc7q\xf4r\x1c\x1bS\xbaY\xf8\x9a\xfa\x879dtx\xc1WG\xa7\xe3\xa9\xac:J\x1f\xb1=\x05Wv\x8d\xb9<\x8a\xfa3\xe7\x11\xfd\xfbyGlo\x19X \xda)\xe9\xac\xd7rNs Or\x0b\x87:\xe8!*3\x1c\'\xd3_\xd5sx0\x80\x9e~[\xc6\x15>\xb6\x8f\x0b]\x1eY\xad(\x0b\xe9#9\x1d\xcd\xd2\x9c\x04\x15\x89\xf7i\xb4\x13\x1f~)g\xa4.gh\x98\x7f\xb2\xc2\x00\xe1\x1a\xf5\xec\xe8\x89>C;\x9c\x11{W\x87\x97\x9a\xf1\x0e=x[\xfd\x07\xf5\x8cD"-\x16\xf4\x8c\xfe\xae@@\xf5\x9f\xaa\x91.\xce\xfa\xdb\xcc\xca\xa9\xf7\x1er\x9f\xbe\x90\xdb\xea\xcc\xb0\x94\x86\n\x90\xc9w\xa8\xdbp.X\xa7o\xfd\xaa\xd4\x15H\xff\xe7m\xea\xd3\x8bff\xab\xccLy"\x9d\xba\x92>\xe0\x86\xaaz\x0f/m\xdfM*!\xc1d[\x8cB\xfb)\r\xc9C\xc5\x92\x18&-`\xfe\xabvF\xd1/z*F\r*\\X\xcb\xe4\x12_\xd3L\xd5\xf4\x05\xe6\xd3\xac[\xe7h`\xbc\xc1P0\xb5\xbf%Lq\xec\xd0k\xc8\x1f\xe0]2\xa5\x9e\xb7!um\x8a\x14T\xd0\xcc\x15\x1e\xc3ybt$\xd2\x00\xb4A\x84=\xbadN\x9f\x17\x9d\xd6q8\xa6x\x89lr\xd4}u/i!\xefE\\\xd7C\xab\xf3\x9cM(\x8e\xf4\xba 
\xf5ZXe\xfe\xa1\xc8\xbf\xd4t\xb9\x18\x02|\xc3\xdb\xe4u\xee\x1a\x84{#\x1d\xf5\xaf\xcc\x1d,\xef[[2\x04\xc4\xb4C\xa9\xc0L\xab\xb7]\xc7\xce\xee\x0c:w\x9e\xc9\xa4\xed\x8a\x0fV_\xe6|O<\x00m}\x88\x1c\xb1\xde-\x0c\nP\xca9IgU\x80\xae\xba\xcc\xf5\x8a^5\xba\x8a:\xf0\xb0\xc61q\x01\xfaBA?\x86$\xe4\xbb\x10m\x851S\xbc*vN$\x10\x1b\xa7\xa9\x07J4u\x05\xf1F*\xe5\x0e\xe4\xa3\x18\xb3F:\x01\x90\xb8\xd8\xc8\xdb\xc2h*B\xd3\xa1{\xff\xc8Z\x9b\xc1\x04m*p\xcfz<\xaf\x87\xe2\'_\xb9/3\xa2\x9ad\x9bmZ\xe0\r\x9c\xe8\x8ao\xd5\\I3\xd9\xd7\xa8\xe7\xf3\xeaB\x98\nT\x98}\xeb\x03>\x96,\x17\xd3\x10\x86\x10\x7f\x9f8\x90\x0bN\xc9gho\x85\x11\x9b\xfa\x04\xde\xf3\x9d\xa9c\x14\xc8\x06c\t\x8f\x10\xf4\xdcG\xf2\x05\xda\xc1\x98B\xbf\x96y)\xd0:\xaf\xad\xa6\x9er\x93\x01\x97\xb0q#D\xc1\xb5\xa0/\xc2\xca\xaf\'\xd47\xd3]\xe7\x03\xe8\x99\x02iJ"\xfdPB\xdd\xbc\x87}\xe0\xb6\x90\xe3\xc3+F\xc7\xf0\x91\xf9S\xb7\xab\x9e\x16\x920\xf5-\xed\n{\xe1\xfe\xfcJ\xf6Z\xec!\xea\x00~\x8b\xdd#<\x9dX;\x0c\x18\xf0\x18\xf5\xac\xf1\x97\'\xabE$j\x06\xef\xdbV\x97v\x8e\xfah\xdb\x17+\x83\xd4p\xa5@\xd5\xf2@\xefKp\x15\x1e\xa1[\xccj\xc6\xb2\xbb\xce\xd1(0S\x80H\x86!\xf9\xa9\xda\x06\xb8o\xad\xd2\x96\xcd\xc7&\xd2q-\xd1\xa7\x81\x8a\x07\xe3YyP\xf2\x1dO\xedB\x90\x95/B\x0e\xea\x12:\xde\xbaY\xb4\\O\xb6P\x0c`\xfc\x8fd\x8a\x03\xf6(\\B\xe7Z\x9cC\xf67\xc4\x1c\xcd\x83zH/*\x0cGu\xe4\xee\x8a\xda\xaeG\x9d\xd9\xc3\x89A\xad^_O\xe87\xe4\xc7\x8f\xe7F\x7f\x0f\xb5X\xaa\x9d\xb8T\xb64\xbf\xd7\xf0\x85\x95\xbe\x96UP\x1b\x14\xd1\xbb\xc1\xeb\xc4\xd9\xf9TK\x90\x14\xe7\xe8\xe3\x11\x1c_\xf0\xf2\xa7z\x14\x85\xb5\xb8\xba\xb0\x9e\x18\xd6\xb4\xa8=\x1d\x01\xa5c\x8dM\x7f\xf2\x1e\x98\x86+G\x87\xc4\xb6\x8b\x17B\xc3\xcd\xee?H\xff\x1e\xf49\xc9\\\xdb\xb4\xb4t\x12=\xc8\x91}O\x8a\x03\xf1\xbf\xf1\x83\xda\x17\xe4<\x12\x86\xc0\xf2\xa5\x03\xb6#\xf2\xdabng^\x894\xe8\xea8nw\xd9\xd1k\xda;\xd4\x94U$\xf9iV!/=\x86\xb9\xec\x10\xd0lh\x17\xb1/\x1e\x18\x84\x9a\xa5\xa8;E>)9\xb6\xee\xe9\x9a\xad\xb6\xaaC3\xb4\xd1\xfcW\r\x19(\x94k\x03\x10dV\x15\x07\x86\x80{\x81\x90\x8c\x99j\xe5\x1c\xd7\xb5\x85N\x809\xa6=\xaa\xfd\x97Q_\x1c\xae\x8e\x1d\x1fDo\x89\xad6\xe7\x07\xd7\xd9\x18\xca\xb0\xc7\xb9\x0c\xa6tW,a\x16+\x1f\xdaH\x98<\xa1tt7\n\xec\xc8\xdf\x11\xb7x\xc5uxT\xf2\xc6\txr\xd1\xba#\xf1\xbc\x15,s]p\x86\xe5\xa9h\xdb\x8bi\x8e=\x91\xbfW7\xd6\xb7\xcd\xee\xaa"Ox\xe9\x8e\xfb\x07\x8a\xf0\xe3P\x1bX\xbdQ\x90\xe1G\xc8\x97u\xda\xbe\xa3\x14\xc8:\xf4\x86\xd2F\xe2!\xce\x02/%\xf5\xcd\x03u<\x8a\xd3\xbb\xcapW\trE\xeb\x0cK\xed>\xedo\x9aM\x90\x84\xda\xc43\xcey\xb9\xd5\x8b]\xc8\ni-\xd1\xa5a\xca\x1e\\\x8a\x07J\xa1\xb7\x12V\xd1\x0eA\xea\xa6\xf9\x994\x91\xf7\x80a,\xa6\xeb\xf6D\xb2SH\xeb\xceu\xbd^\xdc\xf5lo\xdb\xafa\x9e\x90\xbb\xc3\xd3\x10\xbc\xe2\xd8~]`\x830\xe7\xd2\xe1u\x8b\x02I3\xaf\x9c/:\xe0\xd2\xe18\x98\xd4p\x1c\x84O\x8a\x1b\xbe\x88\x0f\xf8\xed\xe0\x96\xcd=\x9d\x86\x0f=4\xf4\xe0S\xa4\x85EPz2\xac\xc93f\xb1<<\'\xe0R\xd6N\n#,\x07\xd1\xdb\xd0\xa8\xfd\x9a6\x9b\x86\xddN(w\xd3\x1f\x9d\xaf7r\x9d7$\r81l\xc56JIVF\x14fG\x1eoq\xc2\x12\x0e\x84.\xe7U\x1b5\xfb\xa6\xf7\xac\x8a\x9bViz\x97\xd6O\x1f\x88$\xe7\xf2\xae?\x95\xa7d\xb1!rr\xacr=m\xcb\x00Z3s\xf3X\xca\xbe\x85\x7f\xd8~\xce\xa3\xa9\xa9\x0e\x04\xe1\xc5n\x12\xb9g{\xeb\x015\x0b\xa3\x1f\x1fW\xf4\xbd\x00\x10XHCq\x03\xa2\x9f\x0cVa\x12\xc07\xb4Z\\T=A\xb6*\xe0x\xb8\x1dSf\x8d\x83\x85x \xe6{7c\x10\x0c\x8a\xa0-obK>pZ\xe3\x05\x7f<\x9e\xd0u\xce\xde\xdb\xed\x0c\x83|w\x89\xbd\xc6\x9b\x97\xa1\xc5_\x81ZH\xbe\xae\xf7\xf7\x01m\t@w\xea*\x04\x9d\x03<\xd9Rs\x87\xe94\x10c\xfa&\x88\xa5n\xdd\xc8\x117k\xc0)\xf6\xcf\x02\x0b~\x06\n\xab\xfd\xdbC 
\x01\xf9R\\]1\xe2\xa7k\xfd\xde\xfd\n\x1d\xa0\xf9\xb6\x01*m\xe8l|a*\xcf"\xc9\x81~\x0e\xa1+\'Pj\xed<\x8a(py\xd6<\xff\xe2\x03\x1b\xf5\xd9\x96a\xaa\xf3\x99\xa9\x92~M\xc9\xbe>\xbc6m\xb5\xd4\x04\xe5\x80\x0c\x1do3\x80\xbd!\x03\x0e\xb8IU\xa44\\\x03\xc7\xe5\xab$\xa8|\xc2\xa5\xcd\xe7\x9a\xef\xd1\x8d\x91\xfdd c:_~kE\xefxU\xd0\xc0\x95An\xef\xf4\x92v\xd1\xef>^\xed\xc3;q\xa9\xbb\xd3\x92n:?\x15u\xe6\xaaQ6\xdb\xc9\x9e\x8a%s\xbb\xdao>\xac\xce\x1b\xa2&j\xd74\x0eu\xaf\xcf\r<\xed\xa1\xa8D\x12p\xd6\xf04N9\x0e@\xa6\x84\t\xfam}L\xed8\x06\x07-h\xcb%\x08\xbb\x00\xf8\xba-?\xf6\x93$\xb5\xb0\x13\xfd\xcd\x06\xc2\xa2\xf4\xe1\xb5\xd4.g\x08q\x7f0\xb9\x11\xe5\xf5=8\xf4x~\xba\x03wZK\x05v\xed\x9c\xd2\t\xb3\x04\x89\x12vZ\x94q)\xdbq\x87\r.\xfd\xdb\xb8Sbd\x1ci\xdc\x0bw\xa9\xe3\x93Wh\xee\xde\x1c\x1d\xb6\xf8\x86:@\x93K\x16\xf5po\xb2!\xd2j\xd9Q\x12k\xaaY\xf4[\xdab\x88#\\_w]\xb2\xe9\x14m\x12\x15*\xac\xb2\xfd\xd8\xa8\xf5\x86\xdf&\x05vD\x87\xabA.\x90\xd6\xaa\x95\xd1\xc1\x9c\xd5\xfe\xd8\xc0\x03*\xbc\x08j\xc6\xb9\x90i\xab\xdcJ\x8b/\xbb\x00\xb6W\x8e\x05\x1dE\x94R\x82\xe3c_\x006\x00\xd9\xa0\xbc \xadA\xd5B\xd3\xe8\xfb\xe5U\xa3L\x1e\x90\xbd\xabw\xd8U\xddEc\xf8\xd8\x18m\xc4-\xfb\xf9a\xb5\xa3\x9f\x81\xbc\xbc\xcd\x91n\xe4\tg\xc1%\xe4\r\x07\\\x04\xa4\xcb\xbdowL\xa9\xedl\x88\xc3\x10\x16\xf1\n\xde|us\xd7\x19.6\x16/\x83O\xd5\xe1\xe4\x9ao\x90\x95\xe2\xe03\xed\x1bb5\x14\x19\r\xef\x86\\L\xb5\xc2\xdd\x15\xab\x06\x03\xcd\x01\xd5\x10>\x9f+}+up\xea\xaa\xb6B2\xf9Y\x17E[[\xcf*31\xf1\x038W\xcf\x0c\x1c\x96\xbc^\xdf;\x00\x9db\xdb4@[\xf7\x16\xc3\x0c\x93VM\x10\xee\x93\x06g$w\x0c\x87\x9ai\x03(\x8a\xd6\xa5jS\xe43\x12\x98\xe7W\xb8\xc6\xca\x89\xac\xc8x\xadi{N\xfd5E\xd2aD]\xdc\xc6G{:\x82c\xf7\xa5K\xb0\xb7\x1e}\xc6!\xe6\x1d\xe4\x83\x97\xd8h6p\xe4>\xefc"\xd2\xfcL@"F\xe1ZiZ\x9c\xe1\xff\xa9\x9f:\x84x\xf9D\x89\xb0Y\xd9\x9e\x8d\x1a\xdb\x9bV\xc6\xcc\xe3^GM:8\xee\x8bdA\xb6\xba\x91s\xadN\x84a\x8cCaL\x81\x8f\xaa\xdc\x8d\x8e|t\x87\x80\x1b\x02S\xa4\xd3<\xed\xaa#\xea\xb5F\xaf\x94\x99\xeb\x04\xdd\xddi\x06\xf7\xd1\xf0A\x82\x85\xa2<\xec\xab+tJ\xf0\xba\x9b\x9a\xbc\x14\x97!\xe2\xd7\xde\x86\x07\\2|<(\xc5\x1dc\xce\xb5g\x9b\x8e\xdd*\x90\x18\xf4\x8eY\xf98p\xd1k\xcaoy\x02\xfd\x95z\xe5\x97\xf2\xe7\xd4|\x149c\xea\xce\xce\x1ef\x8c`9\x99\xb3\xd8\xe53\xb2F\x06\x02}n0\xc6M;\xd9\xcf\xbf\x17\x9c\x10\x86\x9aW\xa1\x81%\xf8\xe3\xa3\xfe\xdf\x8b\xafj\xb6\x7f!=\xf17\x0e\xad\xd8\xfd\xe7\xf8L\xb1B\xf0cI\xd0\x12\x98\xfd\x8d\x01\x07\x00\xed\x94\xea%Y\xc0\x82\xfem\x8e{\xce\x13\xd5\x90aDsz\xb1\xe3zIj\xc1\xc4\xe5\x18\xa1\xb8\xfa\xa2\x92\x99\r\xf2&\x8a\x07\xe3\x1b\xe6\xb1z\xe6\x16}\xdf\x9d\xafT\x0b4\xed\x96\xb9\xf5.\x10EzI\xd8\x03\xc4\r\x17L\x01\xa6}\xa6\xd6\x7f\'\x99\xed\x07\x06\x04{cJM\xd3\xda6]\xf0\x87\xbeY4S\x93}o:\xc3\x9fg\x04\xb3sSW\xee+p\x1b\x80Ei\xc3\xdev\xeb\x94x)\xba\xdc\xbc3\xffi\x0bdH\xa1\xea\xfbw9\xa7\xa6[\xd4\xd3\x1a"\xb2\xb2E\x04\xe5\xa4\xff.\xe7\xbc\xc0\x86(\xbf&\xcd 
{HMgnw~\xea\xdb\xafTU\'y\x9f\xb7;{<=\xe4\xc1\xe6\xa8\xe9i\xc6&;\x89\\!\xcf\xc9\xbex\xe3w\xaa\xa1\xb5\x8aA\x06\x1aJ\x18l\xa2\xbe\r\xdaK\x7fK\xfa(\xb5\xa5=\x88\xad1\xf0[\x0e\xd2#!\xea\xa0\xcbBX\xacQ\x0f\xa3\x10\xba;\x93\xa8\xe2`O,\x0f\xcaCN]\x98{I\x8c\xb5Wnup\x03\xc9\x82\xa5\xd5\xdd\xe4[]\xcd\xdb\x0c\x90\x9c<|\x98\x93\x8d*7\x93\x92\x96\xc7n:R\xf1R\x88\xcf\x10"\x82\x9a\x04C?|\xcf?\x82\x82{\x13QfDi\xee\x8d_$4\x92\x1c\xceH\xea\xb7\xc7\xc6_}\xd5f\xdd\xffA\xdf\x04;\x1c\x9fS\xc2nO1\'xnq\xbb\xad\xb7\x0f\x82]\xf5\x80+\xafm_\xcc\xfd\xdaXSQ\xb9\xb8\x1c#\xc9WXI\xc9\x8a\xe9\xd8I\xf4\xcb\x94\r\xec\xa7C\x91\xd4\xc9\x1f,@W[\x88\x0e\xf0\x93\xef\xb2m\xf5\x86}\xa4B\x9c\xe8\xdc\x9cK\x85z\xc9\xbcf\xfc\xef\xa6`{\xc25y\xb5%\x01aB\xbc\xce\'\xd6\x02\xa6V\x8a\xc1lI\xb6\x99\x0bZ\xe7\xd7\xb4\xfe\t{\xf2\xd5\x8a\xe9\x86\\r\xcc[\xde\r\n7\x01\xb5\xfb\xfe\x11\xb3\xf9sX\x16\x94\x8f\x05hmt\xd47\xf8*WRV[^\xba^\xc1w\x15\xbd\xd8>\xfbJr\xc4\x04\x86\x83=+\x9c\t\x06\x14\xe4\xc2_\xe4\xa4\xcbO\xb0\xda-E\xd5\\\x97\x92\x85X\t\xf4\xde]\x16\xac\xd8\xd3\x0f(\x8a\x90\xb6+\x97\x96j\x82\n6-\xc3\x1e\xc8\xc9\xcf\x1e\xf5\x87\x12ps\x91:& 0\xd2\xf3\rJ\x8d\xf3\x16\x19\xc8(\x006f&tm\xec\xc1\xffn\xc0\xc8-\x88\x8em8\xd0\x12\x02\x93R\xc8(\xe4\x94.\xa3P\xe2\xf5\xdd\x16\xb7@\xb2\x85\x12B\r>\xf3\x8b[\x87\xc7#\xc2\x9f9\x19\xba\xb9\xfc(\x13yZ\xf6c\xdcJg\xd8o\x9f\xdd\x88@j\xd4\xb9\x1f\xefCQ\rd:\x05o\x81\xfc,5\x17/\x8eUF\xadfwi\x99\x9a\xe5M\xbdi\x8bp\xd76h\xb4\x8d\xe5:T\xe6\xb0\xdd\x8d \xdb\x0fP\x8e\xf6\x9a\xfb\xe5\xad\xea\xd6\x8b^\xcb\xdd\x87\x8bU^\xf4\xee\xf2\x15\x82\xa0#&\xee}]n\xb7pn\xa8+\x80)\x08\xeaV\xf0T]\xb9&<\x18\xb5\xa7 \xfe^\xe9-\xae\x7f-\x13c\x11\x7f\x08\x8d\xdb\',R\x92h\x96>\x04\x1a\x0b\x94\xecj8`]F/\xebI\xa1~\xc9\x98FH\xd4\xd3=\x17\x81[\x17\xce\xb5"b\xb8 \xdbO\xcb$>\xbfwZ\x12\xa2\xed$o;v\xa4\x06\x833\x89\xfaA\x19\xe4>\xe3\xcdx\x19\x03\xd4\xf8\xcc\xaa\x92\xfe\xa9\xbf\x86+\xc1\x00L\x1e]\x83v\x17\x1ed\xf8Q\x97\xe8:\x7f\n\xceZ\xf3p[1\xbb\xaa\xbfV\xaa.\xc7\x8e\x9fq\xeeL\x0e\xad\t\xfc\xd0dr\x9e\xcd\x82\xab\xfc\xe9\x8f\x14A&M\\\xab\xb9\xff\xf0\'\x952\x8a\xado\xd6\xe75\x03\xedd\xaf\xe6>h\xc6as\xa3C\x91\xb7\x18\xcd\x9d8\xde)\xe1.\x9b\xfb<\xe7}\xb2\xcd\xc5\x03\xf2\x0f\x0c\x1e\x99idY$X9\xe9\x91\xe1h\xd5\xc2*+\x03\x97\xa7\x9b\xaa\xa99\xfaJ\x81;e\xfe\x82\x16\xe2\xa5@\xc6\x94\xcd\xdfH^\xb3\x02\xa7\xb1\xfe\x1aW\xc0\x99+\xb2\xa4\xa4\x9cZ\x9b7\x0f\x14b\x9a\x83z\xdd\xcf*;\x11\xc4\xf4\xec\xa8m\xfdTF\xb5b\x9e]\xfc\xfb\xaa\x03f\xbe\x85*-\x19U>n\x8c\x17\x0eZ`\xcbn\xbe>\xb5\xfa$\\5\xf9D-\x96\x0cZ\xbc\x90A\xa8M\x1c\xfeO_\xc9@]Sz\x00\xb0I\xb5\xfd]\x15gC\xf7o!\xde~1N2\xf3\x01-u\xd2^/\x13\x9d7\xf6\x96\r\xe5\xc7\x8c\x123\x19\xe7\x1d\x88\xf1A8\xe2\xff\x9c\x85\x031g\xb0\x11)\x9fteK\r\xe1\xea\x05\x17A\xfd\xe7\xa3vP\xf5\x9d\xe5s]\xb7E`\xd7\xd3+HK\x88\'Ig\xbf\x7f\x96\xc5q2uy?S\x8epi\xa2(\x97t|D\xf9y\xd1j\xf0mK7nE\xca\xa6\x80\x87\x0f\x08\x9fLKJ~J\xa1B.\xa2>\xf6\x03\xb7\xc2\x99}/4\xd7\xabMo\x01V\xcc/Y\t\x1b\\\x19\xe0a\xd3\xf9V\x9c\xc1\x86\xf7B>T\x898\x9a\xfc\x18\xad\x04\xcb\x980\x92\xd5\xed\x80\x83\xd8\xc9\xa4Y^\xed0\xc7\x11\r\xc7\xfa3}e\x18\xbfb1^\xc1\x97\xcf\x80\x10a\xee\x96H\x18\xcbcV3,\xb7\x14%\xf2\xa1Z\x0e\xb1M\x92\x89C\xd7\x15b\xce?(Qy\xd4\x18\xd0\n\x9eu\xd1r\xe8\xa7\xfd\xc4+\x82\xc2-\xd5g\x95\xde\xbf\r\x88\t\x85\x08\xe6+TA\x85<\x05m\x02\x15\xc2\xa0\xe8\xf0\xed~\x8f\xd7q\xf5\xb5\x15\xb0D\xab\xae\x99\xf1&\x84\xd0\xf65\xf5\x15\xd5\xaa\xcf\xc7\xb0$%\x81\x16\x80\xb5\xf5_ 
\x14\xa0\x9b\x8dfa\x1b}\xd3qB\xf4+v\x89\xf1i\x81D\xf9\xcb[v\xf1\xe8L\x03\xde\xda]sc\xf6`e\xab\xb0\xeb\x87\xfc\xbd\xb2JU\xfd\xbe\x9e\xde\\\xe3\x06\x14\xbe$0iJq\x0f.\xfeY\x189L\x16\xc3\xc0\xbc\xc3\xb41e\xad\xa7\xd0i\xf7\xf2\x9e\xd9\\lM\xdb{V\x95.\x11P\x91\xa4|g\xe9\r\xd7\xb5>\xbc\xdcE\xb4~CG\xb7F\x06\x8ek\xcf\xa0\xca\x88nR>\xaf6#_($"\xd6\xcd\x8a\x0b\xe3\x02\x03\xfa\x1dL\xb7Jr\xdbB\xed\xe7\xc3N\x1b\x12\x0b_D\x1d\x16\xfd\xefK\xbeO\xcftf\xd6\x1f\x99r?\xf3g\x8c\xb8\xe10\xc0H\x98\xedF\xfcd\x129\x9d(U\x15\xb2\x87\xd2\x84R\xa3\xff\x08X\x1f\xa6pn\xe0\x00\')\x8f\xe3\x08\x8c\x8f\x1c\x86%G\x8f\x1c\x95*E\xfb\x1e]-tv\xe7\xe2\xae1GJ\x80\xdf\xe1\x04\xd8\x08#\xc5\x93:C\xbc\'.4M/\x8b=\x12tZ\x0f[\xd3\xbc\xfc\x87o\xc0\xa9j\x9e\xfdA\x11,\xfc\x9e\x1c\x9b*\x0cy\x1b\x01\xc7\x00\xa5\xf4q%\xe8 d\x86\xaf!\x8b{\x8d1i\xecT\x8fU\x1f\x03\xd4;\x17\xd3\xbb\xba{\x8c\x10\xa9\xf2hbx\x17\'H\xc6Q`P\x80\r\x95L\xe2\x18~\x9f\xf4\x1c\xc7\xdah\x91\xb1\xf7\xc9&\xda\xf9\xfb \xbbH\xc1\xa5,\xd7a\x12\x0f\xc4\x8e\xd9|T6)\x13M\xa4V\'\x9d\t\xc38\xfd\xe1u\xe8\xd2Oa\xa2\xae+\x98\xc5\x91\xffC\x7f\xce\xfd[\xcf9b\xe2\x90\x9d\x06rJ\x12+\xe0\xae3\xa8_\x95\x06f\xac;\x1b\xa1\xf6H\xf7\x85\xd0\xc9L|:?\\\xe5\x8f\xb5\x87\xe5\x1e\xbb\x92\xea\x99t?Cgf\x01\xfb\x0e:\xb0\xd9\xfd^\xf9\x08\x06\xfc\xf6L\xc8\x93Fm\xb4\x12\xf6m\xef\xf6\\\x8d\x89\x80\xcc\x92\x90z\x99\xc3\xd9\xa1D\xc9\x99}i\xd4]\xc4\tmb\x8bZ\xcbk\xff\x0bI\xd1\x14.\xaa\x0fT\x0b\xf9\xf5I5\xd6\x13?\x13\xe0-\xc0\xed\xfb\xbb:\xd5\x8c\x88\x18o\n\xa32\xcc\x97^81\xa8<\x9eX&V~\x9a\x8f>\xbd\x84\xce\xc8\xce\xfd\xd8\xe0\xc6\xe7\x12\xbd\xad\'%\xb3"\x10;\x98\x03\x03\x8a\xfb\x17R\xb38\xb1\xc0\x90_8\xf8\x84\xd3\x81g\t\xde8\xe4\x17\xabi>\xd8K\xb4\xa8\x05y\xf7?\x0f!\xa6\xef\xb5f\xa9\x82=\xfe\xff\t\xb5\x91\x16c\xff\x8aA\n\x0c\xcf\x85\xc9S6P\x82\x90\x9d\xf1\xe3\xd8\xa6\xe6%\x0f\x165\x9cW\x15"@\x82\x96L8\x187rI^\x97\x1d\x0c\xb5M\xe0\x05\xeb^\xc7\x01\xc0\x97\xe5\x1d\x7f\xeaWu\xc7q\xbc\xec\xaa\x9f\xb02k\x19\xe6\x95 \x00c\x000\xa8\x9fj\xe6w\x08`\xb8\xea.\x104H\x8d\xc0ID\x94UfN\x07\\\xd4W\xb6K\xe7\xcao\xb3$\r\xccG\x08<\xdd>\x82I\x8e\xb3\x1df\xbaUz\xc7\xd4\x97\xaa\xb3\x89W=7<\xc8\xc77\x91\x84R(\r\xd7x\xa0\x7f\xfbO/\xf3o\x0b\x14+\x875^W\x84\x9a_/z\x0e\x16;\xd4\xe2\x9a\x8e\xaer#\\\xbd\xde\xed\xdd\x93\xd7\xa0\xa1\x04\x87B\x19\xe5\xa8#g\x0f\xfc\x9cU6\x82F\xbbN+E\x91\xa9s)\xea\xa3\xe2 
\x81\x1c,\xe8\xd5"\tg\x81\x84\xb3\x1f[\xea\x0e\xbc\x95i\xa0\xac\x10kI?\x87\x88M\n\xe6\x81B>)NC\xafb\x9c\xb1\xab\xba\xabz\x9b2\xb0UI\x80I\x15\xf3b\xc9a\xa6\xf1\xc2\x95\x19hOJ\xc8\r\xc4\x11\xdb\xbc\xf06\xbe\xd5\x1b\x99\xc7z"\x99\x1c\xe6\xd84R\xf1\xd5\xb1\x8f[?J\x14\x88R\x0b\x11\xb4$\x81\xed\xe6y\xc7d\xd1*;\x8f[G\xed6>lR\xd5+\xa8I\x0f\x03\x85\xb2\xce\xa4|\xcf\x9bz\x15RT;\xf1\xd4}\x11\xfeK\x82\x9as\xbe\xe2\xad\xd9\x1a\xa5\xff\xcc\xf1\xcc\xeaMG\xe2\xdc`\xcc\xdc\x9d\x87\xb4\xf1\xb4\xe4\x82\xb01\xee\x94\x83O\xbdt\xed\x02\xdfQ\xb7u~\xa7\xb2\xa1<\xcc}\x9c,Bn"\xe6\xf0\xe7e\x1a\xc0J\xc4\xf1\x1fa\xef\x02\xbai\x8d#\xa1\xf0\xc7+\x8av\xef\x93Q\xf6i\xc1\xe0\xdcL_\x97@>\x87+q/\xd6t\xfa\xf8\xcc\x99\xff1b\x95\xf5\xe6u-\xb5hP,\x9f=\x94h\x94\x1e:T|P\xc9\x1c\xf6\x17\xac\xd5\x93\x00\x9df\xd2\x15\xc2\xe7G\xc4\x14\x1bp3\xf4t\xdf`\xa1\x84J\xa2\xc3Qn\xc1\xbd>\xe0\x7f\x8c)\x08\x96c\xc0%\x1e\xa0\xedR\xdfzay5\xb2<\xb0}\xbc.i\xa4\xdee\x15\xf1:i\x1f\xcd\xfb\xb4\n\x901\x05\xaas\x05R\\\x93B$\xf6\x88b\xa23\xd6\x06[\xe6\xa2\x16\xb6W\xfccFf\xa1\xd56j<\x14K\xb3x\xf1\x95No?k\xe2\x8bz\x14\xd7\xa8\xee\x03?~gG\x9c\xf4\xd0j\'\xb9\x9a=J\xac\xc6\xb9\xc6\x01^\xab\x94\x82N\xe4bw\xe2x\xbf\x9c\xe6`\xe0z\x90\xcd>\xf3\xd9\xc5\x01\xbd2\xdd"\xfe\x9fl\x83\xcb\xa4L&PS\x01\x1a"\x80\xa9\x02o\xbcm\xc4\xd29\xca\x04\xdc\xb0\x89\xbb\xa4\xf9\x95\x8d\x00\xf8\xc8>FC\x9a\x95\x8bp\xb0V\xc4\x0c\xe5\xd2\xbf\xf9\x86\xc7~\xda\xca\xa0jG!_\x8b\xdd\xd9w|e\x92\xc4C\t\xe8q\xb8j\x92\x0bac5\x93\xf0\x16\x0f\x8f`]\x90\x8d\xf1+\xa1\xdbYH^\xaf\xd6\xbc\xa2\xd5\xe8\xa6y\xddC]\xe9\x94\xf3?L\xd9\xde\xe9v)R\xf8\x05\x06\xc4\x93\x07c\x8e\x0cu\xd7\xd5wA\xf8F{\xac\xdaIw\xc6E\x8f\x08\xfa\x85\xbe\xd2\xc6\'\xa4\xd5\x98\x9f\xf5f:\xfa\x1c\xd32@6\xd7g\xf9\x08_qS\xce0D \x98\x0e\xfc)\xfbLr\xf9th)\xd5\xdf\xc1\xfbP\xb4\xa3\x00\x07\xbej\xa2\x164\x88g\xaa\x11\xc2\xceB3\\\xf3\x08\x90c\x1a\x11F\xbc\xba\x1c=\xf6\x17\xe9\x14\xe8\x10&\xd7\xdf\xf4W\xf1\x0c\x9d7\xf8\xe5\r\xc3\xc3T\xe9\x84E\xc8\xbd\xf6I\x1dEf5\xd0\xf8\x1aWH\x11I\x03ea\x85\xca\x89 \xdb\x00PJ/>Q.\xfa&\xa3T\xca\xf6<2\x86p\x97E\xbc\xd5\xef\x14\\2\x9a\xac\xeb\x15\xb4\x85*c\xd1\x972\x1c\x05U>\xdcS\x13v\xf09vpS\xb1[\xf1S\xc4\x9b\xd8\x8f>3C\xc1\x85\xe9Sw\xd0\xa6\x95O,\xa4Up\xc7J\xca\xa2_\x19\x04\x94\'`7LV\x1a\x05.<\xa7\x8f\x9e\x01\xa0\x93\xb1\xad\xe3\x1e\x829Z\x16\xa7\x84\xff\x0e\xd1Z(\x89\x92\xcf\xe0I\xae]r\xdd\xe4\xec\x9c\xba\x92U\n\x9d,jG\xe4m\x89f\xbc\x19\xcb*\x04\x00u\xea\xcby\t<\xc4\xae\xba\xf1A\x13"\xf4\xd5cG\x8c\xb5\x9dY\x10\x9d\xecLD\xad\x17\xe9\xf6\xedA\xad3\x9eI\x12^\xf9N\xe3O\x00\xb8\xf2\xb1\xc0Yud\x07\x90\xeb)\x92\xe6}\xda\x85k\xad\xb6\xcar\x1c\xe2\x19&\xe9t\n\xe1\xba\x16\xf5\xcc\xff\x9b\xf8\xc2\xef\xe9#0?\xf8\xe3\xeb)6>\x8d\x1d\xc1\x0b\r\x8f\xca}\x8eD\x90$K\x9d\xdd\x9e0\xf9\xfa\xa1\xcab\xf3\xee\xa4\xef\xa8\r\xdf=\x97az\x8b\x96\x89\x15\xd8\x1bW\xc5hG\xe0\x84\xedu\x92\xc7"\rS3L\xd6;\xc1i2\x8e\xf3\xb1\xa2\'\xcd\xa8wSk^V\xb3\xa6\x965\x01\x14\xef\xcc\x02\xc0\xd0\x11qt\xcf\xe7|\xda\xbe<\xc6\xae:\xc3\x0em\xf9\xb2m\x1a\x92\xb3_\x1c\xe3\x91\x8d\xe6]4\x96B\xe4"\xa9+\x07\xd2\x83\xbeTM\xeb\xafI\x0b"D \xc4\xb5\x8e\xee\t\x14"\xf2\x8a\xde\xfb\xac\xe6\x11\xfeB\xfd\xa9\xd5\x85\xa4!\x91\n\xa2f\x92\x7f\xf3\xdbY0\x95%\xe7B\xb3Ko\xe8\xc71Q\x94 
0\xe6\xd5y{J\xb8JP\x91\x83\xb1\x08=\xfa\xee\x90\xbd\xe0Jj\x93!\xc8\x9f\x0et\t\xe7\xfe\xcc\xfcV\xabV\x0e\xcd\x05J\xd5=\x7fv\x19m4\xfb\xa6Tah\xf8\x17\x86\x0b\xcdL]\x89\x9f\xc1\x821Y\x9f\xc3A\xe7R*\xdb\x7f\xfb\xf9@\xaf\x11\x83*\xe9\xaa\xbeC\xc1\x82E4n\x82\x9d\xe3}\xcb6\xfd\xaf\x1c\x90)%\xf7\xb2\x86\xc8@\xacU\xed\xc9\x9bOZ\xef\xc3f\xb1\x87\xb2\xf5\x1b\x88N\xb8\xd4\xce\x96)\xd6\xe3t\xe8F\xf7\x8c\x88Q\x01\x99\x012.q\xdb\xc4\xd1\x16\xee\x9d\xaa\x1bPK\x80S2\xc8:i\x19xB\x19\x8f\x8a\x90b\x9f\\MH\x8c<\x12\x1aI\xc0\x92x\x93m\xf7\xc7\x01\xd1\x95\xbc\xedO\xb0\xe6-\xc6Uqe\xe9\xb6\x83\xb0\\`\xddg\r\xc6\x08V\x0b\x8f:\x021\x88C\x94\xed\x86n\xf2\xf0\xb9\x95:\'\xf57\x98\x0b\xe2\x15G7\x95\x02\x9e\x01\xadv\xfaa\x00M\xb6\xbe\xe00\xabT5V\xb6gz@\t1`\xc8w\xad\x8a\x91c\xf6G\xeeSL>3\xbb\xcc\xa1n\xab\x97\xd8\xd41\n>\xac\x8c\x91\x12\xaeV%\x19\x8a\xa3\xb4p\x06F]\xd08\xe0\x7f\r\xc9\xa8\x97+Q\xfd\xf4N\xc3\xf9\xa7\x1e\x94\x83n\xfe\xa8\xabnKS\x14!\x14\x9d\xaaxR1\xf0W\xf7C\xf2\xd8\xe5\x9e\'\x9c\xa8\xe6\x03Pa~\xd4\xfb\xd7\xc7p\xa3%\x07\xea\xf3\x15\xf6};W\xf9\xd5h\xa7\x9c9\xe7\x14C\xf4\x96\xe0\t\xa3o\x8b\xea$\x9cb\x03X\xeccn\xe3\xd5\x86\xe7\x93.\xbe\xf3&\x04\x88\x8e\xf4~\xb1\xab\xcb\xa3S\xcdPa\xc1\x96\x82\xc8\xed%\xf2\xd0Wq/\xcb\x11p\xe9\x10\xfeW\xb4X\x8d@\xa5\xbc\xfev\x8d\xfd\x13\xd62\xe3\xe1\x91.\x99\x13\xdf\x1a\x1a`\xd8#\x98b\xb3Qk\x9dl\xeb\xdbU\xf7\xb8\xb0\x86A\'\xef\'`\xb9*(\xdc\xa6\xbed\xc2B\xc4\x93\xf9\xcb%1\x84\xcd\xa0.\xba\xad\xf8\xf4@\x88\x02\x8a\x05\xc1a\xbd\x17"\xc5\xf4\x03f\xad\x0bk2_\xb5~\x86NRa\xf7\x17"\xc9\x15\x14\xbaLM\xc9\x14=\xab\x1cK\xbe\xb1\xd1\xe5\x16\xc4\xbb\xc4\xaci\xf4\xde\x18\x00\xb1\x84\xc8\xf3\xdb\xfc7\xce\xe4\xc2\xc5\xb2\xe9\xc8\x85\x08k\x85eg\xa80\xe2\xaf\x0e\xa8\t!+\xc7\x98\x94@`x\xbaR\x04\x85\x9a\x9al\x8a\x12\xce\x81\xbfw\x80.\xe5)Ik@o\x9c\x12\x00+N\x0f\xfex\x9b\x89\xed<\x9b\xef\\\xe9z\xd8\xc1\xc2@\xcfq\xa1\x8f\xe9\x19\x83\r\xd7\xc8}!i3&U\xeb=;*\xb3\x11N\xa8*\x0f\xc3\xa9\xf4\xe80\xfe\xcaQ\xa1\xa8c\x91\x81\xfb$\x19\x9aPp\x15\x14\xb4.\xbb\x89\xc35\r\xd3\xb6+\x99TNb\x13#\xfe\x07\x1d\xbc\xa9aw@c\xede\xde\x9b/9\xaf\xb9\x11!\x9c\xe0\xe7ND{{iM-/\x1cN\xef\x01\xcf\x109AJ\t\xacT\xde\xce\x04\x19|(R\xb3\x82\xeb;\x1d\xbbw\xbfP\x0c\x9frW\xb2\xf9\xbc^\x9d\xe0y\xf7)\x88:\xf5g4U%\x81v%\x84\xfeE\x0f\x9c\x08\xf3\xe2}Jn\xf4\x17\x7f\xa1\xf3\xf7\x07i\x06r\xb9q\xd6\xb7p\x87\x14\xf2\xea\x84\xa7\xb5\xb7\xe6\xdc9r\x15\x8c~\xcf\x08\xf7XE\x94\xc1\xbc\x86\xf9X\xb6N\xdb\xb6\x9c\xd1*\'\\C\xf1\xed\xfe\x85e\xd4!\x9a\x10p1c\x9d\xd6\x0c\xb1W\xe5\x0e\x85Dm\xba\xf8\xc8\x826\xb4\xdb\xe3\x92\x9d\xa7\xd3(B\x10\xcb\x8c\x01p\x85\xefp}\xe5%A\xa20\xdc\xfe\xeb\x8b\xba\xac\xb4\xb7\x92\xa3\x13\x1b\xd5\xfd\xd3\xa4\xa6\x98\xa7*1\xe9\xa2\xe1\xfe\x14S\x8dC%\n\xebN\xb6\xb5\x9d\x8d\xd6\xb3\xd7\x98\xdfW\x99\x19\xb4iRuv\xd2\n\xb2\xa9\xdf?\x9eI"\xb4\x9c\xa6;G\x10\x90PA\xf21[LS\x9b\x83`\x19.\xf4|\xb4\xd9j^\xbe\xf2\xd60K)\xbc\xbbE\xa8\x0cQKo\x1b\xef\x90\x12\x18\xbe#\x0b\xc9\xffJ\xe0I\x8bT\xee\xb9y\xd8\xed\x16\xed\xfcMz\x0b\xd8\x952Aq\xf6UU\xb4P\x13~)W\xf8\x12QlT\xf9!\xde\xb5\x0c|\xad\xb1$\xe8\x99\xa5\x02\x10\xf2\xadO\xdcM\x13\x0b\xe7\x1c\xa9\x98\xa0\xde\xc2\xfa\xd8R\x91\x12K\x83JQ \xf2wTe\xe2\x16\xed\xf5\x01\x02\x84\x14*\x9b\xcd\xff\xdala\x02\xd5\xe0E\x16\x18\xc0\x82\x08\x8a-\xfd\xe1\x7f\xd1\xf3\xb7\x10J\xedLOO\x1d\xe3p\xab0\xcd\x8b\xb4\xdf\xb0]]S#\xa5\xaf2\x8d]\xae\t\xa9\xc1\x92}/\xa8\xcaR9\x8b\x0b\x8a\n3\xe4\xfb#\xcd.\x05\x90\xc7\xb5q\x9c\xa6\xdc\x17 
k\x95\xf2h\x1eud\xdb\xfb\xdf\x1f\xf8=\xd8\n~NE&\xda\x17\x13\x8f\x17\x81F\xce\x1aR-\xd44\x9c^H\x95x\x07\x9dx*W|\x9f\n\x89s&\xb7\xf0X!\xc7\xb0\x1c\x1c\xa8\xec\x9c\xdf\x97\n\xc5\xf3\x83J4\xa3\xf5\xaavNUK\x17!\x96\xb7\xf6\xe5\xce\xff\xeb\x9d\x82\x1f\xc9k\x91m<\xd9\x9a\xfb=\xe5\xf5\xbe1+\xd1M\xe4\xc1\xcd\x8b\x82R\xdb\xa5Wf\x04j\x96\xad\xd5Ol\xe5#\xfb\xbf\x11\xe6\xe2\xd7Or\xb6\xc5[\xe04\xb3\x18s\xbfgE\xf0\xf4\x12B\xd2\x8a\xe7j\x08\xee\xf0\xc4\xe8_F\xbb\x8d\xc0\x85\x19S6cb\x0eFc\xa9\xf6\xe1\x87LnF5\xe59\xbb\xab\\L_\xef\xc1O\xe5\xcf\x7fFZ\xacL\x96\x89\xa4 <\x9f7\xa1\xc9\x8eI$"\xb0e\x81\xd7(\xa3\xad\x80\x0e\x0e\x1e\xfd\x02\x1c\xe8\xc2\xec\x185\xc9\xfa\x94Hy[\xd9\x15\x05\x00\x89\x0e\x08\x9c\x0c\x19?\xd8\x15\x86\xabUB\x92Y%\x9a\xd3(I\xdem\x1b\xf8<\x1c\xff\xb7\xa5"z\x9cB+\xd8\x88I\x03Z.#3\xbam9\x88\x02\rW\x87\xc67\xd3Q\x80_\xd6\x90\xc7#\t\x12\xe4Ds&\xd6\xbb\xff#\xfc\x04@\xaf\xda\xd9\x114u\xcb\xc6m:R\xf8\xd9C\xe3\xe1{\xd74\x14\xa1D\xf9\x84\n\xc3o\xe76\x0f\x18g\x07\xdf\xf7b_\xad|\xbe\x06\x075\x88\xe1l /\x81\x9e\x7fN\x97\x11\x15R\x1b\xaf\x8f\xa2X\x18\x82$U\xc7\xaae\x0e\x8e\x8by/\x9bT\x14[(\xb7\x9e\x16\xbd\x99`\xf2\x08\x04\x9bM^W\xd1\xa2\'&5\xdc$\x05\x875\x90\xe3\x14\xef]\t\x08W\xd6\xab\x9e\xbe\xd9H\xab|c\xe2\xdf\\\x97\xef\x19[]^\x92\xc9+\xff\xe2\xd2\x8e\xaeGc\xc2C\xed(W%_\xb0\t\xac9\x03\xb0kQ\xb8\x95}\xae\x85\xe2e\x0b\xbf?\xed\xbb_\x0c7\x10\xc7\x0f\xc5\xc4L\xc9\x02+\x1d8\xe7z\x90\xe5c\x0c>\xfeT\x87\r\xe3P->\xdb\x11g\xc6B\x0c\xf1\x00c\x8a\xce-$\xc9\xd2\x85\xa6\xa8\x8bp(\xd1\x83V_\x06"1d!s1\x14\xc4\x1b\xa3\x90!\x88?^\x0f9\tO\x00\x0f\xca\x10\x98\x98\xdd\xb5\xdfmc\xac\x1a\x01R9\x02\x8e\x84\xef\x18\x8a+x\xcc\x92\x9f\xe4r\x80\x9e\xc8n\xf0\xf1\xcb",\xf5\xb2\xa5\x17\xcc\xf0M\xce\xfb\x07{\x95\xec:J\xb3\xc1\xb2\x94\xcf\xa9wL\xe7\x00\x13\xaf\x87$v\x8d\xdf\x1d%\x11W\xe6\'\x1b \x8cb\xb5C\x12d\n\x0f\xa3\xbf\xb2"~d\x1b\xf2\x86\xd9r)\xcd\xbd\x08\x90vR\xf9\xf0\xa0\xc0\r\xe5\t\xb8kd9\xd0\xd9S\x90RoF\xdd\xc3\xfb\x17\xfb\xbe\x9d\xcb`(\xfdr|x\xd1\xc9\xf9\xe3\xbaS\x15\xdcYj\xf44\xbe\x0b9\x96\xbf\xbd4\xad\xe1\xd6\xf2\xe5\xf5q\x92\xa6jxo;\x03E\x1c\xed\x9f\xda\xcf\xa1rXn\x91/\xea\x8a\xd6`\x1e<8\x87\xcd\xda\xcd\xc0z\x1bDD1\xfc\xb1\x06\xa9\\\xd2`\xfd}lv\xd4D\xc0_GL\xf0[\x13\xe0%\xb8\xd3\x0b\xbd\x0b7`\xfd\xc07\xdcz`B8J\xdf\x94,\x9c\xb0\x0c`\xe2\x02h\x90\xdd!\xa2+\xa9/\xbf[\xc0I\x14\xc6,\x7f\xc7L\xd4\xac\x9f\x80\xa0\xf8\x14\x0c\xcd\xa5\x13\xf4\xb5\xb7\x83\x12\xf1\x81\r\x08\x16\xf7\xe8\xfb(\x86F\xe3\xef\x84\x1d\xa8~\x98gd\xd5\xc4\x19\xf4\x9f\xc9.4\x15`u\x1b:OY\xd0\xf7\xf6\x16\xb2\xa0\xc1\xe8\x82\xd7\xb5\xc7\xfe/\xb9\xf33\\Hj\x8d\xf58J\x04\xc7\x04\x9c\xd6\xca\xe3\x13\xfd2!\x88\x01\xa7\xb7\xa6\xfd%\xa8;\x8f\x88\x1a\xf8\x9dE\xfa\xd7\xa8\xdei\xd4\xa1\x11\x07\x07G\x9e"\xac,3\x9c$ge\xad\t]C\x98\x9e\xf2}\x9e\xd2\x05\x10Ie+\x81\xcd\xae\n\xe0\x93X\xfc\x96Jy_\xe3\xc3\x06\x9d\x11\x86\x82|\xe1\xb0e\x13\x1f\x90u 
w\x18\xbe\x12v\xb4Z\x93\xb1b\xa2\xc4\x8a\xb6\xe3B\x9ai\xa8_\xc5\x16\x166\x9d0M\xce\xe6\x96\xb6;\x993\xc2m\xef\xcd\x0c\xd4q\x08\xde\x8e3\xa9\xab\xf7m\xf8\xca\xe1n\x8b\xe1\x05\x03)?\x0e8\xf3aB\t4F\xf3\xd9f\x8c^_\xd6\x00OV\x08\x07\x96\x14G\xb2\xd92\x92\x8ef%\xda\xbc\xf0\xfd\xb7\x88\x16\x14\xdf8\xff\x110lP0&\x9a\xbf\x9c\x80\x19*z6\x7f\x8a\xf3\x87\x9b\x81O\xb5_m\xa0\xa7f\x10d\x0cl\r\x9d\xe6{e\xe8\xf8\xa3Q!_-9\xbb\xe3#\xbb\xbf\xf2\x18y\xb7\xa5\x8cu~\xa9\xe7M\x80<9\x81\xf7/\t\xb4>\xf9L&\x84\xcd\xba\x04#@\xfd\xfa3\nR\xcbO\xdci^\x14HYEf\x00\xfb\xd0\x05w\xd3\x04\x94\x98\x88\xba\x8d\xe4\xf5\xb5\x9a\xb6\xd0\tN\x04\xa6\xee\xfeNo8\x0f\xf4\x91\x02\xab@\xd6\xfc\x108\xefy\xbd\x87*\xca&k5\x1b\xeb\x8e\xd9\xc9\xb5K\xedM\x06\xcfk\xf9\xd0\xe4\xff\\\xb4\xc9\x96\xbbRHoR;0\xe5M\x82\xb3\xac\x85=o\x8dN\xc8=\xac\'\xfd>\xa3\xea\xf6\x1e\xca7\x96\xfcx\xd6r\x93\x15\xd5\xcfE\x82\xdeg\xfa\xe0e\xca\xec\'h4\xda\xabk6gq\xe4\xf8"\x96Z\xf8\x1dV\xff\x94\x88i\x8bG\xa4\xecCX\x04pa\xb0\x9f1\xf0\x9d5\x16[9C6\x9b\x18a\xef\xe7Lz7\xc8\x18\x8f\x9f\xea\xbe\xf8\xcc\x9f\xb9\xbc\xcf7=\xb84\xd6b\xbf\x92\xfe\xf5?\xa0\xc4\xb2\x0f\n\ne\xebd\xd2\xed\x82\x90\x1f\x1bg\xb4\x8ezn\x87\xe6>\xba\x84R\r\xe0V\xad"\xf0\x903k\x11\xff\x0ft\t\x9b"\xf9\xc5TN\x0f9\xa4\x9e7\x885r\xb6^\xcc\xfd\xb3>\xf1\x91\x8c\x00\x8eO,$\xd8D\x88\xf8\n\xae\x98\x0e\x0c\xa3\x8b5fj0\xa8&\xe5dn\xec\xf7\xb9&A\xf9d\x86U\t\x9bH*\xf2\x83\xb4f0b\xe5\xc4\xb6q\x81\xe18\xc2>\\D\x19\xb6\xbe*\xd24\xaa\xae\xac\x1e\x0f\x1f\xeb\xca9\xd1z[Z\xd6C\xa0\xdd\xbfVp\x89\x9fL\xe8\x04\xed\xcfB\xfd\xeeTKf\xd3\x9b\x11\xd5\x9c\xb7\x08\x8fJ\xa9\xca5@+|x\x12\x81C\xfb\xd4\xd7\x01v\xdc\xf3\xfcj\x9a\x05\xd3![\xd2\xe2EML\x81\x1an\xe3\xd2\xe4\x19U\x16\x151\xcaoSM\x80\x7f#\x16}\xc6\xe4\xdbTmGTn\xa0\xf2\xf9\x11\xd2~\xc9&\x0f\xebV\xb7eZ\xe6\xaeX"\xf8vQd\xa7P\xfcS\x81\x18\xea\x1d\xb6\xea\xadl"\xfb\x9ac V\xe7\x89r\xa8\x0c\xe2.e(M<\x9e\xe9\x97 \x13f\xd1\xb3\xcd\x14\x9f\xce\xb31\x1f\x98mL2\xaf\xa5\x00v\x04\xe7\x88\xfeA\x03\x15rH\n\\\x7f=\x13\xca<3f\x83\xce_\x1flY\xb0\xc4\x95\xfbe\x8a\xca\x99k\xc95j\xdd\xd8\x9d\xban\xed\x8b/}\xbe\x16\xa7_a\x9c\x96\xa0\xf5r\xf2n\x14^\xb7\xbd\x99\xf0\xcd\x0c\x08\xaaR\x96\x16\xdf\xd9\x18\x87\x1c\xd5\x0e=\x18\x10\xc8\x97\xce/\x89\xb7d\xcd\xb6yl[@\xe2\xbaK.i^*)<\xb3\x08\x05\xcd\x1c\xaf\xd5\xc7\xbd\x98\xbf|\x11\xdb\n\x8f\xc5\xf9\x95\xe31>\xe5l\x7f\xf7\xd7^\xbb\xd8\x8e\xc7!a}\x08E\xdd&\xcfr\xc1\xdf]l\x18A\xda\x9de\xbd2x\xf7\x0b\x1f\x1bH\x08<\xaf\xe6\x82\xc5>\r\xa2\x1a\xc9\xd7\x15\xc2Q\x95\xf4\xd8\x83\xfd\xe7*\xc9\xfbb\x01\x05q@SjZ\xac\xbc\xdb6Y\xa3\xbd\\\xca\xcf\xe1\xfaY$\xb3\xfe\xf0\x85\xbf@\xc9\xf4\xe7\xce \xa1o\xc7\xf3\xa8,\xea\xee\xa3nE\x0e\x18\xc6\x04kL\x99\xf2\x9c\x1c\xa4\xc8\xca\x02\xb2^\x9c\x80z\x9f\xdc\xc8\x08\xf9\xe1-\x18\xe8\xc8<\xf7\xe5\xc8\xa1\xe8\x04\xf0\x05r\xa8\xae\x928\xbaH 
P\x86yz\xc3\x7f0\xcc\xf9\xdb\x89\x95\x17\xd2\xc7\x0fj\x8d\x12\xd0`\xf6\xfe\xac\\\xbe\n\xf1i\x18\xd5\x00\xcc\x11\xa3\x06\npH\\O\x0c\xa1\x10\xc3`\x92I\xbe\x0f\xc0\xcbEB-^"\xf9k\xb5\x13\xdc\x85\x9b\x012\x00\xd7\x82\x10\x06o\xb9\\\xa2\xa2\'/\xc5\xa8\x84tH\xdfF\xd3#\x1e\x99s\xde\xda8"?\xf2W\xeb\x1b\x02PS\x12\xdcc>\x1e\xa0\x94\x13.#\xe1\xc7\x1d\xe1\xe2\xbdh\xf7\xd1\xc5\x9e"\xffx\xeb\xa6\xbf\x9d\xf7\x02\xc6z\x8a\x1f\xb4\xff\xaf\xf0@\xb22\xc5^\x9d,\x1c\xab\x9f\xd2^+bL\xcfy\x1a\x10H\x98\x82\xfb\x03\xd0\xb8\rIT\xca\xf6l\xfc\xff\x1d\xa9\xcf&x97\xf6\xd6g\x00\xfel\xacT\xa7\xc8\x86>j(\x9c\xc0\xa7\x87\x7fX\xbd\x7f\r.N\x91+f\xda\x93\xd7\xd4F\x879|\x99\xe5w\x0c*\xa9\xd7*\r\x02\xbaN\x9a\xc7#\xaeL\xcf\x06\x9cPJ\xc5\x97D\x8d\x94\x1d\xfa\xecV23*\\\x0c\xca\xf4\x97\xf9\xdfD/\xbf\xdb\x14\xb0~\x7f\xae\xfc\x8e\xe3\x8be\xfa\xd4\xf7\xc7D\x97P\x8fjI\xbf}\x07f\xd3\xd7%\xbdY8\xbakfc\xfcc\x15\xdf\xa6\xab\xdb\x1c\xb7\xa0\xbc\x0fb\xbc\x0f\xbf\x17z\x11\x1d\xb2xC J\x94u\x1c\xcf(\xb2\xa8n\xfdr\x93\x87([\xda\xfc\tQQi\xc8\x81?2\x9b\xf9\xf7&FR\xc4\r\xa9\x86Jj\x83>h$l\x8d\n\x82\xa8\xc1o\xff\xa9\xe6\xc6xcrF\x1c\x8b>"\xf0\r\x83\xa4\xfd\xee\xa2\xd3\xdc`\xa2\x15PP: \x84\x01\x12[\x8d\x9e\xdd~\xc63\xe5\xc1~\xba\xbd\xcf\xcb\n\xa8\x13\x00\xd3#Pb\x13\x03z\x0c\xa3s\x86\x16\xf8b;\xe5\xab\x9f\xd3\xb6\xb6\x00\r|)\xc8\x87\t\x7f\xf8\xbd\xa9\x1b\xa5\xa1\x15\xc0\xc2\xc2\x86\xc1X\xa4\xb7]Q2\xa6\x18F\x86\xe7g\xe1\x94\xcd\x7f\xc1w\x868\xe4\xb6\x82\xdeH\x98\x11\xd9\x94\xe3\x07\x8bBCRsv\x81\xbe\xd4\x0f\r\xef\xfd\xb4}u\x84\xd7\xebl\xfd\x13\xb9\x00\x9c\xfc\xc3:\x14QV\x05\xe0\xc1tpoB\xf9a:`\xff\xcf\xe0IF\x12\x1b\x07.B\xf8\xc07\xd6Q;#\x85?\x84\x86Im\xf7\x1f\xd6\xa8\x16\xfd\x0f\x197Za6\x19jTu\xbeG\x1b\xf9Vz\xa3\xdf\xc8\xb3\\\xd6\xc8\xa7|,\xd9N\xe7\xbfn\xf7\x0f\xae\x0c\x8b\x80b\x0c\xb3\x8d\x89K\xfe\xc9\xaa{\xa7\x83\xcd\x05m\xceS\x8e\xc7\xfc\xb7+\x0f\xa0\xfc\x01Y\x88c\xf8\xe1L\xdf\xcb \xf0\xd4Q\x7fg\xf01a\x96\xf8r\x8e_\x99\x01\xc1\xc1\xe8+\x99\xd9\x8b\x9c\x9c&\x16l\x96g\xb9\xb8\xbd\xa2l\x9e\xe9<\x92\xd7,\x1f\xd4\xf6\xc2|\xe0\x94s\xce\xf53\xc4\x8d\xb9]\xc9\x95\xedG\xd7\xc2.u+\xa8K#\\+%\x15\x8a\xe7\xd0\xb1\xf3J\xc8k\x8d\xe4\xdc]^\xb6R\x9bU\xeeU\x81\x95*{\xcc\xe8X\x08\x91\xa6\x0b\xa2\x07I\xa7\xe3\xbe\x1b\xde\x8a?\x1f\xab\xba2\xd0\x1c\xc47\x8c\xd4\xf8\xfe\ns\x8d\xe8\x81\xf4L\xcc]\x04URk\x97i\xae\xccKh\xec\xa6\xd0tMz\xe3\r\x84d\x8f\xf4%]\x066\xc4\x11\xab\xc9\xe9\x93\xa8\x84\x16\x9a\xd6U\xe0BF\xd2._\xfa\xac.\xe6t;\x10()\x96\xea\x1d\x10_E\xd8G-\xdch\xed>&\xde\xff\xc6\x08\xb4\xe2BrN\xb8i\x87\xac\xab\xc2\x91\r\x9a\xa4\xf0\xd2\x18pz9!\'"<\x85m\xcd\x16\xb2\x1fE^\xcf\xff\x1d\xc4\x8e\xc4\xd3\x16z\xd8\x80\xc0o1\xb8\xcc\xca$\xf8\xce@g\xb2 Q\xf7\xc8\xf6#}\r\xa3\xbb\x08\x90\xe2\r\xf0\xf7\xe9\xb4s\xa9,\xd1\x19\x96\xa7\xffd\xc1\xea#LR"\xc4\xb8]6\xbbc\xb3\x94\xcfuODH\x11@TuBx\xe5\xc4\xc6\x96iC\x05_\x8b\xab\x9d\xd8\xbc)\xeaXc\xd8!\xab\xb0 \xfc\xc7\xf6\x1d\xfd\xed6\xc17b\x90\xd6Sk\x1b\x07\xbf\xc0\xb1\xaf\xf2\xaf\x14s6\xb3q\xf8{\x16Q\x95\x8f\x9f\x15c|\x8d\xe5F4\xc0\xff\x9e.\xc0\xc2(\xa1\xce\x9d#\xcd\xc3\'\x91\x01\xe8S,\xbe\xb2O:\xed\x9d\xf6\xcaG<,\x0b\xe5N\xfe\xfc\xd4\xee\x03\xbea\x86\x8e\x85\xbbY)u\xf9\xd9t\x1fE]\xae>I^o\x11\xc3O\xe1\x9aY\x05H~]\xbb\xbcT\x02|VC\xa8=z\xf7B\xccRW\xbc\xd1W\xef\xd4\x84F\xaf\xd0\xbb\xa2>\x13\xc10K\x98@\x96\xc8e\x0fc\x00+\x8a\xc4\xe2/\x9e\xa0+%K\x8b^,G\xf8m\xa0\x83\x83\x10\xba\x11\x87Z\xe6HV\x18F5n\xf4\xad\x8fV\xf3I1\x06]\xfd\x8a\xe6\x0e\xc6+\x08k\xbe\xfam\xfd 
\nn.?\xc8>\xc5\x10\x82:\xe9\x17\xb7n\x96\xeb\x97\xdb\xa4!\x80\xcd\xddF\x87M\xfdB_\x91:\xf7]G\xf9\xf5k\xa6\xbc\xf9k\xf9\xb3\xf5\x80;\x8b\xc2\xab\x1e\xf7\xd3J\xef\xa0y\xe0\x01&\x08*\xda\xb9\x7fQv\xb1\x14Y\xa67\xbe\x03CF\x12a/\xe32\xc6\x8e\xce3\xc3IU\xcco\xfdM\xcc\xe4:\x9b*\x13xQ\xa9\xe9l\xb0\xb7e\xbb\x0b\x16Yr\xf9i\x8c\x98\xad){D\xbb\xb4\x829\xc2fDp\x0c\xaf\x04\x07(8\x0e\xd1?\x81\xfe\xd9\xbc\xc3\x13\x1a\xcdm\xd2?\t\x84\xc2\xc2W\xca\xed\x11\x8e\xa7Gx\x12Q\x85\xa7\'b\xb7^{\xc4\xe4\xe6UK\xe2\xf0\xdaal\xeb#\x91\xb7VF\xa7L\xb2\xcc\'\x04\xaa\x91u\xf6\xe2\xf1\xe5\xe6u\xb3\xcf\xc4\x1dk\x01\x00\x0ei\x11\xe7\xbe\xff\xf7&\x93\x0b\xa6\xa85\x81\x07.\x8b\xcb\x92\xfa&\xd7\x82e8\x9cf\x05\x9e\xc2\xb1\xca^`\x8d\x1d\x86iG\xe6\xe7\xcf\xd5\xeay\x92B\xd4\xc5\xd8\x01x5\xaf\xb3\xeb+\xdai\x97\xa2\xd4\x89\xf9]r\x17\x05\xf9\xc1\x13X;\xb5\x7fx\xce\x12\xee\x0b)\xf5\xbf\x8d,\' \xed\xb0\xe0\x90\xce\xa06\x12\xdf\x87\xba\xdb\xf5a_\xe2o\xb5\x0e\xcda\xf0\xdb\x89E\xee\xf3f\\Fe\x92\x9a\xfc\xe44X\xe6\x11\xe5\xd2\x04\xf4\x03+0\xa4\x88i\xa3m\rlTr}\x05\xa9Pb\xfa\x92\x17T\xb8\x00\xa2\xcb\x07\x86\xde7\x93\xbf\xfdp\xe5\x8a\x97\xbf\xf0\x92m\xf8m\x8b\xb6+\xb8>W\x99\xa1\xf6H\xbb\x88\x0c\x00\xc3z\xe7\\\xb18\x8c\xb7\x8f\x0c\x14\x97\xa9b\xf58\x95tL\x0bg(h\xba\xde&\xee\xf3\x94\xcf\x03^\xab[ \xdb\xbb\xe8\x98X (\x04\xac>bQ\xc9\xf1i\xfc\xfa\xd3\x7fS \x93\x13\x9c\xb5\xb9[H\xd26.\x16Y\x00.\x02\xd4p)\x988\x16\xbc\x89K\xb2[\x86\x91\x96\xceoF\xf9\xd1\xac\xbey\xe6\xceo\xff{\xc4;N\x18&\xb5\xd4\xa6\xfdS\xe6c\xc9\x02\xd2\x16l\xc1e\xa3XPB<\x80\xf2\xa4%\xbc{\xac\xb6j\xa6s\xcfm\x0c\x94D\xce\xfdX\\M;\xc5Q\xb1\x81\xa6\xda.\x9bAo\x02P\xe6\xa4R\r\xd6\n\xb6 \xb2\xe2d\xd6\xc7\x10\xef\xf5\xee\x7f\xfar\xb8\xcd\xd8\xcbm\xb3T\x17\x94D\x98\xdex\x8c\xa3\xa9\xeb\xc5\x86r\x8bh\xcd\xaeG\xbd|\xae\x02\xe123gl9geY\x0b\xca\xe2\x0c\xbf\x1c\xff#n\x10\x08\x14\xab\xf9V}1C\x9cI\x9f\xa4\xa5p\xb8\x84\x00\x81f\x15\xd0\xa7\xbe\x96P\xee\xf8\xb7Y\xefE\xfd\xad\x9d\x80h\x026\xfc\xd7\x96D\x14=\x97\xc9\x9f\xb42\x06\xe1\xfc_R\x19g\xde\xd0\x1d\xeb\xd1\xbf\x1f\x82r\xce\xb6\xea\x15\x89\xc0fc\x1fh\x12\xf6K\xdd&\xaaR5-!\xd2A\xc3\x120\x1e]\x967\xab\xc5\x93\xe7\x7f\xb4Zg\xca\xf13\x03\x18\x0f\r\xf1\x9d\xb6\x0b8\xdd|\xbe\x83\xdf\xe4\x8fH\x12c\x01u\xfb(\xa6\xb6\xd5\xe5\xe3G\xa2^\xb6\xb5\xe4\xca\xa9^\x975\x11\xa7\x86\xe6\n\x13\x9e\xfe2\x18\xa1\xd06\xaa\xc3\x9e\xa7\x1d\xbaj=Y\x80XA\xf4jX\xb5F\x9a(\x15\'8\x91F7c\x1ddA@\x03\xaddV\xcb\x0e*#edg\x98\x7f\xe9\x92c\xa4\x99?\xf8\x822:\xa2\xebB\x86g\x93\x03\xc7\xaaBq\xfed\x9b\xa7`?A\x1b5\xbcAA\x96\x8e\xfb\xb3\x05\x12\xdd\xc3*\x89W\xdc\xc9#\xd2\xeb\xc6lXHAZ\'\xbdh\xeb\xdb\x1f]\r\xdb\xb80\r\xb6\xc0\xce\x9f%\xfd\xa7g\xd6\x98t\xdb\x1by\xbe\xff\x18\xcb\x9dB\x93D:\n\x80:\xb67\\Cr\xd80\xc9\xe5f\xa5\\5\xd4\x07\xb3U\x1e\xe9\xf6\xa1\xbe!!\xb3X\x8c\xc1D\x16\xeb\x8d\x14\x9f\xce\xa4\xd3\x86\x96i\x13t\x0c\xfb\x88k\x8b\xf6\xde\xe1\x17\xec\x02\xe1p\xc4\xe1\x1a\x16\xbd\xb9\x97\xcd\x18\xf0O\xd2W\xb9\xaa\x03\xa7\xcc@\xab\x9d\xb9>\xb5\x0b\xc7\xad\n=\x94\xf5\xd2c\x021>\x086\xa4/n\xa2\x18\xb4\nR\x1b\x068KW\xc7\xfaY%Y\xe8\xb6\x87\xeb\xe7\x1fk\xe18|Ub\xad\xf7\x1dd\xce\xfe$gz\\\x94q\xcf\xb6\xb8\xcd\x9e\xfc\xe4\xc2#\x03\x10\xe9\x9eS\x8e\xbb,J\x92f\xe0X\xce)\x8f\xf0\xaa"\x02E.K\x91\x1bp\xed\x1c1H$\x02\x82Ts\xf4\xb3\xc7\xf3\xa0`\x80\xff}>-\xdf\x08\xcfW\x9c\xe8z\x8e\xb0m\x1c<\xa2\xe6Q8k\x89\xcb\xb2\xaa\xae\x7fk\xa5\xd2\x87?b\xe7\x12\xbe\xc0v\xb5\xb3_\x15\x07\xb5\xfa\xa46\xdd\xec\xab\x16\xe64[\x94\\\xba\x94\xb9Wc+\xba\xa3z\xcfX\xf7\xaa\x8d\xa8q#\xe0f\x03r\xff\xb6\xa0\xfd5\xb5\xb3\r\t\x97]5&\xa7p\x12\x8bc\xf5\xb3\xbd\xb4\xcc;\xd0\xc8\x0b\x91`\xea8\xbep\xf6nD\x07\x00\x83:\x86\x81i\xec\xfe\x16\x
d9\xad\xc2Z\x8d\x0c\x80\x8a\xe7\x1ds\x8d`\xff\x84\x8c\x1b\xb7\xf4^\xe5\x95\xbb\xe9\x94\x9e\xa7\x95\x94u;\x81\xeb\xe2\xc1\xe9\xc9&\x06\xaa\x9c\xea\x92\xeft>\xf2\xb4\x9dD\xf1\x88\x0b#\x7f\x87\xa3P"\x8c\xcfkPP\xfc\x12\xb9\x80\x8c\xb9B\xea:e\x05\xa8\xa2Jk\xc8\xb6\xd6\xb9B[\xc0\x89|J\x0cB\x80\xd2:ic\xdac\x8ekd\xe7=\xa5PPl\xf2z\xeb\xfb\xa4\x96\xbb\x8a\x90\xe4\x8d\xa8\xb8fv6\xcf\xa9\x8c\xe7H8\xafm\x19\x18bK\xf4#\x81]\xc8\xb2j\x1c\x84\xe2\xaa!\xd0\xa4\xf6\xfb\x99\xd9\xe7\x01\xb3/\xcft\xa8?\xad\x0cf\x08m\x1a:\x8c\xaf\xec\xaet\x1f\xfd\x1f\xff\x1a\x16o\x00\x7f\x96rh\xa9]\xf7K\xfa\xfe\xd8:_\xe8q9\x1eh\xe5\xab\xb9\xacAp*\x7f\x92\xe2[\xc6r\x81\xb5r"r\xdef\x1d\x82\x87\xdd7\x82\xc8\xdd\xd7J\xca\xe0\xf1\x90=\x7fS\x0c]\xac\x7f\xdd\'\xab\xf9Z\xb9\xb9\x15X}\x1f\xaa\x878s\xf7i\xeb\x9e\xe7,\x94\xd3\x9a}\x12I\xc6c\xd4\x9f\xea\x14\x8fwI\xc3\t\x1b$\xe8g\xa1Wi*0\x15(x\xd7\xbf\xd4\x14\xb0\xa6\x01\xb5\x90d\xb4?\x17z\xeet\xf5\x97\xe4cQ`\xc5\x8a\xe6,(\xc7\xae{\x9e\xe8^\xea\x9d\xdb\x1d\xa6l\x7f\xddz&;U\x91\x10\x0f\x9b\xf1\xfep#\xc8rF\x82\xa9\x8cFm\xc3\x01`\'U\xed\x89\xf1\xcc\xc3\xfb \x1a\xfco\xb1c\xe60\x87\xee\xc8\xaa\xe4Q{\xa2\xe0\x8c\xb8\xe8K\x01\'\x12\xcbBA\xferb\xdf\xcf-\xc5\x93M\x12\x0b\nC\x0b\x84\x882\x1fL\xe9EK\x8c6"\xb8hr\xa3\xc3\x19\xf10k~\xafmi\xd7\x8c7\xe1x\xe30\x9c\xfa5K\xe2\xc9\xc8\x99\xf3\rF\x13\x1e:\x88\xaa\xd7W\x88^\xa1\'\xeb^|\xc7\xc8\x08\xd0S\xe4,\t\x9c\xd0\x141\x92\xf3^O\x12\xbe\x18\x05\xad\xec\x0c)\xd1\xbb\xa2]X\xfb\x1f\x1d\xfe\x85\xf6\'\x9f{\xc3\xb6\\\x96\xadX\x1b\xa1\x80\xfcWf\x17l\x16\xa0\x1e\x94\xd2=\xe1\xec\x9b\xfdiaEo\x89\x9e!\xc4\xb4"\x1e,\xd8k\x9ab\x92K\x0c*\\#R\xf9\x01\xb9\x02\x1c\xcc\xf38{7\x80\xa3?Q\xbb\xd9\x0b/\x8c\x8b3\xb3\xd7\xe9 \xa9+*\xbb\xb7\x84wI=\xe5\'\x89\xfa\xa6\x97E\xc8\xda\x0b:<\x03\xa1C\xf0d\xa3G\x8d2\xa6\x88\xe1\x12\xf4\xe2\x19\x05]\xd9\x1bk\xc8\xc9C\xc4\xfcW\x17\x98\x0fZO\xa9\xb2\xc9Y\xeb\x85\x02\xa5\r\xc8\xab/\xd6\xe2\x00\x8b\xa4\xa7\n\xec\xeb\xf0\x91\xaa1d\x15o08\xfe\xc6B\xc9R\x05l6\xd4X\xb9\xb4a\x88\xcdb\xbet\x03s\xed\x06\xc2m\t/\x1e\xe1\xa8\x8bp\xd7\xe3\x04\x8e@\r\xa2u:As\xaf\x15\xdb\xd9\xcc\xe1\x18b\xc3\x86X.\x0e.E)\xbb \xb7\xa4\x96\xb7\x85}\xe09/k\n\x00X\xd5\xe0H,\x9b\x1b3B"\xea\xb7*\x040\xff\xcd\xee\xd6\xea\x8b\xaf\xf9\xbf\x91\xae\rHJ}P\xbe\x02\xce\xf8\xd5\x04;\xe4\x02;\x1e\x92\xbe9\x9b\xe5\xee\x8e\xef\xa6\xa1\x18\x86\x97\x96g\xf1\x9dF\x9d\xe6\xccMv,\xc6\x85\x868\xc9\xc2,\xd6\x99\x12:\x10\x12\x7f\xde\xcb\x16\xbd\x04\xc7\x07\x7f\xec\x87\x1b\xe2\x84"\x93#\xbb\xfa\xdbM\x19\xab\xba!J\xe6\x16\xe5`^\x1a\xbc\xe74\xe7\x18\xa9\x84\n\x10\x84\x86\xe2t\xbfd\x02\xe4b)\x85\xc3\xf6ox\x85>p\x10x\xf2o\xcdJ\x19\xf9\x00\x0b\xe23\xe7\xca&\x82Dm\xa4\x05\xf8\x15\xbd\xd0\x0c\xdf`l0\xe9%\xe2x\xc5v\x88\xa7\xf8\xc35\tc\\;Q[\xf9,e\x0f\x8e\xb2\x12!\xe5\xb9\x8a\xc9<\x9b\x13\x15a~Wm3\xe5\x92\xe7T\xf2\x8d\x1d\xe3\x1c|\'\xb1\x9c\xfd~\xd8\xdb_L\xcft\xb0\x8cn\x14\x1b\x99PjY:\xd2E\xad\x10\x0ba\x19\xb3\r\xd5\xe9\x18\x03\xf1:\xd4\x9e\xf4\x18L3\x85\x89\xc5+,mZV\xca\xff\x15WC\xc5\xbcK/\xd9.\x12\x19\xc4\xfc\xb4\x9b2\x96\xaeZ\xfatY\x88\x04\xa7[\xd6\xfbZ\xb5\x1c\xb4\x82\x05\xdb\x93\xd0\xe5#@g\xbd\x9e1\x8d\xaf\xeb\xdb\x02\xf8V\xc0\xfd\x18\xa2@\x8e\x93&\xf3\xb2\x98\x9e\xa4\xaf\xfa\xd5d\xb7\x95 3\xe0\xd2\x0c\xcc:\xb4;X^\xeesy\x93\xc2\tq\x19\xc7\x14\xd8\x1a\xd4\xee\x92\xdb\xa5X\xddD\xd4\xc5t9\xa6\xea~&\x9dzH\xf8\xccv\xb2\xc4LQ\xbd\x00A\xc1\x83\xb9\xeeP\xf5,\xcb7\xc4\xe2e\x0b\xb1\x8d\xc87\x1e-Z\x06\x04@\x95\xca\xcd\x04:\xf9:2Fd\xefq\xbaM\x90b\xfa\x11\xf4\xa9F\xab\x8d\xd7%"\x84`', + 'name': u'Erotic Dreams pt 1 Mixed by DJ Mode.mp3', + 'name.utf-8': u'Erotic Dreams pt 1 Mixed by DJ Mode.mp3' + } +created 
by:Azureus/3.0.0.6
+announce:http://tracker02.azureusplatform.com:10000/announce
+
+
+# Typical size of Thumbnail: (171, 96)
diff --git a/tribler-mod/Tribler/Player/BaseApp.py b/tribler-mod/Tribler/Player/BaseApp.py
new file mode 100644
index 0000000..c74515b
--- /dev/null
+++ b/tribler-mod/Tribler/Player/BaseApp.py
@@ -0,0 +1,573 @@
+from time import localtime, strftime
+# Written by Arno Bakker, Choopan RATTANAPOKA, Jie Yang
+# see LICENSE.txt for license information
+""" Base class for Player and Plugin Background process. See swarmplayer.py """
+
+#
+# TODO: set 'download_slice_size' to 32K, such that pieces are no longer
+# downloaded in 2 chunks. This particularly avoids a bad case where you
+# kick the source: you download chunk 1 of piece X
+# from lagging peer and download chunk 2 of piece X from source. With the piece
+# now complete you check the sig. As the first part of the piece is old, this
+# fails and we kick the peer that gave us the completing chunk, which is the
+# source.
+#
+# Note that the BT spec says:
+# "All current implementations use 2**15, and close connections which request
+# an amount greater than 2**17." http://www.bittorrent.org/beps/bep_0003.html
+#
+# So it should be 32KB already. However, the BitTorrent (3.4.1, 5.0.9),
+# BitTornado and Azureus all use 2 ** 14 = 16KB chunks.
+
+import os
+import sys
+import time
+from sets import Set
+
+from threading import enumerate,currentThread,RLock
+from traceback import print_exc
+
+if sys.platform == "darwin":
+    # on Mac, we can only load VLC/OpenSSL libraries
+    # relative to the location of tribler.py
+    os.chdir(os.path.abspath(os.path.dirname(sys.argv[0])))
+try:
+    import wxversion
+    wxversion.select('2.8')
+except:
+    pass
+import wx
+
+from Tribler.__init__ import LIBRARYNAME
+from Tribler.Core.API import *
+from Tribler.Policies.RateManager import UserDefinedMaxAlwaysOtherwiseEquallyDividedRateManager
+from Tribler.Utilities.Instance2Instance import *
+
+from Tribler.Player.systray import PlayerTaskBarIcon
+from Tribler.Player.Reporter import Reporter
+from Tribler.Player.UtilityStub import UtilityStub
+
+DEBUG = False
+RATELIMITADSL = False
+
+DISKSPACE_LIMIT = 5L * 1024L * 1024L * 1024L # 5 GB
+DEFAULT_MAX_UPLOAD_SEED_WHEN_SEEDING = 75 # KB/s
+
+class BaseApp(wx.App,InstanceConnectionHandler):
+    def __init__(self, redirectstderrout, appname, params, single_instance_checker, installdir, i2iport, sport):
+        self.appname = appname
+        self.params = params
+        self.single_instance_checker = single_instance_checker
+        self.installdir = installdir
+        self.i2iport = i2iport
+        self.sport = sport
+        self.error = None
+        self.s = None
+        self.tbicon = None
+
+        self.downloads_in_vodmode = Set() # Set of playing Downloads, one for SP, many for Plugin
+        self.ratelimiter = None
+        self.ratelimit_update_count = 0
+        self.playermode = DLSTATUS_DOWNLOADING
+        self.getpeerlistcount = 2 # for research Reporter
+        self.shuttingdown = False
+
+        InstanceConnectionHandler.__init__(self,self.i2ithread_readlinecallback)
+        wx.App.__init__(self, redirectstderrout)
+
+
+    def OnInitBase(self):
+        """ To be wrapped in a OnInit() method that returns True/False """
+
+        # Normal startup
+        # Read config
+        state_dir = Session.get_default_state_dir('.'+self.appname)
+
+        self.utility = UtilityStub(self.installdir,state_dir)
+        self.utility.app = self
+        print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",self.utility.lang.get('build')
+        self.iconpath = os.path.join(self.installdir,LIBRARYNAME,'Images',self.appname+'Icon.ico')
+        self.logopath =
os.path.join(self.installdir,LIBRARYNAME,'Images',self.appname+'Logo.png') + + + # Start server for instance2instance communication + self.i2is = Instance2InstanceServer(self.i2iport,self,timeout=(24.0*3600.0)) + + + # The playerconfig contains all config parameters that are not + # saved by checkpointing the Session or its Downloads. + self.load_playerconfig(state_dir) + + # Install systray icon + # Note: setting this makes the program not exit when the videoFrame + # is being closed. + + self.tbicon = PlayerTaskBarIcon(self,self.iconpath) + + + # Start Tribler Session + cfgfilename = Session.get_default_config_filename(state_dir) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Session config",cfgfilename + try: + self.sconfig = SessionStartupConfig.load(cfgfilename) + except: + print_exc() + self.sconfig = SessionStartupConfig() + self.sconfig.set_state_dir(state_dir) + + self.sconfig.set_listen_port(self.sport) + self.sconfig.set_overlay(False) + self.sconfig.set_megacache(False) + + self.s = Session(self.sconfig) + self.s.set_download_states_callback(self.sesscb_states_callback) + + self.reporter = Reporter( self.sconfig ) + + if RATELIMITADSL: + self.ratelimiter = UserDefinedMaxAlwaysOtherwiseEquallyDividedRateManager() + self.ratelimiter.set_global_max_speed(DOWNLOAD,400) + self.ratelimiter.set_global_max_speed(UPLOAD,90) + + + # Arno: For extra robustness, ignore any errors related to restarting + try: + # Load all other downloads in cache, but in STOPPED state + self.s.load_checkpoint(initialdlstatus=DLSTATUS_STOPPED) + except: + print_exc() + + # Start remote control + self.i2is.start() + + + def start_download(self,tdef,dlfile): + """ Start download of torrent tdef and play video file dlfile from it """ + + # Free diskspace, if needed + destdir = self.get_default_destdir() + if not os.access(destdir,os.F_OK): + os.mkdir(destdir) + + # Arno: For extra robustness, ignore any errors related to restarting + # TODO: Extend code such that we can also delete files from the + # disk cache, not just Downloads. This would allow us to keep the + # parts of a Download that we already have, but that is being aborted + # by the user by closing the video window. See remove_playing_* + try: + if not self.free_up_diskspace_by_downloads(tdef.get_infohash(),tdef.get_length([dlfile])): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Not enough free diskspace, ignoring" + except: + print_exc() + + # Setup how to download + dcfg = DownloadStartupConfig() + + # Delegate processing to VideoPlayer + dcfg.set_video_event_callback(self.sesscb_vod_event_callback) + dcfg.set_video_events(self.get_supported_vod_events()) + + dcfg.set_dest_dir(destdir) + + if tdef.is_multifile_torrent(): + dcfg.set_selected_files([dlfile]) + + # Arno: 2008-7-15: commented out, just stick with old ABC-tuned + # settings for now + #dcfg.set_max_conns_to_initiate(300) + #dcfg.set_max_conns(300) + + + # Stop all non-playing, see if we're restarting one + infohash = tdef.get_infohash() + newd = None + for d in self.s.get_downloads(): + if d.get_def().get_infohash() == infohash: + # Download already exists. + # One safe option is to remove it (but not its downloaded content) + # so we can start with a fresh DownloadStartupConfig. However, + # this gives funky concurrency errors and could prevent a + # Download from starting without hashchecking (as its checkpoint + # was removed) + # Alternative is to set VOD callback, etc. 
at Runtime: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Reusing old duplicate Download",`infohash` + newd = d + if d not in self.downloads_in_vodmode: + d.stop() + + self.s.lm.h4xor_reset_init_conn_counter() + + # ARNOTODO: does this work with Plugin's duplicate download facility? + + self.playermode = DLSTATUS_DOWNLOADING + if newd is None: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Starting new Download",`infohash` + newd = self.s.start_download(tdef,dcfg) + else: + newd.set_video_event_callback(self.sesscb_vod_event_callback) + newd.set_video_events(self.get_supported_vod_events()) + + if tdef.is_multifile_torrent(): + newd.set_selected_files([dlfile]) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Restarting existing Download",`infohash` + newd.restart() + + self.downloads_in_vodmode.add(newd) + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Saving content to",newd.get_dest_files() + return newd + + + def sesscb_vod_event_callback(self,d,event,params): + pass + + def get_supported_vod_events(self): + pass + + + # + # DownloadCache + # + def free_up_diskspace_by_downloads(self,infohash,needed): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: free_up: needed",needed,DISKSPACE_LIMIT + if needed > DISKSPACE_LIMIT: + # Not cleaning out whole cache for bigguns + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: free_up: No cleanup for bigguns" + return True + + inuse = 0L + timelist = [] + dlist = self.s.get_downloads() + for d in dlist: + hisinfohash = d.get_def().get_infohash() + if infohash == hisinfohash: + # Don't delete the torrent we want to play + continue + destfiles = d.get_dest_files() + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: free_up: Downloaded content",`destfiles` + + dinuse = 0L + for (filename,savepath) in destfiles: + stat = os.stat(savepath) + dinuse += stat.st_size + inuse += dinuse + timerec = (stat.st_ctime,dinuse,d) + timelist.append(timerec) + + if inuse+needed < DISKSPACE_LIMIT: + # Enough available, done. + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: free_up: Enough avail",inuse + return True + + # Policy: remove oldest till sufficient + timelist.sort() + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: free_up: Found",timelist,"in dest dir" + + got = 0L + for ctime,dinuse,d in timelist: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: free_up: Removing",`d.get_def().get_name_as_unicode()`,"to free up diskspace, t",ctime + self.s.remove_download(d,removecontent=True) + got += dinuse + if got > needed: + return True + # Deleted all, still no space: + return False + + + # + # Process periodically reported DownloadStates + # + def sesscb_states_callback(self,dslist): + """ Called by Session thread """ + # Arno: delegate to GUI thread. This makes some things (especially + #access control to self.videoFrame easier + #self.gui_states_callback(dslist) + + # Arno: we want the prebuf stats every second, and we want the + # detailed peerlist, needed for research stats. Getting them every + # second may be too expensive, so get them every 10. 
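# A minimal standalone sketch of the throttling pattern the comment above
# describes: poll the cheap status every tick, but only request the expensive
# peer list on every N-th tick. The names N, StatsPoller, fetch_status and
# fetch_peerlist are illustrative assumptions, not Tribler APIs; only the
# counter/modulo technique mirrors the code that follows.
N = 10

class StatsPoller:
    def __init__(self, fetch_status, fetch_peerlist):
        self.fetch_status = fetch_status        # cheap call, run every tick; assumed to return a dict
        self.fetch_peerlist = fetch_peerlist    # expensive call, run every N-th tick
        self.count = 0

    def tick(self):
        self.count += 1
        stats = self.fetch_status()
        if self.count % N == 0:
            # pay for the detailed peer list only once per N ticks
            stats['peerlist'] = self.fetch_peerlist()
        return stats

# Usage sketch: poller = StatsPoller(get_stats, get_peers); call poller.tick()
# once per second, exactly as the periodic states callback below is invoked.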
+ # + self.getpeerlistcount += 1 + getpeerlist = (self.getpeerlistcount % 10) == 0 + haspeerlist = (self.getpeerlistcount % 10) == 1 + + wx.CallAfter(self.gui_states_callback,dslist,haspeerlist) + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: SessStats:",self.getpeerlistcount,getpeerlist,haspeerlist + return (1.0,getpeerlist) + + def gui_states_callback(self,dslist,haspeerlist): + """ Called by *GUI* thread. + CAUTION: As this method is called by the GUI thread don't to any + time-consuming stuff here! """ + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Stats:" + if self.shuttingdown: + return ([],0,0) + + # See which Download is currently playing + playermode = self.playermode + + totalspeed = {} + totalspeed[UPLOAD] = 0.0 + totalspeed[DOWNLOAD] = 0.0 + totalhelping = 0 + + # When not playing, display stats for all Downloads and apply rate control. + if playermode == DLSTATUS_SEEDING: + if DEBUG: + for ds in dslist: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Stats: Seeding: %s %.1f%% %s" % (dlstatus_strings[ds.get_status()],100.0*ds.get_progress(),ds.get_error()) + self.ratelimit_callback(dslist) + + # Calc total dl/ul speed and find DownloadStates for playing Downloads + playing_dslist = [] + for ds in dslist: + if ds.get_download() in self.downloads_in_vodmode: + playing_dslist.append(ds) + elif DEBUG and playermode == DLSTATUS_DOWNLOADING: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Stats: Waiting: %s %.1f%% %s" % (dlstatus_strings[ds.get_status()],100.0*ds.get_progress(),ds.get_error()) + + for dir in [UPLOAD,DOWNLOAD]: + totalspeed[dir] += ds.get_current_speed(dir) + totalhelping += ds.get_num_peers() + + # Report statistics on all downloads to research server, every 10 secs + if haspeerlist: + try: + for ds in dslist: + self.reporter.report_stat(ds) + except: + print_exc() + + # Set systray icon tooltip. This has limited size on Win32! + txt = self.appname+'\n\n' + txt += 'DL: %.1f\n' % (totalspeed[DOWNLOAD]) + txt += 'UL: %.1f\n' % (totalspeed[UPLOAD]) + txt += 'Helping: %d\n' % (totalhelping) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: ToolTip summary",txt + self.OnSetSysTrayTooltip(txt) + + # No playing Downloads + if len(playing_dslist) == 0: + return ([],0,0) + elif DEBUG and playermode == DLSTATUS_DOWNLOADING: + for ds in playing_dslist: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Stats: DL: %s %.1f%% %s dl %.1f ul %.1f n %d" % (dlstatus_strings[ds.get_status()],100.0*ds.get_progress(),ds.get_error(),ds.get_current_speed(DOWNLOAD),ds.get_current_speed(UPLOAD),ds.get_num_peers()) + + # If we're done playing we can now restart any previous downloads to + # seed them. + if playermode != DLSTATUS_SEEDING: + playing_seeding_count = 0 + for ds in playing_dslist: + if ds.get_status() == DLSTATUS_SEEDING: + playing_seeding_count += 1 + if len(playing_dslist) == playing_seeding_count: + self.restart_other_downloads() + + # cf. 25 Mbps cap to reduce CPU usage and improve playback on slow machines + # Arno: on some torrents this causes VLC to fail to tune into the video + # although it plays audio??? 
+ #ds.get_download().set_max_speed(DOWNLOAD,1500) + + return (playing_dslist,totalhelping,totalspeed) + + + def OnSetSysTrayTooltip(self,txt): + if self.tbicon is not None: + self.tbicon.set_icon_tooltip(txt) + + # + # Download Management + # + def restart_other_downloads(self): + """ Called by GUI thread """ + if self.shuttingdown: + return + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Restarting other downloads" + self.playermode = DLSTATUS_SEEDING + self.ratelimiter = UserDefinedMaxAlwaysOtherwiseEquallyDividedRateManager() + self.set_ratelimits() + + dlist = self.s.get_downloads() + for d in dlist: + if d not in self.downloads_in_vodmode: + d.set_mode(DLMODE_NORMAL) # checkpointed torrents always restarted in DLMODE_NORMAL, just make extra sure + d.restart() + + + def remove_downloads_in_vodmode_if_not_complete(self): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Removing playing download if not complete" + for d in self.downloads_in_vodmode: + d.set_state_callback(self.sesscb_remove_playing_callback) + + def sesscb_remove_playing_callback(self,ds): + """ Called by SessionThread """ + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: sesscb_remove_playing_callback: status is",dlstatus_strings[ds.get_status()],"progress",ds.get_progress() + + d = ds.get_download() + name = d.get_def().get_name() + if (ds.get_status() == DLSTATUS_DOWNLOADING and ds.get_progress() >= 0.9) or ds.get_status() == DLSTATUS_SEEDING: + pass + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: sesscb_remove_playing_callback: KEEPING",`name` + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: sesscb_remove_playing_callback: REMOVING",`name` + wx.CallAfter(self.remove_playing_download,d) + + return (-1.0,False) + + + def remove_playing_download(self,d): + """ Called by MainThread """ + if self.s is not None: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Removing incomplete download",`d.get_def().get_name_as_unicode()` + try: + self.s.remove_download(d,removecontent=True) + self.downloads_in_vodmode.remove(d) + except: + print_exc() + + + # + # Rate limiter + # + def set_ratelimits(self): + uploadrate = float(self.playerconfig['total_max_upload_rate']) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: set_ratelimits: Setting max upload rate to",uploadrate + self.ratelimiter.set_global_max_speed(UPLOAD,uploadrate) + self.ratelimiter.set_global_max_seedupload_speed(uploadrate) + + def ratelimit_callback(self,dslist): + """ When the player is in seeding mode, limit the used upload to + the limit set by the user via the options menu. 
+ Called by *GUI* thread """ + if self.ratelimiter is None: + return + + # Adjust speeds once every 4 seconds + adjustspeeds = False + if self.ratelimit_update_count % 4 == 0: + adjustspeeds = True + self.ratelimit_update_count += 1 + + if adjustspeeds: + self.ratelimiter.add_downloadstatelist(dslist) + self.ratelimiter.adjust_speeds() + + + # + # Player config file + # + def load_playerconfig(self,state_dir): + self.playercfgfilename = os.path.join(state_dir,'playerconf.pickle') + self.playerconfig = None + try: + f = open(self.playercfgfilename,"rb") + self.playerconfig = pickle.load(f) + f.close() + except: + print_exc() + self.playerconfig = {} + self.playerconfig['total_max_upload_rate'] = DEFAULT_MAX_UPLOAD_SEED_WHEN_SEEDING # KB/s + + def save_playerconfig(self): + try: + f = open(self.playercfgfilename,"wb") + pickle.dump(self.playerconfig,f) + f.close() + except: + print_exc() + + def set_playerconfig(self,key,value): + self.playerconfig[key] = value + + if key == 'total_max_upload_rate': + try: + self.set_ratelimits() + except: + print_exc() + + def get_playerconfig(self,key): + return self.playerconfig[key] + + + # + # Shutdown + # + def OnExit(self): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: ONEXIT" + self.shuttingdown = True + self.remove_downloads_in_vodmode_if_not_complete() + + # To let Threads in Session finish their business before we shut it down. + time.sleep(2) + + if self.s is not None: + self.s.shutdown() + + if self.tbicon is not None: + self.tbicon.RemoveIcon() + self.tbicon.Destroy() + + ts = enumerate() + for t in ts: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: ONEXIT: Thread still running",t.getName(),"daemon",t.isDaemon() + + self.ExitMainLoop() + + + def clear_session_state(self): + """ Try to fix apps by doing hard reset. Called from systray menu """ + try: + if self.s is not None: + dlist = self.s.get_downloads() + for d in dlist: + self.s.remove_download(d,removecontent=True) + except: + print_exc() + time.sleep(1) # give network thread time to do stuff + try: + dldestdir = self.get_default_destdir() + shutil.rmtree(dldestdir,True) # ignore errors + except: + print_exc() + try: + dlcheckpointsdir = os.path.join(self.s.get_state_dir(),STATEDIR_DLPSTATE_DIR) + shutil.rmtree(dlcheckpointsdir,True) # ignore errors + except: + print_exc() + try: + cfgfilename = os.path.join(self.s.get_state_dir(),STATEDIR_SESSCONFIG) + os.remove(cfgfilename) + except: + print_exc() + + self.s = None # HARD EXIT + #self.OnExit() + sys.exit(0) # DIE HARD 4.0 + + + def show_error(self,msg): + dlg = wx.MessageDialog(None, msg, self.appname+" Error", wx.OK|wx.ICON_ERROR) + result = dlg.ShowModal() + dlg.Destroy() + + + def get_default_destdir(self): + return os.path.join(self.s.get_state_dir(),'downloads') + + # + # InstanceConnectionHandler + # + def i2ithread_readlinecallback(self,ic,cmd): + pass + diff --git a/tribler-mod/Tribler/Player/BaseApp.py.bak b/tribler-mod/Tribler/Player/BaseApp.py.bak new file mode 100644 index 0000000..489b52b --- /dev/null +++ b/tribler-mod/Tribler/Player/BaseApp.py.bak @@ -0,0 +1,572 @@ +# Written by Arno Bakker, Choopan RATTANAPOKA, Jie Yang +# see LICENSE.txt for license information +""" Base class for Player and Plugin Background process. See swarmplayer.py """ + +# +# TODO: set 'download_slice_size' to 32K, such that pieces are no longer +# downloaded in 2 chunks. 
This particularly avoids a bad case where you +# kick the source: you download chunk 1 of piece X +# from lagging peer and download chunk 2 of piece X from source. With the piece +# now complete you check the sig. As the first part of the piece is old, this +# fails and we kick the peer that gave us the completing chunk, which is the +# source. +# +# Note that the BT spec says: +# "All current implementations use 2 15 , and close connections which request +# an amount greater than 2 17." http://www.bittorrent.org/beps/bep_0003.html +# +# So it should be 32KB already. However, the BitTorrent (3.4.1, 5.0.9), +# BitTornado and Azureus all use 2 ** 14 = 16KB chunks. + +import os +import sys +import time +from sets import Set + +from threading import enumerate,currentThread,RLock +from traceback import print_exc + +if sys.platform == "darwin": + # on Mac, we can only load VLC/OpenSSL libraries + # relative to the location of tribler.py + os.chdir(os.path.abspath(os.path.dirname(sys.argv[0]))) +try: + import wxversion + wxversion.select('2.8') +except: + pass +import wx + +from Tribler.__init__ import LIBRARYNAME +from Tribler.Core.API import * +from Tribler.Policies.RateManager import UserDefinedMaxAlwaysOtherwiseEquallyDividedRateManager +from Tribler.Utilities.Instance2Instance import * + +from Tribler.Player.systray import PlayerTaskBarIcon +from Tribler.Player.Reporter import Reporter +from Tribler.Player.UtilityStub import UtilityStub + +DEBUG = False +RATELIMITADSL = False + +DISKSPACE_LIMIT = 5L * 1024L * 1024L * 1024L # 5 GB +DEFAULT_MAX_UPLOAD_SEED_WHEN_SEEDING = 75 # KB/s + +class BaseApp(wx.App,InstanceConnectionHandler): + def __init__(self, redirectstderrout, appname, params, single_instance_checker, installdir, i2iport, sport): + self.appname = appname + self.params = params + self.single_instance_checker = single_instance_checker + self.installdir = installdir + self.i2iport = i2iport + self.sport = sport + self.error = None + self.s = None + self.tbicon = None + + self.downloads_in_vodmode = Set() # Set of playing Downloads, one for SP, many for Plugin + self.ratelimiter = None + self.ratelimit_update_count = 0 + self.playermode = DLSTATUS_DOWNLOADING + self.getpeerlistcount = 2 # for research Reporter + self.shuttingdown = False + + InstanceConnectionHandler.__init__(self,self.i2ithread_readlinecallback) + wx.App.__init__(self, redirectstderrout) + + + def OnInitBase(self): + """ To be wrapped in a OnInit() method that returns True/False """ + + # Normal startup + # Read config + state_dir = Session.get_default_state_dir('.'+self.appname) + + self.utility = UtilityStub(self.installdir,state_dir) + self.utility.app = self + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",self.utility.lang.get('build') + self.iconpath = os.path.join(self.installdir,LIBRARYNAME,'Images',self.appname+'Icon.ico') + self.logopath = os.path.join(self.installdir,LIBRARYNAME,'Images',self.appname+'Logo.png') + + + # Start server for instance2instance communication + self.i2is = Instance2InstanceServer(self.i2iport,self,timeout=(24.0*3600.0)) + + + # The playerconfig contains all config parameters that are not + # saved by checkpointing the Session or its Downloads. + self.load_playerconfig(state_dir) + + # Install systray icon + # Note: setting this makes the program not exit when the videoFrame + # is being closed. 
+ + self.tbicon = PlayerTaskBarIcon(self,self.iconpath) + + + # Start Tribler Session + cfgfilename = Session.get_default_config_filename(state_dir) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Session config",cfgfilename + try: + self.sconfig = SessionStartupConfig.load(cfgfilename) + except: + print_exc() + self.sconfig = SessionStartupConfig() + self.sconfig.set_state_dir(state_dir) + + self.sconfig.set_listen_port(self.sport) + self.sconfig.set_overlay(False) + self.sconfig.set_megacache(False) + + self.s = Session(self.sconfig) + self.s.set_download_states_callback(self.sesscb_states_callback) + + self.reporter = Reporter( self.sconfig ) + + if RATELIMITADSL: + self.ratelimiter = UserDefinedMaxAlwaysOtherwiseEquallyDividedRateManager() + self.ratelimiter.set_global_max_speed(DOWNLOAD,400) + self.ratelimiter.set_global_max_speed(UPLOAD,90) + + + # Arno: For extra robustness, ignore any errors related to restarting + try: + # Load all other downloads in cache, but in STOPPED state + self.s.load_checkpoint(initialdlstatus=DLSTATUS_STOPPED) + except: + print_exc() + + # Start remote control + self.i2is.start() + + + def start_download(self,tdef,dlfile): + """ Start download of torrent tdef and play video file dlfile from it """ + + # Free diskspace, if needed + destdir = self.get_default_destdir() + if not os.access(destdir,os.F_OK): + os.mkdir(destdir) + + # Arno: For extra robustness, ignore any errors related to restarting + # TODO: Extend code such that we can also delete files from the + # disk cache, not just Downloads. This would allow us to keep the + # parts of a Download that we already have, but that is being aborted + # by the user by closing the video window. See remove_playing_* + try: + if not self.free_up_diskspace_by_downloads(tdef.get_infohash(),tdef.get_length([dlfile])): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Not enough free diskspace, ignoring" + except: + print_exc() + + # Setup how to download + dcfg = DownloadStartupConfig() + + # Delegate processing to VideoPlayer + dcfg.set_video_event_callback(self.sesscb_vod_event_callback) + dcfg.set_video_events(self.get_supported_vod_events()) + + dcfg.set_dest_dir(destdir) + + if tdef.is_multifile_torrent(): + dcfg.set_selected_files([dlfile]) + + # Arno: 2008-7-15: commented out, just stick with old ABC-tuned + # settings for now + #dcfg.set_max_conns_to_initiate(300) + #dcfg.set_max_conns(300) + + + # Stop all non-playing, see if we're restarting one + infohash = tdef.get_infohash() + newd = None + for d in self.s.get_downloads(): + if d.get_def().get_infohash() == infohash: + # Download already exists. + # One safe option is to remove it (but not its downloaded content) + # so we can start with a fresh DownloadStartupConfig. However, + # this gives funky concurrency errors and could prevent a + # Download from starting without hashchecking (as its checkpoint + # was removed) + # Alternative is to set VOD callback, etc. at Runtime: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Reusing old duplicate Download",`infohash` + newd = d + if d not in self.downloads_in_vodmode: + d.stop() + + self.s.lm.h4xor_reset_init_conn_counter() + + # ARNOTODO: does this work with Plugin's duplicate download facility? 
+ + self.playermode = DLSTATUS_DOWNLOADING + if newd is None: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Starting new Download",`infohash` + newd = self.s.start_download(tdef,dcfg) + else: + newd.set_video_event_callback(self.sesscb_vod_event_callback) + newd.set_video_events(self.get_supported_vod_events()) + + if tdef.is_multifile_torrent(): + newd.set_selected_files([dlfile]) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Restarting existing Download",`infohash` + newd.restart() + + self.downloads_in_vodmode.add(newd) + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Saving content to",newd.get_dest_files() + return newd + + + def sesscb_vod_event_callback(self,d,event,params): + pass + + def get_supported_vod_events(self): + pass + + + # + # DownloadCache + # + def free_up_diskspace_by_downloads(self,infohash,needed): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: free_up: needed",needed,DISKSPACE_LIMIT + if needed > DISKSPACE_LIMIT: + # Not cleaning out whole cache for bigguns + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: free_up: No cleanup for bigguns" + return True + + inuse = 0L + timelist = [] + dlist = self.s.get_downloads() + for d in dlist: + hisinfohash = d.get_def().get_infohash() + if infohash == hisinfohash: + # Don't delete the torrent we want to play + continue + destfiles = d.get_dest_files() + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: free_up: Downloaded content",`destfiles` + + dinuse = 0L + for (filename,savepath) in destfiles: + stat = os.stat(savepath) + dinuse += stat.st_size + inuse += dinuse + timerec = (stat.st_ctime,dinuse,d) + timelist.append(timerec) + + if inuse+needed < DISKSPACE_LIMIT: + # Enough available, done. + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: free_up: Enough avail",inuse + return True + + # Policy: remove oldest till sufficient + timelist.sort() + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: free_up: Found",timelist,"in dest dir" + + got = 0L + for ctime,dinuse,d in timelist: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: free_up: Removing",`d.get_def().get_name_as_unicode()`,"to free up diskspace, t",ctime + self.s.remove_download(d,removecontent=True) + got += dinuse + if got > needed: + return True + # Deleted all, still no space: + return False + + + # + # Process periodically reported DownloadStates + # + def sesscb_states_callback(self,dslist): + """ Called by Session thread """ + # Arno: delegate to GUI thread. This makes some things (especially + #access control to self.videoFrame easier + #self.gui_states_callback(dslist) + + # Arno: we want the prebuf stats every second, and we want the + # detailed peerlist, needed for research stats. Getting them every + # second may be too expensive, so get them every 10. + # + self.getpeerlistcount += 1 + getpeerlist = (self.getpeerlistcount % 10) == 0 + haspeerlist = (self.getpeerlistcount % 10) == 1 + + wx.CallAfter(self.gui_states_callback,dslist,haspeerlist) + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: SessStats:",self.getpeerlistcount,getpeerlist,haspeerlist + return (1.0,getpeerlist) + + def gui_states_callback(self,dslist,haspeerlist): + """ Called by *GUI* thread. 
+ CAUTION: As this method is called by the GUI thread don't to any + time-consuming stuff here! """ + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Stats:" + if self.shuttingdown: + return ([],0,0) + + # See which Download is currently playing + playermode = self.playermode + + totalspeed = {} + totalspeed[UPLOAD] = 0.0 + totalspeed[DOWNLOAD] = 0.0 + totalhelping = 0 + + # When not playing, display stats for all Downloads and apply rate control. + if playermode == DLSTATUS_SEEDING: + if DEBUG: + for ds in dslist: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Stats: Seeding: %s %.1f%% %s" % (dlstatus_strings[ds.get_status()],100.0*ds.get_progress(),ds.get_error()) + self.ratelimit_callback(dslist) + + # Calc total dl/ul speed and find DownloadStates for playing Downloads + playing_dslist = [] + for ds in dslist: + if ds.get_download() in self.downloads_in_vodmode: + playing_dslist.append(ds) + elif DEBUG and playermode == DLSTATUS_DOWNLOADING: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Stats: Waiting: %s %.1f%% %s" % (dlstatus_strings[ds.get_status()],100.0*ds.get_progress(),ds.get_error()) + + for dir in [UPLOAD,DOWNLOAD]: + totalspeed[dir] += ds.get_current_speed(dir) + totalhelping += ds.get_num_peers() + + # Report statistics on all downloads to research server, every 10 secs + if haspeerlist: + try: + for ds in dslist: + self.reporter.report_stat(ds) + except: + print_exc() + + # Set systray icon tooltip. This has limited size on Win32! + txt = self.appname+'\n\n' + txt += 'DL: %.1f\n' % (totalspeed[DOWNLOAD]) + txt += 'UL: %.1f\n' % (totalspeed[UPLOAD]) + txt += 'Helping: %d\n' % (totalhelping) + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: ToolTip summary",txt + self.OnSetSysTrayTooltip(txt) + + # No playing Downloads + if len(playing_dslist) == 0: + return ([],0,0) + elif DEBUG and playermode == DLSTATUS_DOWNLOADING: + for ds in playing_dslist: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Stats: DL: %s %.1f%% %s dl %.1f ul %.1f n %d" % (dlstatus_strings[ds.get_status()],100.0*ds.get_progress(),ds.get_error(),ds.get_current_speed(DOWNLOAD),ds.get_current_speed(UPLOAD),ds.get_num_peers()) + + # If we're done playing we can now restart any previous downloads to + # seed them. + if playermode != DLSTATUS_SEEDING: + playing_seeding_count = 0 + for ds in playing_dslist: + if ds.get_status() == DLSTATUS_SEEDING: + playing_seeding_count += 1 + if len(playing_dslist) == playing_seeding_count: + self.restart_other_downloads() + + # cf. 25 Mbps cap to reduce CPU usage and improve playback on slow machines + # Arno: on some torrents this causes VLC to fail to tune into the video + # although it plays audio??? 
+ #ds.get_download().set_max_speed(DOWNLOAD,1500) + + return (playing_dslist,totalhelping,totalspeed) + + + def OnSetSysTrayTooltip(self,txt): + if self.tbicon is not None: + self.tbicon.set_icon_tooltip(txt) + + # + # Download Management + # + def restart_other_downloads(self): + """ Called by GUI thread """ + if self.shuttingdown: + return + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Restarting other downloads" + self.playermode = DLSTATUS_SEEDING + self.ratelimiter = UserDefinedMaxAlwaysOtherwiseEquallyDividedRateManager() + self.set_ratelimits() + + dlist = self.s.get_downloads() + for d in dlist: + if d not in self.downloads_in_vodmode: + d.set_mode(DLMODE_NORMAL) # checkpointed torrents always restarted in DLMODE_NORMAL, just make extra sure + d.restart() + + + def remove_downloads_in_vodmode_if_not_complete(self): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Removing playing download if not complete" + for d in self.downloads_in_vodmode: + d.set_state_callback(self.sesscb_remove_playing_callback) + + def sesscb_remove_playing_callback(self,ds): + """ Called by SessionThread """ + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: sesscb_remove_playing_callback: status is",dlstatus_strings[ds.get_status()],"progress",ds.get_progress() + + d = ds.get_download() + name = d.get_def().get_name() + if (ds.get_status() == DLSTATUS_DOWNLOADING and ds.get_progress() >= 0.9) or ds.get_status() == DLSTATUS_SEEDING: + pass + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: sesscb_remove_playing_callback: KEEPING",`name` + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: sesscb_remove_playing_callback: REMOVING",`name` + wx.CallAfter(self.remove_playing_download,d) + + return (-1.0,False) + + + def remove_playing_download(self,d): + """ Called by MainThread """ + if self.s is not None: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Removing incomplete download",`d.get_def().get_name_as_unicode()` + try: + self.s.remove_download(d,removecontent=True) + self.downloads_in_vodmode.remove(d) + except: + print_exc() + + + # + # Rate limiter + # + def set_ratelimits(self): + uploadrate = float(self.playerconfig['total_max_upload_rate']) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: set_ratelimits: Setting max upload rate to",uploadrate + self.ratelimiter.set_global_max_speed(UPLOAD,uploadrate) + self.ratelimiter.set_global_max_seedupload_speed(uploadrate) + + def ratelimit_callback(self,dslist): + """ When the player is in seeding mode, limit the used upload to + the limit set by the user via the options menu. 
+ Called by *GUI* thread """ + if self.ratelimiter is None: + return + + # Adjust speeds once every 4 seconds + adjustspeeds = False + if self.ratelimit_update_count % 4 == 0: + adjustspeeds = True + self.ratelimit_update_count += 1 + + if adjustspeeds: + self.ratelimiter.add_downloadstatelist(dslist) + self.ratelimiter.adjust_speeds() + + + # + # Player config file + # + def load_playerconfig(self,state_dir): + self.playercfgfilename = os.path.join(state_dir,'playerconf.pickle') + self.playerconfig = None + try: + f = open(self.playercfgfilename,"rb") + self.playerconfig = pickle.load(f) + f.close() + except: + print_exc() + self.playerconfig = {} + self.playerconfig['total_max_upload_rate'] = DEFAULT_MAX_UPLOAD_SEED_WHEN_SEEDING # KB/s + + def save_playerconfig(self): + try: + f = open(self.playercfgfilename,"wb") + pickle.dump(self.playerconfig,f) + f.close() + except: + print_exc() + + def set_playerconfig(self,key,value): + self.playerconfig[key] = value + + if key == 'total_max_upload_rate': + try: + self.set_ratelimits() + except: + print_exc() + + def get_playerconfig(self,key): + return self.playerconfig[key] + + + # + # Shutdown + # + def OnExit(self): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: ONEXIT" + self.shuttingdown = True + self.remove_downloads_in_vodmode_if_not_complete() + + # To let Threads in Session finish their business before we shut it down. + time.sleep(2) + + if self.s is not None: + self.s.shutdown() + + if self.tbicon is not None: + self.tbicon.RemoveIcon() + self.tbicon.Destroy() + + ts = enumerate() + for t in ts: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: ONEXIT: Thread still running",t.getName(),"daemon",t.isDaemon() + + self.ExitMainLoop() + + + def clear_session_state(self): + """ Try to fix apps by doing hard reset. 
Called from systray menu """ + try: + if self.s is not None: + dlist = self.s.get_downloads() + for d in dlist: + self.s.remove_download(d,removecontent=True) + except: + print_exc() + time.sleep(1) # give network thread time to do stuff + try: + dldestdir = self.get_default_destdir() + shutil.rmtree(dldestdir,True) # ignore errors + except: + print_exc() + try: + dlcheckpointsdir = os.path.join(self.s.get_state_dir(),STATEDIR_DLPSTATE_DIR) + shutil.rmtree(dlcheckpointsdir,True) # ignore errors + except: + print_exc() + try: + cfgfilename = os.path.join(self.s.get_state_dir(),STATEDIR_SESSCONFIG) + os.remove(cfgfilename) + except: + print_exc() + + self.s = None # HARD EXIT + #self.OnExit() + sys.exit(0) # DIE HARD 4.0 + + + def show_error(self,msg): + dlg = wx.MessageDialog(None, msg, self.appname+" Error", wx.OK|wx.ICON_ERROR) + result = dlg.ShowModal() + dlg.Destroy() + + + def get_default_destdir(self): + return os.path.join(self.s.get_state_dir(),'downloads') + + # + # InstanceConnectionHandler + # + def i2ithread_readlinecallback(self,ic,cmd): + pass + diff --git a/tribler-mod/Tribler/Player/Build/Mac/Info.plist b/tribler-mod/Tribler/Player/Build/Mac/Info.plist new file mode 100644 index 0000000..b1fb4ff --- /dev/null +++ b/tribler-mod/Tribler/Player/Build/Mac/Info.plist @@ -0,0 +1,57 @@ + + + + + CFBundleDevelopmentRegion + English + CFBundleDocumentTypes + + + CFBundleTypeExtensions + + tstream + + CFBundleTypeIconFile + SwarmPlayerDoc + CFBundleTypeMIMETypes + + application/x-tribler-stream + + CFBundleTypeName + Tribler Stream Meta-Info + CFBundleTypeOSTypes + + BTMF + + CFBundleTypeRole + Viewer + NSDocumentClass + DownloadDocument + + + CFBundleTypeOSTypes + + **** + fold + disk + + CFBundleTypeRole + Viewer + + + CFBundleExecutable + SwarmPlayer + CFBundleIconFile + swarmplayer.icns + CFBundleIdentifier + SwarmPlayer + CFBundleInfoDictionaryVersion + 6.0 + CFBundleName + SwarmPlayer + CFBundlePackageType + APPL + CFBundleSignature + ???? + + diff --git a/tribler-mod/Tribler/Player/Build/Mac/Makefile b/tribler-mod/Tribler/Player/Build/Mac/Makefile new file mode 100644 index 0000000..4f00559 --- /dev/null +++ b/tribler-mod/Tribler/Player/Build/Mac/Makefile @@ -0,0 +1,116 @@ +# Building on Mac OS/X requires: +# * Python 2.5 +# * wxPython 2.8-unicode +# * py2app 0.3.6 +# * swig, subversion (available through MacPorts) +# * XCode 2.4+ +# +# Use lower versions at your own risk. + +APPNAME=SwarmPlayer +PYTHON_VER= +PWD:=${shell pwd} +ARCH:=${shell arch} + +PYTHON=python${PYTHON_VER} + +all: clean SwarmPlayer-${ARCH}.dmg + +clean: + rm -rf build/imagecontents/ ${APPNAME}-${ARCH}.dmg + +.PHONY: all clean dirs + +# ----- SwarmPlayer + +APPRES=build/imagecontents/${APPNAME}.app/Contents/Resources + +SRCDIR=../../../.. + +build/imagecontents/: + rm -rf $@ + mkdir -p $@ + + cd ${SRCDIR} && DYLD_LIBRARY_PATH=macbinaries PYTHONPATH=macbinaries ${PYTHON} -OO - < ${PWD}/setuptriblermac.py py2app + mv ${SRCDIR}/dist/* $@ + + # Thin everything for this architecture. Some things ship Universal (Python, wxPython, ...) and + # others get a stub for the other architecture (things built by Universal Python) + for i in `find build/imagecontents`; do ./smart_lipo_thin $$i; done + + # Replace any rogue references to local ones. For instance, some libraries are accidently + # linked against /usr/local/lib/* or /opt/local/lib. Py2app puts them in the Frameworks dir, + # but fails to correct the references in the binaries. 
+ #./process_libs build/imagecontents | bash - + + # Background + mkdir -p $@/.background + cp background.png $@/.background + + # Volume Icon + cp VolumeIcon.icns $@/.VolumeIcon.icns + + # Shortcut to /Applications + ln -s /Applications $@/Applications + + touch $@ + +${APPNAME}-${ARCH}.dmg: build/imagecontents/ SLAResources.rsrc + rm -f $@ + mkdir -p build/temp + + # create image + hdiutil create -srcfolder $< -format UDRW -scrub -volname ${APPNAME} $@ + + # open it + hdiutil attach -readwrite -noverify -noautoopen $@ -mountpoint build/temp/mnt + + # make sure root folder is opened when image is + bless --folder build/temp/mnt --openfolder build/temp/mnt + # hack: wait for completion + sleep 1 + + # position items + # oddly enough, 'set f .. as alias' can fail, but a reboot fixes that + osascript -e "tell application \"Finder\"" \ + -e " set f to POSIX file (\"${PWD}/build/temp/mnt\" as string) as alias" \ + -e " tell folder f" \ + -e " open" \ + -e " tell container window" \ + -e " set toolbar visible to false" \ + -e " set statusbar visible to false" \ + -e " set current view to icon view" \ + -e " delay 1 -- Sync" \ + -e " set the bounds to {50, 100, 1000, 1000} -- Big size so the finder won't do silly things" \ + -e " end tell" \ + -e " delay 1 -- Sync" \ + -e " set icon size of the icon view options of container window to 128" \ + -e " set arrangement of the icon view options of container window to not arranged" \ + -e " set background picture of the icon view options of container window to file \".background:background.png\"" \ + -e " set position of item \"${APPNAME}.app\" to {150, 140}" \ + -e " set position of item \"Applications\" to {410, 140}" \ + -e " set the bounds of the container window to {50, 100, 600, 400}" \ + -e " update without registering applications" \ + -e " delay 5 -- Sync" \ + -e " close" \ + -e " end tell" \ + -e " -- Sync" \ + -e " delay 5" \ + -e "end tell" || true + + # turn on custom volume icon + /Developer/Tools/SetFile -a C build/temp/mnt || true + + # close + hdiutil detach build/temp/mnt || true + + # make read-only + mv $@ build/temp/rw.dmg + hdiutil convert build/temp/rw.dmg -format UDZO -imagekey zlib-level=9 -o $@ + rm -f build/temp/rw.dmg + + # add EULA + hdiutil unflatten $@ + /Developer/Tools/DeRez -useDF SLAResources.rsrc > build/temp/sla.r + /Developer/Tools/Rez -a build/temp/sla.r -o $@ + hdiutil flatten $@ diff --git a/tribler-mod/Tribler/Player/Build/Mac/SLAResources.rsrc b/tribler-mod/Tribler/Player/Build/Mac/SLAResources.rsrc new file mode 100644 index 0000000000000000000000000000000000000000..162a889ce7cf5f7383cc7cc849a1a67120264037 GIT binary patch literal 46262 zcmeI5dyr(;UEkXvt)wv%9F|m46qUM}P^I0}>}Yqv0!a=@?@Z5bOS3cM>E4yMAeG#i zxiiz+p6;O^J6b^ny!r!!pTxmXt~^Qz6hbigA!37V$pi=6JPOJVl}bVtsiYDx1Y$@* zTqFhge1E@l&b>X4m1Lwi0aUis(|zwbzw>*)&N;Q(t7^5{`=6`T-pBu6^?CmIv-Z!o zom?ok@^N>RE#{NeiYgml3twDb`d+ph?XEn-e&YxMoIV$We4gRqr>A06QBY!tm=F`I4h>3TL}1I}clewJ@+6vLstGVE`S_VYn8mt|qh z&3@MFk9_FaY>zb#vzZ9S+zdc}Rqe!G3&nUe+}L?_{rVHd-l$Uyir(Gz^=`4#D|&Y= z48|MRi{4bLv$0bQdI0?Ej|hgMm$f=Y*6C%RuDth|*=)b)`A@r@QMSor7eGO0cX!;| z=8sL#+zN1>S?WAd^q#m;3`T=|dzclS9-oJ!yrq^TquFe|GlB}A zj<9_u%X`-ymN^6F`n!98oNWSEoFnUv2LQ>S-L9>y0^kJTjI~|ZXrpW=UoU!d5c6rU z6DWCG8h-0bqG6yzNy7l&7ed4tf%$s^bGVnktKYq=z5xyM!PK%?!#>{K+}#^=b~}Tq zi{pH-nGc}h$6B2XvV14>;IVNrfKxX5y{y}ZmEJbk?ezG-Z#r zJd^F^gFIk2A7n*0D_OA7v%klCW5(!ftZx|?aHEEN+ZQ%%xg@b}?Z4Mf+`Tx+dvDD< z!@KKXRoZg)01MOO^z6ZP(W2TA}` zG5)j3< zYI7F*?JZGPPjv(v1m5gC(dqX(h=|CMi2DSz-O4+rU09_rlChyM%L)rbvt*qriR;}F zdNMY_pJ}! 
z`s`ib`yamNUjp~D@A}p!zxm10liN==p1k;E{cYRdxA*3Eeb49L@^|0$ySIMf?Vo$g z+i!i!-v7X@-@NtNxBuo_zw_3oZvEmL>(zH|eeTKmC*S$x8=l;L<2OC|W(|JpqqlzT z)~DV$PBVY-jW=$6?z{de0Db({du}~<>jSrb;EfmF__x!@pS<-0<=B}2u4B!3v%iKQ_F(b$!sek&Q=p zx1<0d*gIEtvxRZC?axUP%wFB-bi3F4m=jH~aUIcmOEwb+o5{A`J;?IWs0358XG`Ee zo8#*vf$bw>FN@=cw*|!U9Y$lmPu5QS#nt{`G~OP+V>p{#805oF_n!HFPePav?!glJ zr=%k9Y31D;My9tQI|xY7kUTF1xtmEpqAW~1Ch>WzRHKY5vW0p~6C6~K{fd5M_}-gM z8&;;67$!kyYq?cB@fEGk?p_yZFE+FJosLB6j$i!IPyeSA@Bgs=bH}s4_nD_oJo{gt z{eh?6)BfbI|K$Jr_#f))_dWN^zy18De)&D0{_IbD{1<-z`QLf^M?djf?|t^E_v*K& z-uJO*-~0Z5``%}N{*%A`kw1FsX@ARyi_iIAPrdJ{r(ue3ubnvA9^{*y5lh8uieNmm z`oZNBKlrr%bH_9D&n%tz*yGO(pL%5VnY|ym_`WCf^$&gc1K<2ZouB#ihkoHle(HUn zfA-%$RsZNO|BF9(>YwSir+)Z{EHQL&aypUqAfRyz}Lq3ps3g zN2Ax-zVk}Kzwf%TITTM$)d$x*_zEXQB8+W_-Qsrx!+uVwO zQaf>HYuw-M+ws{k_ia z_&R@N#m*)UUVqrfHOhB+BhP*%JpLdJeg{$sGMy~#V}{J$dM|2&ct zZELoX_n^3(!hp&@-A48q6yGCgt-N=~PdxvzPu!z_WzT)`6aVJKCw}@v@BP^yyX#|r z__6;2hyDEz|N8U4mA&^jp8Jm>?5E4#83b1k3IiK&;1VHeB=Wkf9eyz^wFRD=+8X=vAOWxPya#o+|NDx zub%yPX#ktK9Oa*X=;`nVRQe_OXt_TKqHs?WTaZZ{l7gSR{%$e)Gd~Xl#lVH51WbkByP@&S@aY`AfIIkO< z>p&4kOi)YBJ^i^WJsdIuA7Y@vKK!l#rr4a#VC`(HBWF#1$e`#J`4BxgCRV`K5inhD zwx_3DqA`@k9oY{~goy85#yKi}jlsh8GhBoLdBV3fnez^1(KMC`9Ys zlpi(h^Ll?>un348!1ivY2rqihJdJ-i9QJXz88F-IZ;a(*Ikbj>>8Vp%@Jx(xW=`8Q zn+592ms5s6iYNQyk-}S?wGAzKc6zGQ+vtur1x5V5OMnpO(Ab6rOkF?2nI`H4(%Edc zzuDQ+e+#W_dTMXHt{8U~Z&HJ=k4HQ@)R!SxWhwW;A;SXNF+>OASwLHcXN;otG7kt+ z!033gnfG_Fw*n@vQ|R6r4|>cAViZ)!pfUt=K2~gu^p&39>UX>SeXY(sUgLrXMY%RV z=Ii|%a)l$MVK2ZntXrgQa5#0tPs1JY-g*(y#@t|8-^Det#PK>R8&4P#?BQ`3;~iM5 zv6w)pt_M%&MrV^DXX>r2*_t6d4HP4i7f%_7h=Z;Z53|GsBhy*^S1m8Pe9gbuwe zk{8>BSW-TvMRa(Hqs-By*|K~L(+wc@pn~X~eiu<4PEV~%$K>P}d^VQ0(ak%%un`7q zTRbvw@wdkUg!?-MLn{^m6qv?1+2joA9`L6`V1OkI?@D!`GzJ~g2-}v;!PbEsXu=~M zhJZJEFpYFTO1dP<@QhXh^QRHp!MvZh}QpDIr^uEt1OG zbTzQ#5{54FXU@-^n>!a29DLeV%837I3FNZ~NMXw$m4~&m&ZRcC|Cu#_r@9 zf`t8K#02KB0CGkO@FyKTOzFe`%DK7wZ7G7sRvZ*udNBNudy0VQ6@wMg3jaEcv6QbXBFlH@;-Kf}&L$|g zI;0a#xA%K+V1Un)yXEL6zl+IeXnFBTR#+|`29a&vA^<$h!`)(QWQ!K8Mnlso055GT z0gU*JD~4TA-6Jf^M!t(l!lRMa2?P(iS))k{1t@IHG*Esc?{rO*mTu6+4%+x-kNiGBT*LRxg4c!PA#?;C48(KMeDQ)RKt~IB#A9cHitrn zMM^x_;sAG0K8Z-M5(*1$M{FTH=z%;!{7|TgfIk6m zNXhX+wzibJ9wk+0$TSE6sEbA#luQnM7$Pup1`_QdH-?Y^F8Wl!F=jzjX+jm`vdRy0 z=a?UXm3zZ~OJ0Emji>6yDrrpT%c_u+JQyXUsukrJff7?RRdx@!hkvri`lJCivxUw! 
z-W%b#^sZFYTvi7tkqiyEF>5o(8@R{zSehf5ViMdpwMRVMIyb!eIDCOjRKg+~*9- zoe>D>?(O9E7ol_>r9^DZ^7jP^k}sIUyGL@E4!}=R&twkeF8%DjJ z_|&vKj|_3fNWyi=vlT)xPAKI7JVuE$5W1k51;d1Fb_T(PfSq=8q8w&GS_6%-F&H@G zEE&iP7>mZ*htrBR;#&uG2@RF%-`bcHZkeVT_xdnh9E~m^p2!8}J81>di7F9NTv#ea=E$DRrx5Mbh6m%f z(JBv_zFSl5J~WqTfDb{1hUE{?RHh_Z!34{^@X{g*O|3;T8lQZvxi;c^){FLxKgKtX zvK@vKNsT2g@bC<)_(0~^VWB3V<44OIpphkCgQCCX0<=%yW@;tNe7%H{eajaIh2lC3pX*BULf zSlzWL`jcp7t-joDHd?b;;|=<`CAHeTyt>qEEX-vWuJMS)H;p&c`LaQ6;7Xgp+UPm* zNH&s-a+*dud!gByU#d4vgI_x^u_g%f+Q^q1Yx9=?qJE*d)NJ$BTDI71FAEgY_siF^ z)%seyIe%rTzLu?CSzBE}CG%AKQoU_)*A@M}n7Q)LTpS;5E-e|H&E>^4tOZJ(f!ddv zYYR3UP&F@JYPSsEuXRkdF4ew>dS4866BxV+~{-m^MP(T%K<( z2-QooS!=a1-_!?S#+bDlufM`Ce3LEIFV`R_vNWvQ)^FRv^#7q0>Q ztPt8>Yl3_Gi|3EFe7<;_Nsuly)|ywL(N)%gp=_)yLguTD<*d1w)fcW}Hy2{;3Q}9m zK!NbAq2~T*)?7DdNrB_~;5ZQw_7#FEfW)CuYO06(n2qf)WawwL!h*i=!Xww6D6zglPt0h4 zWj*=rvJvI4NeBH{7Y-T=DLSXpf> z&&{t~KHj@aGJN(JSL;asOJADbXfs4bpxMaDTv?l!G#~Kfva5(MPGxrPMLTb041)9J zf5=zE>2=48<�edl_Zt?mwHgsQ8Zh+j&-p$;NmiE$mUA&66yooHSq09(e89*D5dq zsj`nRkFRIm)}P$RV}*h#h9xtnje;-H>2n-K<8?@ou_czbxUCm>=Kp;+u!<-Gzc1Vk z)Idk$3_V!&&Z;xQKrc9DbheIk&%%tQvr!%Wr8pZd-~TuKje|3N(Oiyd);D`eE(b1N zSj89Sbxh3svU(jBNzazfhrE-j`(Z9uOm)AUt_EIM+al;g7XOuSHI8>|l7F#HewF?Q zlNxDhOY9LWhJ-yX4^lbYBe7xGddW>U-*+pB?xaeHCEv>Cb6QdQRO3+lrp-M4*Y9n0 zsWa*cy=6g%(&D)7UtAw3D1?{C4GM!D=LuPj=oNK^vbvHIWtrQexh&Z4V4G&tjYtPb z8ChY&4<*h>AP<#ezt0|?+ZpY4ad2-cJBq_cPR?2u7o9YFNmZR=e6nwN z;I-MR%KOmZ(VY&$xj>2r$1Ih8>a;SPd*B?-j}Y*RCE`4%A8Dvy;ES4wGh&CNhIMD8 zpzHs)Oq?V3h5e!9GjYPjv8lN5~O}0}166vnvw{ zE7JT=+bmY=7)(@Dj+CGEb96>@w3x~(J4!~y<8%knx<^m7R3cjITgC*m!_yCDGa;Xq zid&WmE4A-|HkBh|O+Y;k#NbDisS3g{Z;GgvYkcycz_qnSX5G}YVlg3whColC3;1$n z<6eiBaCnGp0{uM7YLbhlHb=kdAUk!vM}bFa#GKHux7jUN@HuqP-1+nOojZGugs6Er zC3FHI?+&fZ=3P$!VpS(=^L-p76ZbRpivjM++R}s3bnR?*=NyDN)^cAJh#9jxuVix8 z(Mzh~Pe*drmSaf6a~I*ZD$I90?HM_E1%AKyK2Y>?EmRb;NrwdQDza9}iTO5xI?E8`b3n-z%W=}GDL z_GxFM%8yNqy+PGDE`EpCK;83pA)SnmdLJ$PbCbe`D|!mZ&YEH3FZQzt_Db#Cf|CE0E-pVC(Jgh)Q9NZ16YrM7>ly?+$w9sN!{BSW?wg((HW*N z_3K5Pqv!Zy2h*mvUY=0ehB_{gmh1M4#?eZXhmrzm3#&>7e`H228LRCXx{cTgN0X>E z&e4QRRS1E$0tK@5(kc-_PsS?8%Ug-a!aokUR;K2|Ra#D(RKe6~(xS9!9Us<$63dy5 zwHF`f+Ncwid7^=H*%@_r!nQBa##!IRv1IEN&Ax5KveL-u4NIZzoM=|A;ifqzt7W@A zfGs zjG&QEj7ugB_E6Wgz_1tBDN*^SQoo1D%b#KDGQpx!oYh{^n8Wd&^_LTSOe`rDTtfGt zmE+JJFF+5_AQoYklBA*IHwq68LTEHD9jVsOlWAkV0DH#p{@3i=h)gXhbzR zZqPfJmx)fjzMFDosy4Od@q5 zry(u7-0#k2uU8CCYs_qRZOkwF7w@W(l796*Cl;)a2ipX}82E_*I_*BPy(ZGd>TOtp z656$M22WBuC&)ys59pdPw%0@R6WWxtise?m9>Nw~T8eQWk;b9K4;gD08#}!|nOb6N zRLa#s=dmK2p3*q!!Gea$q-Z_qa+F@xJLdETfTZ+n^HS8E581I4Z2qGhkYpX*Xbsi0 zocXlADM;UzJ;YKF4!*_D2s3H=8YXwy>&;UG~mXeP{Em znwc-xv+V5obLY;UIrp`sDp6B)sSJ_m_LAk#tTN0x=R~~L|2%*H*|S-_?yve+ZNRN~ zF#e_g+vTzxj=m+)<~wO?ztzta#iZdjO?1vE4oo7``j|jZKyE7H9SE2PR2f z+1=z+b`<;xNS!rM3^&!b5M3(V4{xhN zH4<*kj#B5S0UnYEg}X=lt2?dVWU7Q8#M8bDCH4JBeHC?85Yp8po!g^f$eC{F*dq4I zDdDOiCU!XK3nP$Qk^>R#Y~16Jou^`?156l3uf%zKca{F73b6=7mlCZ|}!d`<~$*{m%AB zG?0G-e0^u?eKFcLAehAre2dcVUFQsd0SD^{^xSq*g#DP)Q$eS@j9{mP7uyOXB``_Q zh#3it9-})ED-2qQ{znBa4Tv(32tlyX>iG-1P&aageW`I;FS8G}OGmu#v|VQh*?zEJ zkE*L#D7QXo(2F6x#XEZWkY9at>8Nxy%1+i?Wc-z zP-U%rG(CkhxD6G^aTX@5Xr2%u9t1Qy6-0);)-bVQ7Q87iRz!&L)Yxou-8uab*mNf+ znrfKJwm_I~%&Q_cwIX&fqpiZ5wC%wTm3Q?}5K%#!kfJ@kqA!w{@$ zci7A>PGap$vfg2ba>A>&9}CX~j3h+)Y{3r}GTD|tyzW0Cp5p-<$4F25bJJ7)AjEHn z{XEqPQ9VOBkQ!veLt+p=q&JwoKH9G=p$8%-f53Nw%r>9v2^Z)X4=B$jS}2OEiCY#4eG3^{nwk7lj_(*h_Lduwu~#F(+)Jj z5ZuKI#32)-5rh(TBl-4!KOO?280={|>}%6ko*BXHz<=0c&Bt@Q(^Chh3OhVmnCyGp z)ewe_!IFf61p)yC?`=yYz)GEO#chTeTWh^;G@PiEJL#m-R*; zlmW*Fnb7_I8B%C_IxcFbzxiNx>Kr>etLGUoqykZkF+C}C>byNd8!yOpSwN8l<1Rj7 
z=e4oH(0=WbuJ3ZVLrTyXo8vN7_YFzVwd*LbDw9S+g=Mr$QMMNO#Qt*80a&|OG_bJ` zT1mcIWecpS2N`|2R%LvIPzh(Qb;2Trc3dWtj0zmQ%0UM@u|mfs5m3qm%Nx{jC#I)3 zkxl(giWT%;h5tkFBN7P^IA=exk3~cVmTxEQqbk@OkhV`xa2veuzj9)Moryp>gfKa_B>1{T( znl$_Q4TzOm<%RVf?GhF8wd~??EB0ej`dd1TgWYm+f`Up1xsOU1b&}eC=)2LddVCtvu04mP8&@7F8`g zu)tz^>gd-d+|y{q+?4^*n^m}wb@bbiMi+ILmD{Yt7UCE5_ne79*8y))(i~@YVU}40 zk*r##I2{TjWi4Hd%F`g#XL(5bh^tH$y$N#!(~)w*uEt?$Mo}GW8I)wSl!!;%@&jbn zBLgp~J#=i_ftUUW~g{tGgI{h@zp|TcsnYJIE^#7 zFk$pRj*1d4mjchIW4x^@95H64NL&VECN=Y*Q>(DE`5KJ#{|&P9+S4S)7Bb=S0K#OD z!;vGChYOgUU;x{3-OV2_M-q7x9DP zp0>4D1cudID!1sS05)pcw89bqiDQt_6u)L0odJhZ$(j%l1WmUHwUd(ypyaq&^ahce zn!nA&@6l-3p>0!Bci3TKex z%ob-=;MuD)f%XG2vrfd_=|CvZ*8!wMio^Xj>31C(_rBI+4pxws@?+u9S|JhmK(A%@T^F*2HseO;8;c`HJCe3WAFfbqK)P z&Y=wE(@uJFMpg^MP|`v4CG+5$~*&4C8QP5OpH(8QTh0aId~ zh+_Q75F`4DA>bsZd0eLi4LG9N5>n82uju2^N3824AEIA&=xwU_aUhigmL_=aFi^s} z3?WI&kSWu-PZ6Z(;&29u&M*NYI|IV>*d!bphg1rHF&dK1OlW>oJ12>AfF26HJOo6# zl+*7xptsCx-M7T)K95s>2=74=dof^_R#iWBNb}s59AYv(5Tj&;ZeWe~rl+97V0dp- za5xhjCo9$?cj=ifkAK8-;h3aA5ocR@%T@Deumr57QkUC>5abIdaHWhXatyN##$eZE zAdc*9rtFuEZg@;VI?$0U0S|4K&o-0|9s1|75~7RBq~pS~JmxPDh=BS^ETYu2c_=;$ z%^CW}`&!|VRdSLW9H@a=S|AfdV$0@vJ!MYT@fkkRTQXyZ|4oh=;E$`t#Og!N# zfqn2uD4=Yvxqr7$-;=yQ_Arw9mDPj$vnlwhGH$Jb&3W4(?`He#v8{8J^rvEkq=mww zF}5cSuyHry>Xeqh2UBz6rmG z?*xnqBgz6DYQq>e$XAaiL(&fTATOdfW!rHja*QVjs7XBp9*uUlIy6-A10bcEJ6M4L z0=jRV=?EP|`(IUGCAyLd_v9iWm-6C@{n2|=h&Q1oH4q&q>da31+m5*d6{jwti3t>R z-W0A@7(;qcdOaNE;R-}8PL;zxvC)z#>Dz3k3en8mX%}#g8BMul&g>TU90$&_dK)%W zG95nTXN!&Xts#mpNW*w4tvbA6)N*EvHez4~^>&4%UNwbnQ<+z0&{lskfCw8Hd<-{A z)T08yQQ@c^BX)V9s?fn|@eZYrIl#Hhb>1e%nQ_*gTA^=l<_kg z13Z|6Z~%c@onY6(VuEDUk=z@e6n4VFQ#jSf8M0t4gPh8Xw$#c*W}s9#%HNYsG-o@U zUL_ZVbItdZU2U@z{9yB*un^(k*-87Xvf#Ww?KufvjrraIr|~fumNaM|QW$Hg ztIRe0X_iaK&uWw-3{pGb&IuotuuVOj!3&C`$whKHrk^5mChdFm1?ZYt(+&IlEDtUh z19BRwO0A_NlS@25RucYUsC$|CNM=X>DN7sE=TaeBG%AZaN@t9}35b4ZtYGvi1N=Fp zXgKyRX!BF{c#IkrE;gDY^wtOomX8havi9;ZTZC0UqT$szEXdAh1+9=GwyHdoT`x#O zBTKMXDVc7tB|;$Dw1PBe)kYeU>^nGfog+dl^Qh%v074&(*O4litTvvMyic8?(prMk z6)+p*E|JDgVuowf*(r4)hTq${IaG}`NQdjpSP}H;CwH+GIMxiiRumj&=*(F@aik z>BVZppuvw%?Q!@M(oa4+YSw186f6pa<~Q5`R&~%{2-Ky~>U3F!@(hp&>qVe6CY(m; zn%V+@f+p#fz@rl4O!Gbc6b>jeqN<=)g5oh_ z&|kJVpkYnHIV6-2Fb)R;D%9nHkOK>c-bim8_EUkU6oUlC=4svg1fS;{c51RXQGh38 zL?u@7%9597Jm#>NUnk1R=MzZ4tiEHK8N#6f-r6wfN-`DeQ9u+%5ds4wG~xk>>18yT zOfnX0+s-wo%x}Ii!K6bQ-|VQqwPZAi5pkqtVm2OE*7Iavf%7j>%Xp5^MIawkQ)r8n_^QCEil!Kvz z3{|y<+Xi`TrsYYjggfKPrCQM?t-u6TnzCY_K?kRJt2+f=mzuQ6LBs{T#`wa7-4OyU zBbvtL^<_k-VwWG)z&oZ3i7k7s&*Sf}W^yEoT>MdU2a^v}G~W_sCrt7{$D0644B6 zP+KvYM|!mL&H@4!pro}`RE+G7NHfXOK*|D$9?}5b%LDUJE*&UFzmD5ImsWM5&-% zcrP-k4}E+MST3)3-YE<%aw4?@I|enZHFtQ#8AT&vp9qmADeJ~kMMPq4^1{|o441ni z7JxF%?Yw$b6+-ftb*uO<@0iIAH^(m^;-uQNstCPY%w+U{N;zdd4!vhgfNWXoF)Eeo z(!w9-!y-`>FFdjB;{fq&*_v=Mq;tKvPh4UeI}RU#!w?Bw>W!E?O;5S2P4`Oh5S=01 zPmEd^VS;&uN_fDcffa>6rjFy2DnpPeHZ!l@ET>yk3mvj71_+rq4}c9ex%QzHCZ>JC zo&h(XqNw?sis>=up_*0hSgQ!MDRc)dWUq1dG;QGKwj5Ho(o)!%biA3Q`VC8(%wmwJ z=EWP97njiKk0`dD=90^mtzZ}6vg8 zhup|*a96t(JsZ1nB6OEIq;kq)>eE6mDlf5duoT-cz%cWp)6a*!f=2{P9Ij+7G%^BZ zfT9`m5+$6^; z5>5OZ>VSdUy`dY7SG$wl6^K=&C>M}po#e4@OHytVl*@Wo3kQ~hB9rGOkE07*h@Pt>hQ`LTyM7uzy5C9}q$3d&^fl0~9 z4LPE=n0g6>+cvCJ0SI)0i-4n;hK-?AiIb?%?zeo0*xoWa66b>gYr$_hLoDx*Sv>~4 zsI_M-_D8}QTb&(2l6TNmSZbwZf*}Rwss%_C7g4L~LeS|uV~^N4GNH7OV4MR2n&i&3 zw=*At#%hE%XC2{Cd1C%+#~uPa`cC>s8<*8F8DdVG&V#WD`A|tkfTooIc=Z(q4hgN& zdKo2!5Y)TMn3gFtM*yXP_KpzA=7pj-K*ew!3R9g58Uw9sM1y}SjnGaF%gEDT0ZEkX zRCGq?Fpd&&o>dU3F>wsDIt02o?u=qwf)>}Q^w4`yGt|YBP z$j-e(L`Nz)!eQXRA{`tV)b#df2UZ73p5wuoL{HQ~W)K*}JO{38_GWweV+TVfCmW^` 
zoLZwYNT~{=D;;@>jbx^+1d;Zwo1fzS;>HtJ@i`!!DU*z1VBwzSkp?>8AKV{nWGmt( zzjEvV;j6&KB_}>HcRR)(9LpqjDink3~c$?qZUK9 zfl%G#0^*7B`0S;nU<~}a6HuNy5u(y_{ssi|YB{{89#m(zg;55c(X!+ih4gX20Fj1m zdQ(%S99S2xqt#&)vzfzCFbOUe=ZX_RIww?7aDd2h9B1(f3ItRo*Dj{NftC#YP9G2c zVCGwkbERr21j6uYQaw`M2oUv}dso($sH4>+6Ot`=@DVGd25ca?m}b+|1@|8D6@5pC zlJ-FrT#NCW?dmv~@^*xk<)2`34Jfm0ETUw6B+Pt11Q>Y}Wbv7ZIr~zXBLf{3QW_v> zHU@|msIp%q-#6eJwN`OmYZT*3px%uA#q27akt!Ya%yZnu(}OESxyBy z4y%UCOEX#%HdNXIqieE7q2chs2Hgp$&iH(kz)f3A0vtO zM9jnHG*}0}uOzEnFILqm zKxzL%VEON!v9ttZ@pNkYN#H);I z02=Jn%W=vV;U{hEF%)S(6p+DT)So%;gN7S31|nxEt8v=})~%~xTIE5+5?2o$BZLae zcJ!GQSKt%x^mK5l9=paAz}?U*S^7ej#L!qJ`Q6m8AIrxv8ieAJ3>TIo+ScQHCc{LM zs6dJUE@Y;dX;D%S^8~+Jb)5eSh(x87#tIc8i{jM{Q@IB(p+Du+68o94(0!H`$%{OsY^lLc%$0B&LeR-GTR%F%Gz#!u2K4*!Zb_d-g8;9!&lcn8>7QBlpzM2qZ zgf&d;!l_iI?&oOo;Aq%Xk{MN538dBL8}WyLyAS~Y0u%~(qN*9O0{?nCj4~k4<-Yd%l}W6}eFydm!^>`y1p-S0Dr>##?Rng2sc;Iw_%#hZ*!%w(~yHO25d%C$@0jO!4SYVu8HjJt)nKERb$*NYpK zZGqH11-~cPr82pQj7uxk=c>+A&*(99C}Kv`(ylV$UL9OJmc^4+W=S0cxtreND8|MM zfQr^;&wd2on@pn8yyDWAOeZo!y$SA45>k!zz>I}UoIYxfJLvKBWHKh+K9N?X6yp(< ztRxI1E!g<7e)EjoVksE-rnimJjUy%ije8u8;Wt9?ayoHr>CFQ@lnTzg^HRyHA^E`L z7QNxGgaMBVky3#;y&v+#VmY%dv9jlEv=q#$5FPoJN42rcI*ns1hEOCoK))#trwNy! zO$W|}4n}yTY?-sd4J^Xba$?xHXCiqUe>^TO&1?9p1~Ea+dfXxf85g2iV#FjVN<;Y& zsZ^B6pqEQhz?yAg|5)$a#`nFXn&0Avfk!< zSszO-PGS+#Ut_8Amn32w6KMO_AnMk5z-#8dR!*b|L2FtV`vCF3yn82T@1P!x-(f;& z-;Sag%~Q%)IK#$!3uqNY_il*4O6l}XSk=0xw5sS?G7Q6-UvrNB_@-{H6-wa|?=Wx% z(-*8Idu+V9t=>A}3&yJS{P+P5Y6jU-5|BGF{V8CA4`YLcH^pVAgeTk9!s5VfIQ_wo z-#(36tFWk+>nO#i@P<+{T%PzwI2|sUDAX3kq1?KS(`JSVgnPLcY=n{nTl%ZUiIq7F z>m^1kxWU1<9uc)xk^6OJm4dY6Uxph847AuZ9i=JLHzyg5Wl#z!#)MyxCe zXKj%q=V0Z0mg1{)?PR61aH5@}ArDPEEpHKxZ@zN3u*)y@7b!noLRYqU-{ zI=lk2e2`kCm?-Nr!!Wd@@NuVL4w|AZg~C$C&`N?@SbEKEHA7%@DhH@lcI?|CaZlMl z7L`5DYdO9$LbSBFsZ|#!;v@YelE%l353KD`j>@4o_$D?$RVMaYvgbENVhtGElcT`I z3E*f*q)Lt$-~MtnAna&qA>SGHkcRXbhvb@~^X5q6fuN?XGt!ez;UHZb#T*jrqvYr^ z(_nG>xaFC~g$Wq3<2srt*jyK)AtVm>NJKY>{D zLal(nau1$(iI5!O_)FK*tSgnEC<(<4;kOHQ>g|y{qRC)lm6ffgq0?f}FPJf- z*u=ZwAw}_6YA7SMg7vqGU3xu~s>c{KpMo=3?F0=M?{I(ygGF;kq!6zbiFqbc1>S6o zVpC#0rmLAygbOE7@VOi&l9xW-HjgYSaovhsyO_=_HZLyjzN5B__W<8I?P+42o(c(h z%WG+_CG8bI?!?L9k*cU6G2_30VW}$}#+Zh(=nON2Gr(q5Sv!)K9rRQb&RFgUZ$JSZ zYAf3Mp$K}zkA>D7VF$Y%_=pCiilgmdcDeza$m_Avl z&G>{L`r*EfE!z`1u!7$hpc2l3hYt!EBC2eqX(b*9P!Uh+DuN-3ULjffUgeC6*pU&T6bsSIPN&KetblhBI_8skJZT# zy`wm<<1SRZp&2a+F+hlqs7v^zz!6<``e{*CLX{eu4X35FsDFsS@1VNUrgT@$e%=f$ zqP|^AGoaljWV+(!njfxo7#ggj%Ah60AcCF1DhYHZhbnGMj1@VwQxQNJt%E}OmC0a~ z_BClaBZLX^ade={T%AvkAyF*kjKcndKE^(h8SuFY6$aG?YjO7p_I*-|`!8!jNk=}U2>Y7elS+H9&E(3FeAJgq4oJr4J0Lv?FP<6SfrL|WKJJVAy z`A!l;U%hhvKL?gwSzBFcHQ+fz<8qs8Mlvq%cqC&H0lBZgQeSF=*AVU3IQ!*6AqHU z+MMSC6RxFgtk#=r!qoiAnl41)GLdLYX&kFwxpHNBiT8Qq^;cNa5gegW>K9=%_JFNvYtx3cmyk^>tq(fO)~c13k|3>J+;ygF zAze4nAUdrph+cEqNh40+CoX>BGM?2s*U9jN2m~hT&7~`FCDK!6CP3vjW*gJEPMC4B z#5;UQRqFPgthtCfzQT<_F3K9VO211GI+v<2*{yNgj}?m0WW-|5vHdCNsQ;h zB?c_gUg4@N(-LqExAllKTfs1@W?}!3OP3w0+xEz|rSRI8!sE+p(#Xp=V(vfVr@QhD z7B!p^h+iXTt?Letm($GPl8TlG!$7!h!N~d2b)v>lCkRrZ(}C8E8N?zHdxU^`;{z=6L_I!##B}^@ny=@-cqy? 
z@RVh*Q!|u}z&ojdCSZZ4Cc7KV2WS6c!N#Y}!p{Blh}X}JoBKwu)6J(y@f5dryiT1t z-^dj4eld5y8S3ows82KCy!e z<`3-l#kxUxFOoDTQ`o=Bqo+upnXRB+=5>`2Y_Y?G&K^78ow#GDh>)%yo+dga z`$K(z{Vb@HQp+eV3kUo7v0fEj>D`5~HS{K`^ODN-V4IE-yNCe2W?O{yl9WO0ow(M& z$!VQxKy2DoO)508?l>0%5XU4<2hs?*rHox1MCqtL{@x;)hI_dNBYk7h71!39dE;>Q z^&wqQgl(jPWuYDx7}*+{jlQ=_T&BV?;jWlo=bH(&%LUckzR8_y{oXfn4H0IX&5gQ_ z=&Ys=zi@LLFrgJy_R;mYxax1W;?As%Hrd)TyLaXRs-Y3eP`Ulmeh z=)G@EB?JpkAsC-S2q}wCoG$MI9t@fb9EJXP+N65pxlGrO8QY2{BEfkcVE~BHXz#)M z?%UtrpCf_EEQ7iJVEex5saow-wOZ}{&(&)06e*VMj>ufF!qmw5I;K7aMe@2-7yt@b5v;B$8J&9$%L^B?p1YfiqUHe0KG>F@LT)XBHjzP47o z<2U#`d-83yhibJu-_7UElW(sr)M_Wblh56g@2EXotKHS+^XTOF)YfXXyZ z%GT)dHun`5U(r%+!mm!4CvC@9C);X^lXfG;8@@ZSU=!WN-2CU=Spb8~aDv(oP*CB#NY zgi3_I{z>m$mbwX?K!t-HZqh^z*qA6u<5_ zLh3e$3h&$7aZzDn!SyRwZr)n`E1_}+6;jE$&(&RGObAz*(5>+?5t4xGSAG@TiCcYv z5P8&K)kqaqiA>0_5>q_FyZCtI<>h2$q$I{ig$DXwzVLHGYV3;hgzDsjDip?IN|u|G znU5vji3p$@4(dB0Yd(3TVz738KT$AtL(=IOwl zCiNhsXalbvX*gAn)sbpzQDXQJ@XNrpu(UxN@7xOYzv$)2n+2hwr>oIQI^-SrEh@-PO9&TUKI7~-rCs4l zXwNh_-mkF+NqK2;VNPm7gz)!suIRrGALgJa=5i%4Hm3%kN-JCl<>^LpLqj9K0jnp~ z)uBW-*j7QR<1H=BNxvN>zWTEV@B1TP9Qydf4?p_k(3f8yJuOH9xicX@`>+&ZEiKB; zOoG9@aN6m_cSjC?@!4mee{uMmBmX*nf_pJ0Zk-b$MVfwTa*!F&D%rbTtvQQB6&CHHf8UC3m60ks_Z9 z-tK%Z_TvxezbPk9pd23G+3l3)uh9n_39&;H(ug&Ag8{4_fkuz(rRAk}^D+`+Lj12@ zIOFN=;_U3ian^Ei^El<{eeT?q6uH=?`Aie3u@QsO!s-yn4tiCkw5%vU8)l2d@7l%l z=dd&AS@ew7*`LmxJAeMyUjv#Q2(KrbPvb(g0TL6f)2oG`t18M%3UV@%Vk3eAd|~Ov zE~8f{moHzsgnwSX@_T{~kC1wT6m4AF&`@7rS65qGU0o?HFD}SVPr4Ny9wZh82m^$H z!oWa*z~AqNuaD1->(?VxXo_ss*W>h36f*u(e5gr;6 zBoPG)!GN$xF66eL&7_tV*Je%=CAaX*OQ-9bno3G@GHbC$1Wf7G)z#Dx4KIMT{Z7*D zg!qJbEP)yy7Z-Cg3hYV5f#MjsDTh{aOKYne7+jfr^v50gy9=7LJUx$me!V8YpsBGD zZ&fYC1;bZVn4g!Gk)D>8ilxy~Q&W*8H72^agM1i3|*c5a(Z1^QvsYvvXPbefjU z@wC>~GwW=3tbF6QoR%N7&5wTY{>j(xBgS}-GHTa%5StUu`Z!*!>;M#wmPqOyp24ig>L z82nBVE>pNgxk04Uy<<&#`vn&3FSpZ@UQ$)pH#IRdH>Bw9tvF`&`spO8$3BY{Umt$L zfcf-)S$Z`!!B`_O5%nQuV1(pG$8Bb3%1vUOt{rQnUo)7i?B1^t>TA+i#w?~8O=Dlk zcFXlE50oF;V6o*5+f}-j>kJt-9yhDTO~Txko5a^`e1ss(Y`H~lC)z>k?BsTAjC;qJ zLZ#?vi!#i#HPoq0h8|UI)xJ&J_F3CWzO=Jo>FU2=rEh4t#>`;-``^3%lG*^VGKob4 zU+Lr>fE$+EbpgA&*XhSX-~KGGD%x zchG(%Q&-<$cX(7mSuxBRSc!;)!bb{^R&LY1bN9O%RN5y2b|xPOZKCT?v~*}xEgc4( z%4E{DY5)C;mz@pU%-D#zVb%UOS1w;+^EW%A?RGofK63IK&)oY=mfV$FBz1SYcJ02g zNt^Oin*XYmJ8c;{6bi%4h^EERqcco)8fpE>l%Y#8vNAKVSZ2wv+RoniFW#$%?4R59 z^x?f5U%8Gi6V=3h7+l;+OXNm5-A-K=9Th*Cyb=BnV{Owt1{9i(g`2B|u7M4m@k6=C zA^%TbLZ3E#&tDSzrZ#mY?@fdC>#m1eeQ?ao_{q1v@k48MCK3QoD!ENww@O!6cQ>bN zS$pc`CtrG&!JwKw&0<=bTl4D=S|9X(>)rB>md1{T>XOo^RJ2|H>@r=8t*b0IJag>$ zpPqT;@IO5zxs{Nw+LM;c%O-lD$Eh3bG78>ctZQtdyTW>zt@)-u+Zey!S$3_cqqzk# zgV#=N{`K{pJ3U=4+;;t0^h*}jsDdy2Xsy4&2Wy`kR(rk6Y{R{+K~yXf3nTFct-SCbu{dJ)qjr(cV7pTxTlynrgY$RYq?v{O5{| z8D)F5RJHVUwRYrlVR)Rnc=#-Bhi;>{v@|!vQX%g*2zy-ENB4SllciON%!{r@HF@D* zQQZ!`e30_GPzbplZKJiej!WV42G>ft4(WI4qd4wdO(!3fd9j(4S94Hky;Bdrn+rlM zh_wcBJ~45J;(tu3}@@%uP3 zQn9_uxX6vIx=Ah~wKR`wWUd30{_dzmhnK&2eeH6qQ~l0;WSQ(8S2pQ#F9wd>*~r^? zhT~Io!j0R2PQepT<~kn|3f_DRtwH$U|AM)lP2Irz)O(+V)9=uSh%h$EzJUu}RquG2(~E!s6xY|*w|%S1>lR(^1AFv>0fSLl~QUx)Zddwn-^> zP-S3XaF9EoReI^t3G@SV)>q~HehlOgXSx$iS==?Zhtv&fmD63Fop|iyj(eO3IRjd; zy3|WGd$m=+>UHWT_w}W7+2mv1jgB&OLRtqgYh3ICEzI$8X}8-TWne&hcW7>hszrc3w|h!Up*Lqzro} zHfg7bD?^Iiq4Od4$smxQPwE;vIj9-Q)>*ypxaBihoB>BTAI~0d4xH}BRmW`sPOaJKRAGMDA?0(*FLw zK71iClGIy9fG&Jl! 
zq}KAi-3xy$aU3L-e-3l_47VRCh4vDrCk#6eyTW;&8QK0Oa~dRHJ$npRm}6%L2IS}B zs|Kfc;#{|3=ONV&Y#qPTh3G{-{1%=3?a5(}A^f-_OlQc($!JbPB(8%_(CZF;JQ*^1 zzBiojH0T27JHj$EaXz_iWCZ_>^SA=CXVMa91RX|3PJuoAx3~_Lnu$SZbAsP^KsMnK zIQC5qjtd+o4Gp`2Gbb2FSSBVD!+pdPnvWmn_jCFtGqe2^NZh~_?gYkeB4gVfM#!Th zqv(h#d>t9(jBtj@Bf}#O!;VA9Fb8q~8RFn02`PZD;#`>6{S))A!)x^9h}*CeSark~ z=*i<};J7PvEPi})jp#TxdTPXlILezmO76IK&v6v3B#(|(az@FOqwq6nbQG%~kBn4c zBczHE_;Gkx${j|fJ03TqI zXgTyGZXHHyq9-fR5oU!vLWnP$h!!G*3sV-XXUN3$6j}{^nFr>os@S3Mfj3>=qq#)D zxXDWBOT67VSOt~n0HPytlbBSUot4GT8iFNn%BH-@7J2vX<$=Y#dsX8mD@Wx+MwqNb ziRMzqyA&UqVHj_9_&$`yhHTtqXQ?I%QL5)HtK~7d;#8-5bG%~`e zAdNt?VXJ)vcBF?nQqnM(L8Qb~nH-N_?s<;7jW`YShscA2lShBOcZxXbI7FH{co7b| z!l^LdCQrN!$6Q97hk297NS$N9-#ZBhorbxS2a!%}> zbp9|l{)<}Jm3s8sd2Bis;#4{)jfJ?C8Uz!9bmK@>%ggZ1lM>A$*p`N!j)DR-Uk^6< zv*9m)dHgpwXL~aDHSpXtJ>sdIffo(D%M1?pZio^-g z@BfbyBAx1*?8vOfrY6|ws^&Zlh~PO41p!gzozyVD$lB)W?40z};>Ma9eD7@n5LGAy zh}>E<<9z)Dev$QMQJ6?W92XWd@3oF@Ikd$6p7ZW6jj0?RX z@b?K6r`JvcM34%K0V3muyX9E{g2aaAJAvY;n!4EQLCH9*i@poilpsVW+bmie3ks6_ zB%uKO@>^m81fjWUf!E?1q9oz|J_$I@v1)+4Q3epHr3wT$uKS=uR)n;*EZAQt77L@Q z%0)ralz_q}9Oozlh#FIGl}Gye-ni~72}`SKZO!w$5f*Z*u(l;tAi8raCM&M!0YEgq znU7PFnE;V-zOPSwL74DHV5meCo?4P6xq2hFD5I{WB?A@f_yyb!jn7CeNUxp%M3Xz@ zQg{qWb7nx4@8ff`s-`M3AVd@x6eRE$3nCkn{i3rg8*^epLXz%8`1ys$24+qNL@*B_ zP#n6<1c+MF{Cp*$i7h1ofnZT`?Yby1zC6@V6nLxgc1URS?XXZ7u^^E+tE}NZKqN;l zj{-#G>XZ;Pz)&D4tqm3j`Ui*wK7zoATjAlc;*iqhFjT|{j1~upguy|6fk|0exs`JR zqD@5rW@B#)1tHbxfO>s>MPlC@0tx- z3(?GgDBdq9B&{q|lw2kf2m*yS1VW$d*L+0&J^?|JxZIooKe0p@A_@+PPsDrscDR3N zaP+OjjKs_eIYPh*8Q!V)0iu%707-0JX=rF9aBVOk5cv9lQ9qxs$cUQ)IOZQ93<`(G zt0d9E!Ra}-lR_e+BNHM+qw@$rG#=@FfM{!HOHNo~MT%G`0SjVrNKUrU?}j)?lGap| zRhkfc^JY*?aaq-EVSqR@(LXGts9YGI8YBt7m0U;wBK)sxd93oyo+&`oSyvJp8!iwA z2Hk|#iX}O13BrWZTR=AuT}=0rOXAQKMl)Pkg2ae27~6^Y()jan~rxT`OrAtnc_nonG%1$!0-sM zNDv^pnI!QIufzF}6G3+-`O?gINJS>=mAUll7KR7=2ZV*i#KPJo6!`fHN^wBs)HyRA zf(a*Y0TySw=uugfKWexge@Sc|?T5_Oy0cNMW|f?}mR>Q@0X` z2;a&i5s0Xxyfia6;aWgWr;>=ML3pFQJ6<44?^6yDQ3nP(Gj854ERe+J$OZ^w|}oF~cAR5Yi@d5_bKgvc2HpM%1L$a@w#x^O#V9M3fQTnyHMaa?VMMO|?@$+<6gVUb>L|&KC`ln;M&bZsbgtD_1*o4hoM@ zc%GgN`-NyVu2rsd$O|^y083!gs)rOLLoUtUuwAD-8FFtXHBHnH$>9*bNjCw9<|rRR zT23vXfap}SCxkpzKm=7k$`=rM?>;q40g*y{$h~ECb93@Pet24US3z^;$y47Px>l3- zZ~+k%gv?n$)cmF(f9K#Ao7diQmF@}%_l4^Udkza!|3Uw%NZn{w?s-65aD>J^6KyZG+Se(r9=It z={+|4InQq_3=TxZDC^P&17>5m5^|I65fsiUoG`5n{`d9SwKw*0jV*)7M^ zO^*EiU5{6NmBT|TFBvjFx$^uvnhHtV(C$+K&%}^@-iLR$rR9V^%gWI1!!_?Ye1FYR zsrsQ`n9SD`(h^@p$P7aRJ!3sh+HUE$=C7Vgl)CRRu|53xMm_qLS4>Y6c*uw-$HOu0 z1RfeM_Q{m*?8)ngzM|5XuQ^a1z);cFW$Dt4v{d&5Z!ufL{#*GcYfYYb&3XmRY_%TM z(k=S_`XM>ScoZJm82c7WOIuSzU6^j9uA-vFpwP8Q%Xiyt-ea*w^w}CyCXMl&IbF|u zrIG%+caORJlyV;)8pkg8*AMN!wuYkf%v*F#+oTf|O;tpduC1=2uBW@q%#f+~O^uV0 zsfC4&xv|Y=?EUq&G#Z1sD=c!3`k{AJw6s4GtTFs3Xgx(kQ&mGpOHG5KtwpElXlTFs z^Xavg)+Q_#ZQb%cuiGxOv3hegYwPN5Z+-3l)v2lap@;F1&w6!D`&7T>wmWR78k(9^ zW0tlW)cjBlcd*oU80khKKNe9Ur7_ z=dP_C72g}a7WS2ay73FTn%WvBuFj@3rnQdF@iO;M{62nh^VTf~{KPSDsB5t~uj{T| zbuG;N@824;?BD##cMd$%o^o;Hi`#W5+9sQIX=Wyty!!Vo-uL^<+hs)kP*rhBBwjys z#)4+@UH{FL>+bj3@FCRmXZ zS%<~6S^eBDZ50&;B*sb+UgV_!D}qv@Ksy9c*Dzw+efWgB=F^+mTRHX0aH8)lX}dn4 zXj9ZIR;#cAgH9&RQA#w$iJA-kVPltGx?3Glqf^zkD_%W`;!FPMWsMEV&1%RF=Ji z!lguF8}mIDwy!-w-)pb&fgBP!&RI(IqPZ1o)1CBZcWq#PsaPq|Pi!-@Eo=CxpPK2v zr%Wl4H=VNVBdCC*Fil^dUnx=I#|OdeiR1@2}-6fr(nVdtRV3tPbSPfQfj$yoXAOH0mo-ty!D5geZoIWM5m@JfE%% zCOW^1ZSZcLa+s*^+#7GKU!VLCCPIkXAat^ZMqWt7-*#XhUztLp<7=M#ivop2&7ZQY zFDP0_gsyg&rH}}($>Br65UF=EcAOKr4Dtyc#Cxcc2(Rm!UP%Nsha+B(tR$KbC&KHM z@Vw&PS3-0WM>R875uF4W^>7st7w`}d-VP&x;U@7AHbfeR&1>GE8o(haF60bo!1w+E zsO7{S!b8dz5-k`EJ^n(X@})wenG1;&;X*{I)BiCpq)JqI&00t_GZ(_kiyq)YGZzxw 
z&xIDhkm%@^FF!|+KWXCs*WC0MiL9y)n;bMcouT-PPN&nT6m3m)RTUCxx;3SLCXv-N zsZ0Z7Q*#STgL*4J753sznL92WiBKDlA4YI`>FZ+9~=Gw{u`e8_W((q zYP@>K0{o8+zO{e=NMv=Isok!H_#YemoB6zl0Q}RMmATL%`wzfu7# zhJR%OSQP(C1h6>%iOaDHc@U*P~2%RdC5&@I`b`5zlrD1gQD4}TZnJut@~r;F%+ zZ16=>T`jfQ16WM|V_hqZ>6)su{&Bph{>Kt+4JfmZz~cHJeBOeoHERbfuK%%1D-37v zfJOFSw!xgCIotNgBKsfQY@@F|+W;)G|9d-D8tSOcG6NRb|JZYDO{nUBp#K-wo6*$C zkKPnq?Dp^3XilGP1z2qVFKzl`{qME6_(R*j?~nJ-(3s^CxY*;r&))L?wEvg?;Px;1 z|6}uSssESy|BqaMm-25Z|CaLa(SHCe_WjS&{9BrTOY?7O{yl03U}^oqpFemE{QVIG ze`)<)T7Q?;-=+0;Y5kqK2bQisOV^*J>+jP2$0Kf^7JPW={%h&}Yw7-L>HcfxyAK3e zy8l?Z|5&>JSi1jM@QubJ3@qJ$D02U?;QF6O^#0hGV$Xm4q5dC3{naCzKZZYlH2eL} zW2irSwEMBSpZ{8T{oO3?$9CD7!t-yBzW+fMUVk{7`>`FXjc0xS50Tej?s@yr(c?d! zKp)@h#E-|19(sGvQ*47-pMON;^`}qodE>yrk3N2!ua6EMcw^7gYt83+0GFu0UHjCI zU3>QK+xPP0dhOe{ch9aJPp!3LQD-j;l-D1dvsc?~w12#>jdrWq<}4aKfIUn4;dh|( zsJ~XAP}{=X)YyPY)tuu2bUc80)Su5?sg4(@>u9MfP#sPpsi>-{tE+1~-WOO@Q>;3k Qgulp-|3%n+=r51|0cC$ElK=n! literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Player/Build/Mac/VolumeIcon.icns b/tribler-mod/Tribler/Player/Build/Mac/VolumeIcon.icns new file mode 100644 index 0000000000000000000000000000000000000000..8a9d3839957e3de558db0952a90b5c7a1561204f GIT binary patch literal 37339 zcmeIb2Ut_*8#YdWkg$;rf+!mHa1YdKT@+WHRa@<^t+lP6R>fTxCkcBAirdyvWRrxj zhwKdrd+!ahR}w~YzW0!zpommWuK)G_e6I_0a?bPK_dB2Sp7+jkJbv`lIUynUisR>( zEEW>loh1Yk1iK1}e53r0GGvTbb`o+k!YjGr9QNW*ppPFvvHOaetB^J9A|w_T>lY9h z5P%P~CP8F~0tG=-;o#sst1ImQ5M6$DKG>DGM$$@gA zJXC%@z5ri{Dk{Pk6HA~{>oQ`wSXovq>F=miCoWnc@mX+8ayCR48p|P+4}lCqz#b|= zm6j69@a0eip^{LAs;(y15Nc6%b;No?1JsCVs?CcJa1Y(@2yh&QFdFA?lVg+Vg-{-p zPbeT1LPdmP0-z+65#AsWYKXP?I_r8&b4Pt%^mF_gZf0!Tl@~F|nI$c)gf{3EzMaqk zb>h1S-Kd@(LNC4#)!$DTzz?Dr3_O#-LJbW$4P!<|SRDl+4rDHXxW9v6cv5y{duJzM zs(b_{9?6Fv#*d&zNAYZHjw6ibFe;NC-{lqG-_DIy(WO#Saj;v}4%`Z?ZeV zVjNa=vj3C)4&3Uy*u98Iq1R4b?8)>5Hj(23@(a%%2D7!Ls=SZIf+p6%6@Uykq?6p9aY4il&bs==q~|qsy=emo)IqU_ za_7w4MG1^9a==ZSl-|znp`l@JU68W40hp2IOsbQ+E(MnQ8I{!LK`1B!zI zT9WEoW_h%o^A#~t3l1;bpO)7Dxk)X9cdBigAu4aq(inGj9$9ne;=-sZos9S?v7zz07-@!?=()Mod zfP(tx4)nHm=O16UTv1L+LVdwKR=Bx=-qLxh7I_)Q+g@z?D!m5+rMU-SdJAIiWbsOq za`9A-*DwDepkd%w$<=m0op-!Z z-qY95f;jg9RDW^o!$4l^_+EJ)o|045Md0={G@w(yLuvU7rvnD!B2wzOJ-7c-R9hTN z!IyQlTg{(4Piz07#lM|%wtc~T2!IC55}xq-9OI0C5t)`--Gv{YD0)e&6b!G1^!aF_ z^TtZ`Z!T`1i&b1MXX@X^xDUFFS&`^@cfbAEAQZ=2dq=0| z)pqxe4?ol9@FU6qK{B{=g#=boUSXD+jM#?DMyAVfyYdg}Yb{(bd$Fpvy5?Nn|J@FY zOR8qy29RK0__(Y?Ank?3_g{AOdzo2K-vf$=jKvj&=N8BrY*Uh%9pbAkiA7=LWTd1d zRMhp24OIDFtQCE&Y9i&H#D1HZ9QM85bFAV0kkz zE~~IUQ| zT6F6Q#nsz(o^thd_9z~81mGhL=@dtOE;z>O%x!W)c2P4lR%mEYGVO%1%(6m?vHn`a zS&*cG@@f+qNqKo0IkTU%b$6-C$;!<(TDyL&ww(O>9R}O3T(sS}c>l7E77N$7b&X*+ zXNB0?aIqyBmiMTMwBpvjKCWL|#b9{q?YX^AL0oyO5-6>qx1YBB(j8iB(uXMiboc9* z&Ytn9Ye`unDYL^;S84WdS;v-G?$B9x>Q;2G6PPY-v@qvyxzG~7T0Ra=rkB3rA*(Wq z@PFzppChBLGH;=}?h!;uHSzpVQ=4jJ9by&M>3d2KT^-tfZ@7ai;!&tCLR$f_H1 z0if+UkvCafP;u!CPr_1i%R9L4P?^zpS6NX`Rm0%x1v)yq-=14L|6<2T&)Dp>fh9|W zUc|Q45B1R7x(9BJ&BADpB3`5BTzVdnmRH$@ICF@hIFPnWPD69<>h(HW%Ci^#_CJ#i z`0g7!?kBXOs;beFEnmxEF{(>+7wo7A z1{n+j$jB;pTs;SCP+Y%LLvAe%@u_$Y9wv-s9B#-Aw!h2GD8V@QFeD|ntZghGo+TTh zuQ6BM?yix7{383JA@&#_k>iWZi{`9Qr?0u$#D6{aPgq)Bc{}%3EIX93Qf=e>1^YHC zto%xOa}gkeU~7&)j2>;JMYwizGhyamd={BrP}zyx2Iau5r(mD9!EDWcOMm)iwSsvm z1PVyx1R{ejH|lmFFHg>2p3#|wm0fs{M*-wvJ~7qOFxNYma@JJqt27p8jEDdWbK3Kv zV$q`V-k}j|7&TO6TP#vk+{>{Ca9w#Z_xyO}jORzfdGrLqMeU#z;8rboV0dK>n6acMsvH3BAZS-C)$mT$+$=VZ~?LI)_!rc z;@Un?JZK0SmS$wT-C8cAvsXUQ#!}TWH9Puvi0L^Y!){&eO()kAg-e!otmG^&D_0 zqac9Pxy%VCY;dyC8sH3+CBAT5hv3+P0chn)PRcE99E7+F9%w{rIQ{(d*mKu!{=Ra_ zona!!21Y$Fz~dy$&fRi$zOvZ_XGjHrJ*)9JG0WS|&bO|Z;c+NEJc2w}k>Y2!g^Mh{ 
z?wSWRC9kZB!CQn#jgscAxV~YdJ?Z4R?=lDoZr@2fPVzuT2b00*&I+~N_6z_IZouO( zA>a(NC;J#c-d05f$jquVl3fNDSt9kq{m`_6@>b^15VFLQ9v#j5TE}v&%z|&ejk;S5 zaqM7J)gh48fdjgTJvxwE0dQI0;Bi7^WCSEEg>a}<;Rr0NHl1om;uU!J<-fzz3(MPB zV9i7rksBRlx8HLnU)gFp$0&?B%mHHrRj?a?T(^Z$95#R)*#*8mBf_Z4eE@OM3j`5N znh?idc#V7Q($mPyqRMs_0w_DmW^>pLmg+y!{!x=!LIF!kRNE0ePGsqo=u&XZJ^;|} z0kkp`2q&!n3^0Jn)RZ0WEW(4^bj33|tGKEQj34}nFeo3yk$_p&vzNF9?2)zza0110 zBH%DatIfavACy`F8khs{g`DxQ@KEv}1RN@Q6u}&ArAOYZ;9_DLuX)GPN~*iLkZb@c z1;dt-rCKlb4Q7=&~KoJRvy~7Z~yME^guJOuD2eW zj4^k}yn@C-esT2Dnm$l#QBX_{7>wdg-%HIh`TdX@e#i+Htvqe0uVaG~(OKoqAizQ= zox$KF4pp-~ij%rl7(-&eQrEJ-k56zu>@aM&D1`hz2; zCi;3r&}2~~cv$Fy1DG-fUy~Tfl<6*ve{##pW^AZq+?>hDdFAzk5VEF6+$tW%7Oyk- z_7Y+9W`HKe0sRAybTwr=U*EP6wASTL0CyD`+{=hdMQ%K(;n)Jm-jYO5mvsoFjgXWT z!Ht?yP~ONK8C#Fqa3o=P0W3$* zx!)k`=)sEA0P74ch{TplkHRyGD_e&UZ|(^E9{R!JIfnNAfUUyCud;^)27z#gjSd*H z>xux1J3v{7pfIb_DK;c73U>G9Cy`ktRc*t_iXU-L7#I#cxJcD3$R5U2S?Cy@eHv8{ z)d`mo4MA~^)dQdh0B9Kk#H!5bj^jCCx>sxg?#=Lc10D={Z;)N5AIU!41a2yg-0b4LH+2*Dy84%?T26M8lFq|=%c?GeVS44oK|m{(rg2QZvK z=c4-^^yPo9u!F@I9ijJ}k8Z_p6Ww{;t(XBCox>hw$67$!P!_H3;Qsb1@8$jHTojC_ zJvq6cvVH)0tb;gcxc|Y1-|`({kzr!*GOVN@qTz_Npd2DXip2dJ(Y9D>g^#qtOsxzpz7Ce*CD=%U}PqdGcK<$M=k)HtzW^Goe zJ&R{ycHK82ud=p_4es*XT0I9Qta@6wa~aR10UwYE+OQj0JaA1sfyrgdXF|6n zrxaGz_kzV5&*TL#8EMIlx%-4?;^|0DE3R(n2kRZA(}DN%0xdt%mLL1b)r!b%zlS$N zGfHY22RXwKBOuc?&Uxcm)|_*cMi zPI&i|TkdYQ#ie)MTpx|OoXT{H^==-?i|bwq`BnApU^OJ3bz=SI1O7Ej6TyU^_k2^E z&$kxqZNCr(}`& zwrytQe~q@9Z+qH__{zcR%wQV~k7+mLmsD8Y(8(skru`PT8=L;wSB9g*IrXs1#-e5U zO^pZc-`P}CLtB>jBko*+JI<)qe7ma1l_v+?N4Cqz1cWgMa|j= z&U;|E5m&I&>YCmUPYvr$!eE%VE7PC>;ksa6TbYV0*nf49>AJC6C#5*=JJg!`uptO?O#$IrbGaW75Y+^)UlZ`4~nR<=QrYX8nwsEJS`VLb-%ws@Vq|LM0Q(wOH#M(zMW}|Fa-;9u z<577!g=Ci1HuZ6=VY$Kd#PlDp^xWT&Wf*kqe2eEIC|+VwkiG7rr4~``X7|C62sNx< zX}oyx?={i-W0iu|r6q*whQbYh?#PGhjBlmJ)xeQDix#hPeq{>={cv+$>;va@1n!u< z=N_I_Ue`PTYNeI9p&f?z{0Vy*rsHDQI>9%;Vz4_GM_cZRykgwbwe)WYmZTcjo7e^U zltvf5g2|>X8rK1waa$zHn%LqWsSbvM?fky(q5GKFX0idQuxO)IaLj4qf;)N}46|!Tn|r{+bS+#5?iHepdbo}S?tDY|8j;GrL2wH~Hw?h=Km!Dx0ji-I zOe0*!1m$Z)h`P&C{jCmgiLeUSJrfFQ8ea`nf-Ak{>14t@-jxG4e3A-l8r#7Nz8JK6 zUu6c>x|n-SV&08_{O|#;fvzNPvLgjc zrbfr7CI>ru@oXY({C)p^_>__eB@l?~)bl&fD9Jg+;Bl4r z+&OtuHWE~rGrDCmLyp;+I~oN4fSA@HHpj zn5@Ez_CfG8j69i5FpdGn17%4*H;7zgan)09f#4agssq@LZ7U)B^@nR6T#sszv2PrnADxGQ_z%|Aeo_rLVnqOAe!viJ^6A@q{SiVt# zEwd&w)R9*T374}^BQgpr8~YdxB9jP;OdPfz!H-6>!HNMm!3g&&JkT3|d4d~DL;C>4 za9}#HtcPNT#aMmy^yqsRc^0P${wZZm9RnbH85_gw>ufBH`}-{K;z{R-{Q9=8KGxk~ z_aRr78`FUS4PyJd+MCMLJkNg*jWsy-%s*)r^^Fbn4fh)#H{7qcufx{YH`G;R1ztLV zSU^J9{Z~o!vhtF`0({=X{Kp0N@~!i*`31#gCA8q{Pq{W?2XBPX3-fXbIi=uFEH^ig z76#dIuZf!2Q?qhia#F#cC_O6-@5W^owjrev>9=zzxwmtObdfZ_Q|8nbKK)&jXNG^4m}Ns3NxuL4ft>9k2Y$bwY)u!T#XmmA zt-lE2?|5(>efMU_24Z69ZjQUy}Zr4E^_r| zRE7-=1?>P=cR2F;u92w4HF8R}H4UBo@UJV^p(KJeQH+4U zarLi9$wZoHc6unZhi52u!8a+3kPXq$DgL*vUcZrHg}HJ4>aBnjP={=h6w*Zl#E>8) z^2gnnOsDJwngiW7TO>2a{SbHJis)YTO@y*MvMDs{Y9$#-83|s0tw$WZwU8L@xT{}OT;k2-n%% z7l92l(HDV@UjkcicVl8kCxhA7*WJzdNP$hRzq%-c6w<^TsHrI{Zet8Gc=w(EB(GJG zeFFo-U$w_G#hM7$vMYXp?yhv*b4yO8qw}MRwYt9Vxwf>&Iw&ajt?-v2&ppa5l zUAP}9A}r`-dn+ZjCMv9LXomV)e{D)>0m;|b+lL&I(!d<34)lqPN~!B-r3XY5BxM%m z_kzvb(V6IL?a|)UvfLO_U|2AKD#;1*3TUj&>0%5PgoXt9$E8N3(~1kLM?S2twS;+7 za_R;vsnj5HaEO<85ILp)rB7&j!*E_qSW<2h)hjTH98$*|oq@jA)0jmGAO(`@2O>%2 zKr&cfk%E%)f`h{;@m)z_q0tf4@SxD3;Lwny^0rs62EgtBcwCuQUwf4u6&9614xrXm zg^&V#eJMdcK7rJbu<&SdNKIR^cK{_gB7{OtpoRO8L&L(8>%03H%#q=d8R%<8K0%?W z^+`bqwIKlkB$96c$>)Wa55?b`6hcj}%k&NirjUbz!V>B6R0=gYo)i(6L@%i=F7E+b zqu@DVI(=GgXb6Jk z*;NTK(TN$Uw1UEtoSgdhu0i&P^tHia|G*%!A2lY35)_t9e;J%TIKmne#(=)o*V_1s 
z$zTt4F^5N{)7S6}g_cB0Kp-V5hDr&bL}jH=Lb91-C?A&BdT8Mke@bLTTpSphB(kq> zWZ#VBHB@gN6eSlNKn(-aDI_u}EF`&ZWM=YO=gXjkicH@C3N@HSN$zTH7?_c~)>D@g zN}><7LD@@{f)TicJVh?wO&y){z_>P9j%#XC=N2PU@YR zyf#!39vqw$TktB23fw)|s6=$UPmtGGB@_xdH7)@-3Ucfq*uf%>?VNwKyf)NPQW`>v zNbvCv3@so24D#CW@K80C5*tJg3~Awfyu5}Vk$hEH*OHxAnjIcl0Ve(DQ`hRFVsaDW ziW;)B~wM=`;){qP8Bu2y0r3c8Ld340f8ullNYa>NrJllxCV`jYs8Ne z*M6I+xCTuluHEpMUR=BE`l;d?;r-&8^-RSztJmV%`RT>A3-1xv+zw7Bu35eo*RFd_ zBd-17@~*h%`rEj;X7IhGmA$jI6XblyNyypB(b>V)o?s_rYhy#OM%&n3dM&XDqvaH> zmF_D5l}-GM%JwCU?f(N|Y;|!zoE{5ft1SEIFt%>etv@wQ;u)PJe-~o%M~AWbYhaXT zugJ~bHg}c?R{MM7y{Tz^|4C;X3u6m2!tVE~tT5NmxBP3ZrnasqR^m$We+*-*$w|F- z?Dzsn)c_`5U)wx4)=c$O{9@HvQkzm{Dzkwww!Gg~FOieRqLt^~Wrc0h#VyiQ*pi!W z@Qvr!Ytm*K#?}*g;;Dzex{|bnnCyX&{toZObJs38=wCnZ3udM5k7w<#mG|`aGiM^R zm2Hz>;_0wY7cGQUn)j{y#aT+GFUeLQjP3ed4NZ+b-z_|9>1gdagD|$l6>_>)g8RJG zP*O_D3exge@$VaVDX%&e&b+o%dGmP-5Z<;#+R$%iVQgXFE6B{kV5pr-(V|$HSu#rE z!s{;T87lx_{>Z&!F?G*l--NNf_(tDg-JwJ0?QHGO zob!p#s{L>nn}@udq@tScS99lUYwtL-TI)jln=rN#if0T6W9z21emIORb%&Iyy5@?t z+Uklb#=jmm`Wot*q0UzKqllcMqMqr-uf#<}6ipY*+gdjxolU95T2n|&R&w_IHQ&t@ z*4VdU_3yzm3}Y+VI%loRVGI@{qb(=0qcA>VW?^h~A=j78xiA#FNJvOpL2>;I!`Q-h z8meLUv={G|6;u3)#0_I311Wg=5VkVA73yf+qWWzrQmeAL8e0&^G;R1=xURaUvaN@n ztgNwZ;aK<@@)HW@!OgW>kVU`L))|@{fT+KyRPGOhUQXN%UHH!v-^eLfgSvTF5NG#Z; zG)oTmpbtQOe3+Uv5~lV-OJ|SXoVnUzqd@=o*ci1m%V#goTy;6NY@r8G-9J}^THu@| zSJ$n#4Lo7FHv@!ZeYW_t+_l=LR!Pj=yff-f3BdjI=rkE5I_-`l>9V1b}`@ zY?{niY+C(kmBZP8t6&#WK#P8k&@_VDLRbG?5?lN~lXx~3nwD+3KzshKi$8sa&@?1o z1_@0&ATDEY?0~Y>M~9|CK&=)ZzN@_Dd2#$E|fXIM&KzK|;GcGzX-O;?aImInzmDM9y>}X4cb3myP|t=SyJOm%uXc`)v?5_$9FH zOJLcTz_KraWnTiz_!L-%pA=Yj&4CwKb|Y|dVA+ZBz_M#z(*>6OGOm4HeL7`e*`uF% zv0Yaly&G7@Q=QO%-I`MSx-}jbcGmeF?aO5x_oU7H1Iz4q1R{rSykGkwjB8(;E=?a; zcKx;Xb!K|)%W_=%`o&{XVA;+6Jnd`#g((8d{xsnQmaV+>j`sE9{2E?h+5BG}ecui& z3vfPa$PFwL)!TpKst5$q{&_-3XgwBrmE+q9qherUY;0@-lIf&`oOvrvx<3A8q7487NXAC9CElia&v(&f zkMbH?CZ*QaR_=H1C->|4N#qhZ=6;MFkJ*l2=4NbZD)!qV{vO6jr?Dq$*hu)@XHT3q zNBK!iQ+^RX1cKE0lo;2=rsSM>a&{J<$Pw6v6%(nZJl+Dr$@!Ls&rHcV@#LvtzOjS% z5j_;1$TiimHHfU|TjB(MXaER50`JD-UrLmos?HP6iTU%793bZ?OyHX8P%4_Q{1sYL zkxn?L2;R%U$JA^xforNmE%2rlKBlg9Q;|+MXWYz3b{J;toxnBK;kquq@(=wn73qX? z`xt!XAH`4Ln(FY6A-?i+JU-w6@%(guzybSB?*N58`0D;(I7Vu!R)2j?%#Y9f^ywX- zR19CcABM}PcYu6$e=yuKy#v%N;dA{v&0hEQR0oDleC8kAH5Kjab7q}<=4bu#nro_q zU4wk)AH6XZ?dx;Du=vc+zWJJKs)HN+b$`GC`iM@&JAUpae;$1%G-||jkAb4ze0F~@ zoITw$m_)>clJe)UM032Yr}&ho@rQH*YLn3vyyK@dOD9*i-_`MELAnp%r!oSY}sul5l@*xE6?{k0xaev@# zv^(CJc-#|~4{704xhXq947}%BASqvYN`ARifw04Hoz9f}Dg^@KZ=2cy>eLB{ zzklnL^laY{N2hjx0&;&~1!zBI{RQOyfCGFsr317Tko$vS5@w19 z5PmEm_XopCvQzM5NCFyuFq}7)1C&h?7=JBpiux<(35>sW@f7^H8iDcmZJC09ajU@i znFmqt7Qnn$VEiNJ-sMLf6wK-msU+L}T@@I0o}WLwncTFTf8KR~!uA4le=tmWR|FCL zM_}#`hLh#r82}PNliP@|-wM><=0_={@Re~gc&+|B{OW>Q{UIf3TmJS05Z14qTtvQp z>)-wk|8haC{*a2XzJGfH3Y+uucYkj;=ExcJ3cXM;dtc+F7(IraH~>+Yt%BJ4A7=*>@VfX!eqrp-g4z3e zT>=k@i4#Cr;)VcMKS&`cQlf7>5K2_?t^ij*2!Fz?2?G!%DkG@9FN7OOh3c3I^+(Gm za!(8F-`e>TCIAYfBDjY?ZWTLBCrkigtg_%9e!2X0xCs+L7^@H>u$Jc%70;D?GXaIM ziq8c+{xj7j-`s%!zrBF@BUB{cH~?V@MJoaOAF}EM2P7h)cubJXFW~<@0RQV1h=_#J z4+6gb*s4vczmkQZHeb2JfAsVOew2uW^15z;PQc-x)ui8yKfu4VQK0+-yWZm07wr14 zdig|tQ3)m76+tdQIM4M}rQWPSMZ^`g*9%JhLHfCRkB;(0|AU>SweW`=fsKHMQ_Hj! 
zCEkobVT_!H;ksi9{QmJXw}PzP3!4^d$zvznf6&q@Ix8%Gw2rFo0RP`8KT}s-s@t)h ztM%1oF~YAGKqyfOg}EkQ?L2(^Jorx(U0gquT&7AN$^@>vI6FZXPyBDshDF-SQev+c z;3$+RR#pwSz4uAjM9*gzM473!bF(R*ku#o z&=_$^X&D(=IesNs85wCw39J~xKB@X+Er1dh5k-q(Fc>Vq62KNigSrb(Zv3(O|G)ar F{{y{4$e`{GU;{oVGpvJ=rbm^Bc}FGl}>fv185ky5IP> za8ZrmH~APaN3jmg3s9$wD&@(A##U0y%diDG(9l=o}OL- z^0Nd0(&!aFLVnWY$dxH6DJA0gO#^ZXiuv7;y?#h&8MvlPt@QzIC<_sAy~bqO_)Knq zA8_9c2_iZL-p2qFh?`|opF4N}bM??0`MPH~0MPD$^HciwD?T)CHT|7QZtExS^q)}E zt>dIB1JkT9_oK6g^+>rR$Q9yvEjxsc90CxA>f3q9M+#w|=e%heoNgEGyos@jiDh8x z<~a_j0eqZ zieCF{okTI9#3jThFpEB)yaHEP)@U}phwzu_s%xjQ@KFB#6h%$V%@ zshmi)!1!cK&J&@QuS_6>;SVOwq_HL-^()l9#NiG z5cw%la$Fgj+k}72DnrmJ$q?q`>y%C99%}?XPchOmT^L}V+RGcs6)KPUyy;R)duS5= z!{+^8k!^1}-WjuI7JTAv{o;32H4NV>i7^DEDnt1eY_e*ox-7A)@vCFp6TF6pVyen3 zXNxl|=+|$P_V&;ZnDSRj-;H$D8F}4MpH6l(st|sUA%UIulIn8mL3htH8{jwm%a`+( zmuXKWYR_twNL1;Q(CtjZ{q;SyGCP@A^F?3w>iD2%SH?%~(}Wkj8?0}IcJ;?L%Do7;mEn;AW0 z%fq%sUD+C7jV5QGC(X@bJ7nq&vqU3&Vo8cM7cQlkoI;`l8^`|sF&`yA?zyTNUOwj{ z4bYPL6rjA3xM8}B4Yt^4T`K$&+W)h!Ji8tKZDV51)o&q=#F7EVP*1bWK=;mcUm(cL zImkWF{h&vGvTieOxc}<%vbI=O8~QT%;rWp}26MF(X`b^}_SSgk!o#aK#KggWb2}?} z#~?*ysXpQOqiYiO47&}*-1JXYUqqfC$33W@3R$83!@9+LXm=!ymfn`0p}u_5tv=Xo z#-I8UBb;qEk0&eK76Dkqo4V1=2zn`r>vd{T2>#|1<+k8?z~#Wv#V_))Jn1uBC`k#8 zf;ZQ3yqH&bu9RS~->N$?{&w}(%XuGe3T z%FdR3sy}KPfZh4~WAwmk{l{b7Ar(tz!vKTbMtf6dFW8!}8Do>j>05W=*75W7Y485y zNaQ?Sip$!#%7TaEaMP#O)^YLCu~D-Cu3dqeNHEh#j*5Qs{$CyBzn%_mcg%DY_Z#&O^)q!t`4>5d1){0= zx({C0Rxi9~jhrXEB!H4n35;?gU(`Lbq=+k`git2W7x4MId-*UKK$wc~Bun6dmHX~y-NWwME1TtY+CeHqxZt#zZZoUfvC3}8@u^tRRyVCJoe{>Z zp{GHzUd6Wv%8h-Cp$Y6bpjcSHzUo0^o*y}OxM4PWq|Rf;O2bY~p*!c#nqDFN9MY^f zY@N57kB6G?|6bFAOg<-Dk)xt#ratMA*mzG$Ap4$>l}P6UwVVfwbNp0Kl!lYWpoWsh zGPjMsa9ONLszh89&8O#j!{&Tyd=h*)#^%P`Q|wbD)xob%ZLn*@OC^gd%d{Lvg$#x^4Wa8DYMUwnWY)lm#p=9 zw0RSq&pK(lUUW)v>~P#=$7QD~&~MYDC?D=EL1Mcy{R2uQUkBWzy=RU73 zHxIYWOnOP29>y?6C)qH#^lNElp=_k!X!XeCu+wnfTeJh|CjE%_*sraMEiGzkQKu)! 
z#@#yi&#)g31ty_QDP1Jtp9*V|v+GZMoaxL>8KkTN9r3ADEQ`t3*;85=3@R&JA~25G z4=o&SMYw60Qpm#$kOBzWWGT&fW3jK#WmqR#`Kb)UN-BvH70h7infDojcRG3!e0(bD(#%=xCd7glMdsB_>|R9&1)) zI&c?$<@q{fXx0|5ZDTkPvU#1{EW%aUHF0OIuB|Z8G#|G6*(cV$+*hjU&sCUmiaqIi z{wJ<&EvgGy>D!Bgb>?ozk=MIARS2GC%0xk1?b>N3wzVy)aTK*6*4!Ued6ac?xVmuT z8A{d@fg`R!Z7t8?)h-Q*WoCXuW)Lk@KsJhN@}AxbZuik9Ao`&SNABI!<23Zh&IZMi zv^q5!&yjhXdPbl@=!E&ifrO}=)egcH!HXZ!rSDemi8$CV4eCgRqGDtE{*q-&!m6Z?6jW)Q- zJF?zZT5Wy&u8i}o@Rr8bpEJf!v;(XatXGh2Pvh8Mljd{bNjLxj!-kHHC{Jiy7V~?JRqIPk%y{?sfBn8Qv(@?4*{T>JdZ7ZQyf2h zHG~FpV-))_?lQj}7syO9HU)nf0ih@|WIZon4E&@J>FK2b&H%E5b|FKTwwiYYUDG~^ zvDNuq$VS}ICqRdoME~49;0ZtwAU-m)ApH51BZe8mL(iw9ZizEldm#I5F_|mQ~2oqFU$6jpSO6kSo_PikL_~T84h|f z8$+}lUq%%-bNRG?bCW0GENUAkahK%P>0EGoCh_zU6h;X9N%{t2Tn#%Gh}=S+YT_`+ z-xeAg5a6&5voMCU++}@rcY};lZt9ejRMgHJ0G^S`!I(h{4p95uHeWuu*l7#^^3V8V zGzp1gyVCyHC$x*pdr|NUh^AJC*A~LZ^xe|^T1bGOw>WAy`Yppm>`~wk@CKOTbA7+e z-8oD9aqrvieXNN2znGGY+oG{FQ7qe?jF52cJuOfzmiTnE;+22 zMkjhPhgdC(lbYzwbCu0Bd)5r3=$zN%=CN(xCoWB!sO|MNfi_f?45La-gr89B$q75`|xHAWJ`D!RaRm;W5_2e&C#~C4uTNB2Wo0+oG;S#O-v+w zF|*$kqKag=1cjT-1a^sKHv5u!=|r8xJyuj59Ua$=gwy(SpGSNkrW3X9i8r$F=a};c zWCT*s^>uZ#NbmB}(#_3MT^*f~0$8G9#Y+PNgJet#qmlUBTNju4M)&1P^JdB0$v!mN zu0K^M_~K|JUxkW_s<@S8QK`{;dpzx_oo1G#H)ggeij;{HCT!D}RAGwH%$Aneb&x5N zLwYa%3|mE~AQ~DTQBn@%KtophlB-Y`Z>RGC-NVK9h+k2p*vXpw(o!B6yepOpJ5^g8 za(m-|-NIRFPDZcFUhdSs$Pnd#343pii8zkO(LCkc)d2(}FHf-(WB*-s2*ejAD5|p3 zWvoQU;Y)x;P4nORd6fM~9ukTC_wOILNB);}+`CGEU=}(>P|zHC1OfyuTUAj}QBtBB zIO0h-TVW<3<~rX3&UhYicXztp@n2`me>lVH>+36wYyRy_)#Bolw74yGY>edpu^0;u zv-G{%EY9&!-rigKm3L>4^zMD?YH;IIwRQ_lo^~J{;JLs8bxxInQU5_eDtngO*mySM z+D6GAYPSy-zO}Wbo+2=iDRJ1%6teTDSZg{K$+mJ>v9`7b;>88~;W8&V0`XJ6zC5i0 z(c$N>sH$3M_H~{1MDkc8L|tYlr>7;oxAdum0A#`U?@r6VKf&PNE!$A7tunh3a}TOa zjVjHi>K%>1qfHla9G!=d4fUrz?fyjP3l80#K@_(ZO8xV8{PlsDfq~(0edtt@KNMrq z?Bfh)pJ{LgsiPjKo^of~P&7jZq!OH+*Qin>@SdDLIj2TRNl9biDW=A%dvt!(rXA_M z)}J;$K7MfE;kt~sCv7{Bp3Dw&c6OdtK=K(QB-^^4+V-mk6397$Wop4!&i0p=WK*9Q zBeu4;6GK9paW)zf*i=3|UK8I=F78tIqYUdi+OtYvFGT-|!YWbSx~g*?a$% z8~tG%6-hw;9UDs;u*D~3+&QcRvKQi66yWR*PEHzusuPM^hU;PoZKyx(;vKLNjN$CW z#Kd42OZS`Gy26J5L#2lfq3fppWB!|~$wn41aAPbRS6Q)_?}2y~xw$zA z^a(dN-XI~1phuJ0ddM>|v5@QI3D>sk6De%V;egn}EGk%P?loDK%X15B;k34()zLzA zP|*C9l)4U9(ONmOyHu}+Wd;eCBKvG>ximAy-Tx`VV7)E=hmZJ-_b#@pie-pc_k-?@ zj?B)s_+sb5u4%i(G!K$4O>I6Z*!kBLo3$Rr6mqq%1-)LmBD+cRl<$cxJ{8W^5zPSG z0K5cwcuLF4$|@^;-Vxj;uA9bF-3ALT(};+PVP_CS=PXwYbKxmThYDvondI^oUdNex z*sIkmxAyQm7Ht}W#ikTV5%{DaWp8dSv8R|hkm{t{($HCi>%wG>Re5u>c#9rOU!qRTTyvm8U3S3HM#3qy zz}G`C0#XAuNgY?gGIZhA?e^xHpP%1BF#KwipQs%yZe)D?>&eF??`$U2`C5j`7seNX z_f`@-<+;f)j>GC3k0KZACoJfmypqEP^{t+o9*H8A!i*c7rW%}QMlJUL9qrCc1j`3- zh0O7gp}+E9?jjZfekIGVhaGu<12_H@56!m*k}^vB9c_%nQVBkq-~OzwrnWYibvYl1 zl^`uh`NL1t*AaP-O291jv-gX1==OGV27w(b)dOKDevz>zP4*DP($L@E z-`m?8bcaK^GcDvD|B>C(zur*rY(Nrt9UmfQN#EP8EaC8YLdlSd2xf*s+;4$i_bj;A z!}c#OD3jcKOZ8zndl}C&S>G(*DA`EI>_` zOyEg-1U@;JCcSKs-?S-`(-;AYx}Lti7wBbRD-^WeVXhwJ0U|ci}Yuk zOTVH(+t>Wh7C>@?ovsQmC@XsmF7W8PDQHJ8>mS453{pO`Z6VNIet;$Td^H7Zw~VKD zgQ-GR|Cz@=5=yj%0|rY6hzNg7w1LO%X0H-~~lR zYoNq9xZ*^wfV#CkUI8W|^{;&A6KyxiCz%+6sS5D5phZh#Yisc4Xd!4q4nsHuR-mKV z4Q7HaI#>N#0W1%+P&hj%J42usRhc#Yc+>T<`wLUC3_f${^z<|sP|85J9{#4VWZ`n9 zKjw0bU1$Rn#@XJI-B7l9lSiTBK5+ZJp}}V$T?7W-nQc0(K+LNJYMv_Xf;-Ju{!~^` z!65FoxWC*Db`c#d?WhIbo^ZgyO7B;GQ`z&C#Lu5UgH{%DG@{}l7$6YV4EiM~o?tir zj-&Ae_ubdmcX4r1M}^a31_rQ`r6?vJMp6ve(O|2CGc3{|XVkx?d@(JzHv|=u^ zU})jeNR2nn=jTPM>hB?pUDsK%p0kMZt~=ie@j7hFHd$s zRGR}&UDl0$e`YYXu#g1V`eRUcL45&NJlzt=Mf6$=Q&Lib)Bvvs3%@sS4MOJv%JTD< z!FXU)W#R1PicNyB{MDWN=*nYk z2}GoEIRJ>$hK|j4+gYbs}WbAks@wS zw6Wd$$Otdi*BPx~FNISF?yaogU8TandN)jhuwNZiDZIy%&yU$Im&>r177>AH+SuGQ 
zt4L-U`5B!c6MR| zv6a4j{dyO$>9{OIhEg^#&$BYz|96kUOU#?W^{fL!%;j)~@;fD13KU3yDFZXk z9gvf?4y!TnG4e8qF-tJ4HQ9)>9Dt=*0e*ftXo2z<>|bvwe+HIcObpGcZ3H#Lk~q{0 zkT7`v?KQ*6kH_IoUAuNIgSC|9gnH@?hcBh5uLH zUIi=*-u!>__ANO7zyI*z|A&ts;bnoEni{w)0LC0B)@70M0yBdrFwoXNS`D>&DLjwb_#O5tq;JMG& z*B8v(w0Sdwl#~=o&H{Ns0T{aXi7A25#efM(8svgU+qZ4IWM*cjhcEA=^S5r#7_R^2tFPvy4&(uOn z_@?wN7hGyV230$9I-OLmg?oU__cfJ8O2llcA~bb7A@P_aIxtM%m%Z@;$p6?=Y?h>LL9NVp8kB2z@y(7&5c68qDOb_5mf!z zfptAxHGogc)3}_tfP-&45c?U$`NTP#m_Ck$!lE)VIVho|lmT;< zOG>9_OeV7uw|DtT9FvY>Ew>6^O@R2y0kV0JZuSDsi!!iqe~D9Z|pJxdBO*Yg+popThP%r1w zv(scC6aJdJk6EBY-;L|&?CL^ub3lHVVlvui$B01QkKa#Ctpjs>pGqd>98eTUN$DAj z#j-Abo8gtXmO+o@&NwtxC80sjN73d5S1PpJQUa#aU>?rXf1zx4PWTPbA(P2KB4s37 zLY`benw`l!L9XIc+v54+9w>;vMPkWN#T*WYB8V=!=NH@Ub_IVoGG4Tr=0(|&s!&2aj#2MZ8$52PI zCHz}5g+%|b%w;aw7*jL+Crix2f^m#u#@I%1Q!x%)NOXf%`D|-<&S`IZdhhM!LvM1z zIk)Y3zt4G|7wJ8(k=*?o#Q{o~fL}+X0<;ull2H`%faneF6DmRlg~?bZBNSUN#lXlk zU8l!3FaD;t_jixSL&{M|sLAeS1}_+#iZ?`}6Nu-T^j!kP@ZZxgLBUWER|f~t($Yec z<{vS}%>-_rl>$F?WTcLQ_;ORakZgImHDuYJ9#bG|y+TvLG2Q^R80%n4JC;ucqODqah%GFu5|cz!@hYp_Kj8{ zSIEV^l^GDG2(3l9!QklnLAzZlXwq4KLqEm z<@o8v>&VgNaPC|i8djUjav+g}5gaLa7hBWw@&0eeP--f}M|az0tY3WyikYuk1&I=* zjZ6WLf{(ChSb+b&A44x+k8PT;HW*yNv7<*ZK0c1@>}+^Xd;piLf_+9?v*Kg5TJc?9 zA512boEs>GfN4mCx%2iQE3^rdnmf=6I+?p&6;WY_55Gf{_yZ@rZSakZ#5ZqjYCMBGy~r za=Xz`U!M}h3cPSWPEM;6*34{ZNhN$c(<_TDtp?xlFm~B@vKG?7z(A6F-`HElSQ8a4 z^|CT5R!E*kTN7;hJe-~WRF*_P5Bpdn>{Mqb^m;wKZEZ>CJjIL!`T5viTg%+PIxLrE zs{liq0sZzLu&^@Eyv{3k>f}kx&d$P3TwGmK!+9~fLDp+pJ%4#OluaZ0< zKQiTw1x2p^1oTW6g7sLwRza!UFHt~PleHUhx7u1zI)s#v4JD*>h@6oB_ldaiz1UoQ zKh3R`&VVEjh!+HJ?Tv{E*2^&(ji{-vPWqwP3=RYWj1|*|ZwkLj=3n>pB=JNgFT|@L z@bF=mQqqamh-6f>N?g;2TE2xNuk0Q1mYQI*$rP^w`ZlB%XI>aT7oSwLe3krI7O5Hi z8J}l8M6l>knzxZDO>a7Ggsv-@4@6CP0WH>@32TTG1B8gQB9(crn!7W$TsI>|K zbzv=!1zZ6wEv_i2q@*^bmZ>eZHd@&-x|T;t3@iw-qF5XTQp-{ypj-(Ul1U)U5hjzF zd3L|NH+l2s&6}41f}QG`o}Qk0{dfQU|9^kqA1Oc~#RupljKZr<0Xtw7u!JjFI0+ac z=QtrO0_7|M5||(+Akqn{keQK@ar1`q@;A*Uvn3P`Me7kKd^>_QaZ!(wzK@LH^iq zA9(}=psn%&<0P}u(ii<%bf0u_we?9&=pe2nw!6DrsH&>kSy553jGB2;ec}S> z1cd5OE|=>qtIeiDj2DE(T4)$NzKJu`cQZIu%`vy zkPmLjBd7J-khq4x$|f zc)4EWnewo3>=P(RxgKW5!t=(gey^of*AhFM`Gpk_{O!vu1c1FV-Dhiz&%dG1fK~|8q5=m-E7Cu;BKj z+i|Mz6r9#k3bAS9&N1h?fEE7DZ*i{8I?!gL}#cI$;K3P^>q<# zuwo=T4&I;_&e2Z%Yf?4-*;v@R8X&W{7$R-9vckQvUxao2Gy3oON31(keQBBF~*2F%P_BJG<-+=99h zTcZThva?#-j5ZAjD+=H4Z^wUKo3XR40Uu5M6#mc!Jk~NF$H-qwFa-|AftM$~iaDwG zU(`9hC?lt-t-1hVK?7p_@hHP6)b?ybz!JoYqtD`Ofu)<-={yskDfi6 zX(Ht`L3Rij`0()Zgf>8$w<(XlC~UnddoBE7KknH#6MF-DP|`Ms+$#LID{UrjOSwan zpQhMJcUPC{^=?z`*GNkn6WL8M6-KSloZRns?o`(}Ek|G=Hc$>a9K_;3)$&(l8M4sI zoWw}ONEA6{BgZrzTU}dl@0j~kg}=aZ-A&!3O1tV~5phZeit9_gp!8yg#= z6CM~Px1InNlv<^M2db;`H`BhwC!IC8dv76DX0O3G>v-I{`$p7G+k){HXIz7{aM2>n zE1iex>S}zmZ5y9gO{=`DS+fQu4?Lh~y(u4}RElTM#>R~sRgvHrapj@#iIc4rX0u7N zJ32<hvUxLBuU;ML+uzg_86zfx(L&F8Qc{v8 z=sQlrSo~0U;Ow)S{qKaNft0u<*?-4Ry+`mx=Vw@u@fZnHorsG1x|Pb+D@G!N5FC!f)RD~AFe<{9V4VH zsvUu7l5VKsr?%+tw-b2iM*vc{bLn>WY7knp8+!04rl^)pnWwuNb8X z=Ljr4%~z&{{!bYXj@H#hXhrEb8tIjmmhycbx0~-1-5O>2DBI`uc=-3%NZp-4M^d^{ zq$c?>n##%o5;ckm!tsPxiv*=!fbs$*XF576tXAvqUbW+mq5SgH>#kc!nmeRk zAT01^J%h2oI*_1A-LAq#35O_6h%SL34YQJ?ahBBGV^UKW_MAH_bBEe47KzmDN{>0z z-0UwXD4=-^`$#>{^W!phn1)y;fhVNZj=^MZ$Y^U;7_S#u7TM>QlL?km-$BW%lARM1 z#;L&F!fT&6PQCz85JIakzcp3P4Rg_=%C7=&cP&s+Txa-O_Q?V(0wM?~78LP?6g4q= z^3p^T6BEHUl_NfbsWw)PQPZBLjR`qTOwvSA6YEK$i4lnp%mYoW5fe2hv7(rWfL4VC z0YPC|9{XZ$zuc?o5XH|NH;{{qMd1_pd|*F_JrkV1Jom7kZ8i zzKn+`BGz(8mBAf+ErD;z8EzHvHl7)i37W7vjJ%oq3^yEw=zA!-BKt`Vf@_2bgEW=n zMwH0yVuYPTq{U$eY@BM-o3JCnsm+puvMDl63)z5aI3` zGYw~C1~)o{{T%yriM`Fvrh*$G!ihd{4KT5DI|L&{B-EOQES=>tIri+?y>{>3y?dzc z%T#SJRJ&U! 
zjq7s~Mj#!l*tWJ-a(`8LczD#7En9LOJ^+yfyNsm@VM$3z<5N>pLq|G%c_C-xPKp-c z3y*O-dQ+(|apELO5@fR}1{_2esGBgs%4R_5lqpjlzyOSA0qb|e*vYk$jHXO!10Gw%#I7JfQM!M%j z+h1`P(N?kOc6DF5AUM|?1`tH+vHPkzD)IfbBY5!Gc)WdKEq(8*v7W}r?`p`0zEckd zCBz5B^VH|QX)Dx*V4LID@ShC|seqV~&Mf{dstsC1E23ewTk*6044yqR4?5~!2Q?iJ zv?Wlc*N*#pKZyIoAHYLl4^yKc?KVC4*5>*x0iIV-;EaIq=v!=NRCiQkOxRdD!0DY4 z>+#%x-{UXmUV>aI#}_Guc)9-?v>V$nCS(i>uI|E|bF;Ag>=HB>8{9^kivx8PWr<7M zA9RIR2@pH_BsA{B%l|}0TRG0^&%zdDrP;du$~LS``Ue6fYRJv4*i^k9hno%|A$Txy zlHNq|PbV=uW*)X*`UIK%F47SsH?QZSB|r=VJX`sFwR><*Uy5dPGrqn0Z*aoJi4o5hZY!YAQ}mLJgA*@#EO zrgVuV0q!H9TSZ@p2|NrCXXUq6eoPs>D(o1WkBNOA!OInYLX}F32vaydZ^%Qj{v;Yq zSJ7x`z>?Ud2vzh(Ny}+yHC4E;_jpX!PeJ~*9T=fVf!rd;*O$J)wwjNS96TH&gGM5w z-wgCzNR5Y0QEyhB)RV0zptWkz3O%NsdJMIeD|m1CMktIj%FuPdY&YW<-5IpB>Y$fV zPsv-ct?na~=u7bI*}3#P>J9af$z(7pj5yqKh+TGq`)s&CdYW{|eRk`$868peFHS|`h zuTz2@wcF84rhz%Yj1H3lA7A+puO_^XtWz^_x$_b=F$Y4SlEZ4TVszkW`oOiIb{DoZ zegLUO28~jKb%Qn{HZTqm%5dkn`62;A`dLhyHz{GTx6?L4VOKz}(c`q?6dJ0o65kx177HykCb<#8@(?=7 zS*t-0v)qChdn~35cpU$_{tZ^2UqP*5O^8#)BUzn{()LoU7`O^)p=o~Wzeg2tSB0*= zg0!#j=YcO%V-|=SLk(K3dTgoNjO4InNUUV@K#GvS5WGKh3q_CyYFQwnf?^ODtir*2 z4r5K+YxryYD~MF~MM|Gh7#2JN%VJ+Z!LYBW{xzsIT!FSlOU(>fs5Vw%oAzU@O?-!r zV0|hw9cPWd;7CgmhG~YwNN@G|ssqyHx}Z$xdE$7UxZ94LqtFXswh#8 zBoUmFAT(Q>5u=R3`s5Ahv{{j&83kRZjwU)wIk)R5T+mR2R3+sR!w3Q;n<_lv0J?XPBZ@fXj>qQ$5a8b3U2c zf@DER3Q9&rdl`)FW(2E)F)iY0d|6+BT{WL$RpLuDVPtej5-h$sTp?9D#grt+T9cTA zSG&RLi#gZeRRQ-nVOOs(J^DA$sat5onK8hCE0$^`1t+84UWfCB3T)MW2$R8#16RMs zh4%BJ{B!?2I?tldF99NE0dFh0zh8VEN}CcmOp=I@~>?L3RceV0&E zU;F9Gc>Ld-&S(=d+l7{k*GN`3$Y#Fu{gM$V;;|%@v z)o)(seT!M}Vgp>$24(f13lrtc1la@eUDFYIJBc6GGY+S7K`Td1o z)2VBP%JlSfOrAWMjx+R?34$uX-j!iQcC=hE+4qvu(sS}St^6)%9bt(c?fiPRlI8s!E z%F0UmBV6>K^?l@!WA40p&c4FJLTI&G`kmW9{S@=(&v%5aAD5k#f$04uWt@~fb=^uja0d&Z*!!~Mq8)Kr`}af1GD?%(hhKg7E$ z>}d`sSiOhC9>MC06*M{|W5^5~*Bzztff59GoZ^y`lU)Lx?;AIc8udju{OuteKmG%n znwmT% zddWfsrz}GJiz`>+%{Sg~zN`4;NxG8vllv(tDahTh0SbkZcl32S9i1qODV+|{XVim0 zX2eE~d5{3lr!3@O5aspBtqMG(nTV&O(-EWShqo`SWr@Cox1EuZf!y3&I!i8hqzDe6 zxVRYU)2Gu(%befBdCqJyIV)pR$WHRcO`Cio-lGI~o6psfU@RW+JaV<`(fr>gNL5l8 zBpviNnA8n&<0=jJ+&d2UQsSSJlS9L0&Xkm3=gyr@=uCZmJ&qndib<0ufxDec2z6xZ zc9vz!mt*|+@!ms9#2G>HUfa}&X&9_Z!Y8#`aiF0P(aL_PGhachs=sJ2E+HX-s)@*$ zF@w&{fAQi)`ff#eImLiS+|+=?1@GRo$0t4a^h|(I@R!63RGnM{r7DceDthV+F9ow6qpO0#-Yc57I`b58(G9W(QqYuwLKJ!GlsB~I4vt%WnV6tWy z>O1T4e)R_A4*P)Wb5{crgIrNjfr_`@@@$wWF(=^4o{mcvlmK5>aYKTKAUp0k*gNeo zSd3^gU*lcH5i3oY;H`YHd9x@l&R?nU!3c_9mE9XQ4yzJYC~d;2?wTtU5IPTfTlvaqC9SD)VT^zq=39NZS&^M+oq>y#NCc% z>a7aH*Mx*@pe}h-Jw^2y{hkKJNXgv*pb}vOduns?_Tvx)w#EOyaDsjq!Mk72eJR~4 z0e2(LsGB6f_uL)bL0AtNK}olZ{+%}JKB$0a0vU{7ENc^joeGt{$NGT~(q;-f=_~UL zU#e90^+w3Xjl!4Z)E8S2z^?+{Pg%$ZwYIj>m3%xq^DU`lZ;w>+YbpAc78*E7`bi>$ zvUBHX6e)uwC4En_JV1g-Wv)!JT5@IDq)cjryR3wS?qA)`*;()%gPGZ~&L+odiyG+oiOqSkhEGBfvsxUy<2)bH#Zkvqej=Vg& zG6XYM@`nha&flUV{h66ABF`=)RR$yWjHokxLxv2&>eZ|1{X|ko+G3^oAHto+iz5Zx+|XGw5`> zYWH1X{-5Q!S^0&hH0W`da1IqlANItH75viEAunS;4p!pB?)|%L^hHgOexRQ=}g3o8wuEN z1&o$W8v~cMI;su)(0L-=9q5A2Uw5>vjD!f~1z_;OQV9R_-R%Za3uTn&7AuQc7S{8; z3;+)?cYFQ-kMQ~b3DDlPfJar``QM%QBQu%2-|v8E!W%&%_@eaz3Ti<`=~`@U-Tho` zx81d?-*(;AU0Lb2)pfPDP{CG3X$5U@c~}af0#+c%BNzgNK$yHIkC|lNchCPYH{48S zl6i#ya^^eV%sshtbLalw|8vg&H&g+IP}C0YiZ~z4g;axJw3?UzBKwDs3UMeSp&EuT z68fc7PeFroz!OprB`rb-#!-Y|n#&9@Sg(s1SPLRt^$#(K$Ud2xb73H$Mp!ke3PXIU zh=x932+1hYe(U7I!or-JZ@zg#5o;cgN5QyAA#;(WoyYy}%&#QCb9$GbD+HhWRcsV_ zNraI;)1y)-6n>E&mO>#P7ejqF@GjbHwzf|{{q)eDJ$p_Byc}D==sN<2-y^e!=dKfE z0wf1O7a%&-)5yO5`s-)hci(;AwpdbfXx>_*)j+4!!5^r{^v zWP*JFK_>M?e5$fg`s9;OJ~nRLxGA(IhY3AW)6yc>Z6RTx20w*J#s;RPy8N;B$v 
zkE6-id^TuiQOHE|M5L!&u~N^D&+gpU}nrqwe3ZsbeENJ~s?Z;?s4RP^(o?`8BBPsKb}mBg{Ek#R6hG z(B z{BZYSY;693myrd=RbfO z(r-$Tl#iUskBs1vO->E30U#z^^1D3U==O9$EvR{AnctjPiQk`on=^n{^l`L0sy;_I z?^Am?juCCqOI4<$*ieiyOnvJFCc$bAmK6OB`h0yjVEck&8*XafgblV0aI>gK&#s-D zl8XgtcVSW1y{PZ2$9{V?Y)%_)$+#6u3%VTGPIdE$<9eprNX2#od+q8 zg}70U3vp`zF=S1(;iY`A!$mgxi>_+CSoZ?<+CE2$CKYqD=b_YCit))4QQf(hF@Xaf z!2^$m5nkiv4xZMhgGn&)tw?3Ya#&Or9CaM!doy>_wGXnds&iF zaJ6wd-{^bHRmU7h2Yz?@w;cL4OP|U7iF9=Ny19v(te=b}`48fn<2JLHwG`B`USpZV?U{T3TB0cmmw5n=?b#x3aFv=>o8U`26`rK z&F&^Nsv21=@8OuQDyEKUw%NyCE*1e~DY9{G%53O}~vrB4)hPxCSO=67p4rsI}K(lWQZ&HRb4Gnvb*@RrfE~mg8#~H{*u1 z8+j$b#7VgjvGhj~2F9Lpr7YZ>w68I4AH#_JC4SekiqBZ>?f4vyPJ|%4 zOWuBpJ_YUFYIxip7OxwiRcLuspkNLmIg1bHJ;L(?|8;yBa|XY}xa2D_H*+5MEB<2N zid5#se>d}Qai#GpF8zJ}UiN)I&kDLp0)0*vCAgV1y15;1be?ABL`1GW4|)1rNSgy{ z*|mRN|4V%6*#fmf!y_V_yMsx659sCcxtSK&#mhO4Gmb-+B^#ZLz-Fxpc25suoKw8g z>MY}R{QnS2b0L%b5`8JMH90KOFrN)|M?~c(Fbo_R4n?O8>g;uR;l$6dt!*n)+kUv! zPHrD{Zl-#f6PTvWK(;=Im!qPG=5zVb?rB4kHVK8M5>y&0pcIv;c2vX4B$2%TlS~Cy zrCy8QO?V4!?lxG|79LVk`?m1#pdW2iB=y^1=0))GR`Q3Ir^^p%o2ek;^A$fsgR=pr z+$XuZ@Ah>fN0o!I`f)s~NdGsD5tf*YGL1&Yzz^m<#MQi>sr^(sFG~2ocN=kavdcpA7WgH-v=0M z0-~8E`=X~BTUs{3r3ot2ZGCuCuf%mNI(Bf->Unw$;;Nb;$&0;b~HY9IZ zrBq^Z#y9a`_EN4=N!{mYb9gD{k(xeDRQ!Fuq&yz!k0KQ;Mm>E>P!fB)KSw5;$3mVGT`6w1+=i?4m3(sN&6YQKDp&1eUcRVDl3+r)z8pnK#h8{n1JjeIGaGw$ zK?>RGi$Ti65qIRIKRTkv90f!(OIF6b{&sIW&bZHT@17=STE%w!f%yT~nr8E)i&3Eb zehQq-St2GV(ih>;{Ks%>>b%jOKD{WUJYMOKAqt3G{aVaQiA)3;JUSBVMWqlR99lLQ;@*GS{x|-+WFC1x(ok>Pk z%EOPx9|nq{ra_0FAEy*(1@qo-G|qv;*^54RFSGT2b~_^W+V`M5XDpsBeimm~X!pyO zS0MWQJlv~yp1|8JZ^7DUML}U9N)4qK%*4uQNOL%5h`!QVr@Zzq7VkB9!Knl;9*|BR;k4U=}Yi~l7B>!GAZFw z>vp?fv)Ryix)C)sHDMoAUTz+8a&w{OdmyxdJ^e#7!^xBN_-Om~!B0O75#+lyIs(4OOOVp>Q*2 zvE?%-o?d$D@eY8*Ux0I{}(ix%^dCK1i&tKL8M}7vikmRB z07VD@Rx*F>k*RyX`yql%}{uFO4D&OUD;=T3j zLlQrisrS3T@eNFyb~RVMp``ifQPyi~arp2dUgVRWhR^36)@BE{)YMdL-tu8cB03zs zymS6c zl5d7}>3jWS;%de>LN@u4n0BkVOW+x>XF@M1Vl$MrZ`SRzP zi8?sYm-6{I6Gl=RdnmIUGXn@AASj}>%{jUp{9VD{qbg+nstV>5 zT2p3Yw_+D+Jx6e&={U;tWtg5d9X)M5@OykH7D`apeGCrY;Lp(x#%1y{TB^dDf=J@#$&)AH7cabkty{NZ z&0BAAyqVz|!8p+Q)Fp<<$jHE=d+x!VcizdEAvuh$4Be=xAJxo{woRQj4NpJ)GzSGq z6v1OK0g0bj$77;s|6fU;5wsJ_^V1^(GevvWK7yI|)vBw@m zQE>@`1eUfS?NVG^%-;zTX8~ihl9G~yN_)&2U^r;nSRA!teKEeB|1h5mAb9R=&aL>Z zbu~wnCACkBDY`w~sO>$9F-cd*l0xxi)09d9g@uKz7va|1<_nTR^!AQKw>UwhoTgK@v?;MF3U9O`s5rQAS2O?pwT= z$K!_&9}f9|g#?LzqNF?4>qOxMj{L+*rFlH&W#5k7ouBcgSErl}c-Ol`B?6^{^&QnuPB>{4gpiDq#taOas>|bR70syp@;bc7QuoUY1ahO#KTXBq@<)`=|c};0<+mv-I^fx>?{=X zFq1$rw^ZLeR3lHHkS@ssr*hr1SSVImSvll$3U(glA*UQba))A zl$oqo;gj|qSl##iiv#wnzO)5j7p(tX9kBVDSk0py4EurSko5%ab;E%}Srmdn}6~Vwpa@m>jtCN+uLfJ94q;-&f0l=6}A z2_kd?h09WYi4Z)7Ffj2B-(@MkaD0LYCxgzfG)Fsrnc%TsC&El%qSmG-PMR+hJu%mb zzzG~Y6?-vA`Dlk%7mH7zh*Seej4+THLXS(UUxJxOK7Qm#4TndR`jMI%jsrJk@}z!D zI_X>*pEPkIAE(I~`Yd%irg8ZgA3tQg=rfH;f0Or`z_s|_Pn{a{uxS~&{GREU z7Msv#3HwfdEI*fE9;wrBkv>a(!1DAkL{CI2IZh3bI4P$!`48NGKi+)v&4EyovikS* zpa_ogPa8H2_%5L{5@I9Oz@=0!9ZM*U^e%ys5+0{CewtlNLT`M2`SJnRqjNY2A${kL zJMc#tI^)25KF#l$;4IB16S{kP__1_89p^Fvbp_JP5~&FGGElE~001JPKuX61i76c? 
zT3aMwQD`Xj8~wgC`8Dzj^81M+VUH3aiP%J3BK)hbzS{rbzxTcG^}}=qepvvhV&Kn{ zgRl~FNsV&(`>LzYGJ}Lt8ThXJJ8#Io6Y)c4eb3&#T#8AWiQsfBN4gBv-w1hE4lqjume4rqGyQEjlU7cAMi?TZL_E?OlB5SqGmVofCyAHixAs4v zpoc^fEJyN`RQVl2K(&&j)51~O#Uln0Sdwz{0F8!0OTi@kS6T*65>Jdn=Saur+b56^ zL!>IwR68XdXVF3?Gb#MRf;;alFc=JRdFGcr_R+fz2}xx|#s9F6wCR9gBZt9i!Rpef)E~6{<4%`1dM$4I)obL#5vfQ zemFWs5CeDvClb_HM|E}eUOThZ^6+hNgdjw<6e>|GW8%Gz{2G_)b!ZVvLqh|pdqNIt z!&$f(2|#;L9zYC8z63PeuyWG7l5dA`aIHS#gBYho$i#~YZfbEe- z9(jfx9tqff!V>Jq3z9p5a;Kln0xi5Fcb1Ct5O!eU>eZ_snK5I=^z57*sMYGPo+`aa zeiJP}YqDDVo_Xe(|Nh{E4|d`#SQKf2&VY9BHv!?WgC*c7b$}6Q0bX*Tn*-grbm`J7 zygqARbrLj=h@=>FQhayfqg!(F?iYEdoe7Nt67y>drBm-P- bU)}cq4ZK;pjB9J400000NkvXXu0mjf;T60r literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Player/Build/Mac/icon_sources/appicon.psd b/tribler-mod/Tribler/Player/Build/Mac/icon_sources/appicon.psd new file mode 100644 index 0000000000000000000000000000000000000000..d8604dd38ada8ff5ffdddbea19ad5858dd4c91ed GIT binary patch literal 295912 zcmeFa34ByV@;Lrxl92lj;R+-KLINb2BX>AL4g!IM0O3Z&kW5Gq$(Tp2sOT`p*&mJV4$qoC7VIwV{)o(!*m@RyMM)Hy5C zb;Tu<*H-BA)=ryMuy$ENQekMyc(<{ulUFZav0PV`AF_IRX<23R>T#j@h2=|h$?zSE zRiPmSq-xo?&{V7lnU^y?B)z;s7ZMw-i7HUX#D~PjM{5(}6SV5ckeu?ekWAf@5KU5u zR;^A5(Q1=p)ye9_5aD0wcsKYqwxX~od1gkY&?j7t3oWjyT9K?$ty;AzdR0txdBswd zCMhXNrPivn+9-gCs$5f6mA^Wwta2Dd!tiA1Dhnz~R#cTh*N8WNNqKeExX@7SNc<-- zm%l>nsH`%YFdAJ@zFf6Be}zgDtyYPcq2X94Oir)RtHOZPd zU~IHno2*uk#pZHIsb_*Cr@XMFXbr=Wh&VJ!votZuG1}yq7Zq%95@io*P={5!%T zhqb{&_$jZLRbE~?o(|{q;_|BU%Hr}BA?bNBA#+R03d>hj%2*PB1w3O_C}Nq&$#|l) zY|3vp6om!iNUW%?C?x}4SfJ9C>Xz%usw!bLG}fb0Sdd&)Ua>sCYJC2R6{RHw`8eRJ zm1Tv(Fs@*Hj8#dX|81(pPDJ9Z)JUxEs!U>4a(a1bc?FELZhXvGmAzuwkV(~(ODe0% zE7qt)dH=Tt0(3Vg$38o3RxDp`Jrtx#az=SUHI7wQ#`x;$lEUPeRIOT@F)=YglQ}UX zO`}Q7)W)Z0BxJ^AYBOTeV}aQ&?PQ$Nx~#Iws{FD79mA=K&rruFOiW0M)5c|FXf#Q& zacL716Eost6EZW@v2^O?s!N%SH&Qnb87QsBnJ^yAFZ@$! z@Kb0iueNPE|JF_A)wWHgmb1%Bs>W-D-fW6(TTKJDR_ZcJmg*{@OTP5)5N7Gq-Dmc(=^(NF-fV3v9U24b$n`Sra-tAWdhm%H9MzU zUDAynU`_yh$=hynG$Pk@KdB>YSSNchCQVnA8&6MKmPCBjc=3!kJds8FmX z7HaVmsD)2jl0d#E;%5@j37-TeDGt6TXt6v2)D1pCDPuKBfD=ElZBin(O-e+Vq(p>C zN+K{x8uA^nB@t{%N!Vi&!I^}VXw{ndNKo8JQ1;09$b`tm$fQURjYbox(MD=wA~ms* zpvVBENr(ikhu}&Ja{;CT%#t{mFre=+H9+IXXwnkmwdr7TC&o@ph>wlUOwGuMiH}d$ zq$MT9#m1xygK0Hlw^Vbg{1Bf(e5euwMCN(KnCgGzi==1&Bj_p8-cEWX++K0@imOKw z=n-(QcJ+#@M-u1}aIbdtimOKw=n-(QcJ+#@M-u1}aIbdtimOKw=n-(QcJ+#@M-u1} zaIbdtimOKw=n-(QcJ+#@M-u1}aIbdtimOKw=n-(QcJ+#@M-u1}aIbdtimOKw=n-(Q zcJ+#@M-u1}aIbdtimOKw=n-(QcJ+#@M-u1}aIbdtimOKw=n-(QcJ+#@M-u1}aIbdt zimOKw=n-(QcJ+#@M-u1}aIbdtimOKw=n?QOwaZO<I_K`wj< zIt73sTKG$P96{>51aSD1@&J8u)Zr#6^YoZikzZB`k3Q+j3f9nPO8b(UA@J$R&EhJ! 
ze69?-TmgS8I30X1;MNE@^aK*H^E~+mBr7P6ICAX^pYU93m)gOkmORf&sPq)Chmh+5 z*4GZrd!iUg@8@UGj*e2YET)AO?~W*0KV+~bEv^;do?`&pVSe_AZqgXFS(-n;zA3BCxX@F z%25IT2n9HCqsNC|~Uc|nUD7!4l zuhdP;U#^=qao#MrcH(A(zUV+(@LwNz>b9(~GIzx!!6*J)Md%Z2L)uzu#!VpfmWeJbIZPhI7l((FpOQdsrlMCzc}58lq?A}d3N z+j(h4dG!jbqCVvnCGeEAuB>`FwDFjjhuGoqJ1B!|&;07D@=3ZfT?IV&OC}*cFDL-J zQD7_qP%dlv(h&IbKh3`bJR~a!UJx!0jde**j9q>7< z*c#k@$x`x!nM}C86o1+zu>zWK)5^Qz#rs%U4W$r92B9P(pTZ&tKiSTRCeLNR-^!xPH1{SF7%%RFz4vh$Jxcz%^jSQ zb)|w+IWqB<(EI+Z?Gy0i_PuGthWDNM z#gdrVioYCBcMf|hui-*tL1kRvEARbvM8?yz3MYQKqbk1NiP;x*P5U1F>SXn$=FISC zUVZ5OXTSdGa?5KU9Qyf6>ztyM4?nl_!^5Ypwk1rOyL8pY=XZT{K@%w9JBy(RA+K5*ZH>F2mDgU)iwQ}x%+FTQ`_b46^z!Qs0SELBT7#@rma;rmAyJFh=fe6Ydw<<*YQT;n-fpz1vS z$a($8yBo&(PiV->38t5?3F!K+%60s4tlvzx&-X6*wP5kbJ09Qn*v0!VZY^%eeSh`v zSKn_PyKd3@V-`D=`eZz`xok{Ssjrvm7q8UJ+Ex2c=O4c7#De=X{qp!9Tc+(Z_cu+t zGDp$2=`VXuZP*d~H`D9qjg!6zQ;mEr_N|n+{#9Zg|8T;$A3P0<;9=Zel(-5{Ody>JoDk!1m!Qmm6N`zjr@Ms z{2z{d^v202mM^>O+}+PlKAn4$=fWIbt)8-W*}u>Jb@_)b%f8iy%{cb&0n;?&|MsUp zCYTy|&gqALe?Oq`LGQXo)8PrG1}Lbhxc}Xyi=SM+DE`BwXOxQy9?f1KJ~XfO%d4l3 zY46K=^;phZV?X}WoT%s{`*&Ar!%mI=(W&o?@15Lz;JrPqgBrJOeB{YL8%Evt(xl)1 z2tV`vwGoeYx;%4u+6#A$sok*u{mKE8pE*3`@Y(-%#zeRT5veM@xXzWe4MgNNm>@|v@^a^7X` znFqF+PMsZ68FcW`R~;?ie43T^*s8qp=JI8i4jvsoy5I7bQ-3SYb-ge)^tJ8Iub2m= zyu)*bSKdn7bNv3t4m57!xi=%i_~UF$COW4!q}8V!-}LqUv(;6uBiGzDX?p+uFX~Qh z{eG(Z3uSMAIXS#};FZ(!3r6R!f84WR&3)&i`z1fJc4t}9oYC_JP52{y-Jd(Zm~}08 zC+9WtzIj7(g8vqr%?0IMyy&oJ*TRzV?Jum;yji|$>bAPLZ7*!78h3Q{;Lz`mEXvE$ zT`y`=Z+b0w%L73b-15@<_Gc_VH8=O<=X;jUeRf*TCqAdRd)5y#Z5)+&U(kC42V|7| zo_6j0*Ol38voA&r{L$sxqhA}|{^@%4-LJRo-f%Kz_oUW2|1|8~Hf`Md1(#!gxv%|$ zOHXzlIK3z`{GshbrUtA#x2|gYH~D`vkJeZ7+{DYZh8Le&pkL5%V*KBVH_!k6U|smi zxcBGp|D@%CvW-X1ep-|3GWE&B%O3n^d;Qmc|5J+ZJAV1+T0VYB`_k5}QwJ4|n_F;p zviCCYtM^;<=l=NM-MydhUHe$;+rKZ&e(~_sjZM$Bgg;nQvG?&4&p$DC^o66Dee<4s zQE|bYd&&RH)pG`oPJ3G%g;9Tuda+;4?>yHZ$LFcV{0TND)4cwiFyHd%{ll7e201?S z)yPLb8gleHxA)&MW1|P{I9;jATzJHH%%_pl~d-*S3FW1Zs{<*PX^sx^< zIAtnG9dvNeuWg?;^W0N!Rz5lY?|*T2jq+OS+`2~L@ZfXfoPNo#PT8k<>tAhSmOq&J zSnx|nqQCDK)>=OB&E$=5%oxx&ak+P~_C%(Bh3V&CzFalj{P8){ zQ_bfZKQOCyZ8-EB&&@it?Z*R$&Muu_Th%&h->5g9zxi;|JHwY(?mm_B`cvEHzFAl1 zpYzzetKZ$Y;ip&98ngHB*Z*|(ozn}~6#N#be)s8xr#GzG`1{S@JCFUkKXXOhxR^nS z3FW6pFFW=p*CAJV?uDnePJg4NEQ;qI-C&+E_t_zj9ir1|OaDhw|zM4;@}B?)IIRzJ;7U-oO z=i@DJJ$&V!yyDGKiZA~7=EtLZj@=a&IkUFTcz@BJk%wG!Jq~2PcI{AeUh(^hfiI^1 z_|WC`8|t>*Rr9ZD-~BP_tZC)gGnqLHo?G^D*`~_757oc(#ED5EQ!Z&z4zHThx^2?* z*E(*#eP!oi4*ZPg%6U$`=$D^!-|pBp zZ}&QfaUT00{4Gm6ti68KwMg%`gBMzMReL?Ves;`_`Ey*}GE97Y*?|*lKaATQZkls( z@zO>6!k!xW$}V^BC5v|`r!42W#}3r4A7hFB!LW1GhPga9Z5X&oOk8f z6PKUqY|r~1EbU#%UzP3&+B(FkdiK1qw~K%IgXgw=Qvd7zc-Pu9qx(6&`28y%?)`ej zq9GlY6Y=AR3>m%h;_*eEb$`PcJLJ?nZQBG+|j-;UP86Ln=}SI4HheUw!@Y3F-K4;`*Md*i!h z4+eH-zj)LX{@_WM&8~A_uU?SvKQy`3^B+IH8)MmSUhsY8SyTCxW5Yjb9lSH^#d9y5 zIkx@5+G8)J9cWzKs@vq1QoK5NOTA<8g1xtBVWAs{;asu$G>g7Z_doD zX{uA+Z?0F>J3aH;OUu2Ecr6zPmXn15dPYSmkJZ#r35+wsuhf2>*_KluCh zq*KX@JWbbj|NW_%wNGV*jm)b#ka@1-2*|hZebV=Ur+AM4nX#N%46fXpYWi-^A;%lC zraZLx^`*1_{EzEf(_c_5>woz0wed&d-&|I9<1KU8o@L{1wt&l<4=@+U#JK%YGV@k3FWT>}TU?*8=KjN%_61*W(VX33`JwjbI?H3jU)@!I zci5?%dF`Kiua7f-*njkyCyu^-xO``+F}HU7&*>@uG`0A*UVG(*DHQ|i=G8A-|IYTe z&0+b^&fnbcW|Z#!x2rz=vE8!QbR}iGIdn+BpAKLAa97Yr@F~eIG$s_tr9iVvpt2cfg^x-t~ufJ58e@`&3H1GJ? 
zP%?7wq*J3xkM8y|E!ej71nfJ(7%lG}XzIC=AN*}hhG15Sgq&xpKL4AZR?ox+JZfAZi+jZbLx0bz~Z(CzH?jrw~OmX-*;~R z>Gy`1Z?@js{*gI&m&G@1%)9|VFTMCt=*w4?*EH8RoLE+0`R4vz%l|lc{PdB1zv^@5 z%}$=wfYkoNhhw2`RUWSYd3qJ`84!g!!P$gyD-abcCzaDM-~mwwN6|x zdu;;yKKuZGs^#%F^4>bUqWGz}+(*`_UQgOJZ==`w-G$CBqn`a&^sdkT_2AwkpJoL= z85Zw9YTJ983}EM8$Q$y+CCk7cn*TASZ20R#{sErt$@0#dmX>pSOL^{-K%RR^ug}V9 zdDi>a4>$br$c7(QwZCQz{6PSE=3Lfv^CL^m-+W|h->`PERg=rl28&I04erZx?V;zI z&zNgR?%zCTaLA^HQyX5$vwWLUyV!DHR#w?3&%FDutK9ijVUu1xadqZh7o0BdA3tK| z?$>_$W#ebhKY93K?v=S)emt%SOW}W`E8?MK#Wx#Qs<`^|=29U~SbxO6U$i$R36hfw&+DWY?7xKZ%Mg>zQ;kqi`OJvqfd$UGq^$1PC| z62IT8h!9|4k*&~hm@^B4cZEVrzT?XM@r@-U7lmzKh4+t~o9O|LgPfx9_ZBCjad5m+ zVzN3e8BVriN4EhA&f#SR?H9A3FDpg-54`LkeMdm}*GJ(X&)O1LRDnpj3&AfrI@z}_!omDADX7r&P_Do%CIZ?1 zW95$QOLu>T{JjeT=Tudt zTjlZH=n+*js`Cq>8m$+XRvN@<|$DtLnwjO2WIr&vpI+B`Zg@dYOH`X#r zs)}_LsijNd?m|^DoZd$6E_mA%BVR&ByT6^*%i%_fnBc}Nv-fsv!9^{H#5eqvLnCe`yV3 z!y5TthY!AnCNJ-qw-_oilTHHT~R`kCO%1>SO9=bq&Nil-Rj?jd#o-w zcFq2>TAOPeI*QA2RX!O^#O{jU^QzXAvOBj@0o1x+ExWOZ-{<6)R_o9*u}Y`TxEtI{{v+Jn^rh37H$hc$tAZgbot_WJ)e3^aQrDsi z*Z`4NrCVVYxG?t7X}E&Z$9)6kOrS`*d!eqZe0fQkRIHo_#V&=_0100Ezk!A>19FB2eK6#k)2xP|A$ zeG`H3o(1oFh1g;&HuboU#CW`qM0ngpERyHsY7hecMcheTfiOaB5sP@bjV)qt8(Vl_ z`~L|q$s}|z`oYJ?XM2Yp9J~AMqv3miKeD+GcgW!eqyj1-Wj=i8xDjNyMv!3|VJ)HI zFA#_xNXR{JZtBdO>Dj6CCz3d2)|$LxjvMxZw9iGrDE?vlUs!}bVS(Wysno)!%;wKp zosF>+iGydYt_l(CvIoQ_b0Ot7lgoj0U^aZ4&rO6Y{O3^x8;>ScloS@iLXd5~l9wTn z;FO7*6ku#!CYM@PmCEYSz(F&w{IaSN*nE^p6$_xGumtQld?S~xD#RBOVSzgfb~*GM383T0 zW6SCJ75Pgm@>dkktS*z)s-k=a zY^^HMRnh`Vra*)EN0nN5Z*^rA`CSET%&DtODnk~Az%O5fsNs!&7YV56&4&ss#5XM@ zoKgNJsvv~!T0F6=uxlYX?L18fKP#cLLV<^%rxwBTUtXFDTLLGqSy8M5C$0;rs;Jhn zF2&Nqx#bmwc_nLgA@QtkdWDWwhiJv3^2#dPvh?z@N?idQq$t0%QWQ=aL^zfCOYnowK>>lZ|Griewo$dSb!AJdiUkD^(P*h;Rauo&${~nbQmPBl z!0^ziPUhtuPIGAvlhI}K6|@#sDP+zt!ywKn#_t`>J_*a6V~5F9jx&g98(t9=PLdQZ zGrxLuT6t-qd@!ILlx9JUTvA|NE>{`(G!fNED`AFxq$O*i$NbW~f_#*!RE2v+Vd8hK zfSv3G%TUimnz6^Jx{5L^l=sNWM8GuI!CVU3l~-I+WMwsw>-78;l@d-YVc=5fY_K;K zh{2lWysC;4_~oTy*k-)6*t#maWNCg?b%n0din0u;Sf=2vD%N90Nd+!A;TMfi&+wmW zRwb+}-~(N898t%sa;N{X!nOMua9SW95OG@y)G73>+t5uRHD@o`ABHkJ}E^edl*J+KyNI7w3# z20vM4MdeUH#Z5~pbIT;kRC&eas}N9DgqLJqY7*%jA??_l(h75m6pBO2ys~UCrID1o z1kv$R_T;FobmadxLZYK+*C*?6|MjeLWCus7Qe=^e2jV&!!-b&SRkJH~nK0dPCghhA ze?^8xoJ^{j@Z^oILRTn>Q^4<5FgAd)TWTWAp=4N^RDW?{5XD* z+EwkY_EdYTebr{QMg5}sCAE_}Rh_P$sGg+GQWvQIs(whlQT@1jyLyNEHT5p_8|tsr zm(*9(*VH%Ft?D-QR`m<8BYY@#*GO&=x1NiH$H0=gF+&}Ox^l1ZP9bVW6GXE}QAq!g zAl^kCz>nbY7e!S+K0OC5ZvvA#!%dnPYTpCmQaIKIpz13Os!sCVxu5gc*76O z%qV)Vd3aqk@TMACF6r&|- zo%6`g@RGotW)-A$hbj+#yth~f3wokOcEgJt*b7hrUF1V%d^&t9zhy6kC!(K{ow<`< zq|K^X5c91hEM-DpWh`(JxVTgN)b3P68kYVFF|iKOafvY)Y2-=eV9H_L0ZRtR(+PD; z?6wd}yTXI7Erq#BJ4g`XU-l!cEDnZ62vJ04NV~Ixy+JS!v3qY2+#3YL-JGtIp}Ro8 zHwZ>o0)`E5k=Wch=?#Kq_efY+CpjnA-XIt?$L`$$CavBem<8KXda^eN*7XL#-GthA z8l=*-iIf8UUljz8A5XT+NE=hwelmPaOxp4(>=uy}{M^Zcr`}i; zP{Q#FXE-az*~QJx{h)`dyPKPfvy-Dzp@14%=j`U`;}>8G{HC8N&^^G<$J5Oj>tVke z=MD$gID7a64hjw(9%dQwO}J%*XV~!2;6Z^t9tZ^6px_xl0P!0zG(r`dkevMW7(Uq} zAyySJbbud#02*&NtH;UBXTb2;_ujX1?Yi~r58uzP_guGj<$d=q(G2i$a{@FjoWjA` zGjM41_aE`jONIt|Iy;a;M;D(#5lP3P@ZN|)J}$5iA0Qpw{DM_!KSAMrs$f4iDC8A= z9Nhv!W3zsN!j-Y10Z^!bLMQjY;R(}zg~GK7!vo#nm>?IR#Un5*dHQcqxGp&?&;z=s zg(H$@V&VGa5%R(@Gf!jT80*43EWBS{*e`sHP#E4%#MxVjQtA~5QLtWR=v9VZ%K+xL zcOv}1IS~R!P-kl85m=#(hfTKQcEgfq$Q?BG?_r6#SokOA$}v0$2#wE1r>{#4qfQ?S z{X?`QJjjkG;Q4#;ff)QN8ixc z$*1=7uB{7_BL`1C*3eaU;aZl5|X8iOHh zNg_FSM`us}pm6QD$xj_?J;HnZvVB2DV&u?%-mcKC1p+{C4A?yV2ZoN09h;t&ojb!a z^JJc7ruU58?8&Lgnuy>)Z#O3g4E{_M*4-yCC^RBk8=sh*`~$>po{90=sEDBh{XJa~ z&J5wDC*bVk>gn5ez~GS3FbD%rLKw)0xrc@X4G8dYcSbl9XMiZo#lg|p)x*ojAEL>V 
z5KX!T`1^ReyY#_u(#Ulp1Rl*uV$owB&O$8eU{M&jb{4yS%VO6Kt`#ENV-VT4aeD4s zMFT!oqO^MTg%dRK8AVg;#^&9n1}ZcL;pq75xl`k|0t(1mJ=JdN>yj&raYg4Bq%XjJ%}la1yUaa3kS` zbt9k+ZGko6P>OX}%3cVC-$UWAmldzT`A$y$>g1ft;DODe!Y>Q@6c&|~Rc{z?02wPj z_~DZiiX$7y_xBn;;T0#U$@h23_cHSRore9q(sH7NT)f@z8E>&DP85-=w;Dd@O>k8x zTpi?%7Nz+_0lD1W@C8&_%nI@PE2uWht`EVr$*eSq07sy}Xy#2)lWzcp(QKBL93_p6 zvXbwF65gmd&bt0LX=gH-Wo6&U>*@grj$dpDCZzFl6bG#Q|^slzNQ{f<{ypwQT1k`neeb0Y{g2%ZEEM+ zBuuq{-vA&S2!peXoYO~8}c1ZwWU>}q=D4q;GwW6dXucaNl*+NGFoqJGqoyP#12{n3Mc?FYHTyM z@-0#qtu`be2gX*j45D4=L{5aU)!3r=felC}fdH*GTQ0*RMsjD!peNb@3` zh-85dOf6UsP=PP-N<%C3Yiu@2wR(#%VK@UKN7Tk9Wh3RxVK-FMxnsfVPBW>tG>VMr5w+21 zhFWE33qfs?)zaByByEj=*wm@ZkcM{a3+R`ARzG)h*u8XFYf3*&JUbdgl@9e~Qe3if_kC3siRM8Ow-Rn4X;BJg;bhUOV+Nj<2(c z${Ek|T$>qcO^joQ-PaA`Oyx{a-)Ak*@D8m_O`wWgs|5%( zHi|k4U}gelZsp7X*3{St{V9+jQ=^$FA9UCOP{uZZYJ-DtO_oNk4aq`WqKYa3icVUt z6M#AZsJV$(HZ=ecXlgT?gLNh{Z}}Du4%Jn(Hd-2h`VNG#NK>iKLVzqST$>(wYv7E~ zq1go8i4#t_yAEhF!9+n8IRhx2z5{w}Z3JAVX34hHp`9?ca29}Sg%3T~0$m!6O@LFf z9iGt)37||CfD-Mh!|s|^Km{B(0a09s8AgvY z16Vs0%1|MQrHQlXd1a#>#!k_QJ(|re##Ut;qpAij5z3&a0}SH>Jt_@g3|d8Z0X?E| zFgJ0na71!54lqhyX=;TIMTemTC^SEu38*rfKv(!CuB`+3?>6=0svZ= zdw1AfC5wNxOQNo9d!_ug7#?akV29xW)KAjBd#1``e%y)=GooLM#xd9~nnu{)-kHouNHJ z*kD!|zV3vDgz|}>yCDYRdrSTM1ZKuzJyizz$Y56x^1~uxV<*?aIlS{b9XE5pg!$p+cCHO!&RXjU zrN(4YHXM7o4c3%Sh5L`5yD3g6ItmntqoL-+=NntOW=^@`^#ez~R=%?_w8d)Cg$uAQ z%TQlf#^u1VI5WgoE|$)W@UI%mv`~`*aMQ$g6aziN zBo=DzzbH0%c7hvCFe1Jvf)(-8UPeO+){lZWeNlK^TH@l8Emr9@E;)z7;+DN|0gNwP z{@!*O>fr0|=li*jqpzQTfE&@S!NWR-o5M^a%;Ba8(@66u(`a*~DasUWR+-dhjY+E* z9zfxu4KZo4aq$U>NlD3L#*Q1Gk}@GREj?pmX6B^HSyQHF=j2Swojzk`-mF=(=ggfq zf5C!r^%KxpVlrdn2yuW8i9_0uI6y~;19XHqK!?O3ZAcuTBg6r^Wu>Ld z?$!Dt!l^;96_fIsOnIGV2s$#d zAUra%qVUSs72(Lvo(p7ZDK%B0yP)!)_9~2{3l!=0(WS5<2;{!hswM|fvrGm&j1s-*}!h$6fQD9K4 zC^A7orhxWtYUcgV>m;qvp~?~^^Ik?6D*{>y3YO$kEPaFGsMrNL(HZbWBOi8tDb*{g z^dn{ymHDW*8D^ zd)c&EPRe){aAFYo*9w;$iS zA0`EkD`Wqj#f$G=bk|+cgaiMeWGX_+{k{nC4_LqJ!j5r2A(oCTQE;~biyKdE0HQ8d*0l+ zb7s%Vixj9yrNDIl%$+xH{$GFR{T^mS1LoPYX64Of&}l@7vbnYvpkKWU5EGJT1LB!8 zXHaPWpmZyXyAY;Q-5XKyd|FQS)KP?nWH3V%!g%u_H`swC>d(TsQD8^`Aa~ z6T0%dc0RDSij2+lT%>s_P%wGYq)dt>ASheTV-d4YI5Qu5dEa~Qz4T{M^&dKA=fv#Y1Nsf~-dYWX;UwY$@-ouX(-^{Bu*6-Qp6H>;H8^;jNVo?)`ByJ@lyBlIbP=2=R8fi%LY&<_!eLrDqkckG7?)vgbx*Ho5mssx%XRpf%m>5V`S9G zkr9?~1#6RoHjpqPa~W&S^Ss6l#S2u8f<8=AgKX-#(W8Oq7QDmi^St{wB3r^lgfU#f z7?~<~W9qtvNxk_z?^+-*FcP5o2oZjYmG2gO-;VQepIxBE7;cuJv#2Y!c6Eo&^G?qa zx{cvR33jp-$!Ms+NL)M5JAR8&G=>`(D0p%^-fBAJshiTaKqj#fN$!kLj-)*H4a%gx z*2)*%58ylFCP2=-fkphQ(gC=5_A@iwdE8l|5_0kyauaF^LZ3aqA5_kr2!00q3 zOZYp0r)V{=0x|D|19l?=4+u)P^0357%xmBse+5lm57-+!BRVDEG!`954&Z43eiep( zHc&p@1i<_#f%1T$R2BwE=KVB)uRvV{@RuzBZf4;AK@;R86ML7h=bgx~e+!gfj)*Xf zG)mwp%!?Dh24-mN8x4G)f8nr?+z#lM8Aci;=04q6G&m8tNzXe4j)zebOL>oIVnuEu#fa{es2XX-b(#?rfx8S@^~?7lP74F| z2#TRJq9vXSknf;EDn>oi!8^Q?I2u4r5~!BBD~%euS2V_G_Mkg8I9%}CQp1L8H^HO3}8APp5ZUgFbbo940Wn`jYNw9R6{W;0|6Kz z21q4J0H~94B0AM3SuJWIQv!_lpzv7B4% z*PvBuLIX)AJOEIO+J@rnB(=i^Vh-FPASBF6lAW4Ptui`vAf~?^eTEDjG14p(BQ|x^ zj2<2wNHX66gMyEQD1!o>?jZB6gdFo^$aPPJ{P#4-fak*BNhI$*0W#a;Aj3^F-cckQ z9SNUNBtxx+nsES^P1;~P`ig^txwUp)cyzZKT4Fvsf^3)?Mn36=E4J;5ZoC=>4@wM$ z7jEprOT2xBC}lda_BYb~c?qMTGHr#O;fCV5kmHY8~`APYmgDS)km z%OXsQ((*@X4G^UjCQ3^~vh$ezrkQ@se+tqf$g(C=bdI->Sni_qJCK;L-uFf720Fn& zqRgm1pv;0$W}yq-zGUJ@{<0&>jy90gC=ot#Nzyn3SwCm0S~eof{s!{duh|!sCD%jX zj)ZbPNK;=I4^LDqNmRk0SSy?Pu-`Js-2rkZgye@wdKcFdKn9@$A~u2}z|D>ttZd|m zpPqzTjU!Jpu)@gGpjHSb2?l?6TagS8E79}APESdhK<0!UDUg71LW(zX;qTdHX2f2; z$%pp(8^R+VzRnMAetSdpqpu!6ab&B`hHa8e7&n$S_UWSi zzg*x0Pn21)NkY&uW4w@BUq8_j2pXPrmJfKG$W-8p2BAq5++WmSVZJXp!}~uVu(?V=Y`yv5m 
z_=qslctAKhp{|B{!Oj}UQeS&}DWqmo=Y7@A_r0=!MDa*1Nj(6D;lo^zfYBq-|JXIp zrH{Y5W5*-KqH-_)-pKcB+B|{qL6bMY$A~aM6-sbrjb^sl&ip0GT~tv)oo8;^x@AjM zsx&}EC`ec6&=86)5)4rnZzkAgG(=UJ5GFz1t#ut$h(t?K!rsz}?QoA}dadF;de&OKLOc7Eh@EJU0$l#zp zggT8l_{`u6c>zKhvjMhR4Qw(#LqdSGpg{vEIxV%{LI_|_Xl&}qwVnwLhJYd)IDn#y z&9r08X--#Idkf27K{@DZNiMV?AZG` z;HMn`JT&m4Et1fEchOU5@z@|nkU#0b%hQ9lNF?xNmdI2QTTliBg1kIEJ=~RqssxDv znYKZTM`a9nLkI3|uCzrw^=87vVY3igJR&fR3=nu%7X|4fR!E7VLy;yLwDTBVDK*rSs39TT5V8>xpL(UD7NNrE-33B%xM0(Z0O~pK zA&`m*sX>_G#1YG^K1}ZyAs0E-%kwy-a~Bh!Va!*{%+gN)6y)Ll3;=CIApcNCjAVwA zAbEQnMpXwudIB^QrDtt-HvrHxAoX~lNriO#hoI~T7SYx~Z;o?NIRI!A0{I0K5aw9W zMrJ_rmyF3l=kZ{3}*bK&j@f}D(L~Gag0*L8d&^j~6odZ^hNOF@CUdlNfE3|$h`ZiAl#+|a=* zHhJUFxqJCxr(~J~W`*9nnNxV{A=P>y_!`u^dD2?YuBV}Kc_YV_Lvr;%$`z<|_3(x+ z(VS#L-2px4u<5|2;7eHJE*Oq}=Mib}zoE><&4U`wzaXOTumsMMAeQJL@U-U`mN>&T znx;R)IGppwBG*2GMe)@``ctnuie*k{j$pXCav1Ez-$Id-Bhe!m{{tYH@^7GscTgx{ zw1FMiB)shilzR5H)pOi7$U$!VnyZ6vkZD}}6<5nSY~atS zzQmt+3O?9n3#V-&hh4+gPCQYhFC>j{o8(Y#5Z8}9Gl)+V`u?wZ`9T5ik`Ac-5C7!oiz zOcg(7{Dd8;`~%c zUsp$^g+4^v$E$x>!pyxbTX?sVtH+HT&TA#ro+pW&T1jTsT(3eWmFbMRPj-%)9wKg~P0EY=M2a>CJ9IC&0?DR>eL;P_#F zE=mQI^zj~&^aPaDO&S^K=0Hk(hK}3y1n)F0dVmKM0CL}<6JCdcS=t~^p&)d^8&EJe zc8HfFg$PZ>f_ZU6yd9O;#BUfWm>=JzAlMe7x4>Frz4GXlhin4%sza|j{O?i+Ut8bc zYwJCHseh5&iSN)9;(=lYdxFCOujLamb|*NT>8gR~qtJ~FPJ9^L*!iqc{oEZC;QpLE z`m1NRJjM6fI*#}{C~|NOh)8)IJYsDgc*H(%Vivki?;+aB-#yE_w3a1>`J?XyKMN;c z1cb(9ef~V}+ESJ{qMvZ`1)OgICv1hrq_6pQEAL*nGcRex0B_=(EpVI|obnYgG%_JQ zd-XcY`knV%*1N4O$Qq{!>*wv_Kn}0~5GPlkenUn?s$=7J#Pe~kF{)9+2Kl=K3Nr*B zaFi0BNEXn4ka@_?V9OAf!2|pGdEr?V@Vq&OCU6_d6>f~Z?ClJ(EjfCEo;<<9eY%$% zc?bAQj+zC^jRIeyE&3Pv4aOj;NcMABFob+nroWj?=2x-&THe0qRw?3TK z20^!D_^aQYsp+UU)fsC$YfRUSS39qmE}JfOTr^!Uo$okjIBPiPaQfuOr5G+M=N`D! z29KlS)0$>@?$D$+-8A1YT{qR6>rAyKXl}l0g6766#;eLpKR(zAr-mp;e0`%0p1#L6 z@T@RtW4d96Hl{i=X=A!-zGA#=f;Q%Bil6>wfJTZpFSSEIX3_>9?(HeNE=Wl2sRWyyHWRKtBu z8f~q|69w$?WU4iT92kA3tHx`_8b$3%(rQaRKFoV3Twzz1T8=bx*iw&A`J3Qr+uQSH ztT7AC6kF=-jg&pMj5S8qXmcGrBWh=;?6GC6YqMis*xw+PgE2d2H!7^S2{U?nq^^`Bg=34tg zmun#9$yjF+8`W64Ay1kfj69>zNV&P@w$npu!(V6g)u0@1C2_`*vED2+a@bsh4-MaX zeo%=U>y2Vh*W~kEE^jtDsOXIKrtAC-?kA$~o3B}{CcJ$uGS*=~7NL>D=4%bN`^9Z z4yDW_Gy&Rf$c(DBG(fscH;s7O z0cql}`SNwUtjJXcg^W!UjY2nPzBP&Ve`ehE`FFviz$nd^a1BP2y|VP zd0|^VQ1MY%NEJNYF3@$+Y~|u4tI0;4vSDmeG&8blZJf5P(z7n$ybPg}3)azueJ+em zaC!!7U?Fa|dnG|8@UeN?z+v-+8k;zR*)OW?s3NohoIoPvDQzU!2wdd^sYGelZXQB6E2pGx+WgPs&%Y2`=*ht21%ilI~Y z9N-i7w1E;D+@1kE^nk)EHlL&5-Dx+ar&FTmia33`C%;|9e%4w?t+kq-tRZ}Gu!|NO@+6hhx3mag;0gE0Z$ZSKo*(xl@F4> z*&z0JNd|)k7tbM~y>&>4XtvcGeRHb-Lc$NmmNhZW5*7wvO-Z8& z0Pz^}xr`3vSuX)tgb0Q@55@}|k0k~hS40%@5zynANP^(5$Sn#Jz8;n!B7nulMx(x|U97%J4Z|oG&)<{*f$9AcTp1(u&6;ZZHRrF{jsj5Y&Qf{dcJP;i; zePa`Aw53+kex$xZs=y<~BwP(}N*X2oh1zCP!$hUWlhIC)@MFyx+62c&-w0<+=@mCV z!y4s?qm333TLV;?^olc2!EYIG$}MNa3T$K2D{BsgV-ioX2eM02FGi^NW}X4>48Qqn zgMpX_91T4jE~Py8?L6E{?yzj{$url_pFMZxtl^BqPhY<*PZwv2VWki~M?Oj4Cp@YcCrwu(n-u!VB~7*2PiC=3@-!PsRoc(b z;X;k-5)1ki*M&S_9m#L9^r3Aknv%y>A{H_GRcl|F*m|CkhY_nuYGmmqXG_=gtPzdB zsNiXyl2K=yT(@S)C`+`}pd4-`amJGIx>-!L)xc?=-KGyE>S!RwTwqQ2Noc7FjmCK~ z0agRY(UDA`ZGK9c94rcDnKqK9ycI|Cm6u8E30(i}HHZe5f`e1gETz5tvo%8)AzQ>; z+qFC7N1khgmJE+f;lxxS zw7G&!>`^9Z4h>ydz$4};snNGL6<8_-j~W#N&n~$`IWJNeE{jzyqzN-UwliB=C9vp~ zWGL;iB3Bt2WW(XcO#)?*q3r6usQF+^5gO65m5gHZWz0}Y-b=D=RGFp49$VHZAwwyp zLFK-IsXt9lVhqY6@k{pPYrRmUYb{<}h+V2DZk*jIlM9$B3k|N>>KOBr-SZ(|8p0W= zg1j!8#nhp-)nud2mWg79g`_8MttT{Wm$EDurTcx|b0JT;!P=On4!i0*OR&;lo3cSx z$#RlxwOv|fz!Awp(hU*>wieu4L+CO9jzMM(Fgb~tIyn!LsXNWLs%(^3VCdM zc8zK+f6PUc`85-OHVdGrAS~9hPivEew5uIv>uT)1K?J;D?l!@OfO;u;3)y^@ 
zVzW264YI`T1qo0xwoD83u-Fg{9M~Sa2`$<;oD|M1AwZ}-iT2wj>(_=nX$1AHEdm(1D`YP_ile8OWoIwaRI{~}q+K?OU4k=W=3TOk z6i6?22<}G$q86RUB! zg|1hEp=l=u(;+&w%l5TLo_!S(bk_C&X>Jw$FWewyIXr7(n#GQ;FaT>xq>L-XWAdCo zB^9$HOzW=D44t)dV8VyRCwM=YpD1%#?t<#VZF6RR@TLzHz8I6WRFKN2XVRADm!tCmE-(y26EXDwoV zbCZOd3Wpk2+Hr$e32PGCE#jr!>fsQ2A{f|?A*;lenBtN`Qb^cYOSD0jI)$|u^FXvu zJ)Q|q@WP^oot2HErpYcTWm+SdA=;yqE`0(J+9qZ@VLyy~?DS17EKRzlR%}GO z5>uqOIY=6;2277qjo1RKYN-NKghFc6;RV>^BNvq(53v{XfJAjz@(z9j<0i`kHd#ci z!t5Q~ct6YXfMNw^4^0iqS}6}G4hZH@m9=6Xu+c!w15G#@8kFayJm6W&g)@)|tU24$ zJYW>r!-qTj=a=qKF+$WH90y8uVS zc`8xDdEK}dknRGcKOjZ-0Z|E3uwO&*ce8VmBwoZ*!tEb2T1p_fvpx+(Gppvs|FIV zDC@)oVCNmtMN(_xHo!Y-Vvu&P!5%=nO&}Z;8j$Mf{W{h=g z>zWc8-HDWqwT3z=4@WjAGMVA_&aOjgtTB)%hUTk;jGuKAaO-xsG1g1l*6P4uk^Ok~ zhF@aFP%mZR>R1-oUK19B8)W%62f8WIzLJ4Tk~Adc#duwDgQjNbjzko4_ZowX8G|hM zCguja6Bx=EgDWZX2Den~(*=>a6{y|+yGmy7+K<0MO)j&6rOS+ZfV2VgUadFT*!B{L9}&y9t?UZDMxlF-i`;1 z@hBH;cOqi})>%Z)sX0Lv#DPF~cCQ%rCf*4}eMOyA>tRDp?mMeGHvtOJ$Zk>`wN^CIjs8VDyOl6s@U|L@)eO7Te`^ z@z$Dvv>4z=vslssb|_NPsfRb#8HDvH)dQpjet1g&CmF!o2@*qZhMjCpv?g4x1Q@~_ zTE)a1q%f@KptTAh;rP2+6gk}h7EZ{2F$#<~ z<$8fj9F~JqY(|+00gq%n@h8RTFv~M==Wo-$EdNFmW%jfZdzZ3rB>%<8WM+v8O3J+v zwdrb4B_dhsP1=cOPYbOwZIAi>>WS*E+gDrBBO+s1NB-Jo)Cht*7s*${zb&pa~4WFH&i8D5Y@@kYkvl6P5Q&ViIMgLBx2Ik@ZgOr*QQflb%Sn~1z;*44#Ja;-6jvJvrn;rpA zxyHcLwDIsHY$BWlJb|0YjVI>_XT!H>$W#r6EL9M^2Ey?x_`~h^gYNhZU!x(fhMBQ2 zZY(zjp1(yb8u*J(-j0BBJYyJt*KP#KreR(z6rQCW%DLU-d|U?kXYG1-h7-rR1o#dJ z2~ZyApfq%L=HD;Y{Vjmwg3^<-7iD-Kho7A97xA}4A6?-2y#pPS9{>2gFaG`ivG*N- zaa2eDt5dn&`$;;Ts%3+%lVsVF!3Mcvz}Q#@k{A+X>ud{T$x#V5NgxCg2%#mEKtcyw zz1s%U37sTBLJ6UV-n*MD-Tr@XukPON-rXKfzVH7XSkm3w{k<}8X5PG+*|&FXJ@(8^ zUMlmSxTsoTUn6xFC6qsXSfzTNFVGu??*FCH}9jr z`rAE!`I5#U%obV=*?Y^?WLbzEC2PrmoL5d;NKR$inPjXLt(Dr;x8xgwD#Ngua#4C z_8jv2Y2C+9{a82W&-C!@_NTAGJ4RkdZl+xRqru-T>ig0^Nf%B2>++)%GSkKv=}%tz z?E?A=Sd?=)JdB%suV5{|2bYf9UhS z-v1g+e|?_4<+1J`c}>&K?tPg)^w1UC-+Ak`CvN@2vzwIf64c2r(0d=f=!bc^SK^HG zytmFQx|g1Ob!b!ef}3C8`{vCzJ&iZPeemp?^s_&(WWJ9BX*9Z{{_qKncPGE!mU--( z`{<(AfAiC?_kT1=KUKE=pY*}Eo_u@n>tB8T{-;wk{kQWQj&M6|x{ObmgJi5{?D-0B zX!g^Y1I4wk?W0RSdhRv<$4|aafB5~T$0yKb`oqUQJV5*Reuy`6zu15EuYT4zL!K%7 zf`IdW_WD8p+xI*P@FmkP`f?v#{^e(r^uCY3rVsw9XwFXh;Ah_)`26F&^r2@izVoGz zKG^^D{sSM}SyybwfiqtNpRw{eKdpSM?P419c+%^5|8f>y@kwY~I&trc85ix_`#$q_ z;fc@x{>Pi1|N6r(r~dhmiOKPy{wscR)(H)3Yc=~xC5s>S)AHBPs=ej2e_n%wtJHXD zD*g98qfbrI`}a@LPXFiczwz?BUw`(}uJf*Y@wIo}d;7)vet-3SPh4K7!0}6z&a|fc z)rqgR*-w3S;$&koS^vwL2B z^VTb_`bGP{{@T0e)!k3N^~%uch67{*T<5&~!a1kx_=u+8di3ilx@PaCCtp25AAIxe zgL|)Z<(~c1`As(r?jC*S@$)LHoM%2yA3V6@ya)Ur9vqmn@YSb&d;3>kzcO;=wq?2l zI2cH)n)QQ6|FY-VgLp&w2QTkKUH+R2YoFTxc*{>78JaDVX$@H_OI}gI;uA`7z?8S2 zroVaNZ|@)Y^3%JvUijq8&)jp%_2-{HH%}uY<8zx^p8We~Uwt!4ADEb$*gy5vhkI%I zzL{Ca|MDceW!5w~-Zv^&X8I|+Vo~oaul)Og%WnMr#1r)*eJSYD!W%${Z~uoAA2xqtm0A>*4* ze=0Tnv8zKxdshbBH4zV+29`pZwD7+&tsJb`PrV;-XEPo8;~KD2ND zKkt8jlD>P}_4i$KYVPB>W?AzBz3-|uH|+CI(0;t7g1*#Va!mfCl(KNn`TwFnsHc`a zJ4t6ve)ZzIjE5=Z&mQ`CV(JqgRea|Zo%Q7-e>&ay5S4NLJAOKA;yRP358eLq)}x&o z*+Z0a*_H3oQ}jax^56UEjF(Qkk5XnqTtD0Y?hk8jfhfH3ll+_qD5LE7zf8UH+P(*G z{R@=ZyH8(roaRBwm_2j*{*RvcbmGN(Uz_~v-ESQjD?=fb|9hB2G=1$6Cp`Yw$KTn1 z*K`!h>hC{9ANuUG8>d??ef`T%#!dj-AUh2T`P1pShAQ(1~5HedbZ1fBE2c=rvf zisd+F?nmFySs(rJw+oGa92H;Ca`o*W)9()q9-{s93qI`^luUi%<2cgn8aHk|6$kIV zyIDt3N-EQ&$^X^2^jmLYw1;+hGI8Ug_a|`U75Tv%r@p$J!ko%;@~f_S_KEx6Ll>T2 zn)xM_;k^sTrd`h5hF4tty9x88%UpfiwzqgEOm*!tDy&0BZ%H!VK@);n%L8hRx|b=@Q_ zd!ws*lFFHN`ehHleMM0KN_~Wsd|l(8QOO6({FJVCH~rSTH2vna*Z=gFkDX}%vb=wi zruvakZ~F1(;$VB9SaA}=s)IOD?t`-d@?7@b#pgpS96Wea?NNEz0C~p&x(T2Rxm*?n zlFfY*nk?hvp8%y@a(=_cE5}};>BpDL$#IeybM8hr?#cQ*FeuW9tm1xh|0mm^0c6Id 
zJx|@deDyPv-|RU#=PN2BzZb0f$qgsZH{f_XLI$Q=^C4&%A9Y}9s`@+qTtmsP-<^8z zH=3^~S>{g`d(kIEl|W zf*ba24c_^Qm7D^qQcka%a|^xijd!o`yhmlMeF2mItI!?bp>L_2AG|U3=||^lzojzm z%T^lq;Yg0ZV4C)A3*Ci!zNO@q+duy5+uN&fUa7q3>SwNQJ*vzO_MCCuhq(96AF=LL zOuzWg&+h%~5;e4OhV3@U(qlJYI5|hQYCD$8j9s(YHy?Sy@W5X_A1RaJX8TPz+i+@I zm@7%kikx|Wcy{+gyH8i(2FD$^;n8_fH+)0MmB;qH`1c2{aNiL523OsV}MQ6Ye?m*{}1z zp)#wEYq)6QVc9QEF99#sPd=P623mbVWgU0VTbC9;O5v%LMfiU)?Bb=t|CcbIPr%zV z0{zW*ONUk{WKe5+s(7)tv^1t<`Dl&!G13-WNDP$HG6LX{W{TEE7wGnyUl zf`USiHCyEbT~u zyP2sm7&I!aUaQZab;PvNveE*B-k#?$yU;1ME!XL=*-To!;OrDz?BwMWP9v#Bqtocz zvr6zfp%S-QXRtYPJ;nJ(oyl)?nynhSP%Q_nY8!}IkZT56lxn}uYcH}FmX{Ug+jUBv zBiG@`cUyE?gVAEuDugRJ#^wO8%LRZ4rA+oZ#*Pt0ni zP$iSLN$UceE5E>{Q;;L2Y%Zrh$KcAxixcvk?y^!#c8+aYo)xdTuvL}l4I8k`Sq)aGR9%e>R_@(c6z`r;#9W~0Gm)f+9Y{6dUZD_ob(U@>ddMu$zO zQDx)&FaxPNTj?s#Hy9jtqam-zZqr-c`Fe||#0}AN==JEn*$}NRrm`0+b$Tt{bYZu; zJo)IlQm)iNHq|a`l3$!<64jb=Txk$#_I zDsyqR5_jA4^6hrFn-GD#J4dTh>P!YQAzHZtC1#fuQ)tv`9WI^5p6@j2R0@U8;jp5} zV0YP#`!rM88glZfR&TNDaWb%BN?olrJM4P3-fYqv%vQZzj_w(BN=Lp0x1naW14>Cp znlqbiR=rAZw%M%v%u~9>IJZ}a!+X^#mBDTx-7z}cxh9j@WI#<8odWOWA$=@3q5#uu zHYgRAT$9Rdv^p$$y)D>tyN@g+Z|AYvKsK!!v*d zdKFWZ#pW)vWy^Ish?T==G@D$7&QMj>B{sW72L)(xdGJo6(t=!v)ub<+k*hcH^~&xn zgf*6<)9amic2s3_6^E*#Z7#dbVz$^J%g~HPo7!BY14}HZ3d%SuFo$nEy4Sj92Qe7wM6Q&%_yIdFVEE4?eNr0 z$3e6598GR{S;=Jak)^p{O%C0~YnooD&&-mm4Ebely(7xLSr;(6$%`Mr*}`ARytWcH>PtQmBuPWG8|q_4&pAxJ0<8D7Mua@ zf{1FASy?KbHb-UgFjaa(Rp#JOXq*%6KC%S5(H5Bno55f*IEx%cty*if<>q<(IM2>s ziW;YtmKNrkjYdUgj^1X{nDdIjE0e{+8nsI1MMOqp0S-H#=5`d8+jE$C1v6owdBwR_ zv(fBymP{*~D78(WF{2nwXmg8n*-Des>Bs}SklGZ1{%Le&-uIPqC}sa z0}Vv)#PMp?74o|q*Lbbl>MdDSXNx%E^xc-4u{2PHLDax#{s9zUVux6 zSw|c(&65kYsF0i8xn_%NzayidHb8QB*3%X5X%v89i-(O1SYV&Mbqa8Mj)2-J!+!G#IL1EbwrG6+^tYtQv zy|9oh#;&}{Tp1?(iBejvQ)yIL3Z20Wg`~{ZIuE&I4)-)*6q!nSX3mrVHA(3RoLeWLVGrYbuJwWusMaNh(9^H3;LWvZ|e8=YpQ#&*z| z>B=hx^(Is*oyn+mzT4w3pfIv;Riq3Swb`CuHr-$D2hRn{n8Lim;z`UtDT8@q zG}-fW%{B+&TX)%COR?9f#d({vnV3L5A_5B$8CZbb&+)-s&579GkloHQ{48hoJSX9e zgR7a{&Pwcbjz$b%77jFbAa3A7^nmPg$k~_VsB=5>6Z!4JcPFlpJrCLOn3x?8+4WfW zcTe?ZbniISJJ~1e9UAqI(4&fx!NH-y{(-^XL)}!*R4?7FAKEoGG&J0^V_>)+fF#=_y z0?=#{a2rTEI65}aedqQaJ;UQeeLdaXJ*az9h`dyfzgI!<{Ubp72k1d-|G?1b*idix zj_%&U(b2vgJJ64z0f8F!b^CjZd%AZp_@TjpfuY{sf#K27k)eU^UXX6IZ~L9S!=@Y2Jzaz6}aI6{k{D?-Lbq%?@(;-8Qn9~+tbtQADq{_y$@s<-np~? 
z&fd{suyb!WC^ItH(~DjXkBklP=zcN`XXLK2fj)5G&`^JG-_XeL(AaqY zo!!I3BN!?OJu=$gJ-BnHe=K)&sApgp-@!J#b)jD)yT{Ro!O4MIOb$e0bhxj3V0d_R z_g&*-W22)3ePg>(!!TwLb3ZsbKDeWMBt{zP9ZR?O4Da4Oh8t1Y0Hk!FueWz-Y^VoJ zHZ;C_bZl&RpucZu2&^-Ln}$*0@EC|WI)eTV^`Oa+WYgWY9X*4)cJD&-!y`kWYX9J1 zf6u_k;M9&I21bX#I;g9=yLZQqJ`f9(92y+iH9pwehnj}_`#_KZd?ZR@s=K&%czoAR zMsWH-RNURyOQi2kFjMdLZghFLyW8JS_4x<%qhmXFk{<{t#yL9RAIupV=s}eAYh{dJ9Z53xn~3uh-vJ@5XbM@4W1i=uwZJ?TQXHl$M*G4 z_3HXZ$3gRclnoE{^$$;tmOuvqHoS9upm$`?o?(0hpZ7yx&iAE#s_t#BEt#xs4r(= z2y;0&?1zSi*201?(C6=^xm^m>)Z5)ZzH9g0J9%oN2h8Z>82U7drXXyX2vl{bSKU7x zJ+|nI?uxBwVmy=d)jB#1PP=>8P~XU1ckLPlrw;{d>lsL@HfooDcd)WOyGMq`c8w1Y z4x;;*YjllWSvf8;o58e@W&gj!|z+>sPW$H zQvaw9+GKQibSJ|ky#qu35vDdJL&m;)u5}k3ETK#35_N9f=pHiM;XQ4(y(Rn6lc}++ z@xhVZJN?6%gAt^HAfITioLhLX$WUBbT3qBQJKB5Yvk%?3s>bWti?oUZr3NU9T~Lrj zBaT7phhc&+&B}L-J@HV>46FMaY=27fto1+o-f5+F54JymU_wTqocf{hhF}a#jb;q@ zPxZ*Vci-62RqQJJqBN&053a6a>|p3o|CnlI01F_v4BF<7QA=!QB zIk9~hR1c2~^pB4Z^k5a}j}&W&-H6fm5yN3*Vpw+nd5$Mar-synSPzDV`?eDuI}xk} z#i*sbZ)CWye`I_V#rJR3kh@hVhUx{^4-BGEfYbdXO6Nk7q)Y4^9ka4c>Q0 zsfQG2LUWD#2Q$&a=)_3oo*Rlu@sO&2=iTGXy45!{0kREUSC{{E>7jAW=T$Q(hD z_iUfqk=eT=RE|X#rZE5k-$%;79?ZrXi{4FzI`{3M0z<)y4khg$&DuF$V54 z*zv9^3wc9&o+%^83Zh_}59do_`HfCJJZVC;?1%f<>2lfOCRSwS_*Gs@fyG@~ z>~@)z*(wV>YR+7v3NIr!;5A#qbu6%;4)|u&Iax}r!3-Y(!h(3mo&gRieNK+0Bv-G2 zt3xl(7OLZrc?vNJJN(Wn@P)>NxCLslO*zXQHux?LGMT9uo@qGdnMw@2%<9isB3HqK zfLG0%98NkgWZhg6-tH9?zB0Jrj;17a&$)0B_k?;Z`p29@zms}mdJ9H@PWelq(Zm? zZ%spp0Z}uXGLuq!O5j#2Db!?H3lTlg84NjiUpt0i)W~CnkyIsz+fSv!8}yVqE$YJC zQxG83DrJey+{L-Ml+F-Gm z_8BJCw9NSDvR*sr8!dETdhQqeB?l zpwb|Ws8K^a&HF5VIHxUoNCzZZsWREMY7ivPAXoYgM|kqPNP&AP?dR!#jIE8tVV;mgJjKNb$(l`CO~EvWM$cE3+3KEdmdU`)q&o;j z+1#P3Xp@VCsTd*7L*O|dL6vNk-c?jmXwhr^>T>P~O_uy=CK>$Lv2`8ibhylNxz(NP zEG{l^nlxH@rrK3lb7Y|d zjdSQOQWI2tS%DhwLn01nmpU^WISS4ac&LvoPNpKiMQ%pCE4LUCnX=Mik6oTYWhyi} z1W@EzGPMhDOP%!0E#g+`mdlMo6tkF(HoW(giO7`s3&DX99XRc?@!sTYV?O3}(la|) zV`fGsc>!mRtGvi!wV3k4b6`TSS8a8}{|EuWreEhsbLl~I^gJKj@jHaanX2&h_Y#pNU*gSd$s8cAt) zt7X}Gyy(=K?=B#|Bo~^XlAb18uQlZ=xlnkt6vbc*T3$w82DV_EiUcBTl|DcqntQlC& z^jN77Db?ms*>ZVyc9z34<<2Z9DKAGrMybe>n;aH1@+}Nng$(KC4!?cUCbJeyLqMk3 zL*7GcBN2F=NuQIgvmUU?EN*h?i1O*B`3{>wm7T43+Vw`e?SNJ0E`h}=FUThmT7$w* z12} z45qyN$vj!EhXhb;I)_m%H(N9cg*;1Xbr(#yGf2#Zwrd?`jn3vY$`x9f+*NuIjgWAT zmPAC%xmL^zi8@c9A`*%5t8~hoEEN{(`~n-+H>+dPF0x4|K4|O*^JRI2$P?Nu4c0T8KR45^ zbvh3s=z-A6gbK@DrrMTQ;BlL63YTD=r1AkwP7qM6X9AXR08>gAmO(*~os-nGq>6SthMy0)i2mdL|F8p4sIrqK={| ze-TYlMaZBmLS`#IPch%8sEnfE$3wwekjh*{Appb_XHeBhlC3U6=0BB*-}shMU4+DF z6jf7HHI2{AWxN&`*W~0*3O`6I#FdO{YC7&K!fm9OTnK&(l+o4IbhrpD=t7r+w@wvh z2kw{Q!c-C9fq#)Ifj))CKc;^2+fO+{mEf2crgEk-(l#>~CFwc7;MycD58XReq-5@8 zdg>>HqH}P|DYzBS9E7?-C}+Se)%0Tg&s2nO0cwC?s5M9pKV=Hmj6PK3LUj>(!6FFh zIQk7tfGoI=d=7jecasro16N65paAqB6*41nGZTO=1}~GFnEnSU0V&8ZG{F`EH<4il zZ^Kxo2sbeIA-SFTM1BWKC&2?5p^M~i@GF>ua|pK#Dp_`KDfj>H^e&2`Y8NhFv8u7N z6Cat+YqoY)Q5nx(?x|SZw!Uk#ueEcvucNK2eZ8;4bM(=kiluF>oejQD=5yrATGn^w zOG9V7uW|F5#*LneMQv>@Yqo9i5v-?Tc@rva-q7r8XYMZE*B zJ9Tc5A)JJK3T0w0?Z<^6s6SWGT+IafFwCepXTdhZHZPYr9bve~a{e&UFQk1k(oftE> zy)#JD6)W1#YCfTBbMvCMCf}O2MJUmZUwdGhnF>}kZrRe@y0OC( zoL(|b{u+M`?p@?z=*&=0~&DRrpExnfKZjZy`;3@VhynJpzySc%gqGU-4r9VxD4TTqya9 z=hFd(U=N2rgFb`jZjaOB*+ux z)vVc}e@Oq3xtm?=t@j4_n^U~V8{ltF@e=P6zDkyPm+=)Z_b%rvUhiGcSA3!OLcZc( zd4I)Me6japzTzvqSMU|z#qW6E;VXX6`yOBM2i_0(ia+;$&R4wGyN|DU((C6d&h}>W6w?`W22(tn&1?m9 z0bj9;&Si>gSa*95c@Oavdkmg{Qexjt>*#`7n1N~+f zd#s*-NY%3L_LO?cn7Y};bQYb(6xXrtrtY8ue8MiK-=hPQ#wpIG1B9()R`;+rrEaHg zXDa8Gk5D6gF@zqoPA9Xzy$B4h>)^u+Z|$!22C{H%dkqtF2gz%xlF87vCEhak;}xG6}t?h zR_rpAy4Yo`5Mr0Hu83WR(Hp#6zo8R5NH$Gj=ZRdMFM9P1v8%+e6t0=DvG7%5eTrO# 
z5h`*Owy4NeSd$`GVVR0t#cC>im8|2!SIKfNe3h)~!dJ=S9=san`zpvqXgbN~u#C8p zxEJP2qV*D+CeeF|p@Vw^>V{D>jF>NO>};%WZ*SZd@GER-Z13;|z6bWo%Uhd#Td_y3 zSkmGnPL$9ddKHverKbki+kt;f4~PvCsJe=`>M9_j zt6mtc+B?hZf&RfirebEL4OC3(B;CTYB!VS)@*|9{7D2~rd3Fj|HZeIP4ic10%*dGM zinPocGh;(>DOM|jjyLAI6tJ*7rB8FE7>}^V%*!RPGE-NAu+G!74vG2m`5}SnLcsGtM;&`fQ=>iN=D4XJqpQ6wV>K!8jT}q)gCq# zVw)wkFmlQxY1JOKq{X5IJIxleB!C_*4ZgBEBYOuq#!NB>^w9DK0Zx(@EoGTkm4a~ z4Tq@7t(P@|HbTg3A%B^HMVT%k4N}8VJ;}B$s{Vt#D%>`EPB?8x^gnOE+4B*>CN^re z=1UgM)_f6kB3ie`!&ZOEqB%M+X}FlB5u%;&jEP$saVH$^B-9-?vy<+8YV5|*0ZF$m z#BzyEdu3ug7hdE!x?>Kn?wBL4J0dc~V>iwbm;E3%+@Fxjn1i^k$RrBGM5TQ$aQG48 zyg3~8&z?In0b;hw9FDERMu&_uo{@@*s_;m~WmE(wDx4xXwsn*6@WjPYq&oP}Ja`%PV>!$ORY_5w^GXymDpy*maAHhsKlZLJI$7pB!CA)`7_&*@6*^WR?Pm70((3Mr2DyLTHX1SI5iabt&+8om3vLONqzpQsVJC zsXSiC*+0e3fq-y;#B3LBGDwL$jy5IoIFJ%~9LN-S9O%?}9E9d`bfB1R;Id~RWQRle zcr-b{5=jZ97(wD~dI4uIY3yDTMJ}ey0cX1lV&MV|3#3GK4kZ3XZ_<)!l(WZA3Mzey zq_R61tJw6Cxe?Ln1pXZ`yg*n{IIhTaD~`d9s8C9rR~er&Vb0}Q?$CT(((vIq3m({s zR~c{vqijBlqYSDfD}$=kltEQ$Q&J^a8B|GD1|YOlWdNwel@BA)=tqdaD zrbJ>V1kct6iQ%~{&mSWMK~h-%Bu5F+ESn}ANsFh2Qi~I!b*bxLwPcnDp{24spi)^L zP^s%5pi)^LQ1IsYJyV}S8&7(d2V2p-S>EE5YrY7@7$Iq-XJYKg4HYJjPl+A5LBmM%p_PsNWrT^x zl?~FxGhD>jaZX%aE{>MUj+pS2*b#Uk#T=RRSa#;JBY)5gF>oPv1T_Nxn3@7&#;ue; zK2RwfAP}rH)JVmuc3+ckLvyRIi98q0VFT8a%Av<{lXy&_I{laeW+0qAPM*ER#F9jU zaH4ti1tT^kWK8 zXspLNLiUA_ZNYT|veEIV&5jBrhvk?RNrJ_Agf(WK01jMhY{MkUw%I$)B*AQxCP^^c z2u%_!wACc!iZ{`MDTC4A@Q#FIE%Eyx;qDO$42gG0vIjCX2V`pgN6GF-$=*khINnxL zAmRrkTSQ`aEW${(d?bTp3r8|Ywq!(*VmpOU9%+o@F)=uY%lRFTaPdgy^3U;zC_K8xM+?JOtx{+Cn7_m7p5CWk0#wZTbU*eXKU1?;cT^Cg7P!POK>L z_C%#gq&-mV)5)v>W7U6jPlbdJ3fNV6w3$~n~sPT_E6;b1lCkTfV zIHTcla-^R(!1=CF8-!BiM_{`O5=-1711WKf3?y3(M)pSWd&^#el0mXnU@Q_s&6u$~ zs}Oq37LA0^97`_!yaDviS_EU+o1s`z3b1PWd4o98k$&DFcpOy7aDi$Q6Z&|YSHRh8 zS}d!Q84(IC04|Z!fn;y_V;dHb5g_poJqkbBpA=N0LMN?C)aV2%QKb{8*z}UQ5z*-c zp1M-U){Qr(KK;DGR$?FU(gyJ-5f+WIt$EBk)G3wI($5=IB(4l%VuVRSrB9J$Wq=N| zl|f|Rlt>H&e8tfQiQzdm9@HQ&%cm0TL!DCTnOe9H4X3pJNe=j-VX5n%^pGDKmAauJ zQ0Y@7ndQ-{5X*-wMX~h{aQ6Ba;dl`^m*M%tei%~tt15_%* z11i&D5mRL8=M8wyG6bimz>eG)VMHHA z1O-t8A%7Vf7n1Qr@oYhlw9oA7;sh+vd`gB(`gsE;IU+*z$fU*2Iinq!Ux+kvviLc! 
z)Ks(htI}Eg(1YnL{y;u?=v4eDilkpQ5X~Rymkos92q5646Lb%UT*Krqv&co|gmKJ4 zekT{#$Hm@TVU|J$4Udm2oyE_Mw}n;DBUvcmyeCM~8xzfU|i#F*t|I`Q37GfJo-@bT&S$1wQ9K*YCqoXmR$c z>1=$$_z{|kSzaL#;ht`hESepbkD!?~B<_5%LleoOIXWS8bZAS&lddi*a6vPd2PQ6QA~;bI z6TykfmIzK%s6=q0(jysd^me9m(C3l#S03p>$S#;FW^utorC>pR;}A+2iB6 zi$nxPFt$nI-WlahcTs~tYWq3fP8hn6CWq3fPGCZI{44+8cnJ}dg$BW^)EYBY#Mb}bb`E+)Cbl4@9{dp|$ z@Y0>mj!$RDFNk(fLCRAyT)1((Lo7NxZvMvEo48oGc-PtcxMa~O zaSLZRmu%ZYd%4hL^NgFbm6MVwPip6>IkX!|r^+XEDj%Fr65SR+vKci4mlj%OS!|9FIj}a1NXE`{>{e5edV@Ve@pZd^%U2wdbzkgxcB4911g} zGozj}3~_l4(k`RjKM`!mW*~o=m7f>Q-s~rfW(VmbXhs6kx$?qf=V;SpDaUaGCkH=jJ-=RTRyejw5-qY#l_Y>{OC<@Q7$=0sC@F1T0L>96DS7kJHAkfv zhgO!koNN&RY6xXTCqzmiuoyW*>za^qJ)EiYaY>etbP0_*F%c6)vrYRXr%QA}iaTu4 z^(a1CY`Wv4#a1H$G)H=*q|is#%;Q@~pD>@$UKZKm68Ew|vKc$Jz0)f$=naT}mO!bP~O|c>Ets zqL0JoP*D;U29RQ6ACJ)iXYbG1auU*WLYoA@*&Fm&xPW{D$+l#YK>~~(rO%jW)*unB z4mt^XC__`r1LxQ-=`8xF1(Qn~Nm=yqv_U$JK5=b;)NrZV08ohr9&l1EJOY(y;t?oY z*+;fuiNq@iUT7;LhUfA;e_##-Ng-qbwA2+awPUQ%a7rs+I*&g7TfPWuNacAzg?L_M z2@AQHNXtSjF9PRsJb#Q1V@ieN(~0znYk5#ws+I>-s+I>-s!{?}D#rsV#PMQUI?VFG z3$eTyp3C!+66xbvHt9@y@r?_QH6EVVbS8cBne_4aksHyAXrzd!greI~yMXYVa3g!k zc0LPVqE41CI$5V$gyLJu+zwS$PkU zB6uOyEQV+FBl8Pg;v6*3Nqpc>rT;$FNS#e}QJbkYsue%S;d?7en9_9eJZ3wcJfBXU zPbbf>Yw^_HN0d z*)~G5Xtr6GELv#)CL&=R+2!}qrR1WM?4e7|L6@3;PO^JW@+lXfaXiCTASMQ+L{9~f z5*-ylvKc*g4x72cT%H)5!{+>sIe0)=OH`z45djKyES)@`PM%LE z&w~fpyLaNGu42__9^OK{R~wjxa$&eo<$!`Uh|X*gT2CJkrn)x_W& zy_!y*=XloGR17U6LMA)cu@;Nw_ib@cr$`(pDsH5^LJg*q=R?Fn+B1)vGtQawVbZeE z9C1n~&odi1^7wo@c|OiwmiIvkj(IPj7X%ac#nz)}4(4xLY&nXL7F&%3&>ZPWC(j37 z*^oYfKImW+*pVWMoz3X6`#xbLo5^F5Ay*U65|K`xXYH5L$@B3uqL2d5-eSZuoB*q) zljj*`Lku>w*M#XSk(NM;iG93HF5oGJ-_yzS@dX7mma5bVRBU>qjSn8JPT*|YC00gx z76p#&l1`qFA}yCT`~gOirYzlk3$#>ikUoPxeFl9X3LC|<;Z-J`JRf}ZOZ3v4T5t|F zkkrS=B!}nFCNw)7cM={S1C;Goj$IK1tTxiBRJ8=C#BB{}mAwKAI}Z3*9&q*wm=K(+ z<@sZD7*i@*KAk+DIPZbdQaK(_sT>ceRE`H!D#rsV#PNwlr3qIGvAh_b%k$~v`PjT& z?rXDS8JJk)LLn7QYrM6B#~Kf>-rR6zY~$kSxDv#``35a0Kw|t9)o(#j5W{o%kw2aX zR!>C-r<3Q!%?L4$;zs(C<44R}JdG~Ek3b43bTNJk^CR#=s#y%r@FVjJA|fSuei^k5 zDf63=EZ<3WAc;PmOdpjkuLvJBzRzcv$<~Om zMzt`K&F9G=*}5JcwpvXZE^0amOdLn8^1Iu(-ZoCpc&(CT596xk9DP7ie7UDmup1>7SlD3YNjMeHoewQOJR?oSKIegPtPOT&MVb?} z%EfSO8zu4f*cM9SIJSL~I8KxWBFxGYI0U1M3K1`uBSKPA=0j8$%7adXf^CP$#=k<4p{=AzjU_@z!KF3kYYlgPLgN6+#Yiz zk=e=gN}nQ*-4d&q1(p%gs@U`fEf*o3PT*|2CAMXrC4pnOq?6>MmP;;eBqhmjZcpJoG8eEp=kG+v>X31d*fTgm0YR6Zh;glW{OQ*;u@Q@fd zL*kZ(FjXq+UrZP-!ttRETmtJKe;5v9N`c|i8S=5Fh>#HkLQCA-kU5fS@Bu1SCjlyz z;QGIMOlG6$D$)7GChaI^wyNEuD2nxLa zf_p1&@Gg10kf1ZWx;TLcNO?+z3pb7z>AlFp7}hfB{P+#ct-hu;jT>RNvZ9v!hs(#y zkN*MXOCL6mG>mk9yw6kar#;T_BLnptI(_ZzCk8zEVpqLN(W`3Ft6I^kdeN&!(W_Rm ztF%t+s#hjxhM5bRwIW1T@_A7(51)1=ac^$o_j(T{ey_*C+{>6_6>A#P z`SIjzEslyMNi19!S;Wc>xI8P4CUbH#bIE!{VKc1Uv7cw>&D~oroIQusa z!6UgU{9yJDE=h1IY{J>aCE2pj7A_Iyu4LPV{4IxV7S6_x-?x{FYfrLgFEz(rYJNS* zZav8dUx36CeH=haba4R5mXFv?fPhN|NVae!gJf&a2r{zm65e&8VYX}}2Ip`&ze5kM z6Uh$jbY6WruRfhu4;~OrN{nM3rSt03dG!IqB;;t~`sm^qGZ7ejbxIh>X=4HezH47jgy(0;O(|(|PR5BexIv#|U%J6_nWq3fPGCZJC86Hp}hEGVx124q!Vt6jgr}Ow@-ba=`=$_cl z5vdhpr?8cc(5#Roh~c^H$ba${I6Vao%#9K9jSPF3bRIuDQJwg3LHKy~y4b3WX^W@N z8F)C^EX+Et1KC^2Kr&dR0Q+rjhSAZc&dpMsyowv`t#jL)) zy>S~OCJkE}+dF)L?}3A3mbW(fwqo()bwmqph(nuU@|RiGqE30_thoGcEjSQZE^hW_ z409OL`n*ZiY|YKahIV62w|LQPTbzxK*D_n@Cx;cX#+j{W93N*oZ=ai05PzOI3w&~u z#o4hX2~LGg1b1x=pTv<2!7W=tV741QGUk9(Fd`YYq&Q;E@5+PMgeB$?%Q$|2Bs=m_ z^W&xF#*^&DlkCI;iQ}2T0^SfH*@~1sJ7k80k!&TJ43aGy$spMZG=faTV2|63wrE=^x`wvB4|PVpAecIzu@#c?tHP`0Lh{`IxuN?YP`nL zqe-{UR;EeAQ)4%Adn4|=iCY};;fe8_klDfS&f|LXI24V@5YPUA>&)ZFhWir|9`0Ej 
z3ByFCJ)Q8MPWTURg*pDgz?&4r#V^9?{9r-hV}POte>&eko$ue+*%FLtS)XCRlt>lGbBOuv(>{zZ5Zj-&^Nd_sA$J6Ql!Qgvn!-8oS6Z(*w zgd5$7g0qds*f}H`hk^500cf4=E{KH-Fb|MyS3xpJsk{!HRAwhoiQG=064{+V#ip0c zjfhSsaJDNcmdZSEj<*z<@qC^=6pM8&F6X7w{RNIB1B*)KwDht4QTb?b%q5UnvN8Zv z`V>i4256NnaS`uSBC!)PV74|$49`^u{4qihgzbH0tD3+DNhq2RuvB`c7A8c)DXo8! z1BGZ<>iQ==R7mDXqOnJy(x*r=%cE5xmJf+H&jJ>jyF&LZ%`JEpc;0=13~T11go_0hMa-0V>4s3F&y?g*aXe&t-Z3U@p3r0?Tv5u@MG{ z^bCO}W{u!+P@r)kq9%%GQ#R5*VZONbYC7GYsMQG3B19LRGeJoZ!*kh@KRyUfPk|k| z5kkI^iLoO$RG2(IC3Z|7+b@9~!7%Z4NGv;Z*^xhJhI6-s*fC)JGL}j>SCW_63qI*ouXovG)LX`goI(Bh$$Cj4HZ6`K&bm9RHLd)=z3f_<>G+e?Ysj%*0 zDr1!}L3maPqr_PyH8C~Mi7A;&N|+&3oY36b(ROxMgRhg-!O+_>!`&looYc6PIr5%5 ziE6_u5PZ|4(uPiIYv^oj?<6PUqxht@C9O@t&nNhNTY?{tYi!-v+`2LNZB@Ik$rpY% z14EI(%(2G9YIqg32~Bt6r-S*lg*t(1MprseayHdP9Y-}%?NpOM$EZT;^y$=*)GTTq zbrf|Bbq4iw>UNLC8aDZSoyqIdc4{+}R}1B# zR#VHUg%E+IzQ)e3c3%ghn3uM-c4C>TAb%U0&-G!`1&g|3X=B&cg>5ZOo(+vH9lpT# z<(-W!&Fe!Y^IrW zEM?5lPHf&7Bw%3NSdQ1X`$z+g?O2pU7lV{rgf98Ec6K$k#9XRx!N6J@JAE-%R<<-T za~>l0QbGhY&=D$F*$%>EYC_)|&S~t}6#BBJxf27~82XAHwE7tM(zf=^5eS$H7DV&< za5uu2R-#v%eXX70f~CzH!7+g@fy}Gg+qSe}Dly`g6+TdOefZ838e1Df4F?z|d}(#p zx^1CLp-EcY+#cu}vV|+wbacTGU}(%VwYsadzNL9%E9nZkej=9nuEveH#Zz&7O4{5$J5NbY=UNO^vNF#m9Gn@*5xv3}3=1U9;%>r)*i= zc1~-3(^p1F@rk|-ovWKSZt7%TA;tA)H+yC>k`=fi{Pp8?30S?QD=Xg#0e+UfAM;*+-@!rdH+>-i|~jf(&kH3q})=%RrBqf{+#? zH#E1yY!2K(04smuYhTq)h8?^q0E;PR7$?9avH?}u! z*>reT3pU$gd<-?oAjwmVpkYvWVX;RSz-W*Dif16#`-0Ex1^}cW@RTm|eKGV4qtd~7 z(d$g*ETRPhiw?t?=mEKY+_o*7d_<83=QaGj>9n@?CT3ZK8v^D@-~!Pl(DI#;Z%~mP zzV%(5%|8j=!mK7gXlwO(s^^e-4A+QmHIsGlTwfEXF=#`yu)){vYhCXnq1_6maSV;< z;7$B5*k*P~b`iy!1AAOH)y+8a9>H$qc}$Qj(XVST|(1k{D~veDPEsd+`) zSfje9o*yZZo<`RLv?# zI99;5VO1ABL(!iD?S%I*Axe^QuWWCIR0Itdh&lSQvUOEs=cYBQv6lHzIitRrUl`Hq zwsV$s5w-}(5tM1y=GN#EM%+ScXJl<8-DUI~Xt4!~m{IJrU_71D;cM88HRt56Ht<_ew?#_8y&=C-v zKmu(-3VGV3`a@N4#!20a3#pD1)A=876METsKmH!Qm zR3)I14(n1{C7_WGZ=(Vl>F_oxppgU{{VpueDglio*eKB&WQE{bn8}z&$iVn+L?#(q zrAj~}9ac41_21A)Rs0%B*yI&rq@=bJ{2IwCw4QpwVO+DhnqR?qaT*}+n(U3dkv?pC zSM%!&X8XdomtyoZA44Z&ACi44pV@R+rKnm!bPp>-SM!VR;kDJO1w{Aol9yj}5AV8O zovi477wUL6zt{=ui|?e-|HcwKTR=_^tH{jem(%1KDVf!7_J2rDBd+w2cQ}xDwxX@c z*TOhOh-;hi&o)q}QT50*SV66#PQ<70Q%mrT_+-OQLd10$42kbD2V(}4*t%N7&g$^J zOdN~3JCr4cnphbO><9>$JROq?hpYZ)$E2`^|F4fpF)1g+;r+iWfrj)poiy`*Eomm5 z81w&GV$A=i885_CV6sVq!G7d?v~DCRHcTRJz!(W-h{dxcqQaFVcoStsgd1Vo6HH7y zTA|mG1dvn`A_FOv>5Rx=@&{i=WFQ?S9PNIch2m&m7ZDNIaPDX^H6ZE;a|mfpCK6(*6%b$au)GJ zN2N&4s)NhtJ4%t{TO&w{c>K*Kp&!oz6IMn{N|WSjkmuIEEwYQPZ|!VG;#Ok^o`@xR zE1}Hp#ula-CjBj#!4jO>Q=5I~FpC_M#TH6r2|sefWO|X@F{H>M_pm9vg%;Ue2U7A# z5;T(w#y5I!E%M|;t>IfJQJCbEHTYVPBDM@U^jjjySrtg-Z|&HC93iH+NPWXISyAb8 zfn|FkGOC;Svg$B|MVp#invh$*-q+EAoh#W@F-5QiP!CzXqxuoZ>0^3{)tMytk^8^% zKEQt^+y~_AL9PR&VzT$}E$}*z!_gJ22>(;MB1#p)T_Ku;auov!jjSg`maI6=N7C8@ zmlj8V3Gf$KpuS^0u>)t$omE%m^+IC8R~GvMmnvt~)z#II#WHE`3)f20B}4V_u%Rg3 z8+;p?T>XkrVk2SGuuue^Gzgfj0l!rE62m9UTSNH&IKwdNCNxbc5N}THth(A71Pr1m z$ka`Qyr#xeQ|GCk>#3RTnO*0psq)OOC12-MdS>GcDinruog+^-1XEAR(WZ>lLk|-D z*3}wN(xhJIWC)_SPi^kNlQk@*5}sDRo`tkz;)vy~KS8>8Qy_;okPIAdVQw|XJ_k#G z)Yuy~HR3rCX4laXS~M7)9ME84^$^--1Rf;`JXk@pyfIWx)^JFMSkKCY3ZtHtBC8#f ze9S0yL6RXEzhimpqSc`^Nk*3^C2P!t#QlX(8S}7LBuzPBQUs9imF|>d__y^*;N#Ea1FlVzk?^XG%sC@53-UCb8BYT z)>W>bSyfY4GqbX?scB|obb$k>{;MG}p+`gHjRxMuYsb9UOhBQV|2VSE}QGy4(YP6+qzw_+#T2&?Xt8MGf z(jI+u1)ia9goW8iEMl)`HQe7DvGuNEKCfIvp0*|r&iguW>tg(@UdU{{JJz02vGSy~ zIO+@-d}JQkgJq0w3l>zYT(qnsbPZpLVOX&UZM3v)T(Ce3X8~!OX)o9bJoDU1Zu;Je z6GIMx>gt-h05a6W1q(VDSAbS^^f6oUU>6>x_pI|l5zH_8;@$^~Jk3q>i%zRq;a#z1 zk#AG;ap&Tp@pDgFv;N$(*Vi=_9kW1n^wxP>H*eXD@p`syZfWh9w{?CI?B#X7dH76< zD~deKEuCl2FRCXOJl|Wf%Co4g-G>KiD`&3vB1>rYoLNpWG} 
diff --git a/tribler-mod/Tribler/Player/Build/Mac/icon_sources/default_document.png b/tribler-mod/Tribler/Player/Build/Mac/icon_sources/default_document.png
new file mode 100644
index 0000000000000000000000000000000000000000..7a14bb34910ee8dc7bddc8552acd7904deddb186
GIT binary patch
literal 5282

diff --git a/tribler-mod/Tribler/Player/Build/Mac/icon_sources/default_volumeicon.png b/tribler-mod/Tribler/Player/Build/Mac/icon_sources/default_volumeicon.png
new file mode 100644
index 0000000000000000000000000000000000000000..5ca6d539e49a534ede45dce78852ed586682c5ed
GIT binary patch
literal 5744

diff --git a/tribler-mod/Tribler/Player/Build/Mac/icon_sources/dmgicon.png b/tribler-mod/Tribler/Player/Build/Mac/icon_sources/dmgicon.png
new file mode 100644
index 0000000000000000000000000000000000000000..24b2b222967969772bae035e0abdc93246a1c3b4
GIT binary patch
literal 15026
zml|^<)}|sp7nGYA@dZYvGHJ6!EheO%BB2;c%i)`fK%Tbgq zO2s^(saRkvg9bG*mx)C}BekhoPAEK$1ai61L}qR)H&fav_%b7@kys>ZXXcx9v*imV z<~9N*UuX*6AT?50nShK#mHj@O{O0j(nO>WNcuzMa(+6MyBrWZNp|5o0@j- zVIg3#S*A{oUF^Czn4A4GJ1!?=8mks3qN~VWOrdc^<`zOGo5{86rqtULNnEJy4$YnAsD~&^olccL-7h4lPhs$BI1ze`Y#?e7*Pl&pJ=@@1_KF6rLt(ijf zT7XC_QrJp`JQ1)vIB1kDT_xSRcCl4RjHNWX7_4JqZxCQ75i(32&CL~7j`og@7F`@T z7++*;F0-(-u(h|cFt@OFaA;LF_Y(K8>6IMl zzRl$p0;#Pv?nre|Iv{kjvc+6t+ue%I5W+<%22T)ovF`@Fbb+OVqhphj>DblL5_G&& zwg@B)p&0xSewa%tWztz)khpnXf2nESQL@9LSdyd#pRf7k+Ma> zhmD?ud?tg&mRLF{+u-+o04wKlg<>NnQ*LG=F|}^%a$VVC%9ERzNI62guC{hEahvUR zWwU}SN zc9vqkP-6iIA&1G}i=`$K9z%){O*9Uj#b$H4Op%qj#_T$YV6eGtk+D?Xj^NUHz;0^9 z5t*536oo)S2-!x^XmY98#9m`dVg&H}VKUn;N^?`#`VKHmg284&?`tjvTu5!R(pSubaq!@Wu%F&62hm>3~2uAWEZOHGV) z(n6(9rk3Lzh6v1{km<|-VNS?e5DFSmCO6eN0Lp;?WE+VzG6qT=3;~LiIwj$VGPH&a znT3go4z{BzQfg3!1t!zAXV}P%wXz~*E1)SBGJQMN8nE&rWivR2rCx><8p+ymYM6Ef zzkR}Datt_~90UF)rQU&%F_}V=4qca+Rgtn@Np$7Q6%rHRC9gDS|5_!lt0UavWm59e zDOA2xvK$@ktdWxzC|`h_Ys!qj{lLh@83w35$-TWiA&mECV;G1SsB&T-g6Gayh7t#}g^> z2la`%Bz-!bK~GIfPEFCp>kDSt1W8hu}UZs;tr;$Qd3h>Qj?QXGcr<>;u4b6RPhHB(muw7CE$tT_ykpE zYKAH$^+0@DX1XdZ9#3JUCmeuk{iP~uLSiBw@0yU9a4_X?mMT44m8^>UGahuNrKcvV zG7^w+6BFS-;X`A4CmqU4fJ&sM#3f~BsIs!-;xkmqDT%0{$qB0LjI@l@xTKV{R5*Xo zQr=0~hfrLQ>78bd1Lnh- zsx;6H0#Xta(=!sF7}j9( zQc6l%T4G9C!oj$N^yK)oY|xSzpOBWW!ZRf4Sy^dG$*Rn>IXN>s12ljhRhl}vG07_f(%_CN zRbp~R29!TJDKRZOBO&eBk)(K4x+*y_5gIQgIVC*}RH{;vGqN&NX=&-r8BNK&6je4V zg)04!DizPL23{y!c2+{#(Zj$@%2K5zWWoiJ4i-tmRYVpoJuNk(IlU>#I~iO8{1x2w z5HxaXQsQ4QtC9~N&dN;BOicvqrDkNcGbanv(o<4ZSx^*c4!F$lL?wuNPqQi+Oq!XV z@iwzQU2{sEDoo2hlAfG?>~ID&S6W)KF0pG;%D>=FPd{}^6Vaj~(nRTpj~`1ePvxbEi+9$eA_E)KsHXXAo%u6=H+RCnT%V5PO&GtivZS2b)a` z4E%b=bThNS89}*c&!%UbI08EdejPY7@awMw&CFVrO&P-UOjX7qaKhxoRD)_}_PMX7 z1)3?f3K&Q!SY}^OpPqAO>J+^KHzS}sZB{mC5ZOc~U6q!Zm5#$Mf)u2Yp8FMyb()m8 zf|yL0zeGe?oD5ZB0=O`=7M?*#Pthe(k4!YLRTAbeP_Z)^NvNmE#4|ufhDx76Ju=Fo zSlOJ;%gW43OGWZk=^46oJdJwl3(K3zmULc5c2-9Ak!-k{Qqo$}DZ{L8DAg)0yPhrDVo{|usnx59C!Uh%sj+>g64p&1m)Kb-&hS?MR z&=aKs<|vFTQaP&Z^wi9hq@;wTwlvHe#tCVxU@6rAfo=fJDWCn?UxVbm%)WwCPw9iAqK80hg;PLz9kmg`tCroeIte zZI+tcmX2j03~`v12`va8Ra+*BIK+moD-*7z>?}3D<(BPH9?n-okR#G^m!>jp#hz`bT#YvI8=#348eHXRx+H7_IoOaB%ZW@aWZ)@O=+8mr zF>0aZt$>dN&PFa|(e+LO84SB5oPzOJvgMo%z|1EI@dn5A*Ina<3;mo7Q zQc^$<6yzutP7X9o`Jvw&I|jJqqenwSP0>JuFeO4*r{T&82{AQA0}aBA2%^mdmq!B) z95(3u7efti2$S?C110F4;hUjGKiGN{eGCJQ&mc(R4M7q#&-_i01PwS0K@v3NFa$|Z zAfzKmf`%IC4duH*l7AX%U|K2NRG~29>)8~61Q+ua)^Zw^O0zWMu{b7rxs)k@SOAkH zGT{ojToKR6M8s_7HNC|q5lF-$Imt#d|F>*7Tgu`~L@-RYkx+d$ zn{Q%eX<;d&iY?9I$%4ic(~A5%>F2&t6ZTlf#*J`p#QWo&Q6 z6^K5{NEL}CxYX6yOd+6RY-3}YguykD$xP*z-R#+z(9%dGl8D$$K0LyI*dE?gHcu#) z2&7W@5n{fH%t#<*b9g2SnU$k0mo9*faHRqUo5hux@IGY5XgJ56&tGAQ zYpxNW4G(<+o)qc?>^AfYAMjUtqvy__HTFY5d2*yDCrk$H5akK(%Q%f$i ziwSsx5T4znAWYh9q!#yZ5D0y@?0>;;WF(PjEYwPotq^`xgC{rT!Y^7g6%Mv~8^Vs@bwL8? 
zTn?%{lO^~I0TPLYg~qX2DYmz@l0b7asB{jKCa|!#)7lvLF*%D#7dcv*n)0kVD!}LS zj4ebQ23u@t>F5aYkuDO4E;d$j@JfhA8wo@TD}w-;fJ2dWH8V4{aIkZ9G`CYS;oXo# zF2T*&)^-*OQw6X?e5AX$n;DzVvQdhde7UKq70J!yu`SF^yDIgL0;L0loRmt1Ou-h} zSmBO@gAU5vB9~Y=Di~Cr%u)_b#pT=DIzX}MY;#A)u7=o%IfQQ@Ho}q6_(C3B{1CK) zOPMARh`Q)(sMZ#*gCkr4gTjTUb%m)|$j0a*93uXDTdMW{Y41FsqPo^S&I|)H3^4Q# zg2>RDhz&)JjR1B{jETujuGf^C#1c!49aKb$pkj;07z?5xy$LE!5ETm|ASk_eW?=f9 zbKbWPpr9g%uD9M>%QXxA_9@>!ea`Iv{_o!y3uqiMP%<55XsDy932j!&^2(UDj25m( z(Ls$PW0crthMF2oS&E{bj<$hR3)dtYN3@mT%4IS@?UYm2Gc^?%lZ_+rOjT4=rP4IC z)ZpqvEg=F8MJ;%Y$SE4LN113SiAKRB9YtMD1|6Cr_0)A$RYZnlTZo2&vNnqWr$S@2 z*hB`kk7zPAn5z2POcgbn_Gqy&yrZDjEp;tTV*{3gf>;+OC@4@V3M>^h7M;RE6JX$D z3QdkCPnT8H*O6$+Xu~TDjNwg}lcy=Fsc7klwa^5xcfpfcQ&nC`TT84B?Y&C{W3drA1#VM$4I`7$x|-@_3kj8=FyKsR!FQ92oP+{sJ2r<#S2s|Edqh`NLYGm5 z37QNI17(&b+O<3{ z@B%=-yjT#L@>E4~8m>X8$r$vul4$8EtHE+`w;CE+^tO^17#gUtP}zcfd9jci4d0;_ z7U<H@!a_veuh{H#`Gs?RS-Kti;6z9oD3XE9ce!^;WJxi zj*PpE8~iPn`9kJP_^bdFq8qfkOdz{kIm5pbp`B$MUi zD9n=ikRZ5Zr zh*&I+h?gcvlZa&enm9$8N?eyqxq9(RfvBB;vF1hBj$9l$Mc~f}ZU@03=veyR5mN*X zMT$F7j^JquLQj-Mc}lULvRTOcJiGm z0wH{~CS4rxCW-*7sXG1Z-=6aYjSXd`T;YpBsnNJxeKdh!4oaZF^L`UeK(jOY=sJ_J z8T|IBh@gGhoVE^;NK6hMjYdlD;|lmUl3U-g^$lDBOZ1jnwC z8E313CjbpJd5Z)T zoC+t8N&NQyfC9YRymY?Sgp2;mtrcXI^w~>}{GqAmbTitkq`hU=7?aV)pRI8EenWuI zML{hbS_CaP2e=HlK+bs&gcew4P1dm47S9bcdLi$xtXrx-bzQPx=Oq14H_tO+XiiXd z2yfzp>xo1eoDc&HgJ5FE2cQhFZio7$ueG6jfMQ(R#6GoOe&!ZihwS=3PIK$i2vb9c{I=U7P3!@84>KnSaJTBx#Fa&=N%FEz+4}c-K@Is(-fJgl2 zCJW=IyzCr+qa8$Zk4acXconHlH`2g*@q#ip$T=?VmY;swhkN0w6K^~(Zf4EU~J`G*T+8pTrD-89tv77nTSDO#``u(=~a>l(X z5eNB0A6^U2MJW{uw)7v=PMgm?M)pIn{sj6VSS;M&I%adLq!Vs03H04md>V7{W<#~8 z^FdQbcPm`q?fI!8H^_r|>yQ2n2hHx=i&PDbOs32*H`X?A{Qh6|Grc;WE-FZtf@WOGL`%m;rsU)=;#;4^^vpejsS zY;UBvw552Nx`OVis3+YRiD!7pe(-BX>nU<}^k+X1LRP8Ylu-=pqUr^Ps;>7@O$fj6 zk^ym`f~>A-+W-cHO0t!qiT?g$wi@bVeG5fBb-hGE1Qel_6w0^sM?t8F?o85~HO6e& zER9LiwC5BeZz_z8U(jPX4!Vt2p%?zqp9sMc7c4WiHZVH>^xIFTXt+J?VHNa}A&%T2 z*SNK#L-ubD#?zq7J6VYoT&^vT@giDbDF;TA1^{BAut>JBhy$bP zlOE+0;p7%y^XdIX82otzinV%PYcmm6Osh*a)`JY^b4}i|*mFp+lTS(Y6 z3tRg+NvNm4HJ?qhw4bu0Rs;_dM4>nVj}(7I0EqDN-0+%R>b2py5xnW1p50l0ng;CN) z1m&bP#%8)~I2;hLb&fsTCKj@FPQlpn>yHEk9Ny}Ju_=~bC@peP`*ToQ1(DxCoQ0A! za`eOQsmS=P(^NR>#l*Q3>MaMJRoYThr=k_M1dHGYl2d+7*gyILdRze5=s_9 z=^qI}dARpbzqx-#Pib5zuWZV3>F4^UbDDTCr@|XN#vu?QvU z;a6ZH1fd`~zR2A|+sO5_fDb-&^cdi`UY`U>rapsgdmxy{m)27od15{!L(~yJ4oapd z3LHosAqYlGS?MBGt8tqe`9P@b7Q=-m4Mdxh51IVnbbgB7cac%vw+*FWa=ZqRNpM2$ zq|+!H-!7=>S>ToDokBS2)Yg5_C73e35ON%X9Me$_#4g!ig>o=jcfGCPBS>f*P{yY>|mm4y5!psYB9OXLv7Ef`q7qe;B zj&pX^!s`i6f&MydnP&1g2%u7q#*n*_ws!z3MXu|BmHKzj1cW@lHQ{)`Di6$)wqQqa z5g!gUVyN77$2}?Tjcq+3m6M?;2R-sYe~^kC(RFg>_m8{bXSSfR6f2{abHQ;14i@xH z%rC*qC?!HTmLy~4PbnWIIDfE@vC=}w3M*>_4X`|T1z(I2a0IR;0X(&j4vJdwhigDU zFvSN4MXmUDpd}Q#_6~+x*;hkW(&jw~YGr>3IlpG-AgGlCcggurcMpPE3Ctkp=Xwu< zS~(0CAnat?{y|VH$1jreBmDLpiv)y_P{2;S3AN(ig=R8D!EZpV z?C&7wb@{x4TG?AePVd;;i&_y8^auL_LJ7g;DvVGozal2i2J9;$N>3a*6eJ`P3UPR% zTWP#?I0UHXiPCPB;I1c`?!HGqKWhs>h69^U@~Q z!V^!wc-sv3g_oOVpCgEnCB&`4IVFUiLnh2a#IR0^6?eY^xXIE6*AD{mXdqc({wr!>T-<1{HSLImN!q(_-Us+ps z-?_x)1VNmhT*A(+tFPz`bg8N+n4BPlksc6M#y=e%vT&g9?dj#pNC`*0)#cCfr_X9K zEULQZ{Y~s$n#aEXtittVz!HL}Wfv^iwcYZovo_WCi3G7{(Ov5rRMjT8%I7)kg3q?` z1P5o8nEWk#`s!{ zb#_l_aXA53LxN59Bs(XkA1YF<$er3f{dPuzMFLUt>-@(=mEGZ-vd*6Qr9oJw$-k)-T5z^P>i*! 
zw%;EU_J~NZ24AGg?zs}Y-ZqLL_RSI#4R#3_A(z2E6J>J*9^U{i0yJo#f#n6KVjdK-dN@hR{<_TxSs=^@MZ`{k002_`+>T!XPdTf zD#HoUM~P zkeuDRZTsd!oqkmW7JD4_isT3FZGoD^@KVv?(?<{j^BTXbqC$cb+T2>K0&JZvP2i8K zl-75`k6PfA!n2>ouO_O%QwO6^RLDN(@?dl1YPt!cy9Nw-jpcX+y_To~pL1s|s;C2a zMBWu-MGAP!o)<(SI!PqjTZExaP{1m@`Z0;YQvaokyx%h52R%D-`@I)SOL)zte;t20@CO6fNZv&aU1IW}(53vA zvdq1&jsU{Q3i~6rq^bANDqw>4%7+nq2jY^LfJ@qO=5ARFuV*imFkwdx{3zQNG;p9< zML;J7Z$6s(u(C}+o_J+U%xx^mP5fv7?|*n72oxM_4D1Tv`V0M}2ZZ~jKFa%jx3ArK zD&|&EU8fMWM_~jDm+)FE@-t%2AM^LxzV(lLTgrbYwkZ9v$1mjU)f*4Wnz}@AA)?o9 zC5fQ3zBKP14|PG5*j%`L8N=l9%0%7A9;rfvAM0YO90?Ac;1T8uVIN;$nWNIIGte~ zN}U`Ix0@#r!CZozkK@q%ArZsa5@7;dNC)jjf*RO_gcp21Du2%ref$-E_($arAQx)ZCOF4ZeWr zjD*alX5p}PKdjq;$Kh5bT+S+MZ@U?naK9k+($(0|@W}kO9?I*mgukw~#=5*)cN4G1 zRtWN@>76c&|`m~^YUgUc64#FC+ezd|tN)FMb;Gswh2Q$yL@`~sy37OZ@nx&2P z?SQ9zm{Zm*XuTd0p7tmyJo--C-NXmk$!QNt8^uEie=QI5t|x;{B`P*0IyNyT;kl$F zx4x^o8C}UTF#eG`R+e!+HS!`p;EUJ+^kiM@z=5v<8M{L7weF^U)znCjAwT*M?{0YCHhu- zeNFc;bidB3hY67nL=7p(-xGela#*@w)s2{#=sUdjEbzExSG4zPaYO5V?N6@7$KL4R zX2eC`Y#p}lSD%`c82h+5Au=+lgab!?AOe06-LEA*F+DOo^;*o$jKo{OuqGwBx9fhb zx8jl_qslw(W!{L(;tWUk6Fp0ci_5xM(0nfz9^bHCNL(DT?k6k;n^jI)20R9$Zt~y_ z6umT#SoagP7C()TOwIt4QbMWteRV&vSX3SxeKRKNYJ3CQ$&u)Om_)JpNmWB${?ojq zYtP6V@WaS{)hX8>Wuz6>*%ogYm06Fz@h^C<0VXd-;a ze+bzR=jC6`X)lk8O0WGOvL99)n_5gUEh3=(HVAQj-%i9^T=8Ak|NBKl*Z9 zUT)MAY~-pRUKX2P&S^?dZg_vykDH#7c`rXRtLa0je)zMDw7AHq8|9$byszpfm-kcsMCF;cGVX!SG6L04 zT$-AEqXHV>N38lu3KOGpIq#?XNuJ%j+c9F*59{VhKdOG!9Q;YI^w~FR_(#>Rx&>Oe zKdOEoRXenXf6t)X^e4O`xRP_rSvFf+^T~)uc zL#cj|M~9*M1${u(Z_nVW-@bQJ{k#UMeqL`^{kFYT^*iJ>4ApPT>#E;C0>GfE-@(^a zzo3n8RQ)z@@j4JF4!{GO0%QWa{JDO@15#g^{URUfK4qW%!`1%0KWcv;wZD(9zitdV zqxL5bBIS((s(}blltZ}ws?xq%OS=A^myF2u*AaR6S0np$ep_UC;GSC}a{YxH(zOjH zKFaenQG+4BzqDJLbDKMC^^bJ@CE7AR3w{2~&CqK7AFf8G7BCF^)}nWD{Z%~7*}nQG zGezA9ftQu3+oPKxc>6LpE6DrMy1gjz{^QTthIS@8 z4lpdTz7hNDLkTxC%Z7CQ6|6T=vhYx2>coW^_l;~Lr>V`hveYuaaO+DLymXACt}c7Y zFOKRO+HBi-QNwio9fm<~K8;&yG%s%Q%Rz6@7&HoW)~d)gdIm6LwSunAtpCh1HX1e8 zS=HrJ&z}P>`5i7C%Jnxn=SOR%Q$duK*%T`nsYhOYl1KV*)!kKN*LaR`{Us=6NY~%4pT~^VSJ2iNWv4a9!FbFG$??_K ztIpj-;$M&J@8YLc*3(z4*tp-vcio1t^m~; z-+lPDKp@dU+bq!czQl5riD@EuN=IhtyS@IO z8|$_Ot{(3g0xJK9QTDEm8n4oP5g_Sk|kTYwsGFlMe@}TtR$Hd+xcS^x1YP@1f}-GqMz% zd{ZsvZQ4C+CZvMCmLTlEhm#L=R8^21oj2D^UC-su_jB^0{soMd=cV(N%pEt3$jK)G z1FPI|2Ym~lkvsXo!5sg&gVuzok?VPYKZ^Tyv5V2J_wn#SKc$YdZu308fAFqa9{g76 zIx>0OwDTkM?{yGzUA`8qe}QA_b@v{D%VPKt?!7kPq8*>UD*qJ`5A~^i2j|{PvhSN* zdoRhpZ+7hUC;Ps^vGN5Z+4psqo&TRu(e)$yzV6CB~=*au%$h$9kAbrSx)RzCzk%tMS;5?Ozkq1tS ziSTSNo$wBOj=Z?Q*R;8VAp;zFK7%{*e9sMZ%9<`0I5? z-WLCXq;7EJMFjp9^3su~)aS_a*|2fPKK_AbU&?;puKipe;XeLe(H`Z!-oO7kq$6)L z>-Q~xd{pIzoo2N2iWld2HCS%P95I5M9u~9L^ zZ7yVNQwK3`5gEI*K`i_?886~NEZs@Q{}w`w1(ESi={cx6jMjxJ2*Um$D9Ow&0GU&1 zmvAWLsiOjgnS7R@6dRGT`hAG&Y{}T@Da36qWNcFhF>etWPi}))_#GK9;z2CkMaJI= zA;yBpc&GF%)O-o}yHJ52_BTO^1^g`!YQBI&;jTI=P#Cdi2uh(Y8LQugxW`ca1H4t~tBV*@gh=pI1@w{$`rCZ7PYazs#9~p0zo`IS#0C^cI@W=i_H6!3Ssu>D* z)k*p|Bla|^nT*x%LR@1-#(ISiw>XorRSm@5^U2t$31Yz)WbDp`ShA6fR|p`+_LK2O z$!VzhER7GfQTAh}2uhSZAIRBM+F2Y5IjX2Yk&pV5pgh(lW7QmptIWw*rvTz6XEHXg zg1F0rjGdbx7OWs+&n}3i8_9Tu0AkFSj5kXDgqr`M2~Y(-*hzvCNf!VMoJu{7Lm>-5 z?LZ->N1gz5vo;y4WIQ1<3{|hyb_! 
zg#b#JFSVz%Q4A&g42>*NeX-*NC7MYVXlL-_1of6WMFgu+m@&ry?X3?5G33z5hEk;Tj%%?Im;-Oq*qDYo$REFj`RKZ<+MH*QnOz?#Xv}7Gc8gQav8>mdpU9eHKC|%ep za*8ZdbG0~xpy44fiCjyTsr41?mmKb+_i`aE_b!+uUE4DW*t`^3ruOs>m?r*CPx(bI zZRc8;)V-+xBr76|CK$p;n`S#2lRR-uMhW=iI_4AIzuB8o z-pV`AS8A`y30}Qmij}?!ol4fjqR^NcCic@7to&)?Zr^~w%7d+ebOtlulVlAU)JF3BG%*Ul-K_H)2go)&xVl`)f9Tpt1^Wu zr>JgV?&RvW@beWbL%&g8`Q_(J+-HopHr7<8%aW&-MWNE@%9=*z4il%%a&z|xo5%9> zaGO12@>ok_ZI%L!3QI|4lpvD=OUrPKmBY9RlR{mTCQWd5v|*3ZR%OEGOW=A{qyn^r zrJ-Xunr&_wYNcdp&NdyTr^#Z{X%un+g2I%Q12&7ArVfC7!wd+0rk)OC3B zLMCjA9NRV6^RAeMtX%#|VM;}jZImL>TuV!A^<=xh3;9+Lpcjnod z=bcO#hG5H(kux|%+5&<@2<1br)ueA@KA+nnI|$(WK3vy@=XtPg8`J3&olb}IeVIg& z;*6pwU}%~qkj-YH>pF_Xq9C#?%cDH?F9JjajHo#Bg+c+fS}jGAku)f-*XvC*+tyRR zz6Xy+na*YykH_N9FbwqjeTCvs)&Q!iLMG^1R~UVaV4hm2R;wtN%R(F0gsedLN)Yhe zTwHnFN1&02fs6a22b??|v`OV((o zh|%TsVGl2tud$$f&R{_qxm<2H0q>G{MqP`G8;u4W#}RR}*+i?=5+u8ykqDTiTkT=+ zd@G73lL^}GHnxkJ95vkm6>y@FH85NbMO?4f;~il$O*5fEVh?EBHqm!-AHr0e&*%5Z z+bIo}%cc0kZ_gsGRx1Iy1MN3Kgi56npDh-PIET;WE@c9jm~Y7I_}{)cpMxI(7^}LL z1cE4fmK57u5k#~!gy^9j^2vu_%koW#z`s#%{)PG@(XR-CeKH>+lI&H8U@O5;5I-C)Q?FJHv%{d38o8r0bT;SP%LfJaT+4?!A)2ZS_d<@lU)d(aQ4u=Xs z-pOJ$x`y4@4mw^3nMej#d)L@l$-(!1OePZ)i$xY$5TTz{}~YI`?E8$dw6Ift?=gM^w@kr45ebpj0YJqj%f3S<(>Vz%)&{ zzWZ$8Ablv4ay#>AwOX<=$z;+9AfO)^UO9855YP~1lXJV>Ru@kQ!C)|$iL8OXU#1CU zM5efCf(V!(CX01Ix9&$*l1-%yRbi3cLZP76)JzhIG51@l2jT)f5z{{YH|{yfj{uxk zO=}Zj5T2wF()M5^3Gtu>(U6ag*c>ESK`((8yaqxJLGU6%{(|6NC_Rdo{0Avo#8fE$ z0Mpt_X#x>9gt%#Fnndlc^Ste*?{42s)P!M&cQ?#r=9y=n^||p6UT8L(C(`7L9SI=0 z4TXmR=AeG8ykfD4iPhaQK*c3q3557B8>$~6sC`9X*~g3-hC!S><#IWUif1BTlu9aY za&JyMD15sTHWk6=Z6Ra`sOOLs#k52`E1>JTF{^ZTB62%GTa88o8c3iJa0L&RN+nbU zj0_4po!Np;r(?L!-3h1?0lfSDzQDqGJf7UgIgEjNkk98cd^Vce0$XRvK{gK7>CNG5 zI^yq#m2GT# zH4vDas3QNIpLT!Mse7f%y3M5RA2n*5Iwcm8bnjZ0_J8kFFc75p&MjKr(P;N)ogRL9 zKpX9K0>2?CtffdKLT(kVYyoHLz`(2eOL~6i1>OCWruz#ydb_eoDhVU|%8@lsY3b&Y zkTg>h7!HSy1a7>$PJu-~Sv?;$p0q9n%x1H~uhnWoKSL7QpGu`d<#Kt_idZb>NI=;F zd2Zn$$f){LYt3fE9*;QqCH~SvXLwGX|rh z1Ed$vv()HE98&7nQUJ?gZt*$9=SO85m)Il>v~bKZK+gBuo|wd);CJ0)3o<> zPnFq5u4#DFe11$29H3KLT)OJ+Dz6as2bvBpbJ~?WX=9A?dDZ%|bQZ_#=hNS_m{( z(L;L^PqkD8F9{f1CuwA8nKzpW)tq0T9rrufW105(FIcZedQxq$rk8xY4h{>e7&%S|GL|^ zBLFU6c3^_g`Hv2Q*K{@vMCimsEU;^Qe7qp$iR}~sOaM9vz3P1gKp@ohj*gB+6$7hke3=MubchdwP1}LD~_z1-g0!X@0gDiRL9oVRv8!2MzEjx3W8W&UF8bwb8{8tn!I5Jj3c+> zV6F|n9lM81PkzMan@u=T2amq`Q;v_t9J%a?M8Zc}s2Sw-qBOaAjM@6fa8f1Xs&O@NcKKT&O6*$x$WgfXqc2rs>+-xsc+B+R|TN&_2uW6aCY`I9%LV4 zX>A$l(N7VN#c^i(G!oGSt}kB07mq$iW}*{!UfuSxz7_SXBy0%ES_@1WXm4!Ch1MVN z^Rug{Z>YluyY^tBFo9pv*D$v75J#iOu+gxA&R8dIy|{^|#V441Gl#oJM&$puww$VV z!1RV(k)tApo6-lh<(iRdNFl$rgl}8E!}rMx_-xOiK<}m#*e#8iuuUeDC&Z8$-5pmI zLRHpF>qt2V@pN5~kIg6sJ8%3iO;f9u&Dr>eM%M0 zkFqjE9>Pc+grK9TVK4rAHRGF*b$EDK5)rnlkNN5D?rzBkbWdu%0y+prz#qmx!a&Mwn~d9f3wV99=_E2^hmqW!^1XyveJ0^fo%$MGU0w1^)iZ7jtJ;RD0%_1P zjyk@LmbuidedH4#EgKjZ@X<0sLOp6mT4qu+&dqoDlAt!eKzYK+Z>Dknk8|F0W4UzC*Q=ue8opG7Z%Vp2gnCVX8Eo(PK+ugFO=UTK<)&dpwIfi|x z*$`D`sI*SyN8bA;B?;ZC45EDL3ncfJfZ(#(h#G5a3B^vRgj7Rk|K;cwQ#Ie@`VM5EQQlf>r$;#`+#YK6|miG$_3$iVhO0pG3NmiAl zV5R1;25CW{5Q-O1>6vPEl4S~`dL|f=ce16%@R^rm605*I5v*(z!cYLV)Z8XZA_9|8 zE#Sz4uv!;IyT8iG`c4YxeaX4a6Zs%|&d@nwc@9`h;V}i%e;fKc-wVLJwZ_CzMd5EB zYHie)u@TWdvjmE?nW75}8b1gXsE#?;c72zo8!rcUkE1SapND2XUR9=g*(?^*mf%(CpqckkYPfeo2d z9zHm)pFn4xW&m)KeKSM9tOY<62#f9VQz}71OioUYk7afFq-!YzOfZoE z*bT;U3rJDNN7|L41%f80!@)*{u5>Mz@(2K)Z__yfrMI`&(cIkJ$IgWq5V?vOPtVsP zVunL=ZQ9gX{a#C}Xh|nt14HRGKjZVhTbK@+EhmOxOmEtK45WMs%d3=R&4%FD~=Z4v(o09yWnON|0XI1B_|5^A=G zXZl>@Ant>XL;_(N0BuAc5tu1~8`riW>H7!uIJIS{DKDO&*p}Ph6_sc6__^wLqkJe(GrL?p8xWDuDy%ak#k73rQ*OU59H=up&B90Dyk6#1V*nVY4*ZNgPYS6 
zcryEGytd?xutSt7?<7YA-zKRnMH^J{$t#CN*(&Xb@=S9Cz$ne7^M#H1D9b9xt|hhb z{P`KK+B)SL&wxFEl2b+SS^PMTQV_*I7NH=$0M~E* zf!4Jb#Gy64Mu8HFD8^O%gvBVKF}(yvM@M7&fS21}!Z%}IwX5p~Sk8CL$x#@SHd%Opd zPr{naQk?wzB(hR6QS4fYQfDd4+HI)LtPzISoQ++!Y#F70z2pbfDd3y{xN+l#S4>>Y z{PNAiO?bZjIdbKg3Qc2C(jwUGHl*7ZVQgwtu4;QT?=1vEeq=aqgTs;(VUt{jpnC?` z--}mYz_4!!6&aOS>sp7)H!rh?1s=_MEX35^6EEb)@w;R4gp>wJ37@`rY zs;YFqpX#1?Qx{{2+Tp{8i948C3RLjPmV8!LR^ogBualhNL!xLbwzku-|Mw5^%-!4Z zY4;~Ygu*ZRh33rr_~eN-JaXiSDFQFJ)k6=%%`^JvcqwjJAJTQI+!uK3MgUX_|_bMP+5B{GLk0>g($x)Htrl8C7V} z!Lr4~E&;P#!LFsX*b&-^XU}iP#qoAo<-cd)>Zpg+mdR^dU0t0phMoC3OG5Yy(-y$e zQE#kTI=%al%)Q~%Iy9G^MT#{guCcKg=$O)(^4aNhCe8xX>8Dx5=~{A*=O6yraC;TU z36TV>UW^bW-6<0Ne?o~FB|~P+P#}gF>XCZa>V1c(OmyA=7_F;V5uA|#YgU{!3sS-X z5IqPffJ9SK6Zr?y(D^z3o=TUtY}tZChYrbw-_(vBJ0|z>tljg5`;&sX7o>Z3;e z)aYD8Lj$&M-HMKmjtDT!E&=OYCoc+JY2W4tpST*BnwlE%azXR*4fz^-UhkQ z%>SW?Po9IL1^M8?gHhqMWHJ|siw`hUQ9T!V%JemwC^Xg>rBXW~y8sRJizx27=mbU& z9`hPrGhyBr_>F>MGvX+b@>FklJ7#Jg(-cW6QQCPT8?R!&sfellWDEp^9w}-BfL0{6 zMlm7(PY}4}24{H}@jU+(fVpc8ZsNMa-^!8|KV%!s(>x3e22utX(gqq#z#*RSb^{HN z5GWWAU`ikflQIwzA|a12K=W$Fwk~N0=T?7$rKzWlTZv}2p9sSHZ{gIu8nOB zmSk&t&q`OmUhPUNX+?8kXV<%{UES}VbMCq4CCR{4W%03!O&EFUF?{h3-`*3EH^!iB zG2$L4@1&!us;WMi+NtH?2^BEG?fK;J;5m_O$=zYGSWdB-WF>pl1E0UoZ3YODP=M7p z3uIJRS6|jsS7qqCxy03(o&}cx;6RW~pMW4h*!tu3!otE59}rQ2px4h(dH`Lg?8Bat zF9c2!08%VOQgpfT$~oso7%u0C$3NM<@@FVC(tcuU)&AG$DtN1x;r?O9=Ak zRFLFfhicgH)T%P?9JkG>?RorIwCPc&IRLcLd7_B<^XDI6Ak2U_RL67>ECyr%-4hqg zps9`q5wqI008rmYd+ycY6u@H&Zr;3!Wy_Y8tXZ=rCLkD9g+snjixTu=X7#MRY7pog z$p%Ug&d<-^&p?=IHVCH22nFk)di1sK;l4c%0JVmO1`a|A6N;vS5Y}SI$fjlotgHnPBgcU1Rl&oI8dX`xrSYtA`3c(6m=e1l~TwbWXyh=@qyLXjkG;ak3 z1^d`;X>oD!@o1;Wm<36RAZW!3syoe}Poqvcfavj|BvAq+79hlg0zy%Oni|A38#OBs zHSiJ*S|G4{m+acL>qMj=7B2SG1ld$^kwVQs;@>2^bn}9w#U&U0ijh*<#|v zi3b<}qXb(*M@#_-X5-hQ4@FK`@3UXF zAVjuFRGy(w<}|u~>)|b&s6U1sRYf>6=`36xC(4@2@Qw32HVu9oEgdaLx21RM7+(o? z*$d(s@4Nj*6!H1I^E{fA_P!Xw6rU}l%@hD4A%D95m%9g1a%Ugj9JT@RQaol=OvfPm zVAk>b`1JPQaoc?xJrmRM+4oN)H7?a(xr_K(0mBlfgv{FQPxwbzd>_v&;pR0Gl0L;L z!N^*{@M0*~!ls)ofNgD^Z8%kb5>NkQJW896!X>%T=ZU^}_xd|Xa3)|;zeQ+uK0t1- z7cjfm93XMEvU$71i8lwog?atv!zH`$d3_m*{#}G^Be!F2?;j!a%mDm!;5szK z-&5){?D$@Bw;FC%p&*dR0$g?0s)XqKjus;;Mbl;h`XZk-e2OErhw;gzBkb)KOgW#8 zG*24#)cq0rs`sMF)`Y1&b1>F14##VcVN~Licqi*Ud{FZ{%2Q2clgEy;0@&!tFpFzBALAe6~VRw>gETCo85!Hug_)OZZm)7N?p|pgpb~g;!SLUrb1Xo!?u(2R-9@f`UjcHeJB4?!1c^ zzR1NN>OMs2y(7Lgfxh{Tz!CKaqjmyoG{R2=VfvT3uqWBEXWU0v_w`!DNj9{$KY%UH z29MjLco#hF9@IA0Fuu0oR@*JeHW{xat;dKNf(A$)p2b zYLYGD8arS%`SF04luhwCd9;(PEEa2kr%93-Fre8%HlGCo0-IGBoNPFO+LT%}d+wv9 zvlX*4=HQ|AA>vqPTi6IrNS_c?tJFLPNo#u3^f{;vtT8o-sX?f2tHQ$vZ8$gSJhoSE z#i_=VxOclAa)*qf9~L7sF;i1-&{{i)xrp4VAPQYk7tk}jK6bPC+VVow+3Ikx{vc-b znTeFR6in$e6%BLmV`%bFZH-ZVPErY2i?%OOEB{z47dhrEAR;|u$ZR&}_s7Rof5oN7 z3V1qY)H?6r_M_XzNnQ$lpSTa@bBL*DNbLcdnwA4+yAzK(9-<&?6`o?#R|k{QJF9nu z=JPPmX?dKrfQX4-KyS*t(0rch{Y_Xj=oP$tX#wsxH6g{8itUqr9+c1N9EHqn+_=$y zjY90Gpy=}D%l-FJ*j!#-Uch@PYHjo8&6qZATDN)xG4l)P#q{Q97GT-nd<;0=4`UO? 
zQLY5~IWm+?(O?3PeqezIQ1V8A&_>_^?%iPlPSG}Oaf9v9}7=izGO6>J{91&;U>2FxqECbP+tCkMy?YU(7aP}ybFh$Tcb zSz20}p|yZ%EI^GiMW1mzgLOk*$DE8jY`Xfhpr1c`_N-EcP2H%&p^~J;R$5xxZT^qR zG-$*mM(vH^8?fxFrTFcQU!#Y$2W~w023g4?bY}x{Yf*7N-Y8*;M~iE9gyciud%KD^ z{NH8){$-OH-yh08jAcXe!IV~oEwnm96$rKJE%Lm9P}6UBiqei;du)o3*65B*8Hqc+ z?_kx%0(_BkPE!w9xpHOL@6>Xr@H&E#M6+8W8SPLg(=}3#h9_nr-P((-7OMlEA=aqV zYMd>Mts^3*0K>C@+wBfI3+O%Py~=l)aI|5=h_}N2Y&bvuq5eBLIoVu`P^4a>%}>4^ z@(XL(07BUd^!$q9Qb3qFLsHnR64rIeh``f67p?{v;bJJz7G@;jB4kd2DxyU1b7qge zsnejbsZt+Vqa&}?$w)^(av30bvjmj?M7FZjy(B(Vb;bPF^sb`t{xO%kLaVyT0!b*` z=&KwnmgM3rDU%gg_WsgIS0IXPJ^+CTOI=oNfi)xzz79N=yonYZq<{*ID&YK764*DU z0DFG|Mtni+gkWIf^<%|4vUJRUO_BEWx7L099(6@tz8=szPj&>_#CxJ)fru0#zKt5? ztq($bvDyP2i`PiKrTfW%LB|x;MaKjNtp;fmJFIn{Z#-3(8&vo`kqE%3g&-9!q0Z8t ze{peyMNq^>bpR<}I2gza7A#OUYLp|)bBGr%Tg(TMVtTpCSvdanBWiTBWF2@ra(`aZkeZb%c1 z3IIOpT(9Sy;uW~bu%SN3kF~Tz{p(%JUGbh^YO&b>j9~zjM#C;fS7^cO6B=7H_zHQ? zWrvr|un5U8A!*SlZw9T$@5@>wSOM`{Y8b5~!mR zGrrB`>wGk0K@kSg8C-PNJsy_b!5LRUch3w8 zjyuYz<9Jq39_q{jjt55&X2u8(ab{pf7~y_rL%D-+RAbiG&VAVY90&e>ue?Qh_9i44Tvv_mS?Q!EQmU(~t2Eb6`DHIx;k3X!n1jF)qoDWi54neP zzXTGS@{oYfAH0swixfd3nmQyIjG@x~(?H*Ty7sF|+7W*gx}UzOfy0MS2Cr%3Dq#50psQ&aO% zk1XR-HX@b1AAEIM?0{OwT1K!IeS1&)a>f&R1%fyyCjp^PdItjr4DcL0c<{#$J@n9P z0qeuv-$1Y`&qx8Fs0;*}1(3tGdJR7ZJGqjOrK%(_L^?7;Gk=U2l;LyI< z1RyBMWNK<^=eBLz7Oq{p_I-4(&*|tdAoMi=;sXGn)=z0Y3m5=)*!ctWR-9V}f*Jru z_lx8WS}k8@yAkIox|7Vpu5_KS0m8abvy=|7h z*085~?pIHYBr=1M(K;8sbl-y2NWDN$FdV5Pne;zmILXb;bl(BSN41S0TD@8;kO!bHW`WRO#$wa} zU__eeaHy_P9JY50dfbFYt=^1XpqC_8Xac>`6A&b0u}^_ucO0OQS;FN}fvTL!G=dm{ zneeh+@JNmhiXaeelna`YVA4hzL209XIxo5d<`RaaQzbI2f`S4L1OWotD3(C?>BJFC z2bB6d5L8%SUtb>Wek+)lPB9LtL<$708iM6JxP0t7+} zh^+7;C9cR8FMuMCE6DbWyrfMlU7n!MN=;2AKy+@|vgP&-8#Wx22AxD?F=QCJ-vPjU z;d1LO+AlS2`vKwaMR-|ey$=o%OJQLV4i=l>FB8|OSFh`>q!B<6=Y%~WfI{5`>puQE zd-m-6(9McRKHjdvRXQ86ne~ zsHsi_Em{cqjg%Qo-Y0{xv9a;Bd+)vX8FZ&qZ&QPyFDHcPfJ1vopr>_SX8nD=`x-hL z;8dL0b8a_wp4ov}d9yHo;NK%o=_cT9_B8V)8JB#Qc-GSat^w@FJ3~{mYOFjG9Cq|6 z3s1OyBtYuB`M?pW`#`Ig?FT)6o3PxtQifz1K;k@UnEd>FT6?};HMk_Quvjip0Dxlp zyfyY!?fYmR(9%N>;+=-KIRFwJNlXw-2<&~#&i@vUUpj`LRXvAeZC|08{as|6i~SWT z#kfA>1{`faf;4A3eo(TExuXh75{Dz+5g(qnR}!QnKxilcKw;r-q4_zVm(ggWmS-aa zMkYg*=O{=Vp{{_7*d}=hj+g_0<-W*dOBbcZ(}H~$K0%f%3#A#Q*mdq*{zB&toQp?G zRv3Suw2$1TZ7F!o41cM?{@e)_|NW2=;Xm9V&nU)^)noAKZc3v-#}UJ6uf`- zeVl4PiArxJ=KSAn%yMFuy0;xu^QPh1QO}{((}rCQ6)1@tj%OzP6n|{|1AcRIJLVS7 z*%xy;{e5!quCQqW)jE2jJv$71clxlT!l5p*-2PS0$>c6vD&5#sXFYVn^LO zc=XT@(dFyH{Gz*XUCuNNNg0MS?F~4`{@Rn;;Y;u#$&AfneHky; zu7mfYm#g?kM-zQZDktsycENX$)QUeQ>=wZ>zkR^V5sHe>aXRXFXeLqp;jZdb=K zLpviWgHIJFf4{^v0&@yyvo59zn@_%uSA8$zjB*CdsX(090SEi8NokX@wD=M3eH11Z zfkBgXSzd0FsJ#Ne%KMP*47ElP!J>fIVB5HW?Enygi%6|{KHBDUqO+a#>+uc-?MqlX z>|xxIe8m})V7?W3^$xk!0B*u55Vb(N2({8iOs|N`YzV`6FQgx$Ujf= z;*7fy$=+m)&K!-y$%oPGzkq$M`*7`F%Fw2?FmO1~rF0?Hoyxt5Hl>XjkUlsQnP!Re z;I!u?o(sSC1kyzMI>b+|J2KlW~mM zC8s^7G1xT(3-a&A4Ousahn`8;n%zS;Bd}F2sD9M|MpnI%5qQghTTzlyg2Qcmt58@L@jqil zxEs^+ZpLi|v+?V*+qir8#IPSDJ3gnot6)tx!I1#B&&}zc>67YK-CB(dCcqJ?BZBoJ zBJbyq{}hyP*{Apd23b6s+Rx*?)9*1mBNcy_eLa*oRv%Xy=gG=;W@Aj!SpFTiq|d^z z#9McBeXgS>M&*SuP zRhrnDh=Rld6s8n$2DUD57ppjH4NgBsrj5jm+!^?^`BQw@^dYWGpN7T5?#KAds{^Z! zU^)$!nRnr3!0BMZC8JQz{}JhclcvW2f5<8(wT)0x!nwgu+hGIP4uH0{w({T$&|)?* zQOGi1Uh^1|nRTDkp3Ky+7g@@UZc%`9kzx9Y^WL_9s= zSxnErS$pKc58D7C9B;oW=HHuX?3#w5WHb&PI@C4*M86B@!3b2>_vs@mnL+j|ZZut! 
za}5T^560H2EvRd&<3`q(onK;L+(4#IhvKOBC{yRzSU>tzIQ>o>YxxRO*gYdsM{>1m z)*UsmLfH5^1n7*QO=Kl^edvL{E z1~Zycm|ve5?}FRYfknd?VcE##mIQ4z4_I#jAp=34qZkyX!DJRA?O=r(-;~$ZCo#Yt z=JZj?axsfJeJoF{LvQEi3})D!{uE`un}qV3atvZ!L6_pijrljBqKWzMyR)&h@*z{- zWu{JM>kgCGUgmAnUe6%rLF&yoHkzw=MYw@h0YXEw_0-RIc2ff$YCVABykh3fx1g+` z3_r+RhS!d7z|OiIaQod@PPqe4dmbN6!@wb@Loy z>fXp+h*Shj2# zii(QD1TRJkjndJ^4ggHHM6Dn4jm*g?O&`z0whK!Z;ZF^F@L}U$q4*rQcHmSTX+4V0 z_70wtGcL{%9Jkjq%@u=*DT@~`)+8>sqP6U7jeGa*<)9)Uy!6sbh8iNayMSKQk1#=D z+_&q0gEy)-V{Gy`G&t(lrG zM=D$o0Q5{+(6MC663YQ4y}HqNQHvMZe?)0*Z3&6{i+O@qgbkpZ!$}EO$a*g0prk># zA?GHnI`&`q_`=6L;qG-g*I`liBK+dm&r#h}g*VyX_eOl5117jmz@!mKJhYUiNlxNJ z<(_-)3A>l(=o7^&eTMSiDyEs4wE$1wPuk@0;lrBtfP7!H)PGb$!OhLh<+S?6uFC$o zX%8Maa6e98tj7@N5R_z;;OguN*j%+4gHs0K#ffV$CT(m)u?I5#o_z92%SIq+frSef zhMhN)=f|j!M%cW0GdBeFLdjGDxq)Ic6f93?Hu3_@3hBn|=~!F(3Lg0Eex$^w@EJ30 zZI>{kU^w{$P+@VPx^c z$@iyHm{f=#UiBEZo!p8O?rJo5HDl|EEtrr#5jhFDrd~3&YB_b&!w)~qKPiEq+Da$k z)21$3d9EV;tvv;%jOg{Q2rrP?j!TYRUC9szXojE7J87F{1$#i$lUam5lfdEFJalSU7AE zo;dg;R5b5GW!oX-IrFe!@I8Ern09QaJ3Kpe>Qo#&c#xla>#etVODC2u>if0I{Q2{F zjsgGxZeDXXn?}Xn19az3I&AzETYR@ zftk4py6X#L4FF~rhblIzdo6)l0h!sM;NAmBJA51}XlT|vS}}o`=>m*gz(}7JrgTjs zLN$_T0T9OZ4GkY_8-SrTH)2bVSq)&PrD~rGB;~lY`WyiWRbI>9*4Ip{R$t?b^+}<9 zsU`d-h@xz@BNTIex7Dsf2~`V*Mx@;uz!5D$nVEA(Ne?NxqS)w|{Bhcsvrtlz&x`wr zo_)+!WB6bWg<<)zw29}5effJu`c82zjSJ8wo<;lWI*5D4wd2N(3tmtAB8{hfOrK&a zEiL8SsZ%F|`(g_YNcHgEd{^qUaP@h5xirj5sYgZJK#gv?hK68i=u|FrA7$gD`zVM* z$Ko3L6p1S3d+C@0I$9Awnjj+Xp%AY~ZK@uSq5soRG6j#schhl2MFqcxj_G>!e1ol9 zxAM6JRA>xA0EO-o2{vcVnuT4vcJbrBX!-r#1<(tZCqmFmqbX5zPUNNCuyJEhcoc;b zDOin)JZ3LnPpyp7wuS5(&Y3+qczSRLpIjUx0o zif30`z{IsOKVYvimO5={=~M~gE+f9WHoX@n|FCGxZjC6y*9$EXNr1|ZAW`7F$W7?M z<@KTw0im~tdgW)$4>2{|`nB4=R_rU*P}%002ovPDHLkV1h|^2}J+^ literal 0 HcmV?d00001 diff --git a/tribler-mod/Tribler/Player/Build/Mac/icon_sources/docicon.psd b/tribler-mod/Tribler/Player/Build/Mac/icon_sources/docicon.psd new file mode 100644 index 0000000000000000000000000000000000000000..b343238cd9b33b3b1014301d84d81b5f64cf01e8 GIT binary patch literal 223697 zcmeEv2VfM{_V?^2+1^WblTF!Fiji!3LKO&Ll%f<1=<~&7fk;R&ML-1X1nCfZ=#W5qPd2+V-?=l}Qh>zg`~L6$eVfh9&YXMBE$5zl=ic8vb2oO} z6EiRxM*X?M*X!iL!dxiQto57EF_IQU(nM8_&ozhsqWod~vU$4S zlFNT0!vaxQ-0LI9ELoJ8v_PAseo;FwC2f>{{f^!K>XhVB{!d3u(M(x1UOPYK(N{CH zGhUrKGwIa@NpZ>kV@7jEE*ZXL;i84wtVH#ag{f(o!1#cuaImxF$qBB|S|&QTw7gEKVJ+(Zs03 z!-q#|hHGNgmOuZ|9Jm^pkvw)pdzux6R*uo%1Bw1l>$klcoSbt&(0d{V7ltlLTof8MOcQF083acHarpQQZDN)-9*)tdJP&K49@fOn4A%^g ziX0vl_K+rgxJEM)i92%IQzqk>lAfG0cd0dwSQJNC+|00u;Su4E5je2*i~#B(IWa5o z!BIFe_K1WmNqWZ2^z_uxR7E~9KRqiwbAI|F_4pYP>ZeoElG7JwIuypjY>8)NC{nJ2 zvdNl6dg?)cP$eWM+3H|Xc19{$)yYYr+EndAZCX|))I(VJdPq(hJ~usMVPe+k#6^oz zQ<4%hR47IcV)w#AMV#|Ct1F{A8HX~cm@bT%X=^0SX+R+gsL;E5- zRGB^a(Ui=r^o*sUHhuq96#-T|WlG=0v9^bW3%geZDKb1hJt-U2RYLse?Cg}};hJ&b z<0B_Vg+))CI3a#qSXk`D@aXaJF%zREhQ~*YkA%YZl5;326_${enU$E9q_xHwris@? 
z$4rQciwcj5j}Hrri;NmKAvQKXIx=Qrye5)rbW^4bcaZB9XS_Be55k?~P6VPTPBanbQ07#%Y{ zDlTrkCNj#B@oqV@Wcx4c=d?>w?qLTgq_-U)>hLF0mS|I-iBEy%m5J&$EIf8(XrCbt zTDFHjdrwJ+ke-rcw=@F=58BZWgYA*AIna8tcT-wKbZm5FNTen%T0@?pqGH0rsdG3< zq0SLNj|!uBB14c=40(o1g$e@-oPiP<8>J!7P@!-Q)d^=XfS3p{4|E28ILD!LEO5dZ zr4<3jg3c(F2rwda23a_hvS31yVUZAu&X8L;Ql8uW+vT?D9CoUH8#Kn<3abe^&iY<vF1VKpGejv(vhq z>Kc#+2E^>NE~mN%q=5l3JFUy9t^sLaK+I0-a;j@U8W<3>)4H7M8juDC#O$;#r@98D zfdMf)t;?yd0cl`B%uefas%tvF1VKpGej^L?$0 zV}IFFn+9)KE{0br$#*eB$@erL`_Y$%JqnlTu>Ku~mpLCnFLYY|lyw5F2hp%$*di?_b?Ti_mpYQh=grJW zOv{8D>g1mw()2KM2 z$a^uI;f>Z_q26RD`6erwOTO*ihrIXbe(%yu-m}RwoGfW@$a{z%$qW*acN~-+`A0`o zmsaxnQ7m-sTnI^pGlQhiER-L<{>DZLhY%Vz8_rf9R0d(C*FTY4Lukd#FR(bBS0Q{fA}o6LGKF;hD=aiMnRglA^Ly&E_rRKg8mrr zzHM4^=Cl`IN`gB}?MzR~3WGZ`otf~4X=-}fJj9ItCTr(rIo^-UN?&NXe;tKSpEutc zj_w(m@Xn_;^QqLynQ%wzmW~@424o2wO!6in-XU)0yo~hhMcqh@^o$gEKU$lXy%1!0 z6K0^;;q^P9!971QJ1c#XHcguWul$mY5WO#GVdhZGhytuw!oqoK`1((m-vwThwJ6@A zTvRpPDN{1%Su2O)VI-zz%}kuvjZ2WE1v%}Ktc1)*XHJ>?BD@KVm|0eGH)i(y^o&=> zrl!oZYD!2I{!uF#B$>(Dxry1SS#ZgIMVpb;2m4c2a(8y_i}T1g%p8;}rs5wr$rb=2 z*wpkiq-%Co`l4ysS(#d^VW2QhDi~5PD(}VgEHJ2EbS_l!{2uINYZ;8I{1Ogd431pOrxIYmPdoVEziuAe*%^#zSX)aqr3kZ}?|K-Jq$c#5njAsUYHlyKMV^U_6`}&f%g5&=+ zGkL<^4O!8Ole4aCn@Zo_cPjfv^Ta``H@;K$`LCyMwrtw+TluZlr{=!$?iZW4?mu(8 zEoRcw^A^ANWzMz(XDi#q7@ZEebtUt}WVl6#n>3E zb>@M$@2veLVG1|HtNOV;Ny*cWjL#&nB|pR%pC>Q|fn7Npu^dai>pLg@<2kNgea1e0fsvdvBy%K3e%^VO&XRY5u#PefHfN#n7MM zWkxm?4x6*#g~i`jd*tO^^tvKeee+YubDRIR=k~hy6B14)1g_rrSwhel;&|)KqgQh# z*Kr=p@GuV}u(wv$SN%SVz#jj-BLc@iVGsGo6PwuM9-BDs16I*jZ?Asi$joMskcS@G zGV71zqKzBJ%zDNrvtTiHX40|K1onGD`;4@pUApI1WWM%vv(yjhQEBoWFi6yp0)Y8aMpLqFu#js>dIh`u)0% zgEFTbRT0?qp9&}EH$5aw{O9x;iGiObOe0R%bDW2N`Dw#Ev-z2~In3&%zKoSu%D+mO zoESN%c<)aYNxMFzz1%tMg@|h3Cl2k4^A203-x{_5>Z94K-tyUePUpk@CHRU`b~Ja|zO z_uMCs-CaFv#niOyqpkWxTH)Wze_i&svv|^lO>eE5a(I2=A9vTUsi>yUiT=^0G)ZSV za;R|W_D2Oi(qE^(*OFu$`8UJKlmp*=`}#HU$oQ(26MaLL?0jKkTlV=`hlP2y#X0Xq zzB$H?yWo;vMr_;1YfslMn|*i6D4WE*jZZyYJL0D)e8tX<@5Xjz z`P4Rd7DTKGyfxAve6qn~AdM_U;Z^4g^phQ~sNefarT!?d$snB%r8x2a*; z?H_f|y^v>qvNkh)OWC_+sb?1jR&SfOp?G17p+I)^;?yZ$jA}o#C;C`f>?g0S3|hGN zOJRVID1$CsFz=U!szq_|Ne;xcq&W$~OvxcOc zG>3e(J@MoG%pZ&~cL+?bzy1Al=avuZOlpdI?%`P_@x#}3{L<;Qd0uw4k(MxV_xG~T zW)P?B)lx)Y*2*ZKCN$2S_RG5V)|Z1u9y^1t`~06Nzi-~zdh+b6Z%^zP^sdGn_1@c~ zPn8!BN#YA%Irh}m=Iu9^eR!dI)RD|@cKproW@cD)nRMY)_j19x#W7XePMePW@!8&; ztFwn*%>H z&b^GZxzUWE?O#umKRN#Knx!@L;@`jfW1X@6RFm<&@^ACQJX6b3OHZY3I+FC(-ee&O1cC$<(4*xfU`%hwscTD~%(CcyphYj5tG zt6y-w)|j-i>dF z&aBAG{<(fjR$KAf1YNk-v8mV3E+2Jq`8EPu_tIlYtvjmUGhKM)p%n+)c5h*>s~k0I zNA${HDt0W+-G0Tin2FCMu$lvSS1^I8b{}ooefLV|{MpZssoq^ae6;?0;;k2|pPd>s z>-kkx=k8=al;r#LuHx}Kj}TZwanSH9Pp{aLb)_(5$^MF`Cd@uR;nN=@zY?aTZ>gE> z@g{#v<6HQqYT*3sbN7p_AJ=^I%$!eVg7>eJmSe>1ei&c7LkKJ&t{ zip<%{hIrBKuBq?8@XYY?=Y^Zc$ipJlURcHehiZ*Z1G}^4;0B zU0%tN9Z#E9zxGk;Yu~J$I%V>%FU>=1=4Vc8{i^dNjYLeJ^TveOU*Gl*YNk!NUNX9D z<%cT@kIl(F^UH$mtuJ4>+Za}z6n^Xqflb~nEZA~l<=JnOPI$Imj~TRltZ{qUwmC6J z>(g_#?`plH0K$gM_T4#=%J-9M|4v}x1&bci&>O_^1kN@tyZbAEO z0;?Srw`}jqRrQsxzR;R(Dtmav2d#>``ll!EIkcO({OpYx3k|JllJ??HmTld7{7U9O zu2lOzJv?SkluynlZEsDy_Vmu0RTmdS5=H`hIlnDBD)z_n7jJCSMuu%UeK1)0OYy2p zwMQ3k$r~b2Oxt^>W%)YJyGf^cHHBCp{<8TNuW^%a;|6@w-RpU$V>jTDD^KqX8ZI|u z8!a7cxxEq_cIPCMb(kMJ=2Xeb{9}h&(l#&j9x?dTg50v1O@Dl~Z^2i!ISW_)uw(6l zHTbHaXurr|0nFTIXH0xPX7u5as=M1lPJKRCs|uMr@uA(bcWycV)0&VCgGTtAH7YyG zXZ`DiJ2K&u)z8nV*`|rx5OuEOiyOwRNnvMaO`MySzA1aO?sV9fyZ*864!-8>uiM;P zR<}Q~I&`?l+Jt?6XLby(_;6Crl-8Rgj_+TVVSG0z=5}4lm)fM$7Y4T)wy6SIVsZUr zCo9gZ_@!<41z}fA?ZWRqTJhQFD{bkMYDa8a{;!h+=28j+f_J;dR7qx>-#h;~hIhpz z-woHY2aS11AwRq??rV={-D9Wwd#@#0tWPj9L-u!Zy 
zaa8N=B+rtNvKgz5s$CmC@|-$Vx^P=|>OnlIbN9)50-N~Rcblc-ZVrEC($fcXIBUYa zDq9~bHh%s4<*Uyep8MkRZOWM^wzU>aSUI-!tntjW(v~rWH~&62`smwdKgxOj+s1c& zpZA_voxUt}xA)g`8f%wFU7Kw_TyTC~X?e@enX8SFL!Y1Yk1;nUnIA77Q{X<^^xD>+ zFC41=J#)zZ-@p~VC7Rkt((^JQvGxSDJ zGGy@C7W!w8tt8G+wf@;R%11uBv`%yE(5>b3DqHeP@PB^(&5HaRE2N9b$r;3XYG&j|j{Ik#84H>P2(ht%SEXYw z0wkcup1|hAbru|%f21q)-+BqS1n&@YVU z2?zvY#jqD?D%Bmo!H@;sNt};8M%2EZ2bjMU@qU&sB{qu_C4Je|Lfno*sl>I{4r=QmOKIi zWiJQK$6gLvuwyysG;18RVRU=kRBRN$h{^&pO9~GZevg9O4lPVygr?;Py+PsVj|m4% zK5P>=8-fLN8bY%WQ_nvu-PL+=uWuxXVsIW#9XM^^|4$om=saksN3x^If|-dK^R!tr zv{_jxY4Z?{3(q(Vo063^z8jv)p|&b|GCMIDf>G~@&|D3G4p6H_OJN5TasB`r&vkqX;lC0k{A zY3WlEv$C`#L}?|evK$sz;#0EbYcs~C&I6P=Yd-AaMWW2Y9%NK53$m%~LwX?q>NcPg zVU)rjkOcPdLI6PF+pbqv3rdgrcqU-2XwNVk{7p)(M}in@VP@oF_$Jp(gc}fv##@+} zmXa`i;&k+gK)hqq;!(|9g~OhL<2nE~cH#_MzJLpxn30gyCoXT5LeF5tMPl;LY;G7r z-A{tYZ1#(fj7E3VA4J?Y!f%bq74c2X>;~mSeln~p2V^u8Gbd(P^Mll2O>y#gyFRIP zkm>2<>IP|rNR-qS8WF&lVd3126!=DYchH07IZEFEv>m?_Cd3MVXl3hlII=K!M1s*A zhHFfkfVib|Q5@1IQQT=@a0F%aRfq%r216$S5)VF@lM|O}Gt?RsPil4=${QNXFtN$7 zVz-oBhf!l1stmA&D_WItO-)>yLGmw6K`jYsk6ckoN_`Qn>bN3Lq z3(_6$Q!^LM?RKA(isJ94x4BC)76A?Es^?!25d7ho1IOPe4Y|F8m^i1AhBzrmL!3YU zA`OM#S3}_s)DRQN|0gx{$k-`&tGLaV9vS!UiEX=YjsZo)!!ZgDv?@85(h#i@t`P-Q z(rnTWXLr|7;1eXr0Bndp;|a2UXD*#FAHxDt?Q0#Dm~3CxHIPT97{19Ev*fEso_c%4 zsM(!480gQjJK44S+LYJDI+C08M=Jya+@O!Nm_bA zY+43%wp3?HUZb!_GhRhYa@g3ktg+Uxv1w_j2NWcxWu?G!Q)1?f`H7_UvB`z4v}ElP zTmJ`^H6aZx^})WmiCM{6i5TYh1BWwmB!?Yp2}nuG;;<0`D9I_X%_>}xx9m_9pL#Gr zc&I%5ArLTnG~#(7|>8+K(|EWz&~TYRtu@wgV>=KsTo;I zQ?+EUT0=5`R7UzDSUOA5W>N$yQLi~W}rgB|)9yFg1 zu;lyb(na&NX|U$2&dSKvT9dR1mnT zB-p6+6$^W&I@ZcR8x}X!5z+P{lBqx*p}{~M4T~k~Eo@jsgthY6p_5?7XuQEc?ENNaN7Cy??gB$=2xBPkK7)o$g;Cjc6L?*Q;rN?L&I%$8}C;^W$kG(>bv z(Mq#1jsp*sRIsiY^Hb(_>oriZ$0shzw8x1kR(3me7W6k6D2DDuo{^Q20(#zJSzPhX4}9Z2YwY?ooNR1+cPtW7FB+<|DTQR_q5p)Tze>G(vj zJ@={7IY!;hME-Pyq=}+#{ZTD)ubG*Sii0XC(^f=EJurx)25f4i_Rv|G+KJHGQA0>f zB_CI0t=QU9=yY(b)MjXt?dr5lZzNSJ|S+b9@#N!ZApD{r{fKUtcy9#8S8cPr0IE;W8 zyEBDZQngtXndKf#;eBPzss-ya&-!hllkJVXMkjPY{ZkwC$2$zsEh7s+k;uBFbQOvm z6Ul*k@0Ewgu@us^H62@kWdlf#99)ypP@Ww+HJuz!B0&vJLnnBIK+{Ed5-P%%plM8M zx!ObRO|Ug`VifVbhOOah_!^-`tTAhF&05WO8aK^Y&3MfO%_L2NCQ0*_<{iy@nvXR< zXf|jzX>v4qnthrZnp>JmO`WDy)23OY`4$|0{IKVSV9#UAu@La`9gdCgbMa$i>j^it z#+oM>%%U8rm1-}7rI8UMh&dXThNIzW1R4?c++dhUFC%UduGj`bjpb-0gbOA4xK_J} z6qKujD2jE+W+@*e7=i*wB@a|_Ghi`iE|z7@zfc>k=WG#*(S5E6{dkX= zB0L`MF;5KbK1&RJ&>XQJGsFka5Br=QLd@iJ(czgHYVBt1L$c65=Y^qt%nC#AGbgk? z4A`Vm+HG;breznk&cLi@M9z<>!67+UqDEns0T(q3>M=L!duG&4S+-e@!edxWW zM4^38h|qJ@aXLgMW1aZeld)1PqaEu!qx(dNET6+Ph`hJ5S_V6z_7Sr~ZQg4F1S6wf z)1S~jCqI2meJHe{$Ha%4Di|EIwa{eyZN^w>phjR#NT5gLl%GpzZ^Q0UUQi4GZ-ia>O6Zi6>^?`j}g*1c(7%G zUq%-BqE909#7~jS2Z`{wqh11f7#jzDbgFffG=uzQHOfFpo3YP0LTAA5vCY@QOr8wx zb6_zAmM1bG%|!4Ic>=D|;Z>XIkm5Xe$flyv7}fjtoeEjF{-k2}K1qPl-Ybw^CN>e$ zO0y1G1{gop<75N=O8u}L*)I%>ij6>Hj2U)1bmB1MfXM;?be6Ce`;@Ryd-KCnHWgYW zl|YPT$! zjPC!Vo=6%hZ;y<%|eUQ6AfYI2}!gL4M%eC707LKB7V zcr*|mgh&I6(`)HZ`QnklWN)vfuyAU1`Tr}or8v6-(O>Je6lO*I!A|c5(L+$*AUy>1 zfa!ta^jdO+??E9w0DPeIhVX&d9mMbE3}MHeNwC7{wM63W7VzJ)bTNS5!s)gIOH59; zC2~Ln4bSPel;w0=a=Ic^E+dM_OBxRAVO5g)MiSkeamt^GW zGaors-_Naxe9gn@yySFVa{BJy_oWA?@BSj}UQR+z-+j#KyKnJYcKYt4RUoJDKIJyo z_p(6GMS;GT1VWv@`^c2K!~VAQSx3m<>AMe}`p7TeIS2-OffxAgPMCeYA@D!y!Tvp}q$9GlyY_CH2#R2eW@`vAI zaL43-@*{)2wq&q@DfN-S4~_hHf7E1&z2NKxrRLxm8-DPB!G>Ri5DL$Ta3Nd3=W$pJ zR~H)X3T)QNq0!w~e2Ke<%G>9xFX2OYbG%eaxtPyky3%P^(a*}zTv-D7VS>|DQ#Qdv z%4aiNXx9+kg~6Ar4-zc=K%7#<1?qJ~b!AJuLkO%jFPVF~*PrfH5BNvuRfrqCzi z@x27oJewj`kwlSi#N=NPQ6=F(LU(M$J$B+? 
z;&YUEosGD|POP^Pf3g!BY{cz$Vxx`tqn+4fBW|-3n{C9cc4CW-xCMxo61CchWp-kl zjaX_Yw%dp$c4CK(SPVqaC#twk8?nevG}?#;M5L;t%SO~gy<3PT8&QYq9dXiWW*e~( zh?7)ewi}&>+lU1Oqj8Xjkmc$?%O_aB1-kP<7;ULbXF{z{0a|CgkA#i3wzJam2L z6eOPm>5|4<#B!lIa&^W8$T%SBz=e+A^F(FN%oSEuP5{z7M2K>BZxuIM#(z8%}LNUjePsX$k)mmM|GK z-@PB}K|v37;6Xhk)%!8fvmYq-ew7>|0A<2374*F{>qr79qlbE+{+VwXa3su0$$E9g z%B`>SSFU(9D`ieXbdWph(Rr3p%7}zn&%dZGo6FO_`24Jd5h32>!$V*h*@Q%oo{;eP zJ80;zu#yOF*s!5N-f{svp=douRAs(AA8(b1 zyM)hzXBvie8;zBT2q9Cz=du|v(!wz_M$piYW#NnxCeCnmLnAaZX2MJ~J>5iu?PlS3 zqu>*c#Q{ngi;ciyV8i?fEE0=?`$+giJA6k0Ukq@?0Cx;F60U}$4QGG@QKQMba8foB zDB)Nbc|>BNuulv+!hmBK?7jaG97ABI+Cgvx!&M*_1mo#IxC?-9G}ctZw-4qGqfHeY zKA0zrw$XT70cTJ6wtkf4c+`Wu9}Z_UsIe1m<188P$K zjw`0iSOtEGeeuGD^XJceR^-CypOIa_INpe%ZZi`_{7UyLPO&01~vT z*E+AdUbzY%(`BN9xI_m*{Ja+uEH6Jz3LZap^w5Fd_U_)feM^aGjehmo zHT)`m1-pz_&@WxQXujY@C5XgMo;-2<*x}#z@7w+J_N^uQynVH)rsaqgbYmqXO zQXHscyHQENq>dci_w$y*4Qp1fItwzp=HCXH8k-C<47Gy{D$>bgf9&7AO}}Z)>JJaz zB4sK;ruwG&1_t#@K>n=egkSKqXyWv#<3|qc-CneL?Y}W)Q$Cw>v8dNV3&I6q!E%GNgE}{P-XHe$@Z)`3D_# z!IWkYyi?iHO~CI8O6vTX6NmO~*Khdp!`EL)OMCP6*WdWZdc*D$H^@fP0%A2)L?u?$ zUTMBf+``E!54?8u>J?PFGbaxJx}$jG*B`&V41RxgZgR4AUdqBnS)Uaiga+0MvQ@q0 zAd45zojkf9>g?Ok-e0*SGcEO{mlmX^y`1^Vt1I7Hw`l`9pO z&X*rMxMyqO4_|)#?#fpeXW^Md7Uz|vudR6F?GILM+_9h?B)dQo2gRbi5;utJNb>Tf z3umE_KNaPCyL#1oZ@sbNHGDa-lJokTZ@>4!NB{hE?e-TtK(g}=R%O0(7o^R%F}w8T zii>AY9rbOm(HC&4vnBBf77}(Uz$J1zra6dfAQ6qU#?lRcCBt}CrH)btpO<uX!JT@18xocJ2W2l43*g4kX!GL$9u>X{xTji$gKV{N5DYxDK^(?#$^^ z$BrESU%!UT^YUfrYRLx(NS!(fY3<*;2PI^zBWj7dmfDt@R*Rf#`C25rsZx)42h5(`x)rfj~qIHB$`Nx4v-+I%#n61zX2J9 z1`Ekh&w{h%NCF994UP59b*;6XHRfspstG9^18bCl%55?z6%aXo6iJvtq6H+3JtV+> zDGAUYsRbl~1iBjVdZMAVz7>SJdI(8KqenWYvWAjDGA$N#NG(( zZ4kZLfnA0a+=KnXS;S2=H8!?^M0=eHBnU8XRFf{1w{Jt6=RqkzRbr4swSja#BFRM8i}Shiv-?Vf@71YxP$~~P0ejht&Q}C#ztd;${VeUWU41wP`*0AvZ#oxELatahTh)MQQzKX$$h6IR}Cs8l~h*; z(T+iWTHD&mQg$|Bq&85U;Wc!4d{KOD9nBCI;^}BcaS@h!#cOEQcS)AkW-^)%GMcV# zELb%aNds`8NKpArP%?T$J!u?x4HZM3ExZH5&5g)(=%7O~w8h+650O*yon$zz0T^ht zmhi4lqJ!vcCVARgJ6k%N%_xorR1`9X=2kMawXFkk(AkWWq4fH?Ix@7Wsj~&rhXhDj zf(!-wAp;v5ED=+p7#WDwqcB>du?{4uoOQMso2fujtif!o2f;Row1bQlgHmG+WLSNd zk?16h%?{zlW&-p^g|?C*t&kri*##jO6;ju1Z0u}>^r#RMgpd+-4aO#*w*kGq)!1T! 
zd{FdSh^d97cS20&W<;eymun+;w05=NBpF&?SG5tTse_E(*n(N;U0nn;>w1tuNpwJ_ zx?0HSYg>WZ0tq0A&Q_C!+SJtm)K(xODsG|HTc{MTHSU_OMu@i!h@EZ5R?NbC7kJyq zcstrmtrRc4)?`BTRtvY;LdSrPcVVVFhzW&tKn{skivnuffZkwihuk2#RV^(*r5U@L zAd$9?PDt9S-$qkyS2Ia%Z#T7BYOAge*qa~{6uH?(?KIKNCNpkwg)@9irY^dv%hcJ` z0m?EN%`liVb(%XcAfV>h)lP3~rwppT+FWmH>S}`+J5Up}Q11Y>xvQ0=TCBFd(p+b1 zA!)`Aibk);@muBwlG*`rV^j;MHvzQ~s3=xQm1=eMZTKCM+749QN^NMy?^>uG&~7Ze z4UG;|E3F=AO^~Y&V`nEuwe^N-yauFOfo8PP?$~IiPOEGsNoxZ>&;^yBhMVy8YcjQU zwS)azY1i;NlGb6Qbz0c20I3<+K$R9s1yEWbQKN-IYalM-^+0Ot>asDJiAyA{9rSO} zOM{W1;lvfZ0a)9+%p@7XMvfmFkTx8kv;G5VqW}^^+{5ALdPa2v(h&F-1xMdNS}X~o zp&g$oAnl<(fHa>00ci+cqdp0k2MLr}?|TDjh;k1g&2+VMfIwQ;RVIm|!RW|z*|p*l z8YN#~ojZRXjgiaC@zYYkOOC_15)GAiZr@sJ(AkhQ0O8OS9J@RaAkBDH8ioM4 zou)m2D2<{Q%Rd zD`~g?9G-T!s?7n}>OYoNSw*X?GWHPt&%1^?;69Q8jfX!fyJ9sKM00IJqTf_C>q)kr+Cf2bN&hzh_g(R>o2Y7_|9H>w5z6GCEpp=z!59f0)og6(>u zYApas0a()5)zAr$8eVU#bA|bTZLO)swfgSeYCGOX)*{MUMFq+h!qi$xlnf>py#X}~ zO4bcfYa>B2EB4kmphjzHZL(l$Z4G@RYG4;A16?qO2CxjW0qD@ZQ8j2Ah`T?i8p&uy z)!G|7?}e(7aM=J+wT@QayL(Satr0M?-jEsv!1WJLqfjr*-xM*$|xe zp9b0L@M;o{`zt}VX28mNf^3M^4P>h&@w4tA8-m@eARE1bASmGNuSD5m04s}y&x767 z5P}v32jU(HINNB8qgsF5)gC3?)j}TNuIAehcQplxtx-;F?trhkLtg%`x~p|ub-8xs zDokF@mx)U(a?FPyHv9}xj-AF&Nx;jEbXNPNr?VPy75U)|3}eH_Kw9F}5xz zz~%(loB-SZB*138MGV|iji|ikfO7p`@KS4XNUk5C+Mo1L`}2re$De}J=y$5_+^RA; z$PUCmtr9%jaB#9ANME{v`lnTcs|t7!IJ&6y3r=gfdl#m?1BKHX@6zsAywnh)M%)xr zUTP$wc3&?wth(+``>EC4t%bZ2B<2WyYT!)wZ~CeA15;}RkF!4f)Bw}DpPw4S)Oz`; zo&B%yQ@anMMqx?d3ftXJtqVEP^vh4}9Qe0A*iUVMh}r}F)Y@u096Y>~HV-c=qDDG| z^~+D~&mw9RBt^Qzp*ed?LksD6)^9(xfgx(h@eFyk^#PF~KQ((HP+rX7)^$-#7wU7@U zMOv~D0VdmzY$+}8dh0Nej|w>7J8 zTL+*^|21xFR+qGPa7p{qZfi||`5>1tz{Z;phZl4Y{jPz#1TGq&t@zKx!SGohAT%-N0WCN@4DSs}0O;4dUvBsI?lq zyRF?O(Kjg3U+A`m5Ve2DZLNt!%DTI)^@XS*SGI15T3>E!j)>Y{?6xKWAj$*Ys`Q0- zF8$yw%n)!X8wz`0qW3@JNVl|5coS0z?@ubh=}QD}QHn4jCWeoMJd5F8L|!2}`UuDm zAI^Mut(6O}#`4H-P9w(ibWh$j-xb58VwDf$I@W}V2EVub2=R*t=AV)J&c~BkDvw)O zGwd(2nJ{(`dⅅnF&T+-uYj{hB2#vUG@>t`sv~k(q9SViCq;$r`dd~dYSAF){1!u zAB)NAG_Mh09}>htABB!M_3llAXfK>VtHwGoFX1!Kd{BGwIze>(-E|8=oJsqrhFBxq zjbCXeR?`uO%bU%uMw|fn_;@Tf?!1ZM>=U2i-^UIp6KK=YcOUJ>Y#?(f5 zhnFq=gdlEQ+{B;M+(9TxCtQVhAAXYjtE%&Kadl-&)x`#)abX0z1>OXvN9H!02>PSJ z)g6TD>_O9w--z1yFA1}WF#cNIg@fm>`P|x-i(c^WfR~E7)6NjYfkJ|~u6fJUN%+BV zkZ6OS%UZ>I&D3_cv$eXDsCZp}^M=Vtn0LlS$hZP{ofyNu108)ct%g{iP}NRgxQXyF z7yoU3O>3nIypLAfBxg(jV)8+qYOkx?TTFEg_}=JCS7aq!U=O7oVYj65ybbHVwMVHxHV$}x0J0r_NHVchQ0jv2`>b|Yt+F0_YMN8 z{Mv7AIN^3MGpEuJSB=qKxbh|xD0m-YAz&T|s_Qtg{swVq*afI9S~gS+y?k#+7eQ-% z;sS6=W`Thkabw%1ua6L2$19E3-WMRc_;Z5T@r}vcZoYMasGmA@Sv2Q7#^eRB#cPMD zeg^SlLFE{8+0iCr-9&5&PPm%>_*Xo=9HV_)3F>|iTaFX7A0Il2F+En|t;80$zrnBB zzIOuSyJiyCD{J1!A&8EX+3Hgm-z}k{et&cA?yJO4M>@Vc1#LL)IJlWtL=HRn``M^p)elz&1Q4{skO*UZBNW|@o9*I$VBGU z=Sd`T9+xk3=Sg`YDNpGxV)u{=LQ)I{)5F(CeqSjDO3O>aH}i%_*ww&R{P7D91iO~nT)i^3AsF>K%^QH;1}S{m3ny!WinT` z((|5$v&nJzLcaUZA%h18`-;S#N~NDKSKy&k$V5zjUrHh6yrm)$mn-0lq^dzf0$|@M zq0(FBqY?>3lD_3|cauCq>Fw>~E)=kNV!6y!=&knk@e*+45{@XyOK1@k_pAk6Oq03e z@(6cNA8!G}O`ueG%9$dS%F|orH8@D*DhTwHNu=&#mOv`&DNDEn(0NjzE2LiTOjm|X z<*5*hcw(tS<>51Uu#CZ$$s{s4&XaJZzKR~zkGsaQgnW@iER=}YY}mTXLoV?WFj+nU zUIBv!3EAErQi)W~C#y zpi&_bNkpCs5!>t%!{qUW3J;Oo8$@}0u~fhoNrfVrjLqcmL}H#&#^du9V!p~t%I9#n za(9^!%I+Z(;XIrl1^c4$I6Q@rC*X=yzK|)gP$ZSGS&%ND8x#b#DdjLdJUJ4fyId&p z@DK_`LU)DPGe#ni%LF36#7ivk_4W~QB~lO-ibP0Mz!wA!5eubK4>4EBk$I`)LYdr4 zDHejt%;QA?0st~V5G;pw3DDxQVPUi}RDo=@6D)IM}2nBKl z?jEa8&1qN)Im76|?}W8TYDH{?NgLeU%E>15K`0diu0LkIH|52+T&H zi3nh;9FDtBP;;O>MT62yaX)s#lcVIZ6;h#u$CG+B_<8#Wc=7o6)Dhvq^%P4~ z9(=9SQuyDw$L!C1tK^G#eKPp+@DgQQDAYafc0;N*sF6PO^{2KzEQsvOQ8(C~NpUq)&;lV1q 
zA?3*w4uPF)76fy|QmM-QstlJfERTdX=orOvxkBmbf8CGmuaf#vj|!45k&2~0LDvFV zL4G!N21g8LB6F|saC0XgDln#f7(QVzu0>l(MZqXFjvTuVCr7o8XKHMV#<%D*97eNf z>nII5nniJ-H{1r3BibMs(FVaLQGNhcpfN4lj8p;dqDeY)u-uM3b3Uds7ZJsT&K0(q z(B+zeisuOnH|w_+=?%HLhJpfpK|%fincZ^p@^UxlnDg*_te~risVmUy3hoy>gA~{V zNs9~e^9ze%>x^Q3Zb8B3-1|v+TBK0ia2Q~-i=zC^u*-&_FyC~qg0L(?20gAL3dOp@ z!s6omO`CEGN{Z129fkSkzKf6NdFE`+(d%`(LY+RpKwn&}FU-lyFDTC2xLKz&^%m;L z0g>Fzc_qa>^UAFV5dspzkw-_-1Lop3Hth{wCd)B136uZeDSrIai&pH|X*TN{aFgMLD^Ucz(~S z#W(xtwi)vC3Up8&;4+k!Y}#C8C@R(GfkyLlinib-?nOC;x}qX|Z&|u-YYDD17Y+mE z7wC!$a|#WHE!#>;ii-;hw?Qm=U13R)uDB#`Q_&XK2*qYl_~uyM)-A)ujkuDExB;TAftgxgELP(t!m*{fy z^Kw8me^cJp?fM+ptp&1TD1*IQic1Z7phmq}=bBqkRG5e7ar28yOLU+oy)Ive7Y7;i z1^GDzr6u_V+qUaA>x+sC^YaQzi}eM%qGC{cks(h96^`q%V!VJ`2$cy|19e)Mj~CMn zIy_&kFD)xFZ2b{rVTYK4Jje-XP*()oz2xZ(5Lg1TrRD;G9_pgNP*jA{b z2PK69mzaxbMTMpUUqe5Xj2AOYigGsR>Pk!TVpCB^vC6>+`>YnCh+A4xs)t5cRt$Y+ zbI}7Octyo!r6pUom2S?{6&jizEI=4Qe>&(eb8-swjSuF>3t7-_K>4A$H5yb!+pG;R zug?+yy8+r}ejz+z8uXq9-6pEJ-iwvbDef zrcsZCQ!Y%sTyiIx4dO7Mb`i|ACXBFtmit=+e>x!!HlREH0Z)Ygj zq$?}EA0Mvg7>WvVi(o%S=#py;GF@SA-zl36gpN^CR$7>+(^nTs4Y~s8n0j|ECIekp zQikj3(8b&-5WlJoZsxM{^xXn=SRt-+Ere%C>5XD&n&^3D z>BUx+#6(^*odZi`SB&eQ?wJQhXp*9 zxQX38J!MQY44g$`5kP*va-8lX_wp1m8DfRIyMoOE?1RY!w1_EW^8_rJl+Q5Q#fI9% zU}_`xQi<;?#z3hl1#B~Gh!C(hrI)XW#+0i(-TAEhrN*ShcpRKHgeQ=As=U0V7~jiB z;=*9K-Y1ul7CA1H1ru9ku&}E{TvTJUuvc8b=CHsS8h`RC*{pgZ%jzN39SG zgc80hM=0)Dg*XNXs7Nf436&BTj4o2RO977r98V$l3=R^}-9!lQ1#FZpR7rYNKghbW z5wgq`aNXQoU}_~6DA}&Ap1vNwfq@)14>}Ch)y1@)pEM#${ayH8qa}{zJ@DmI| ze;HdMQP&i@Q66&y|Q_S_??GfG3dx1S(_$Qp)29m3)Dhrx%wAz$u>#@GTOB*!+V9JfTo7 zU~m~?4~3X35_`z`T$WH~mc$5IQg?s^eAP;&k6O<3K`1PU!tWE1qJBZ3W%*z!#^mx8 z0NV=%J_-SsErdyPtW?YsDFKpK`}-+5Y_2j$4UlYaSuw`sa6wmg*%2O|?p(HqkHXK- zODPhu+~mG~YCm5I)LGAjdkb>|gZx++S1t({sutibbdi4mt`7E7t9bno76VEvhlMAG z%vr`xR}K-tReP)CGO0iy10!#BC*-~?x}yd6HYEO_pdc@qlugIPUNU!8i>KO)^8gV9 z@<4w9{ss(*s>x3szl_CRqI55y<2siGU^Iv2L>Ad8BaYmLYmKmaCf^lf@B>ZU|us+##^jmF42< z%790iQgT%UPebd2pv{%Z5J+GZ)ZO>G58GEMv^*e4x;znO5^;g7*y_x^P)7E%-N`9o4Wnk|Gzg0=%# zP4G1f2mge{`2;}({9y82I9D{1QNZiw z`Ea)wQU|G~?zD(5g8ublaVtr+i9=302ztfhS)C-^mLwgEsB0r=6|+#`$UIt0wiuO+ zDh4PaesLRt#hZa-DQqW3pSyTR7lFNxh*q_iZVEvehkSz!r54~r*VGN-MkVG` zp@>KkKp!YVvLrra1wa-p!H9{>KNSh60yP+(B?XGfL@=yuP!=aEw+dQisN;CuJ|xre#gn zW~OInBxy6%BmYl(*8v#Cb+lK2dKZW)!BGxylTL^x7ZN%cFt)+*@3_PQ4v;M*B8k{I ziS5KKwqqx*DK2pbgiuF-=)EHZh^`7qLcO;u`_J3Gz1z3U$Jgu!33x z??a8HlDJxRx3bq$p|(V73Dh$e`uH2DKN-wkBK(~~sPAc1b>xD?=i=tASrs=uahZN` z;`I2~_N@EaHX4#5}<~0q&iyi9*#fGt3lE zGEX8E$CzVC#rK)-BNabjet=Z`GxN_##Z%2wNyXF6(+S127wttVzKHJ8=0lENEagZ*ERNPP3k5Fu$Xr9OvkHcqYvUxJ8_+In9 zq~iO{_mhgJn5U46e{TLcsd$=s8mV}Oc?O}F_NKi_#ShS|K;Ye>pQfKC+zng0L4;zR zw~oYH=d1H2-R-aQClv?k0!hWex?oaqs4kRHOb5~|lW1&q=x69>n7i>}-BcYb--6=l zI##{~#WQs?Nla$zW|NBN>gJM)m+F?1ieJ;cMk;E!|tB;`ep$lZwC4eL*Vz zQuigP_-ozQq~dRN-;#>I*L_baUZ-0}D&C;mKq}s^+fORKrn^QezM;E8Dz4I1k%}91 z4W#0BT?eVys56s_-F5DSV%m*%V~WS)!ZwT^Mk*dm4`GTYV0Y^bIs>6t6Qp5{5`H(J z-dGbh5ogj%*Nb6-7t?+;YX>Lccf0A>cH_mGPz|e6ld-!sTFq#NH(pHlq5Ck!G1%SI zw-hTUcrlzSz)qT=*qvqvHi}u@t!o|i4fPGfTv(n-Ws=G@I*pE4K29^9RIY+4uM^AP)V)b8|C8=LV)$H%UnAD5iM zxu2GO5U3N}g?qh=W0%D1W%;|{&aVWQIt9LynZH&8{BQw$dJf#}dEnl!g5Qh41y7-n zl48EG&BcbcL{43klDIe-wrY$w%={&AoTurRCno7N_e4xUd$DEAb_;o<76+Pm8N5=d z%ix_#Tt;51)Mc0siOa~DmAVX0t<+^O>Qa|sg^;=o>x$H6aC$A5r!G%{9V|Yk;O9wP zeL(W+(^6NFV=2xViLv-qL6p&c&~y zRbBilTHGyHt#UsO^n#mC^tV+<2Fth?+!ts0BA>?Ddy%69_p;`OF*A&s&xlQlotl&s zyOxb>td31e*0Z14z4F|IW%?Ikj~qEOUXOw}+#dR2FlJGj3BWHBv0$jDGCv=MD-qCp z?i=oS7;c5S+5b!nkPpJL9!F$74k-Anr&(F+B6J$CKk%Pn%&fF5W5g3R3zj7bT11b& z#i^qus2$Zj-UV8GFaGexKOjCoH)>osEMxBEGA>DgfJ>8rsmN^HIEjkxy^#0>5~?`XbnCE 
zI4z=&V$|H)B2HJLMa>pdsRK=m*5IRHUoGOo=qW+dqBXdt*;9))8dozJgaRFtLy2pM z97@G=BhU>wb>fO)ql}2+mLtoe!E8Tp69|%stA=yrg2fDS2ZVKUybDr3-X&oj?~<@W zyB!ZH3@sz*WMdAAxgQ&~K(=JlBP@AQJ?pM{eDc z=HDW#;&tP5BB;NCjlLNi%H%@f%(Vh4o!fRuQE~KtKig}b4KW?q^gSdR9Z(R z=p>a>f)4j?obH~qI!e^1Py(bC&{7}ldI5> z zzFMi}YO+98?5RZ?jq8aFLV=RVp~N*r4kfN0awu^dD2EbP40}puDFC%1tU(}%xMny- zE%4)Fh%z4If{e!~mGKytWIVfsC+134Zp zuqPK0SfEsB&VdsC^LN^EDi!SUWsxeMBBkPv`YJWOXl^8IIwHRX0xyzQ6wfO%&2r#y zLsrNo$%}G4GFHhYEO$`7!=WM33l=;OC!#SBCPr~Nb6^a{DK-YzTB$JrQiUrY&?#N|5UIko50T==z_x92CU=79ackfly-?)IIYJl^7uG+;Swg55 z9}|J5IkG~o^MykZ;gTB;FR?b5P4Ua`QBS1j-_&`Jd!NR%7n67?dZEaZley3|7ero|j@7?z0uoq(=KRYxlsfUU4FFHPtUjiIjkWri zfp&~!hpgyL+JZgX&W_%OLS!Qx*UVTgf3>x-W2j^di`4+-3AWrRQwDGKfJX(QU zA(L22MzB5;5;zcA!yOZub>lltnZ&qDBa;|+LS+(*Z8gzwIhtru$RMBCx+4*IOXL_N zgnJ|sL(&aW9D#H#0O?x%QE~WDar6;}*wI!}B;yB^xQf{CSi~uD{g6Y6D~B9PTr+Hx zQac3>M`{;%bdFpg<>bI4gm@H7xog2g*J6iup)JywNbI=nLR(JK3kH=O+7d>#1cuN- za7cYlf!X(1-2@JHfPOP>0`VyaJP>Ugap$P<_=Sy{Sw0-jm!zBFkXm2^GI0V6K#nE~gNcHcNg^?Yu|zVh)tTW|31JA) zX)?(%RG&7`z+B>Mq|+lSty>l$AWWnnSLA$slG-FupQH%bgq4s;fx(efB1gnxX4@IR zog6t822RPNCk6+XG-tNX2*RJ}2!ct1jFP;1C1*gy5^$P<2tf|@c>_3K7R&}3DfBI}U4Z{)bO_!1 zBI}d=vPcygbX2QCi;hSYnsh`eHN9wVBy2h&cipJN){Tg%SD!a{5%~wiv4Q-PgoyHd zYl2$`Je5kC`n*A;^Txm~M<|O_`4lNO2GC&K7})kr&g4Kqt_0S=IeLMQ2WF5c@~$NN zfTz;&bS>Qn6}zK#s3}Hi?%Y z7YaN%?FW5w!N#l48#r%mVW5>dd_bzy;R8~o0uQ809X=ox6L_a=JkX0tycE4qx&kxh$L&za&f=*JKBFDxSpnaUOSnBH7q29&3Mx9DJ4}hix4V*iRMGL$GOnJ+ zZ;x@umqmNk6;XQdkEN8J+lUL(AKWG!$XyY1Ajh}ia@vjW2;@)-HU+XsasOTxsq!gO z8ij<8x?R()y=30S6T;UH>4rq%Z{sl`74_AyTDQ2}l)gX;7_F?u{kjz<4(#i3f6g z{lmkRBIH6FPfo)@pIorK1U3>SSZ*gg z-eJ|ab1uFoB(@uO&v9Z$;dbHcE^%tqZuBjNy^#jUW7Nc!;{gZzx;Y1wNG#Zb$)(2k z4ffR5ev>Q5H*xml(oM(radN3$k`}>kPHx>|dpT~h3H=sq<&+@h5qAP-Za1Pr<()c} z4}wn$!xlh^3%tE|E)pvNN?h3GP~w6vhZ47mHcF|GgH4(Q5ew(Y1!7K)(LppsA`Rm} z%vG$sij~Lq+!F+;cHEeQ!Q^Zv?>R$G$|nHqk{|xD(Q-Zm`pvBTL~4AqFP9om(%YyR z4N$T2;=?YmrgBG45C)b_j@wn)MqHhXHuW)S-7l(SBt6Zjm?S15UYz0|A-d=(ir;;xDeLfli4K`12& z);=nN&5KYA)JX|$p3j*FFXmR3NdmNp$Y_Y6qMaH_F=DCVKS)jxh2sxCvS(8JF3%NZd=j0RcCO;a6-hq(E2$l%@PKiuGj9|xu z6FRc+F)pPjRS>EttZwQG}v*WDQgpz4O)ppy5ib z0gx(m@PJOKhlfZNx_F2bH}WRlzBx!dPkm(ilmp`xDdQ?>%6K+dih9tNAf7l z=-DiiO;xz;Hg6YTofBbZPd+`a>Na|&m03|Ny_Im*F*5X2Lqm*)^xvdjaj7zz5jPoLe9wetkaNGuN1Z9)s8Z4U}H>9%3ace4@ z95<=5$#J_Xn;f^R&XEi3stTSLMAq<8y}!M z98*iJMk3S#?NPz=>?<481L!RQMv)yUfY@KXKG7M3To)>TFY z&s$#o!e4q_OU{80r1bGI#pyYy6D&If?i4&e2Bdgg*?vV3@!C+WO3f0GD%{qfTJaT7 zJa9k~c_7DEK&QxsR-T-rgFd-p1<%{#c7?CawijUJk&C5NU|Jom6$EeGx_S%Knf8@SQ#;fkhMd%CQ3aBc zCtkZnRY8hgD39cP9*DXtHdqDEOS=(L5+%&^$&*KzH%As-L>_@s%%DrjlT{vpUd%L0 z(KGVM`~VeE0-m2utp&*ZDgeu;P{{zISHbi=w7i$~pz&$?<%vmp%{?&tDwtjc)2m?m zpEQ^rw3u5%hzMuglG|_2to{^{8u!j|YDYEWYk)jjG5?&2$2xf8?RFkR@2Jm`csK_$ zC0MTI4VXiSj`TbW7JQj?yCNk5S3zdYl5*l`yu~(gjF1tD1unA^0KK#XsrY%P4(x-V zlOx|&B*+GoxJ9&gs>LaBIhR9;+d?^%xP;p%r95fQ6Ss@aIY%y#a&l}Af*=wRQfVny z;qxkd9)p9ZXU`**6|8*@+#D4?&&$uq9y;8t$|jd|9YiKhU{=ZDHes|)&@!S|!kK$v ztc^@-7k?ClgVfMgXQFaLCT2jVEq z+1c!oO;I5FFS@r9Chz3aizzy@t8Pi zRxE2tK3v`rKK_@KUOj9cU>GVsUauKzrZt1Cj|@y*o}y2}pBT`DOI_7@OJ4Pryy`D` zHBj=4>BMLN_5~JsknL-SI;^oT%bu9qjTWQ*vRnJ$!Rit zX%djBxO#YYK*iOIMuZjN>TM$HlHac4@!3PTRXo0Mht11^@UZEe*a0dY-vS71o^5|k zCU(ZoiarS-X>A?VjBf*EHrtggvWnw}34Icd`*L*Pm&o36vcN~Q?clgzkSL+jEwYN^ zk9?Ge)8S&H;;_aT;%iF*@U3`*tm61p96$K>+`0pzgj;`mD?pG^9yM5fxws$ z-^~k{X~j|wJn`k!zF{K94Jf4sM8)x|IDXmaA)&yFyo_4>ZPZMe&E&=Oebc?%Y23J|>`TR@^0j*W`rx7ub%Cn$97fD(`Y*~@~M`2%da!u$tR zsx$$DNR`@aAjLO7vigeKdfV>BnH&geH7@eb(F;YMoa=)Dabf*aob7{Z@iD=N)8YE( zTD}izS1R&~Gk$2Gm5Mx&D$MzzT9t}CkSY~ej$;w8w10#8o+ 
zL7!X@cooNwD=L}}F&hs9tyJKFRH?uNsZxOlQl$b9q+$Z^l#K^^F^QL=7mB=!*jvTMhUscdrB#7)}Ya5!($9*1Lc=VVUI?I4HqgwZ^~h}v|Bus;w6^T@Q;_HfE0 zJd49=GD&S$0skuC-?|kR#0S|oDM+iIP14E4qRPh*N;>>1-e1N0$EL)x(O2dt2k_;^ zPzZ1<74MI|?Z=6&wF>uF;r^U%No>mD5d+*2wC!*al{6*ph1gRvO94y+VHr`6?dJo7 zV_d?b#wZ1W#W+l(iiC0i2Cp!D1eEw5+g@tK>%@0FawrwbxC-~TB;UCW3rxF|(sN-F zVRnZ{jysR`bBHVsLr(Alpmsc5U{5Y0JV1$u3gl2Km35#~D(r|XL2W~!?-nYj$UXE$T>n75Ip*dn;Le5B$mwwVx{BhTAC0lc4_@n zoG65fm9BrvQ-x@b6gqo|RQVJs7I~;uOyoKBCM;l9?*_=lBwm7CDDdQTA@s=wfmh-F zxP3s=A?7N=Kr7tbpgB@1@Ib0m;DJ=B!v~~d0`HWK2YNAym!cPnJUN*QO>;ryh3QzE z0N7dvi6&-^AOt9&a1%-e0rC}Q9ZU_fjEM|IJP17;w%(La?%XW-4YW=*862V6>G{_$dsHP zh=RB*27%#;nOJs@;SBW%9!%iHZ@|XRh)Z4_AG>ybTyhFLz=6in`lN!6i-cNB7?28f z55o*AVUqAz31h^uk_mR4C)$-vLM2QWBIm^=Bqu(VvfA0 z9-|5*MY!CZ(Vg@uB#k#F+`pEPv0_>sMR;oD-X{A}(Qt<nu~ZVZOr&AdaO$Z$sJp2M z>R#$T>Sxr`)bFWpH2pM#G~pWE@ZQ6{b#A&|I(MD7&R6HJ3)BVcLUmJh({(d-vvqTI zOLed5-q5|JdtdjN?sMH2x-WHK>%P@}uUn_vpxdvzrn{l5(lzMXbsai`&Zsl#%(NTr zPJ7dSbO1e=9zqYJAE4oh1l!n9PmQIXq<%|%ORduk)P(80bUr#iU4Sk~7ozJ&_on;M zUbKe3kA9NA0c6}TLVLBE(NMq}Ow?{v50v7ADnLGwXVcN3E`q^bPNe{ID@cqt+i3I% zOC8F%WdTA9NWythBp4^%IvNRYnxZ$WKxR{*5M>HKvPF*2e1>t<>vN^^1s-+etk^X#PD_km zrdb{vpR8v;&rOMqk6X%>OkIN9% zRQL;hQc4_rmz)VbZ0M(^tmA4esm$T(e>&|H5PN z7)}zZomjYV`SN5~%9yUrk6U3GK(^no94}4MqY7e^U{U5ST1IXc01s+7)Qfo7;mli^+R_PN`tOc{;R)EB?O@d*5I4N;;B1|RpIDUa1Ms%t5 z&Uvv3v0TNhz*sLmx@O5*?h-diGvboirXd+zoV*4c0cMPusvcdFFf~4IMFMIHx;`J4 z`8BaC;1*5fgR!e)6ZG&T6*q zS0hqO$)Yt&(2Y}9F?S-Hk6&N~7@R`ns#z{Q9=9y<1q%f<)>67KY4yt31iRt~*TBdx z2VG#~5}eY-)1P>3^^C+95~ePD4z4j>hW0@5eEss2N8?tkOu?_9;;GNaY3^Y(i@m}6 z@xizmpns@C4<$}Z(#Jl}jl!(B`1t9G@rg-{@`EzyV^^U!SVb<1TeUh~|8Q&^j0`-z z$KA1b?P}0n(A?PgHDEE|abeJ7G(+6C)nNr*iO<8(a+J4{_q2FDxP53E?6@+Q;O$6g zB2edM#Y1mcy<}U&6mV7y-4K@qZZmraB3SqvebU28sN0sCSTeg}MsQdmL4Q7&7zefp zWD@>{nNL~CTDS#qpwjc>R>h@g#v_Y|F3$kf#O()L#ytcG%Ly9hsF?{%6OlPZ?l2l^ zZfF^w+M(0Wt`rZ0xm=MHyL#pASuJp}h5j+4i8_g%VuUfYC@*;I`~q;=`5)mK$fbJA zGrKGSDqx?|Wj@aU`@)!XkUai66BU810K4cgk_jz9*XOKVy;6@1sbyZRpO-zJn6!*p z)*uW4?g@JV*%GkwDYj2wB9rw?*QCV##&Qd@n*1^`L9dCPi00AC5tyz{eOZ6zV8_84-T|>6`NlgT72Z2>oL^KndoUcy-Q9(n&Ugi`E{RWAc9S?o7 zU1ce`I4fvt!V0i(&^dfYpqHHcZjoj*JYYnSs3K2)apDSC{6Jq=1(>!bDK+<_aMPlcL|C&yr;+B)&@Yc&6Q2UbtS+&GZYgD#9rjMRnf>pC55{W5 zVpx1(`CO%U%5|Eie<=={iA>GIpy99rK4~>|{2Ai^&DLYx!*C;s`n@nI4z$AJxPY2N zTNWlf9GkLo@uRSo>47<8zL_7;qel~8n7syx1*=D3OxLVR;FmD!#;u*UwGlO!v2&m= z3y@s`8A8K~YO=PA6Q+B3n>V-#(PI0z|GgpL3~l6$#HHYYB%~}{@*H?2tb@#zf%v3= z*0F_i;+BEAV|0{ppwS>&JrP?mq+1_f2`fS}vTs&bc0ugw)vV4VGQtfgjzMX_Q@66T z(lVE#tmhAi5yLzD#E2(4B!;6A#E5l{h&4~$Ax=HP5jEA@1!5f~PV9KZHi;8QGtFkT z{2}n5VgF*?kFhQmD#KS^RK94zWx{)* z#&}qZn6)he{KvG5E#g%{wJlaYvkU3_c=133cW1=|}et#yKdBGrjl%c>Jtvjk4m39CJ!PHH2^LeNiY zJ6v$JpD>;O_BtWgO{h~dRHUEGM3ednufQaaEljj!!LZIJcsc*}I>E^y)G2>HW5oJ- zuYEs3!w7ZWUR{B#0-;X%{gk7haOnHLy-q*~kSe{sgNu-ngr45!O39|^HdjivMd%H} zx6kH6uI@(|UJ+|_dsj5FIl9fm`(xN6$Y+w~^!6I0s2{^3jT5m*w{1s@ z>GoDCVv$5Ey%nD4I1!5^S}C#`XoV1Zm~z}B0HNF(m67ACj1#d)x77@e`!OuiII=|& zcX`DGN!E6PY>{+g>!}VTOz1X8lMRdx4n!ob$vW83+wmaV3ugO5+DkEZnk3M2{6n-) zCAm$vRg0oURQI+DbTnCYZ?CTwEuy-&cX-LFdwbXQXt}C;E6nkDvf7E;i(69ZkFms# z7tzz(sxsrrdMaO#`Wh_sr7v*zdivy*VJV*lima16%g zr-v9qX&MzJ^uLA@QjtLaWk{g^Qiu<76&Uyo+U3E20J$Wr089}B%x66h4iV?bk>KSf zQF@u@L|E&<-3cav4J#A+&7>2f5q!WU1nA_65#S1{5Ks%+g#-a*@i*24|GyUj%HiYw zqX3{Ff+_&?rvU(sh#Eg0B0=~sD~g5%P`Zo+g{L}oF_SeBlO~Rz6s3vKO&m7~Jpg7g zkhdNZYEjl(iV2NCQD+_|G$IB9JhuoFN?0QhB;N5}lf1rIO%VfMQpyETeMxI=TiB@y zDRBT(i%o{-e-T85!?DN4Gqo^aKMPpJGOxdg)4#whaSUdU!>m{VY7DLoA>{xN3`omm z*8MZWii>QmSpXt}d^5;5!nlFN!b1UEW$=kZMI!*+B7Ho7!Dhn)1FLN?tVnonAt8A= zpn;gi0?rQ}K;vNr*)@9_z{i)7AcW9^=_}*nmjRG|sXjRwcC837#1w%a0DREW&1(k> 
zU}RbeD>FhfqWf=o8z538-Ua|Cf=;tT#XuF|6Z`Ix+tCzQ4gOP_A~`BZxVSOlfSD|M z6MGzH=7KqTgyCl|&ES7vL-$rAZOGNfV$s@+aQP z6!~@*HUuG*9Q8%|70hD^ksN>#38_^j*oP_@hm{RTS+6icGB<%E|F=sEV`{iLa$)Gk zM@LMEiG~BAZR5wVMhZRw-Xaji9)!b`*`C#~ph(d{t&CcOokR9(O#;QbbHl^h9{e4J%{N#ziY*;fWSz zKatF>2aLUC&68F4xeWySPz?Ks2*Rra3jzL_QxNQ_AFh!1)E8RE7#KBUu*HT8o+?P5 zn=t)R4(!O-X*qhtOohWAkndt1TC>5eSx*57kpd9ozmHQ>;c>eK5C@(Qe?39XgimTj z*!mEK`d9|{SOyr}|B{F>n00Fe;V1D7DQc}65mqC@@Z_@PSxwtU0emc8nfh$#fAF)R z|6>thI1`QYSYU$)#{zJknZyQISxA^~&|_SU2(v4K8WC0_!mtCeofaT{B!C(b{>dB( zfQN!DpRB=J*nHoLvjNnIu=FVbTx65KM#v5c^NC%E3G><9?x3(WO5l1>7;pAZFDi^o z=Bax_lOtj#PXNF;Zv<_T;R%`vF`DQ}C`3FyMg!sd@srWViBX#I z@EQgvM1fNIL1P$E@u;y>%gAA4DGE~Dn7e;cH1vHUB!hW*%*JE zNp1c&M(I#?lV`xKPjFyHTzyzWJu&UaKQV3cw7CmrEl7Tzq9{L#qG|Yz{*9yDsG-yd z>gSN4o(|u0;Cm{3O@mJ};O8{>Y5P5$xo;NSGaJ4iq-MeIIn-R{o>}no7u5ezk5G%? z=Tp=(%-64|Us228^x0p*cPyo+ehJs-F<*0mdM3krHq`fX_?i#geh!CcKLTm<*}!!+ z+;KmAO@X#N0LN-if&1>IVwkVHs8P(=wKSxj_;adZD4u!8Usz2pp=|7x|3+-{H|1;sfd2r_}rj4jKXpB(*AG7rT z383W@3r)QvY`K?nZBdZcpS!I1f_-X7wbP7 z9kKq;U}hZk&C&d$aYC{(iy47AFmE%U<+I_ZZT`jjzrxc0d6xbo*}~_43UB^N{paRi z%k+CR#r7Xbz=QCubp8iX13(tg*8tgHEFJeVlnS+Q;!6Ire)oeh!sNf1_BNXg-5qT$ z%}q_`8_iAL%`L6%o!thb*?g96p?%Dzp3au~o0Zoq$}g1B<#dJ5wd++i4K3|m29x<5 z-AdEu-iFTR8dF(Usj-AAHedC=a{2P5OBXL*IDg(;6n5_H*)xTOr%#v1%xLR4)($Q@+7XiJe+1TAuQ(9KuUDl_x48DvdbTNIk7jQ9O8iBYJ6`fe->yLW8M%TC|0@!;9(^@zm*p$BrJ_w`=>B?DW*{Htapl^0`E} z(H>?~ck}ha{GJNB+{kb-m6}U>7hk=4#eCVF8{9!?SPD)aKXPdA&TY9Fsq4PU0VYh3 zE?by1Rh-Iek!FCV=D?oqTe8xA_~x6PdyidX7+hf)bTyTq$ZZA&RGATL!3Ab|nHHRa zY1y}HTlS_OzWE~U$Q}!Ws}=^0WhZi)u9+&Ra#I<_u`pd8jyiDx&F$H^n9Ljkd zYOkBFQ6OD3^n??0^JR@?JkOmyee&?WZJ9rO@zI}m)$@5&@Oacn@BmFIEP#GwumAGn zcXHZT9!J=*xW?mg)53$D*3n2dLGh8^7o9s(aN^MJywtBgd1rkO&fz+bL$xS}b7!Cz zd$**2{pq{^qy-#q@HkZ6FkPpvnL)VBHsM0k8h)V&^^v_fuNPqmsv?lFY;Z z(#7*dXA6$)-=6iuKi+%KOa}*-*^EUqLqc?l( zxYY8v7^=*b#;PU(mq6eG>T>?Ud1LtEkHEDjJ&8&SQks*S`c> zbFt|3@%>xVzy9RyE*l##9LN&a^Vr<1>cYs!l%i%_I$wBf|F-mR|Mu>yzez~=t zy?*UlMR{rQ<)Xrqhjwqv*|Y(cIMdhWZ|JY-ul&FM>Z`B6{^r}gr^@R)3>c4k;9-V2 zr(tO5>vYA~vXbIUMW;_3+PhG+t$3zIoX*R>A)-fU_nJ=rxD{} zs5RHnwXHSAn|^1`fAZjml{ar*zEXJVx~ZCCCXTj@Q4uVFM-Cm>zi;RaRaIGe z9hwWmaPj=Pvjrzl96x@{beujGa`f<_1AF)E+Md59H)qS<3)O8s7~j^q*4ogUPxu85 z44rc|f98n3?!Kd!-amKu-fJ)+!Id`<&r%Tg^Q=QxXgD2w>csIQ2ls(F zarE$ky}RwF65w?sy|OSYJ}}hmpe zr?N~{zQ7mUUC;$kaPt)k{LoP3tDHYqcp987@IbfcovLVp*>2~}wyB}1o~{co`~JHZ zG!dix1A{)h89R3DtdEk14}QLMv1Ux@pw-nN3@~jsMqaO|C`US5QUZ%6<3a%UvuB{y zhxYB+eA140PXk>~H?-FeeQm6EKv0mcfAGW|b$K)H7#ZHzcgBf5krQ{mbjOCOo4~$L zRppHvpgBXC3QAB9Ec}tDc=XWz+!N)(mbWz+>PMEox@`NS;Y0fe26&Hr>3Ch;C-;n+ zGWM}A3u}Mp6P~j4*}p7KtzlbVi4-&3(tgCAS@qAul7I5p;T(JJEe)pnv0o1kjr(?X zREVd0K-jqF)88CAWbEe~-n>{_`-g#pMh%Wy5xL~e-~I5Xv(+He$bf`hg8>2q3+-pU ziVH=EJ#cUAVn>~`giXzjbi*B=hlc**!0|&5_VMU9WblCS0e->rO4n)@e|V_m{rN-3 ztWFr;KjglLhkbPOCP)nhc3}tzqO}6le&B!lRL+Sqjy?F_%%W;)YHhr$WOY!;9TCr7 zOYi3u5IQI{BrL#l{^t9KMUH*CI8HZo@?)d!2p)FNfS{4@?<}|QAI$Lw5n?77@h7l1 zG?^ReCPQP?o~H)+`TP0$WS$K3@$~Qy3F_X1^S1C1o{Ll`sYUxBeh|J28QVFc`w7t<4M9kMr@Hczoj>W2cS^^7Zoz3L6;U9n?24XzNxR3wX*B|d6 zdCw19HA~lq4Vv)8uT#y}LpWhV!b+&1r-g2&Ta3*k8jgKE;lZB;1^I{F7aB4weE3_H zONKAm_{1~CjkVQ{RTY=7Y(Hjh==bS};Gr>h4AVWZ_NAXcurOiG2ia%JKqrE&wG?0F zRbm~c){Yi)^T1r)px{A+gU1XXF)DoAw2_0JZMypX#m1Ui5VD>I|C%orPo4U~JD(qT z_aB-6EHG8~215_5gKVsMoVoF7ZKGTJeH9Ke7&iW)J4R_o&HeLJqnG3!E=@e1q6AIi0~Ygnft-v+74#GXfRk@t3qr&yjDSA)Y=|cbAHX3sFPRc zdH3<|8|*#hoB`&!qqzr^+N}Y`oUWy7Xz+Iem_dimk+^gOe)Pz=VVceD9|gMm51#d# z?JeDHoxrE3+1%vT*Z}^rg^xd4`fuF0#$&=6+RnBPzc;7e8Q3eO{UdFN-y?-)7_W}j z&K3hOp&LCL>g((3YHMrEH~m;9JRaN-8#}u?e9PV*GtB#umTv|R2>d-7bJP%Hv#H6e z(Z<4;VL{-~(@A&Go#qbzmbZPzXh*!E4-OhI>CFp0?OjY;Oe~)Ut|>l<4}ncvXFx}D 
z_UK{rhL2f1DdeGvzWOfU&=TvFB1(ZP&G7uX7%foCr!V|S4XFR$ESURN3R^985z`n=;L)JU_rOJWqf4N_I~c8k8T)RsS6$2UeO<< znH!)rb!cupc)YBAF?7+LRF|>S?@-*bVZP&^4+{zz^T6>wb+J|3 z=gG4TNaVc1Nhh)C=rVQs)VF^WG;qr3;X`8zIy;Q*ZkM9lEDTz-ea5j2ylQI*^zE%( zew{7b55BP=e(~L-HGl1F@1WbMr&_gcU$>c{0nJ+KNu-13I?q}Hc}Gi^Z_)S(XWouW zPka1{SFf2nJ3FY$My=aZndWx)%8#DYYQF~85*Qh^bgQ|=E&U;*mO9LGBn?YDFl>kq zxnJwAdwu1{^Rtbe-5pe>N$VC}WoYl+R{7@F#x}&vjF?@Ih6`VU+<3!46I$11>Td7y z`6}|`7efXO9h7sds;AS?L49M=Qd7Iydl)_@;6ufkw7ouT0zO{#L^g(Qx{K~Mba`L- z>Z?~?T^b&mSlroZ0#4AE6CGf7P>T$$ZY{v5_lFSDH8*(H6WY@X@wG1R-NF7}U7P3A zC$X&q^olxR(DqvUadlU_nPYPUx)t!RX5iD<&_H0*(gN-ADM^T#`O2$d3*Tq}9ipD@ z()ODAab|ZrGtzD5R(7P1z|IrtIDt!ZQ@3|l>#-R=gCjl~|COn$v(xQlbcfb$YNnx` zy3`C)2x>_;c{cKBxdouPr?J~BU+a74{FjD3P+;up0$L=q9~*(RMcX_2Vk<*Tn&Bo> zPfNE~-S0!*+FAPkz?Zwbx;x!=f6}3)cA0<}1n;SfAb6nBo=v=(Ikjmt^w8aOPiMDh zzIMXA#3hmMbVAE}ef#m#2Ce&Bpo{I7koJ9)(bEcX^kz~68ryriyS?h)m_N5~zfsSg z?Esy2yD}AoVj)cFB@nIl?luEZdo~m5Z)ol5>GrBBJ+2*e@4Z_|HzcJnQe!?#!t-OvD2E5CYf&gu|Eq`IZtVftqxQ6*3On5pr5PxD+B{nc zZLT*M=pM?@+~Z#R+lX2Jce$(Et@wGcOrID!sVgv8Or69U+YPNi^xVKvU$%F2_xkSB zwP0P>e%jT=5+WmMY$Fo37}|UKH2&e9x0>3R$?1e!&E#7f4NX1hCYVCFiQIuYXnbpT z2i(%#(bMkPM(Sh@@agDo13q0~doe!T!clKDqPyBHoJ{y#)^Ro(%?8TY0Cl1}5kI<} zC=B%-a90y(0cw3$2l1YUHn_*F$AFfU4v@3%4zfTu8|z`P(LJ4=#tx#=H8cRL7N`z2 z$|Q16x3QPWXfhl7z)$!x8V$XS1|vu+NPrP!m+CPhp@V{6u#Or!y*oNdW~s5()M#um zbUG%%f)tCXSDA_V8xjmg zs?=n3D>cHO2CBqhFqiZ)^pu!;+)8>N(bjD$M#|?&S|%EeW}}HZW7hUQQ>HC5n|i}f z_~dq`%%t@yEY%hojfL7$SRvr=5|e>CZPt38F430sz&9vx37CNbG(^OOwz126N4fS) z*%>WxHNhABWmO>TUQnMkTM0>jAv=&$!;9FZ@Dsk5q6c=cVjooH) z4NYCU3a zYu0+7MLmJR(UuvtpsXsGF%)le|rVhTm_NBPgDVz}OBY&Z14bre9e1fgI->qZIICi;rG5pL)(7~lq8Y!E77 z(zs0~`YLlrCm0~ly8OcH5R`=IVJjqhnS^mu5A9~AOU+HdrPE;Q@u=HdRLR6bTWLQt zlbFu??$dYQ_`|DtulT+Chd18+^t-&Hmv1uR*H+q>&3+%qOa11Hf9C$f_n%*UlbUzn zYy}EygUd_%vnlqoNB8FE<>usU&8BmFbMx}|9z9!H(*pStbPsLzLTUU82$i48KH+n! zu;_9{O*6#j;VKRT$Sw0Juee@$v%aMh@&K(6+u%V6av`4O+1%3B(GBo`7P<-eg7NJ! zcV~B*yZiJYSV1%FfQ{yKlM(if7_BSt17lgYGWdZzJ2r+gPX>Gdx^FD$Q+yTTUFOSP z7cX5xao?gM^SMEg-8~I4NEH6vxh*dzV-p#G;8q5CZkO-}Jpi&WO5z3(h(Qv6U`F@@ zlZrpEY`0bX0fQ)EA_pq|z%~{t{(zUi`cd%*+{Ocv^|uCoV5(~5rTr0zK>_^0Skcx&LJNZcxHU)wl>N6N5zZqdLOwzweEU;HBKTHZJDp!pee)({3M!2DKmF8@qC{^Y`SfPusXIBkKf0(1cik2q0#%fe3@D zE)*ARN;_Cpb0i~c`?VXpf5}V0Ar;{gvR=zb=5^hhf=fi0IG7XcGsq~yfeo$ zzTZ=|J$viMb^8%gU`!PN1(X62+>fQDt^Z-2IX!e+N!``ljpD$YTGjmFgZaRMx zAq5lz5vq^vEzaMtVf_yqvbP++R#$g6b^Vsjd(U65J(iYvWbdwmJr@N)1WR()DImh2 za~sy}E83F2J|izXbL+9o1=-)NKXdWqjoR9i<}BaTO$YM!ojg`_qJje=cypQ25&$;S zm;F(WT?&YBZr!?_<=4vdH*L<$$jM3Dn3a}aeK>W;>9Xp=U7I%_I?_Vo&CJ+aePDCmjssirV8U}UvkIw+~u>^z+CW=2xyRsiwOgSkO+M* zZrYTUotC>Bq$Fq0xq~@-D_WQgeQ#(u4?JK6B3Qr!B0z-3Q`s9+Gq!Ha%1qmox${u= zhOIXc5TSzs4{)Fc2_OPIAHi&^+S}XO+uDN9Z_VAfY0H*fyTR;}J$zzg zT4q*mUe4CilEUjv(qIGX;TI~_^|CJ0wZ)bcZ-1$_&5SiJX$*f zi+1H^rZR_!hR%Kv>(_6B0vd^>^<$@8cMqq?4NP8;4BQ&1doR@X5{W2)? 
zX#pO=iX(80M`*v0oxdgf)c&+h8Qag-DZwMOSLEmI*_gH?FX!N{oZ|?OfY22joWKb@ zLe=i<&FNdJZ|>W(ZS%=Cxp;(tjtjXtxqJ3ptUk0U?NBqMFHr-WhauBn_^xU)U2FTFKMlIrLLh7%#bAXxw`rM>CDvi8_RD(LQN4G0cCI)G(uzXl~ZT-f4`})$pth*Rr>nk zmVIfNCpug~BltF*+E}PR6!v*W2Wv z5gH+3 zBYTQEySv@G_+SJRLx|nH>ksbGgf;IjG+Mw7|t z0r&#|Aiy_eG8k!thtXhwe6=6 z)r)ze2q6>TY-0G1NVS45#WRe zm(E1^c+OuX&P0$6MQDcSDaRg8`rUoOGmC0Y{pGK}j{E-FS^F~)APFq%OoW=pz5GIh zXI|YleOQp2=g8QY*}L}$&P2E%>r8|Mui*ICwqBVRHvHcE?+hE+&pmwLZ(TnVA$9v} z{{Bl!YF-&O^ufaOj}5#pWpt36?-QBI&qP2-gtG6xT@!Yf#@pBbpErM{)lUE5*F*cq zl{^wQDkwAo9f=@7A~60f93UorB!U4wwu+8Kfb$#r`uTZy`A2WB+dOTwW^ka#)Z=^Z ziQe&|cD>}02p8eR1<4~3N?u-)|L~x}A-+C+hP?xTZ|@+#KySCvb0Q|q9eP*hb9W612?&}V9@sa0 z?7)8aJhS$lzaPbr2y6J#YW;px#g zctqHMkiNgZ_NReEh7KJWK4|2mKRg>bDj*;zWagH9IY%Nq)!W-gpLW*(eNLpGr&k|O zUvGC$KOgVF0AEj^NB;5Q-NS|t4h;>sXY|~~qehI>Jbq_r%$@f>vHHElzuJyOkVYb` zi}dh{K9)Lq)Rd9_o?c%5gF=1W;p77U0aHTVrwsJ<_v#luu>as;L;S)f55Fhnj}NWX z{e0Tw=jTK$_@0MEK$sB10GV3an`0VFe>>pOEs6a-2F(ig^6?z}?%P8GLWcYLzI64y z6{*W-Pl}oRbZXYF$2`2Xe_R}V_Z{DF36FhYP-yhyi5p}f5gLyED{A)rzJ5M~Cj|$D z4IcJ-<+DSdP5s4_SD7Ob$}e5cM@J%jGBjZDgwbIU_rCDr1NY94PyTCGp~R60+%%h8 z+*?~(;rs|AJbyVT=kES~{rdZB!-r`G>ZWKypJ}@K-32%qp|Qz`p6!Qk|C-Mq{n?bi z{OPm(@BBUE@2AYw18<_I>XB+#PYWRo7D6Qie>o`FKWLzCo>ntr_^h{|82;1&Gn1;z#Ck~%~Le}vJ0#w4EdijQijJ$LDOrPGpgFxEGW+JGG zTvP%akB|{TQ68QHwGqq4dwAV9NgMOWN~MT|7l#GRTvpfcmxqd zLQPT9$cPhH=6F!Ty3-4)hzL zhl57^LWV591;-;CT)rgCGiv2PzrYcHR(w3dhyMNV9~nMm(kVMYf`j7`;5dSo{DZGQ z_}n9RjtqNW0Un`9H?Hul6`M9b_KV+FNZ}EjKOG@1AZEDtJ%3#J(VQ&Dc!YmBIvwH5 zyFOeK5IST)_EDv$BUteWSHAq$%db2;BxKc98y=w(9@Lh1IzoWYm)GWcP^czdUIE>kY2p5n7K<_2_@s2hsmhfJfM>@ftJt#qfJiDZwMuy%g}qj?(w~ z{Z=s^VcYO=a}u7t`z?idgu2(}&I%0DJbi|MN9buohbxeuCP&93cvhAk8y-4o@@DyX zg!a`#N6t8*6p!%POl|*XZn%I)==j@Xk3~ivw!qlbSMG~L;wT=M32N)-lBo5)$9gc7-5eT>UaD-y@aD-cPID#_(1m;)=6@VZFAoM9k zhh8{+ID%sU0y;iH0)XK7a0CJXK|LJ7<--vuZ?|53JRoiw5Eytq$Q`53Bf=VwirS87{ zwG$7~o`&DSfwNG0kty}?A9UxGgS2P&6Kv@vs*Un+>*WzJBx2?PDE?(Q!{V}~I50eF z?ta?S_+%JUe1$Fc3>+FgZy)Vx`XyU@)lwWZEPDQ4R6Hca2Wl+l74NYXm)MFI?1the z*}F@5#k*|9WxV2@w&HSL@eU|{ie*y4E8cD^zQ!xgw-sOK6>qZ@-{2K*wG~(LinrK` zt9ZqEw&H4D@n$Ht^ztUJIM-HO!z<3Q71#2Lvu(w7yy7e+VPzq zJA==r|$=FXfl@or71KR6la9@@fOiP?2+wh_UPk|T;^t~kutia K_cFRQQU4F@Q^g +# Created: 1993-05-16 +# Public domain + +errstatus=0 +dirmode="" + +usage="\ +Usage: mkinstalldirs [-h] [--help] [-m mode] dir ..." + +# process command line arguments +while test $# -gt 0 ; do + case $1 in + -h | --help | --h*) # -h for help + echo "$usage" 1>&2 + exit 0 + ;; + -m) # -m PERM arg + shift + test $# -eq 0 && { echo "$usage" 1>&2; exit 1; } + dirmode=$1 + shift + ;; + --) # stop option processing + shift + break + ;; + -*) # unknown option + echo "$usage" 1>&2 + exit 1 + ;; + *) # first non-opt arg + break + ;; + esac +done + +for file +do + if test -d "$file"; then + shift + else + break + fi +done + +case $# in + 0) exit 0 ;; +esac + +case $dirmode in + '') + if mkdir -p -- . 2>/dev/null; then + echo "mkdir -p -- $*" + exec mkdir -p -- "$@" + fi + ;; + *) + if mkdir -m "$dirmode" -p -- . 2>/dev/null; then + echo "mkdir -m $dirmode -p -- $*" + exec mkdir -m "$dirmode" -p -- "$@" + fi + ;; +esac + +for file +do + set fnord `echo ":$file" | sed -ne 's/^:\//#/;s/^://;s/\// /g;s/^#/\//;p'` + shift + + pathcomp= + for d + do + pathcomp="$pathcomp$d" + case $pathcomp in + -*) pathcomp=./$pathcomp ;; + esac + + if test ! -d "$pathcomp"; then + echo "mkdir $pathcomp" + + mkdir "$pathcomp" || lasterr=$? + + if test ! -d "$pathcomp"; then + errstatus=$lasterr + else + if test ! 
-z "$dirmode"; then + echo "chmod $dirmode $pathcomp" + lasterr="" + chmod "$dirmode" "$pathcomp" || lasterr=$? + + if test ! -z "$lasterr"; then + errstatus=$lasterr + fi + fi + fi + fi + + pathcomp="$pathcomp/" + done +done + +exit $errstatus + +# Local Variables: +# mode: shell-script +# sh-indentation: 2 +# End: +# mkinstalldirs ends here diff --git a/tribler-mod/Tribler/Player/Build/Mac/process_libs b/tribler-mod/Tribler/Player/Build/Mac/process_libs new file mode 100755 index 0000000..d7a99a1 --- /dev/null +++ b/tribler-mod/Tribler/Player/Build/Mac/process_libs @@ -0,0 +1,34 @@ +#!/bin/bash + +TARGETDIR=$1 + +# process dependencies and their exact locations of all libs + +cd $TARGETDIR + +for i in `find . -name "*.dylib" -or -name "*.so"` +do + otool -L $i | perl -ne ' + if(m#/'`basename $i`' #) { + # skip references to self + + next; + } + + if(m#(/usr/local/lib/([^ /]+))#) { + # make reference to /usr/local/lib/* local + + print "# Reference to $1 found in '$i'\n"; + print "chmod a+w '$i'\n"; + print "install_name_tool -change $1 \@executable_path/../Frameworks/$2 '$i'\n"; + } + + if(m#(/opt/local/lib/([^ /]+))#) { + # make reference to /opt/local/lib/* local + + print "# Reference to $1 found in '$i'\n"; + print "chmod a+w '$i'\n"; + print "install_name_tool -change $1 \@executable_path/../Frameworks/$2 '$i'\n"; + } + ' +done diff --git a/tribler-mod/Tribler/Player/Build/Mac/setuptriblermac.py b/tribler-mod/Tribler/Player/Build/Mac/setuptriblermac.py new file mode 100644 index 0000000..0efc35d --- /dev/null +++ b/tribler-mod/Tribler/Player/Build/Mac/setuptriblermac.py @@ -0,0 +1,121 @@ +from time import localtime, strftime +# --------------- +# This script builds build/SwarmPlayer.app +# +# Meant to be called from Tribler/Player/Build/Mac/Makefile +# --------------- + +import py2app +from distutils.util import get_platform +import sys,os,platform,shutil +from setuptools import setup + +from Tribler.__init__ import LIBRARYNAME + +# modules to include into bundle +includeModules=["encodings.hex_codec","encodings.utf_8","encodings.latin_1","xml.sax", "email.iterators"] + +# ----- some basic checks + +if __debug__: + print "WARNING: Non optimised python bytecode (.pyc) will be produced. Run with -OO instead to produce and bundle .pyo files." + +if sys.platform != "darwin": + print "WARNING: You do not seem to be running Mac OS/X." + +# ----- import and verify wxPython + +import wxversion + +wxversion.select('2.8-unicode') + +import wx + +v = wx.__version__ + +if v < "2.6": + print "WARNING: You need wxPython 2.6 or higher but are using %s." % v + +if v < "2.8.4.2": + print "WARNING: wxPython before 2.8.4.2 could crash when loading non-present fonts. You are using %s." % v + +# ----- import and verify M2Crypto + +import M2Crypto +import M2Crypto.m2 +if "ec_init" not in M2Crypto.m2.__dict__: + print "WARNING: Could not import specialistic M2Crypto (imported %s)" % M2Crypto.__file__ + +# ----- import VLC + +#import vlc + +#vlc = vlc.MediaControl(["--plugin-path",os.getcwd()+"/macbinaries/vlc_plugins"]) + +# ================= +# build SwarmPlayer.app +# ================= + +from plistlib import Plist + +def includedir( srcpath, dstpath = None ): + """ Recursive directory listing, filtering out svn files. """ + + total = [] + + cwd = os.getcwd() + os.chdir( srcpath ) + + if dstpath is None: + dstpath = srcpath + + for root,dirs,files in os.walk( "." 
): + if '.svn' in dirs: + dirs.remove('.svn') + + for f in files: + total.append( (root,f) ) + + os.chdir( cwd ) + + # format: (targetdir,[file]) + # so for us, (dstpath/filedir,[srcpath/filedir/filename]) + return [("%s/%s" % (dstpath,root),["%s/%s/%s" % (srcpath,root,f)]) for root,f in total] + +def filterincludes( l, f ): + """ Return includes which pass filter f. """ + + return [(x,y) for (x,y) in l if f(y[0])] + +# ----- build the app bundle +mainfile = os.path.join(LIBRARYNAME,'Player','swarmplayer.py') + +setup( + setup_requires=['py2app'], + name='SwarmPlayer', + app=[mainfile], + options={ 'py2app': { + 'argv_emulation': True, + 'includes': includeModules, + 'excludes': ["Tkinter","Tkconstants","tcl"], + 'iconfile': LIBRARYNAME+'/Player/Build/Mac/tribler.icns', + 'plist': Plist.fromFile(LIBRARYNAME+'/Player/Build/Mac/Info.plist'), + 'optimize': 2*int(not __debug__), + 'resources': + [(LIBRARYNAME+"/Lang", [LIBRARYNAME+"/Lang/english.lang"]), + LIBRARYNAME+"/binary-LICENSE.txt", + LIBRARYNAME+"/readme.txt", + LIBRARYNAME+"/Images/SwarmPlayerIcon.ico", + LIBRARYNAME+"/Player/Build/Mac/TriblerDoc.icns", + ] + # add images + + includedir( LIBRARYNAME+"/Images" ) + + # add VLC plugins + + includedir( "macbinaries/vlc_plugins" ) + + # add ffmpeg binary + + ["macbinaries/ffmpeg"] + , + } } +) diff --git a/tribler-mod/Tribler/Player/Build/Mac/setuptriblermac.py.bak b/tribler-mod/Tribler/Player/Build/Mac/setuptriblermac.py.bak new file mode 100644 index 0000000..c1e08a0 --- /dev/null +++ b/tribler-mod/Tribler/Player/Build/Mac/setuptriblermac.py.bak @@ -0,0 +1,120 @@ +# --------------- +# This script builds build/SwarmPlayer.app +# +# Meant to be called from Tribler/Player/Build/Mac/Makefile +# --------------- + +import py2app +from distutils.util import get_platform +import sys,os,platform,shutil +from setuptools import setup + +from Tribler.__init__ import LIBRARYNAME + +# modules to include into bundle +includeModules=["encodings.hex_codec","encodings.utf_8","encodings.latin_1","xml.sax", "email.iterators"] + +# ----- some basic checks + +if __debug__: + print "WARNING: Non optimised python bytecode (.pyc) will be produced. Run with -OO instead to produce and bundle .pyo files." + +if sys.platform != "darwin": + print "WARNING: You do not seem to be running Mac OS/X." + +# ----- import and verify wxPython + +import wxversion + +wxversion.select('2.8-unicode') + +import wx + +v = wx.__version__ + +if v < "2.6": + print "WARNING: You need wxPython 2.6 or higher but are using %s." % v + +if v < "2.8.4.2": + print "WARNING: wxPython before 2.8.4.2 could crash when loading non-present fonts. You are using %s." % v + +# ----- import and verify M2Crypto + +import M2Crypto +import M2Crypto.m2 +if "ec_init" not in M2Crypto.m2.__dict__: + print "WARNING: Could not import specialistic M2Crypto (imported %s)" % M2Crypto.__file__ + +# ----- import VLC + +#import vlc + +#vlc = vlc.MediaControl(["--plugin-path",os.getcwd()+"/macbinaries/vlc_plugins"]) + +# ================= +# build SwarmPlayer.app +# ================= + +from plistlib import Plist + +def includedir( srcpath, dstpath = None ): + """ Recursive directory listing, filtering out svn files. """ + + total = [] + + cwd = os.getcwd() + os.chdir( srcpath ) + + if dstpath is None: + dstpath = srcpath + + for root,dirs,files in os.walk( "." 
): + if '.svn' in dirs: + dirs.remove('.svn') + + for f in files: + total.append( (root,f) ) + + os.chdir( cwd ) + + # format: (targetdir,[file]) + # so for us, (dstpath/filedir,[srcpath/filedir/filename]) + return [("%s/%s" % (dstpath,root),["%s/%s/%s" % (srcpath,root,f)]) for root,f in total] + +def filterincludes( l, f ): + """ Return includes which pass filter f. """ + + return [(x,y) for (x,y) in l if f(y[0])] + +# ----- build the app bundle +mainfile = os.path.join(LIBRARYNAME,'Player','swarmplayer.py') + +setup( + setup_requires=['py2app'], + name='SwarmPlayer', + app=[mainfile], + options={ 'py2app': { + 'argv_emulation': True, + 'includes': includeModules, + 'excludes': ["Tkinter","Tkconstants","tcl"], + 'iconfile': LIBRARYNAME+'/Player/Build/Mac/tribler.icns', + 'plist': Plist.fromFile(LIBRARYNAME+'/Player/Build/Mac/Info.plist'), + 'optimize': 2*int(not __debug__), + 'resources': + [(LIBRARYNAME+"/Lang", [LIBRARYNAME+"/Lang/english.lang"]), + LIBRARYNAME+"/binary-LICENSE.txt", + LIBRARYNAME+"/readme.txt", + LIBRARYNAME+"/Images/SwarmPlayerIcon.ico", + LIBRARYNAME+"/Player/Build/Mac/TriblerDoc.icns", + ] + # add images + + includedir( LIBRARYNAME+"/Images" ) + + # add VLC plugins + + includedir( "macbinaries/vlc_plugins" ) + + # add ffmpeg binary + + ["macbinaries/ffmpeg"] + , + } } +) diff --git a/tribler-mod/Tribler/Player/Build/Mac/smart_lipo_merge b/tribler-mod/Tribler/Player/Build/Mac/smart_lipo_merge new file mode 100755 index 0000000..3097e61 --- /dev/null +++ b/tribler-mod/Tribler/Player/Build/Mac/smart_lipo_merge @@ -0,0 +1,46 @@ +#!/bin/bash +# +# syntax: smart_lipo_merge filenative fileforeign fileout +# +# merges two binaries, taking the respective architecture part in case the input is fat +# + +NATIVE=$1 +FOREIGN=$2 +FILEOUT=$3 + +ARCH1=i386 +ARCH2=ppc +ARCH=`arch` +if [ $ARCH = $ARCH1 ] +then + FOREIGNARCH=$ARCH2 +else + FOREIGNARCH=$ARCH1 +fi + +if [ `lipo -info $NATIVE | cut -d\ -f1` != "Non-fat" ] +then + echo native file is fat -- extracting $ARCH + lipo -thin $ARCH $NATIVE -output $NATIVE.$ARCH +else + echo native file is thin -- using as is + cp $NATIVE $NATIVE.$ARCH +fi + +if [ `lipo -info $FOREIGN | cut -d\ -f1` != "Non-fat" ] +then + echo foreign file is fat -- extracting $FOREIGNARCH + lipo -thin $FOREIGNARCH $FOREIGN -output $FOREIGN.$FOREIGNARCH +else + echo foreign file is thin -- using as is + cp $FOREIGN $FOREIGN.$FOREIGNARCH +fi + +echo merging... +lipo -create $NATIVE.$ARCH $FOREIGN.$FOREIGNARCH -output $FILEOUT +echo cleanup.. 
+rm $NATIVE.$ARCH +rm $FOREIGN.$FOREIGNARCH + + diff --git a/tribler-mod/Tribler/Player/Build/Mac/smart_lipo_thin b/tribler-mod/Tribler/Player/Build/Mac/smart_lipo_thin new file mode 100755 index 0000000..b6fd13d --- /dev/null +++ b/tribler-mod/Tribler/Player/Build/Mac/smart_lipo_thin @@ -0,0 +1,19 @@ +#!/bin/bash +# +# syntax: smart_lipo_thin file +# +# extracts the native architecture part of the fat input file, or does nothing if input is thin +# + +INPUT=$1 +ARCH=`arch` + +REPORT=`lipo -info $INPUT 2>&1 | cut -d\ -f1-5` +if [ "$REPORT" == "Architectures in the fat file:" ] +then + echo thinning `basename $INPUT` + lipo -thin $ARCH $INPUT -output $INPUT.tmp + rm -f $INPUT + mv $INPUT.tmp $INPUT +fi + diff --git a/tribler-mod/Tribler/Player/Build/Mac/tribler.icns b/tribler-mod/Tribler/Player/Build/Mac/tribler.icns new file mode 100644 index 0000000000000000000000000000000000000000..8fd54eb95940a3461c8cce457ccfad738a53ca7d GIT binary patch literal 39131 zcmeI5cXSj-*61}e$~k8VrBMcv1Ok&WCK{V$g26UmFwOx7z+St%XAmMufZrM$Bb)$* zktQi;2?RnJB$RWQBx#hz_s2Wu`Q7TCo}K~xt>3O6KJPsBF090K-Kx4(b?ess)t!C& z;0H$y48CuB`_p-I4GeZw8ldO*|7pPb{i%P&7?^Gl=BJ57!qikO+2s0-pX1|yymBS~ zdy^|yevFU*`NnlD8BaA5iKJ=54`Ps%XanOHBne4^OKKkmZAF->(Q1+J_g72bHk&_0d$8IJhiLx@%(@&?xNMj^1;uukkFeWu71y3~* z3Pp)mSh%`l5S@9-=$Ci{HubGHR_a zxE_CvPnF?(_iJfi%oH(6Oc3Mv7%@tW5W~2d7$OGo0YXK95L`}x5T1-DH|)EA^M~C$ zRFY@FS%0}q9W#ZKC5bVdEJ>(wC`k<9WJyAf%Xs|+l*AS6he_XRO0rL6_ML(%aj24j zDsiZifGTk)h^oXDgtkgT!IoED))Zu&t&b*~ij(Dt5u7adw5IYD9Hp{DLKnQ=$A4-c zbz4gHk*6S(26yOk!a?32^pH^Dpc}fOgq)|~DOsv|x?sv6{wI5gD#TO(w&dnTiDQRD`Y|6{a)8ebnlqq0Hwk?e={0=HJ4{_o33l8=w0baV%ZDJcE|64Y6|c#UEZ=xBuGhmb<54p5^Yg z_M?4aq2WvR9(wNEPFf}pWD-McwfZ4#90UqXV=W_xZZ352T6^N^!}@m|j4hqrJzVS^ zoUJSaFIR1I_gQiH`(Dw$Sza>_99$7(Y34JZYr8VOp---9EE3Ec!qf(|V2(gBsu7&?H6D2}U|gAg3(us-c0FE1Nv5Y;7$q%yv}ehM1cf8C%-f+1gnd zJ1tx>XW_ai--s(0H_!HPw6o^x`N+sBB@rM)t_l3bifK2OaWrkf8H zVC3$D4XvvCD#Xb)XyYmeON-CTzVfg%H)5OE*jhn__HLfR0e&8?o^RgEebvRodxn3g zucuGY^IsiV5$f;k>gE}=FT3KQLfMaLyMPUARm2v^$=wDYT7Nyl%-Ux8xvxB37XOqI zi(m%k{CSty`+m2Uu zejFVuxOpwNYk*V~PmMFRKu@TxY5id8b4F&Cp>g?dhedsO*xS~^!qURh+Q!P<%-Y`8 z+Q#0=&ct-y+Re*7o!miNR#tAo{=p092Kdg|aQw}=kr82`bJo89(UvuzWcL%Kws=e> z9D!_@-2bpa>$abE1lzdb4ZqBD@Cple#4RH2qgIC6SlBq&+t>yjcs$9!vBduvz! 
z1#4EnwA9bR!gbA-St0x0fB&n~^H%KL7`l1o?8x2m9hjyP_Q(i5Dr~5RatJmI3_Roo z&g1{Q$J^XIbgc*Khuar7KX$fv4G(s7{jjz==7i+Jo7*?OxMl}GKfmsBq?wi5!LL_4 z2DtCdm&{sy{7nz{*;_w~7IzJjGmAHjk7x&mC6J+7@Ih=~ZEtD(%4IJtcCfXy4O#4F z<2u9B`%p#Za`#o|h?U!Ndiz?NdfOk?lvhba<^BD#qCL*GF7u;&-2=n7^N+8LT)FM= z+yD46|6z{`O)uUs!Bhe&QOw+vIbbf>*FKhXDukKg>{@}*;^E`Al8_n@w+Q#L?}=Z&yNX3(K4D zw;(%#TFe{4Ms*a^kC8InRI2+YLLD4k-2I}~&++r~@eSYi)xKF#?`8C2q?5(iThuZF z#1M=aj-Y=?<6*hn-Dl*YOrUWmVkt(}F-%H2`ifFo6X$j3wO2)+2Jwh*JNF{Xok zl(*%kR95ZhSh={k2hUo%KGM|0*wQm3D8PSPK|k!9de)SZ!W(6ejWuX3U>`<$iI~!* z;Y={c(8S!%Cv?u%J;4?x?%^SU0V`wM8KX=YP+AN=W zbaM<_Img0yL-f(zpI^U^!6@ph1B@!;Iz$5q6Y_WiurZH7Jfa?SxqC;ORrWj8%B9@heI9M>z4 zk*>8}-*0oxLL=PPA6jK^>mIc5aAG_5*l40jPW6Dt2;c~Ml%!kfB$M?N?XU}48|ePr z_JvN*t@AbCTn65g#tEbLAEZWtR03~;JvmA1fhFjs97@}oqd~sG?k?}&N?fsgm7mRv zcXV9^T^;Zy@JSslbYr9wY2V+M@oK1#cW}Ugl7>$g1qEyqbl}<+&O>JDm-)jV4mt66NvWoAHzs%kDBqhi*el+0 zj>|gUhz(;S27*{F`?)Jh0-8lIV%Q)ChvOwKv*~SYuQ}ml9R3^^_S@HLb-`#31XtAe?I*7`2Z$ zd-lx9H`au3xnH;7Bka-96oSj1$v=1Y8NiI={@V>ekuB; z?Uz!&*!Si;x&1gLxBm-*Yqz$VksWf2ikC zG9!C%SgkB+D9&X^#e+;Z1NRYhFd-A}^s+zfRX-u#qLPlD8ynUvePj z<}xS8=da7aii2ros~eAiMk7BH7WN`bPiW-z;uF{;>(*i%w0fC8#b@M^V~^zFN~^pG z^Lam4eCliLccPo}>kQ#MaXy&GHC?-lmy(iR+cAWJv0#m-LYo(;u?fS;$}s z-qcA(DokwB-V#bH#ls7`m#lpA(>W1uVznyNNJ7mjcrk`+6!O+-QQH84b^#=BUOCas zmK+R)AJ;v^r%o}_V1iQ{nUzW@Yv6}iyLlZrzUV?b<*#vu6<32!ag76mLc({62C*@m zlxGmg3uA=_fi`9yo1B_DttXDMq=d?{;XP{$r?7pmIvLS4oJuxAS z<#K}Fxixi;5dr3v@-*-P^UA|^ub&ZToS5-)K1TXK_=u^xF7cy}URl2)jL&5ssz=*{ z@GRFbeA705eXO1o%9IiV7Jc8(42xX9Yg5?kIRlhcCPs~FBtLz$b{5j7hyW4rL(mKS z_3Te`%Hk$<_q=X}#-Lgb4wfI=ykVw;llQ9cdT`Aqi;uBKIu1eU@JsjvYZA$c?2n#` zoz&a*x~a20K4{)oURd~swadKOpFF@o1m$sX#>bP^!J3SjfSE|PB~rFRf^Uk#%0QC8yE`fyK9Ef*30TT8VB>^(&QvQWk59}ZZbx-9y;U;d(X1d3u~Y(-qec`o}vky=~& z?BqG7G&ob#85kYyOa`;I`v-0iqjJ)bl>X7Nee)&82T;L*VD{fe9ZkiJ z!-mG_RHo~&5}IL*gAkkUI~chUqDIpiM-AdL?~o_|CmR$uev)|y&eQbqfR+*mQcmf< zAxCS7s)v|70}B?tahiF91Hlr_8!Rwd&FqQEW_*$})iO1OwXo1%oaRaNPpsxiyqN<( zLev+%Hqnetu$w2E@g{?DyeWG8EF%*IV5sQ?wzC4`npZY+n_+iq;kH!5USHXQb4}r& zDkmo^xy?A2gFXfe32&NIz>gE~hu|7C;ar1q{`fgYE*YvreZeL|pm;Ns=C(jfK`ytl z1^q_-8@IBOtNjINL86}3x4fyG+k}tDj_Dl2A!J8QBQ{9FoBg;g zAej-~JP8uGA};(_(@HJ~cc+C{jDtjw+ce(9rT&b^jWKZpHU#ddaexyDsKKlfH5xR+ zZ6UZObkp%>GZ3z6EocuEVGKG_l4SVKFzJ9y_Hc57#bJw0z>mV)Q=6;8SLy$Bms~K4qsUwMEei zj~hG3e8@^wF(xYx=3=bgXPDN=qH?jLEN#4wBUMe1A5zOXZY*vTqIq@nIq>@q%6?H5 z#*JblEH#b0*iqGEnARx8LPoJ$@Q7$)~g0 z|MmM{PjQoz1<6VLL_C3iGn(YMF~D&N4aYru)g=DfwO@1~t_gy;;;hUJUV2R0X`r~L zf#RMP#-(CHBT-WPHC=+6kW<~MI!lJNqRA*oG@MImf_ttD>o#h8Sa9Ppe*1`c3NqBk zspx=X*ndAkkKYc*JVKa~L2OU5Umh7j!0XoF48J@--)Z$H!fJD&LAR+eWI^0*DlP^t9E|d>;RhHZ;%rB{Jt*`4E z0Q^j&U`qFhx#A2yMEF(*1Y^VI&C;YKp(yR1vgHWdnUq@vqRjlBS}HSM9JwR%XC zgwhNpb!*Q}65Oh)?@^RxNYnF6@CmOB)0Gm9LifHBRw)BMLlJ3riyTa7Wbde}6J1;j~EJ!NrtZ8)W^ZLWx8yN>3MDPm+kz z>bi3ilG95b$eW5YGjmEy3(^x3#Mv3btomNKQ$SBhbY9Y7lBBysG4>_ufu0IUaZ$D^4jWyj4+M+uYR90d|l&wdx^AI1r($Nb84Mvy((=`42nqX5^NY zB&P~R5^-{pK#*|r`t@W{(v3thjBa*rmOuc80;)`hUgzhgiKKBq#573Dh9UFUgdYR#ev9&&a-2m{pLSkyG5Dq{3$4bd73JBsynN4BCBgu>XEe zeoaG(R4B~MPZhwrPpfFSo06O;!7{?{bamC=Z>cURDK5#*zSq{?_b?|lCG~c7QL+7MacQee-rpzd@9F98Zf@vQD3l!qLRfOSnL@E7zp64nEAMuBc}-)NOhpC&(CJoN zi4N)Zme=OwW~U321+eVWveVKs(j}6@c3DGNeRofL2RJKmV@xJj09cea-zmt>EUGBG z_n@}Cw!Z3qhYW5W3~0pjfNJWV%SkNk(N&n7DosmG66b(bkY?oFDb11QK9tM4`Vl=t zrom37Qg*jCH?;Kj^eSXvGW+FBU_<|`kC8In1_pZXf#zWjX69!|M5&Uj(#l(D(&EPc zKcbU5;Q*3YQ+MiIy6#9*lLV5i?2I&VN=m9Uw@{LlRMeqq7#&`tgB}C~shJJ}mqz}e zt4h^V0rn_aoRyVZkdu-s1Y-b;G`of2q;z{BpNHjixsa*PM^0IR>#lp1A z>=Lk@u!K_uNz#fw#wJttlMb%vM4;F}qNzKA&aH)MnFYm}Naxx4+3D%Um8E&NA9Q1X zi0E{}Bz=^B>!fVV7N=D-mI?$&#Ui2PUUz467aiWFBY1Q>fTv98u+Bc@4iPE`Wlwum 
zR%%*{qA^DZ_Axn8ka3^U$m4-Eov1Gge<6H~DebF^GC&`7@~*q$R9Hb-x9fWJwLvj? zD&hjsXl8JFse5^>vMfw$nRc(Y>n`k6(p$~lq&k_clMZWBp#oiQL(*ql3EZZ+JSQVf zTGZ872KF!eA^1Y%wnJTPqUQz`$VOBwUN~$;`ue`aRW|fp(r=#K2DO{@rGE|FvNhi5n)=`=c zb}B)VlbKdnoGmTsq7fx6LN_7_5oel7lH?irY_?tZGSV`Ig3=DKNcrh$#hujFt3S1= zpdbXDb;?uzqrFVlT%3`Xo|S#Cqqi(mEG=nj8#+*+jyJ`6#I#4_f!cvm z!ORupW+Z`|n^xLK@AZcAipyGNE2>k$d3}^*wqNiHWhe~5o5CJx`^N#IOuC?Ua(P$v zof45Cy`(TPK_Je3AfqrRXQ)qtnC*!s4BbIVLCp3?6#-%hMhpVeButju$a?#`@20|b zE=U%LZui1@04+Gfp`q+Wh}nMAjLbNa5;U8@K?;wNf|xDo4cjW@J?*7n2(qQA!n_tG z6|zULkz}^b=>7@GY<~j*Cm8`60?m`dtd}?3yWdt*T3%b8m!5Z@q?G^`F9WmX0JG&` zXHB9hX4^^=OV-Sj1dVL_fweJm5o-gRiYsj7_p`HVt4oR>G?$m$Ds88ErHn+jm+lUO zi(*=N6Cv9MxOYc^#)SxjDsSur2(l;-pqrjt>Jo>YvaL8pnv<3yE-fjq$gh`Eq>-(z zA(8Dcgnx}jM|;2f;UQd8L0mABf|qFs*)F;UG8wL~g_pKUWzRiHc};;ZIXSVUqoZGi z77M0kmF))!pMXqklru6gI07td9MMZoWbXt-rWj=VNQdr&4$yXoD=lTsRSoy=m89KB z%5PIrHl1}F0o%PpRPq}^(P zgCeph?BS-j5wQJSRMdy=VZe13vd_TLu_@MV64-vvf5d0xkpqw9;VP@ntfZ{wUN~>0 zRcKr~!{Ol}3fS)Er=(`ocB(OmQ?e!yD_by(O&U&3jotqSepw{uJ<_=XKtWApY%OGU zw+l-us&mqBR|2&_W}8(=;o38z_CfNp3rJ(nt7yhsXkOcZKhnegaC-#V94l~XWp%ki ziTGAUPQAua+Suz49n?ZdkmIMhYWdsP}=?S3#Ct421y?5Yjbf*UqFpIIVM&PwqZr68@gBo1Sfzpg=k({jEtx|@8skbRjCm7go^~q3nZ^C9@dsV ztAC}Sm-Y|v5tHGrVrhC|+b{$I#!W^a0I%Jm<+T;CizEZD?L9Mfo~fM$7Xh^>(`0nC zy-1Q?twP3wnBY|AN={=t@m=im$<44=ln}dEZz^LTsI8p}9Pk9S#feelj$4^kiZQY@ z+J;ZVNX1R+IG+?aebSepUevo30M5I<>qiTW)#v`;o--aEMIq^m`Ha7b{BPzpyc}o@L%b$dX(;lz+># zN)M{Td6PPsP|g?Cazq}qBLQ+Kgi5nvx_7bq#9XEogh&hhQrv{jolNr1JMHhHi)&P) z;~@P*r#iT{&b-4kNQaVZNb2Jaikmp8Cx?=v-EYQee?)xWud<~0B_-+SS?d{3kQCLa4~uf!7re=M6}@D`JR%6A_{Vc*}vGfe52FO-TF)d=7vukVb{D7{SRiQ?MM!2m2 zY^m0^aPby1)D4nFL(3^(TM57x90$O<8kYOm6#?LrXae9<4lON6UKI&!K{&}+DP^NE zB!Ml`Fg6LmmaGKYT!APnVy#HuvHBMaAbyM1Ix!W}LTVVafq*Td*c2uQV2gg9LQu7` z8R-{DHvn6t9Yk6wf@{dFsNj~LAJ@4DO90FUfC~isjQ%|aB_$Y#$%{W?w@$UdW(L|=Db}fb0U;Gw2V+EH(e(lC5kD0O z2J;!hxeWa0NFiGeeMqfl#B(8zrweRJt3$(8Y?L)j^DO|jhhA0k$cEq?pHmElRv4 zAn}%Wm54V6V*Be&-L%`pWN~(8Mn<$AvW-c_g(f1AIO*C?Pl2`&9#=&x;ba17x4eEH znRd(TCc1dtCOtiE4-22bws5Q_c@#w}P*j*8Lt`iw1Lp^ln;6O-Bt8ypDNaab&!V{a zljV32t!L||+-CKi(h$@qivhKR>QKCk4`-^U0b2ykP$R)LeylP=&L@u+>;YX`d-4zK zV!DP7B#`P!&t;O;{+W*P_3}C?Q2V4BA!Y}o!SoBc`b-9=mePW$B|G+{7U_=ZC2O*Q z?&`MDVMN)~DcZLqVPIIT=;&&!Z5eIf*rs+qE`d*a^;)>60kpIPe8i>l=#mY=c6Qqu2MSN@+`c5@1zu+S<=X=a>WCR+u+*v z0OqCd0wC(ZQB*81;k; zI#d_2k&vFcrtW_Py1PKSp{b#5!1F)K1`A#Z4)At&@QjS^C|KvTr!Xe!t^9kfq&Z+Z z%40ApOQ52};Uf{MtGhqWcq7om)y~qw*d*-Z?0!|-#l>DTmxQh77xu}|`54dt=(U|E zK7Z-QralA>30U&fDVPt30^M9pNFxUS4AMzws|z-}hkxdi7Rb#Qg{U%DpH%+~vx#tUm+zgF5>7PC3b*=fxOdxL^P7VS9{^(_^{KvU)k#Hr=I zb8pVK^?3gH)rSw?wqcptJG(eQlBKDc|K;kJojsPl`F)>wZ{IZNkU(=TCm~eLv25-cQ&`S zhAg^m)wzMjhAfVWjkTqf8QXE*l9}^fi1ZHs@#4l=u8_p-w`zvHt*ifRKL`7WEpL4R z>9qX-W??x#g=T7Umsi!F2(+^d*svVre|GOn7gJ-l0msV9)WpQl+SxV0$J^D(?O=J{ zZbw%SPv0OfSFeCoUmjk{^|5z?PhZWcY*eb`RN$PCqSZ8$)o@>8c)ePEJ>1B`V(GcB z+#DDFl(N7U`oV^OFov`~OLI#{J99I0o3+<(o$&Yd^Y(CdvGEOyJn+`C8NRNTVRP-B zw{P42;YmoPjnC`BhtLShldcY|;d%g8h+F8@eK4lHbrbNJaR-Kh#a% zlv8eiciV+k3tio3IGVY-dHDLf`pgXTbzgO>Kk@jD`i8cfK{e|PDN=nG$-&#S9%?p8pA8mS50$7j4{bbd!JjmO|%*$xLgGfO9D|0Oeh{QZ3w z?0<8<>#`Ju?17f8*$>F?CB>h=S#z(tr>U@_^+78dUrH}JAQh!CBboT1OWmL-u_6YUqOZGWsc6r)vvPph4%}HG}GreYO;8@h82k zk%CZAQ>!9$-5j}!_<&2(nE(S8$H>w%aOTUqxF#IuV34uwd?#guX=_R814Rb3G9bXC z0n4iFtu7`GGd)agoNa@b&oW`JJNe0uPp{XZkn@x3(9ISF8B}tzvr3h)+SmL2Yx`}D zjSP&PT-ZiOA691F$DXPVbW2;?4w?r$L4I>_^m3m$Y0BgUMh0w4dppOu-~WLw&>>kX zfsEXt3w`!yURmJh7po_ha>=o5q+`5$BMnAs{0d9M~hik@juYOW7(p>fYHrK zyZ)b}oIP!PFXz5)YiARRRrhKF_xe+TH_nHXyiNx+>Bsg8+4b2bK{H*}9$aB#-zDEf-GTL-KOa9OlAqroM$g;DbvMOOGI z*@poXkaF=`@XdLM#q79Co#Z>e45?nU53z+H3r 
z7Yqe=D(hfT!U_96b}bWu8-Phd6!YSmPCMr&k z4Ou#S1!$B+ttMn@tau1n z{eMMTNkYkoq;ZSJcg#)ff?wTjzd{OA5E&mG&`O|0&?iVM`P~C(w;%?sx-W*A?>Z$- z*y{9F70Sa#9;cxOwHhw{RZZoAqTE=0)XHRC0^F=RWWDdSIP0NgCmGOw91Ue#t>&k-oT%rthnZ!gbkbBKEU;waq!kK#YRm4 zvwnpzE7+2M30aX!v}aJ$ieszhI(mO5BbhO)`(rI*P5S_HVmbSPi@;9sRK`l>Y$ChC z8#XOF{_?gcuUBe^G4MFq!{Eo3E~C2BJTOsdFw zZQ+v5pS7ZU7yk;dY6vHFL^B;7%Gx%=f328I12Y->lZ2IaB8X8F;foG4zG*==15*9} zHD3i+lft0L88#YA-xhkT9=SnGEMEt%l78uv{=yS%75Sc2e~_b3fi=D++^*vwf;j5P zhq?CjJf!+mCI7L%tLAUdd6IF_q(ltGWs8Tti z`W;7&(E+LeW~qvhA4-qKV>ndF9AR1$Xb!EK>)G5^e?3Vh zX(yUh{})Iq`ocwucf!q_znY{%Mk!h>&xE9M;5#yPY%@xY)gh^@6cuel{yLD#29U}u z<7tli`=?RwziVKy!j$|fW9ly!df2)T=dXBl1;Wo{EjqQ*KeE^yLnC8T`Z1#(^Z)eG z$f3oWni?A$8A2=QYU!@C4UNq#;cE~b92^`W?B?X;?BeR`>h_;MAjQQQsgM&jtLPNvCXGYAPJ9qB9|MUSFvm<7PhlPfQz!MxC z6zK2mW^ZM}(P;n+g6Gz*e&JCo)^2=p^XAPjz4X$SZ98`D*|T>adS3f?AF}k`J$rWT z*tP{^g5=FFzPSE{rSpQk?97ds2Cxjx>^;Mu+i>urSel-mo{^E6nU$TBS5Qz`R8;(* zJw-)@1qFFI*;$#u>oef1veTs3j&5B&cZMT;Yp7-ffT-AeN31=Z|LFH;^R%9RX@S3! zsi9^Hu#K&~=WpW4pH1atE!Vd#_O~;oRsoA+<{ZB6!;xoL{qMj3^74FlOCwSNEMuD) zOJA>krq%!cUtg{bax@`l0Ncbd_(k5ct^W7lZ@&r(;2;IC&0Xj0NqyG!kH53R--eul zYzxn*H_M-O{l6dI7~+VQAd6!)*^P`v2|zzq|g$e{=t* z>;HS_-?aXx_5XL;zv=lmJ^!ZXANYTNlm9Wj{-)R8^!l4#f7ABo?>YZX+uv#XJ8gfb z?eDbxowmQvKK}dd{^|W^djFZ;f2Q}J>HTMV|C!!@o;84--hZe4k7@s7+W(mLKc@YU zY5!x||Cshao~;I^{jX{NYuf*s_P?h6uWA45|8xJ#@9*iq_WtwVbN`&V|Lyy?+<$)~ zioSpCyZobP@BhZ_i}FDC?@|84viF~T{=>OFk?tt}0Ofxye!KD6I)6~x$sKcC$OIs^ zg-7K6(q~@(iA|wo{)++I%q3#y^=DoGz4@+c3}D z+E|kX%VHbbc!aLle)_?)83VQS(7K2j_NHX^8_L0CaZGJogBCo$bzk(^bLY;*l26=) z3zxqA=j9)+T!sI~TzG!^KYGX_@mH_>aQUC#Uc3-TiHwZ}>8FqG-ne9@r@fiBdei`8 za|ciVS&@sEuUw^hR + + #define QT_MAX_DIRECTBUFFERS 10 + #define VL_MAX_DISPLAYS 16 +@@ -138,13 +139,22 @@ + p_vout->pf_display = DisplayVideo; + p_vout->pf_control = ControlVideo; + +- /* Are we embedded? If so, the drawable value will be a pointer to a ++ /* Are we embedded? 
If so, the drawable value should be a pointer to a + * CGrafPtr that we're expected to use */ + var_Get( p_vout->p_libvlc, "drawable", &value_drawable ); +- if( value_drawable.i_int != 0 ) ++ if( value_drawable.i_int != 0 ) { ++ vlc_value_t value_drawable_type; ++ ++ var_Get( p_vout->p_libvlc, "macosx-drawable-type", &value_drawable_type ); ++ if( value_drawable_type.i_int != VLCDrawableCGrafPtr ) { ++ msg_Err( p_vout, "QT interface requires a CGrafPtr when embedded" ); ++ return( 1 ); ++ } ++ + p_vout->p_sys->b_embedded = VLC_TRUE; +- else ++ } else { + p_vout->p_sys->b_embedded = VLC_FALSE; ++ } + + p_vout->p_sys->b_cpu_has_simd = + vlc_CPU() & (CPU_CAPABILITY_ALTIVEC|CPU_CAPABILITY_MMXEXT); +Index: modules/gui/macosx/voutgl.m +=================================================================== +--- modules/gui/macosx/voutgl.m (revision 20403) ++++ modules/gui/macosx/voutgl.m (working copy) +@@ -35,6 +35,7 @@ + #include /* strerror() */ + + #include ++#include + + #include "intf.h" + #include "vout.h" +@@ -43,6 +44,7 @@ + #include + + #include ++#include + + /***************************************************************************** + * VLCGLView interface +@@ -67,13 +69,18 @@ + /* Mozilla plugin-related variables */ + vlc_bool_t b_embedded; + AGLContext agl_ctx; +- AGLDrawable agl_drawable; + int i_offx, i_offy; + int i_width, i_height; + WindowRef theWindow; + WindowGroupRef winGroup; + vlc_bool_t b_clipped_out; +- Rect clipBounds, viewBounds; ++ Rect clipBounds, viewBounds; ++ ++ libvlc_macosx_drawable_type_t drawable_type; ++ union { ++ CGrafPtr CGrafPtr; ++ ControlRef ControlRef; ++ } drawable; + }; + + /***************************************************************************** +@@ -462,17 +469,90 @@ + static void aglReshape( vout_thread_t * p_vout ); + static OSStatus WindowEventHandler(EventHandlerCallRef nextHandler, EventRef event, void *userData); + +-static int aglInit( vout_thread_t * p_vout ) ++/* returns the bounds of the drawable control/window */ ++static Rect aglGetBounds( vout_thread_t * p_vout ) + { ++ WindowRef win; ++ Rect rect; ++ ++ switch( p_vout->p_sys->drawable_type ) { ++ case VLCDrawableCGrafPtr: ++ win = GetWindowFromPort( p_vout->p_sys->drawable.CGrafPtr ); ++ GetWindowPortBounds( win, &rect ); ++ break; ++ ++ case VLCDrawableControlRef: ++ win = GetControlOwner( p_vout->p_sys->drawable.ControlRef ); ++ GetControlBounds( p_vout->p_sys->drawable.ControlRef, &rect ); ++ break; ++ } ++ ++ return rect; ++} ++ ++/* returns the window containing the drawable area */ ++static WindowRef aglGetWindow( vout_thread_t * p_vout ) ++{ ++ WindowRef window; ++ ++ switch( p_vout->p_sys->drawable_type ) { ++ case VLCDrawableCGrafPtr: ++ window = GetWindowFromPort( p_vout->p_sys->drawable.CGrafPtr ); ++ break; ++ ++ case VLCDrawableControlRef: ++ window = GetControlOwner( p_vout->p_sys->drawable.ControlRef ); ++ break; ++ } ++ ++ return window; ++} ++ ++/* gets the graphics port associated with our drawing area */ ++static CGrafPtr aglGetPort( vout_thread_t * p_vout ) ++{ ++ CGrafPtr port; ++ ++ switch( p_vout->p_sys->drawable_type ) { ++ case VLCDrawableCGrafPtr: ++ port = p_vout->p_sys->drawable.CGrafPtr; ++ break; ++ ++ case VLCDrawableControlRef: ++ port = GetWindowPort( GetControlOwner( ++ p_vout->p_sys->drawable.ControlRef ++ ) ); ++ break; ++ } ++ ++ return port; ++} ++ ++/* (re)process "drawable-*" and "macosx-drawable-type" variables. 
`drawable' is a ++ parameter to allow it to be overridden (REPARENT) */ ++static int aglProcessDrawable( vout_thread_t * p_vout, libvlc_drawable_t drawable ) ++{ + vlc_value_t val; ++ vlc_value_t val_type; ++ AGLDrawable agl_drawable; ++ Rect clipBounds,viewBounds; + +- Rect viewBounds; +- Rect clipBounds; +- +- var_Get( p_vout->p_libvlc, "drawable", &val ); +- p_vout->p_sys->agl_drawable = (AGLDrawable)val.i_int; +- aglSetDrawable(p_vout->p_sys->agl_ctx, p_vout->p_sys->agl_drawable); ++ var_Get( p_vout->p_libvlc, "macosx-drawable-type", &val_type ); + ++ p_vout->p_sys->drawable_type = val_type.i_int; ++ switch( val_type.i_int ) { ++ case VLCDrawableCGrafPtr: ++ p_vout->p_sys->drawable.CGrafPtr = (CGrafPtr)drawable; ++ break; ++ ++ case VLCDrawableControlRef: ++ p_vout->p_sys->drawable.ControlRef = (ControlRef)drawable; ++ break; ++ } ++ ++ agl_drawable = (AGLDrawable)aglGetPort( p_vout ); ++ aglSetDrawable(p_vout->p_sys->agl_ctx, agl_drawable); ++ + var_Get( p_vout->p_libvlc, "drawable-view-top", &val ); + viewBounds.top = val.i_int; + var_Get( p_vout->p_libvlc, "drawable-view-left", &val ); +@@ -481,15 +561,21 @@ + viewBounds.bottom = val.i_int; + var_Get( p_vout->p_libvlc, "drawable-view-right", &val ); + viewBounds.right = val.i_int; +- var_Get( p_vout->p_libvlc, "drawable-clip-top", &val ); +- clipBounds.top = val.i_int; +- var_Get( p_vout->p_libvlc, "drawable-clip-left", &val ); +- clipBounds.left = val.i_int; +- var_Get( p_vout->p_libvlc, "drawable-clip-bottom", &val ); +- clipBounds.bottom = val.i_int; +- var_Get( p_vout->p_libvlc, "drawable-clip-right", &val ); +- clipBounds.right = val.i_int; + ++ if( !viewBounds.top && !viewBounds.left && !viewBounds.right && !viewBounds.bottom ) { ++ /* view bounds not set, use control/window bounds */ ++ clipBounds = viewBounds = aglGetBounds( p_vout ); ++ } else { ++ var_Get( p_vout->p_libvlc, "drawable-clip-top", &val ); ++ clipBounds.top = val.i_int; ++ var_Get( p_vout->p_libvlc, "drawable-clip-left", &val ); ++ clipBounds.left = val.i_int; ++ var_Get( p_vout->p_libvlc, "drawable-clip-bottom", &val ); ++ clipBounds.bottom = val.i_int; ++ var_Get( p_vout->p_libvlc, "drawable-clip-right", &val ); ++ clipBounds.right = val.i_int; ++ } ++ + p_vout->p_sys->b_clipped_out = (clipBounds.top == clipBounds.bottom) + || (clipBounds.left == clipBounds.right); + if( ! 
p_vout->p_sys->b_clipped_out ) +@@ -501,7 +587,15 @@ + } + p_vout->p_sys->clipBounds = clipBounds; + p_vout->p_sys->viewBounds = viewBounds; ++} + ++static int aglInit( vout_thread_t * p_vout ) ++{ ++ vlc_value_t val; ++ ++ var_Get( p_vout->p_libvlc, "drawable", &val ); ++ aglProcessDrawable( p_vout, val.i_int ); ++ + return VLC_SUCCESS; + } + +@@ -564,6 +658,26 @@ + + static int aglManage( vout_thread_t * p_vout ) + { ++ if( p_vout->p_sys->drawable_type == VLCDrawableControlRef ) { ++ /* auto-detect size changes in the control by polling */ ++ Rect clipBounds, viewBounds; ++ ++ clipBounds = viewBounds = aglGetBounds( p_vout ); ++ ++ if( memcmp(&clipBounds, &(p_vout->p_sys->clipBounds), sizeof(clipBounds) ) ++ && memcmp(&viewBounds, &(p_vout->p_sys->viewBounds), sizeof(viewBounds)) ) ++ { ++ /* size has changed since last poll */ ++ ++ p_vout->p_sys->clipBounds = clipBounds; ++ p_vout->p_sys->viewBounds = viewBounds; ++ aglLock( p_vout ); ++ aglSetViewport(p_vout, viewBounds, clipBounds); ++ aglReshape( p_vout ); ++ aglUnlock( p_vout ); ++ } ++ } ++ + if( p_vout->i_changes & VOUT_ASPECT_CHANGE ) + { + aglLock( p_vout ); +@@ -586,42 +700,28 @@ + { + /* Close the fullscreen window and resume normal drawing */ + vlc_value_t val; +- Rect viewBounds; +- Rect clipBounds; + + var_Get( p_vout->p_libvlc, "drawable", &val ); +- p_vout->p_sys->agl_drawable = (AGLDrawable)val.i_int; +- aglSetDrawable(p_vout->p_sys->agl_ctx, p_vout->p_sys->agl_drawable); ++ aglProcessDrawable( p_vout, val.i_int ); + +- var_Get( p_vout->p_libvlc, "drawable-view-top", &val ); +- viewBounds.top = val.i_int; +- var_Get( p_vout->p_libvlc, "drawable-view-left", &val ); +- viewBounds.left = val.i_int; +- var_Get( p_vout->p_libvlc, "drawable-view-bottom", &val ); +- viewBounds.bottom = val.i_int; +- var_Get( p_vout->p_libvlc, "drawable-view-right", &val ); +- viewBounds.right = val.i_int; +- var_Get( p_vout->p_libvlc, "drawable-clip-top", &val ); +- clipBounds.top = val.i_int; +- var_Get( p_vout->p_libvlc, "drawable-clip-left", &val ); +- clipBounds.left = val.i_int; +- var_Get( p_vout->p_libvlc, "drawable-clip-bottom", &val ); +- clipBounds.bottom = val.i_int; +- var_Get( p_vout->p_libvlc, "drawable-clip-right", &val ); +- clipBounds.right = val.i_int; ++ /*the following was here, superfluous due to the same in aglLock? 
++ aglSetCurrentContext(p_vout->p_sys->agl_ctx);*/ + +- aglSetCurrentContext(p_vout->p_sys->agl_ctx); +- aglSetViewport(p_vout, viewBounds, clipBounds); +- + /* Most Carbon APIs are not thread-safe, therefore delagate some GUI visibilty update to the main thread */ + sendEventToMainThread(GetWindowEventTarget(p_vout->p_sys->theWindow), kEventClassVLCPlugin, kEventVLCPluginHideFullscreen); + } + else + { ++ CGDirectDisplayID displayID; ++ CGRect displayBounds; + Rect deviceRect; + +- GDHandle deviceHdl = GetMainDevice(); +- deviceRect = (*deviceHdl)->gdRect; ++ /* the main display has its origin at (0,0) */ ++ displayBounds = CGDisplayBounds( CGMainDisplayID() ); ++ deviceRect.left = 0; ++ deviceRect.top = 0; ++ deviceRect.right = displayBounds.size.width; ++ deviceRect.bottom = displayBounds.size.height; + + if( !p_vout->p_sys->theWindow ) + { +@@ -669,8 +769,9 @@ + SetWindowBounds(p_vout->p_sys->theWindow, kWindowContentRgn, &deviceRect); + } + glClear( GL_COLOR_BUFFER_BIT ); +- p_vout->p_sys->agl_drawable = (AGLDrawable)GetWindowPort(p_vout->p_sys->theWindow); +- aglSetDrawable(p_vout->p_sys->agl_ctx, p_vout->p_sys->agl_drawable); ++ p_vout->p_sys->drawable_type = VLCDrawableCGrafPtr; ++ p_vout->p_sys->drawable.CGrafPtr = GetWindowPort(p_vout->p_sys->theWindow); ++ aglSetDrawable(p_vout->p_sys->agl_ctx, p_vout->p_sys->drawable.CGrafPtr); + aglSetCurrentContext(p_vout->p_sys->agl_ctx); + aglSetViewport(p_vout, deviceRect, deviceRect); + //aglSetFullScreen(p_vout->p_sys->agl_ctx, device_width, device_height, 0, 0); +@@ -753,11 +854,10 @@ + + case VOUT_REPARENT: + { +- AGLDrawable drawable = (AGLDrawable)va_arg( args, int); +- if( !p_vout->b_fullscreen && drawable != p_vout->p_sys->agl_drawable ) ++ libvlc_drawable_t drawable = (libvlc_drawable_t)va_arg( args, int); ++ if( !p_vout->b_fullscreen ) + { +- p_vout->p_sys->agl_drawable = drawable; +- aglSetDrawable(p_vout->p_sys->agl_ctx, drawable); ++ aglProcessDrawable( p_vout, drawable ); + } + return VLC_SUCCESS; + } +@@ -771,8 +871,16 @@ + { + if( ! p_vout->p_sys->b_clipped_out ) + { ++ WindowRef win; ++ Rect rect; ++ + p_vout->p_sys->b_got_frame = VLC_TRUE; + aglSwapBuffers(p_vout->p_sys->agl_ctx); ++ ++ win = aglGetWindow( p_vout ); ++ rect = aglGetBounds( p_vout ); ++ ++ InvalWindowRect( win, &rect ); + } + else + { +@@ -788,12 +896,14 @@ + // however AGL coordinates are based on window structure region + // and are vertically flipped + GLint rect[4]; +- CGrafPtr port = (CGrafPtr)p_vout->p_sys->agl_drawable; ++ WindowRef window; + Rect winBounds, clientBounds; + +- GetWindowBounds(GetWindowFromPort(port), ++ window = aglGetWindow( p_vout ); ++ ++ GetWindowBounds(window, + kWindowStructureRgn, &winBounds); +- GetWindowBounds(GetWindowFromPort(port), ++ GetWindowBounds(window, + kWindowContentRgn, &clientBounds); + + /* update video clipping bounds in drawable */ +Index: bindings/python/vlc_instance.c +=================================================================== +--- bindings/python/vlc_instance.c (revision 20403) ++++ bindings/python/vlc_instance.c (working copy) +@@ -349,6 +349,30 @@ + } + + static PyObject * ++vlcInstance_video_set_macosx_parent_type( PyObject *self, PyObject *args ) ++{ ++ libvlc_exception_t ex; ++ int i_drawable_type; ++ ++ if( !PyArg_ParseTuple( args, "i", &i_drawable_type ) ) ++ return NULL; ++ ++ if( i_drawable_type != VLCDrawableCGrafPtr ++ && i_drawable_type != VLCDrawableControlRef ) ++ { ++ PyErr_SetString( vlcInstance_Exception, "Invalid drawable type." 
); ++ return NULL; ++ } ++ ++ LIBVLC_TRY; ++ libvlc_video_set_macosx_parent_type( LIBVLC_INSTANCE->p_instance, (libvlc_macosx_drawable_type_t) i_drawable_type, &ex ); ++ LIBVLC_EXCEPT; ++ ++ Py_INCREF( Py_None ); ++ return Py_None; ++} ++ ++static PyObject * + vlcInstance_video_set_size( PyObject *self, PyObject *args ) + { + libvlc_exception_t ex; +@@ -733,6 +757,8 @@ + "playlist_get_input() -> object Return the current input"}, + { "video_set_parent", vlcInstance_video_set_parent, METH_VARARGS, + "video_set_parent(xid=int) Set the parent xid or HWND"}, ++ { "video_set_macosx_parent_type", vlcInstance_video_set_macosx_parent_type, METH_VARARGS, ++ "video_set_macosx_parent_type(drawabletype=int) Set the type of parent used on Mac OS/X (see the Drawable* constants)"}, + { "video_set_size", vlcInstance_video_set_size, METH_VARARGS, + "video_set_size(width=int, height=int) Set the video width and height"}, + { "audio_toggle_mute", vlcInstance_audio_toggle_mute, METH_VARARGS, +Index: bindings/python/vlc_module.c +=================================================================== +--- bindings/python/vlc_module.c (revision 20403) ++++ bindings/python/vlc_module.c (working copy) +@@ -147,6 +147,10 @@ + mediacontrol_EndStatus ); + PyModule_AddIntConstant( p_module, "UndefinedStatus", + mediacontrol_UndefinedStatus ); ++ PyModule_AddIntConstant( p_module, "DrawableCGrafPtr", ++ VLCDrawableCGrafPtr ); ++ PyModule_AddIntConstant( p_module, "DrawableControlRef", ++ VLCDrawableControlRef ); + } + + +Index: src/control/video.c +=================================================================== +--- src/control/video.c (revision 20403) ++++ src/control/video.c (working copy) +@@ -277,6 +277,21 @@ + + /* global video settings */ + ++void libvlc_video_set_macosx_parent_type( libvlc_instance_t *p_instance, libvlc_macosx_drawable_type_t t, ++ libvlc_exception_t *p_e ) ++{ ++ var_SetInteger(p_instance->p_libvlc_int, "macosx-drawable-type", (int)t); ++} ++ ++libvlc_macosx_drawable_type_t libvlc_video_get_macosx_parent_type( libvlc_instance_t *p_instance, libvlc_exception_t *p_e ) ++{ ++ libvlc_macosx_drawable_type_t result; ++ ++ result = var_GetInteger( p_instance->p_libvlc_int, "macosx-drawable-type" ); ++ ++ return result; ++} ++ + void libvlc_video_set_parent( libvlc_instance_t *p_instance, libvlc_drawable_t d, + libvlc_exception_t *p_e ) + { +Index: src/libvlc-common.c +=================================================================== +--- src/libvlc-common.c (revision 20403) ++++ src/libvlc-common.c (working copy) +@@ -941,6 +941,10 @@ + var_Create( p_libvlc, "drawable-clip-bottom", VLC_VAR_INTEGER ); + var_Create( p_libvlc, "drawable-clip-right", VLC_VAR_INTEGER ); + ++#ifdef __APPLE__ ++ var_Create( p_libvlc, "macosx-drawable-type", VLC_VAR_INTEGER ); ++#endif ++ + /* Create volume callback system. 
*/ + var_Create( p_libvlc, "volume-change", VLC_VAR_BOOL ); + +Index: include/vlc/libvlc.h +=================================================================== +--- include/vlc/libvlc.h (revision 20403) ++++ include/vlc/libvlc.h (working copy) +@@ -424,6 +424,10 @@ + */ + VLC_PUBLIC_API void libvlc_video_redraw_rectangle( libvlc_input_t *, const libvlc_rectangle_t *, libvlc_exception_t * ); + ++VLC_PUBLIC_API void libvlc_video_set_macosx_parent_type( libvlc_instance_t *, libvlc_macosx_drawable_type_t, libvlc_exception_t * ); ++ ++VLC_PUBLIC_API libvlc_macosx_drawable_type_t libvlc_video_get_macosx_parent_type( libvlc_instance_t *, libvlc_exception_t * ); ++ + /** + * Set the default video output parent + * this settings will be used as default for all video outputs +Index: include/vlc/libvlc_structures.h +=================================================================== +--- include/vlc/libvlc_structures.h (revision 20403) ++++ include/vlc/libvlc_structures.h (working copy) +@@ -83,12 +83,22 @@ + /** + * Downcast to this general type as placeholder for a platform specific one, such as: + * Drawable on X11, +-* CGrafPort on MacOSX, ++* (libvlc_macosx_drawable_type_t) on MacOSX, + * HWND on win32 + */ + typedef int libvlc_drawable_t; + + /** ++* Type of libvlc_drawable_t on MaxOSX. Available types: ++* - VLCDrawableCGrafPtr ++* - VLCDrawableControlRef ++*/ ++typedef enum { ++ VLCDrawableCGrafPtr = 0, ++ VLCDrawableControlRef, ++} libvlc_macosx_drawable_type_t; ++ ++/** + * Rectangle type for video geometry + */ + typedef struct diff --git a/tribler-mod/Tribler/Player/Build/Ubuntu/changelog b/tribler-mod/Tribler/Player/Build/Ubuntu/changelog new file mode 100644 index 0000000..dbf4e49 --- /dev/null +++ b/tribler-mod/Tribler/Player/Build/Ubuntu/changelog @@ -0,0 +1,5 @@ +swarmplayer (1.0.0-1ubuntu3) hardy; urgency=low + + * First release + + -- Tribler Tue, 17 Jun 2008 11:22:05 +0200 diff --git a/tribler-mod/Tribler/Player/Build/Ubuntu/compat b/tribler-mod/Tribler/Player/Build/Ubuntu/compat new file mode 100644 index 0000000..b8626c4 --- /dev/null +++ b/tribler-mod/Tribler/Player/Build/Ubuntu/compat @@ -0,0 +1 @@ +4 diff --git a/tribler-mod/Tribler/Player/Build/Ubuntu/control b/tribler-mod/Tribler/Player/Build/Ubuntu/control new file mode 100644 index 0000000..614499d --- /dev/null +++ b/tribler-mod/Tribler/Player/Build/Ubuntu/control @@ -0,0 +1,16 @@ +Source: swarmplayer +Section: net +Priority: optional +Maintainer: Arno Bakker +Standards-Version: 3.7.2 +Build-Depends: python, debhelper (>= 5.0.37.2), devscripts + +Package: swarmplayer +Architecture: all +Depends: python, python-wxgtk2.8, python-m2crypto, python-apsw, vlc, ffmpeg +Description: Python based Bittorrent/Internet TV viewer. + It allows you to watch BitTorrent-hosted videos on demand and + plays live Tribler streams. It is based on the same core as the + Tribler TV application. + . + Homepage: http://www.tribler.org/ diff --git a/tribler-mod/Tribler/Player/Build/Ubuntu/copyright b/tribler-mod/Tribler/Player/Build/Ubuntu/copyright new file mode 100644 index 0000000..68f78f6 --- /dev/null +++ b/tribler-mod/Tribler/Player/Build/Ubuntu/copyright @@ -0,0 +1,630 @@ +Unless otherwise noted, all files are released under the MIT +license, exceptions contain licensing information in them. 
+ +Copyright (C) 2001-2002 Bram Cohen + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation files +(the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of the Software, +and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +The Software is provided "AS IS", without warranty of any kind, +express or implied, including but not limited to the warranties of +merchantability, fitness for a particular purpose and +noninfringement. In no event shall the authors or copyright holders +be liable for any claim, damages or other liability, whether in an +action of contract, tort or otherwise, arising from, out of or in +connection with the Software or the use or other dealings in the +Software. + +------------------------------------------------------------------------------ + +All code written by Jie Yang, Pawel Garbacki, Jun Wang, Arno Bakker, +Jan David Mol, Qin Chen, Yuan Yuan, Jelle Roozenburg, Freek Zindel, +Fabian van der Werf, Lucian Musat, Michel Meulpolder, Maarten ten Brinke, +Ali Abbas, Boxun Zhang, Lucia d' Acunto, Rameez Rahman, Boudewijn Schoon, +Richard Gwin, Diego Rabaioli, Riccardo Petrocco has the following license: + + TRIBLER file-sharing library. + + Copyright (c) 2005-2009, Delft University of Technology and Vrije + Universiteit Amsterdam; All rights reserved. + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + + Delft University of Technology + Postbus 5 + 2600 AA Delft + The Netherlands + + Vrije Universiteit + De Boelelaan 1105 + 1081 HV Amsterdam + The Netherlands + + +The research leading to this library has received funding from: + - BSIK Freeband Communication I-Share project (Dutch Ministry of Economic + Affairs) + - Netherlands Organisation for Scientific Research (NWO) grant 612.060.215. + - Dutch Technology Foundation STW: Veni project DTC.7299 + - European Community's Sixth Framework Programme in the P2P-FUSION project + under contract no 035249. + - The European Community's Seventh Framework Programme in the P2P-Next project + under grant agreement no 216217. + +------------------------------------------------------------------------------- + + BuddyCast4 content-recommendation library. + + The research leading to this library has received funding from the + European Community's Seventh Framework Programme [FP7/2007-2011] + in the Petamedia project under grant agreement no. 
216444 + + The following library modules are Copyright (c) 2008-2009, + Delft University of Technology and Technische Universität Berlin; + All rights reserved. + + BaseLib/Core/BuddyCast/buddycast.py + + The following library modules are Copyright (c) 2008-2009, + Technische Universität Berlin; + All rights reserved. + + BaseLib/Core/Search/Reranking.py + BaseLib/Test/test_buddycast4.py + BaseLib/Test/test_buddycast4_stresstest.py + + All library modules are free software, unless stated otherwise; you can + redistribute them and/or modify them under the terms of the GNU Lesser + General Public License as published by the Free Software Foundation; in + particular, version 2.1 of the License. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + + Delft University of Technology + Postbus 5 + 2600 AA Delft + The Netherlands + + Technische Universität Berlin + Strasse des 17. Juni 135 + 10623 Berlin + Germany + +------------------------------------------------------------------------------- + + GNU LESSER GENERAL PUBLIC LICENSE + Version 2.1, February 1999 + + Copyright (C) 1991, 1999 Free Software Foundation, Inc. + 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + +[This is the first released version of the Lesser GPL. It also counts + as the successor of the GNU Library Public License, version 2, hence + the version number 2.1.] + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +Licenses are intended to guarantee your freedom to share and change +free software--to make sure the software is free for all its users. + + This license, the Lesser General Public License, applies to some +specially designated software packages--typically libraries--of the +Free Software Foundation and other authors who decide to use it. You +can use it too, but we suggest you first think carefully about whether +this license or the ordinary General Public License is the better +strategy to use in any particular case, based on the explanations below. + + When we speak of free software, we are referring to freedom of use, +not price. Our General Public Licenses are designed to make sure that +you have the freedom to distribute copies of free software (and charge +for this service if you wish); that you receive source code or can get +it if you want it; that you can change the software and use pieces of +it in new free programs; and that you are informed that you can do +these things. + + To protect your rights, we need to make restrictions that forbid +distributors to deny you these rights or to ask you to surrender these +rights. These restrictions translate to certain responsibilities for +you if you distribute copies of the library or if you modify it. + + For example, if you distribute copies of the library, whether gratis +or for a fee, you must give the recipients all the rights that we gave +you. You must make sure that they, too, receive or can get the source +code. 
If you link other code with the library, you must provide +complete object files to the recipients, so that they can relink them +with the library after making changes to the library and recompiling +it. And you must show them these terms so they know their rights. + + We protect your rights with a two-step method: (1) we copyright the +library, and (2) we offer you this license, which gives you legal +permission to copy, distribute and/or modify the library. + + To protect each distributor, we want to make it very clear that +there is no warranty for the free library. Also, if the library is +modified by someone else and passed on, the recipients should know +that what they have is not the original version, so that the original +author's reputation will not be affected by problems that might be +introduced by others. + + Finally, software patents pose a constant threat to the existence of +any free program. We wish to make sure that a company cannot +effectively restrict the users of a free program by obtaining a +restrictive license from a patent holder. Therefore, we insist that +any patent license obtained for a version of the library must be +consistent with the full freedom of use specified in this license. + + Most GNU software, including some libraries, is covered by the +ordinary GNU General Public License. This license, the GNU Lesser +General Public License, applies to certain designated libraries, and +is quite different from the ordinary General Public License. We use +this license for certain libraries in order to permit linking those +libraries into non-free programs. + + When a program is linked with a library, whether statically or using +a shared library, the combination of the two is legally speaking a +combined work, a derivative of the original library. The ordinary +General Public License therefore permits such linking only if the +entire combination fits its criteria of freedom. The Lesser General +Public License permits more lax criteria for linking other code with +the library. + + We call this license the "Lesser" General Public License because it +does Less to protect the user's freedom than the ordinary General +Public License. It also provides other free software developers Less +of an advantage over competing non-free programs. These disadvantages +are the reason we use the ordinary General Public License for many +libraries. However, the Lesser license provides advantages in certain +special circumstances. + + For example, on rare occasions, there may be a special need to +encourage the widest possible use of a certain library, so that it becomes +a de-facto standard. To achieve this, non-free programs must be +allowed to use the library. A more frequent case is that a free +library does the same job as widely used non-free libraries. In this +case, there is little to gain by limiting the free library to free +software only, so we use the Lesser General Public License. + + In other cases, permission to use a particular library in non-free +programs enables a greater number of people to use a large body of +free software. For example, permission to use the GNU C Library in +non-free programs enables many more people to use the whole GNU +operating system, as well as its variant, the GNU/Linux operating +system. 
+ + Although the Lesser General Public License is Less protective of the +users' freedom, it does ensure that the user of a program that is +linked with the Library has the freedom and the wherewithal to run +that program using a modified version of the Library. + + The precise terms and conditions for copying, distribution and +modification follow. Pay close attention to the difference between a +"work based on the library" and a "work that uses the library". The +former contains code derived from the library, whereas the latter must +be combined with the library in order to run. + + GNU LESSER GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License Agreement applies to any software library or other +program which contains a notice placed by the copyright holder or +other authorized party saying it may be distributed under the terms of +this Lesser General Public License (also called "this License"). +Each licensee is addressed as "you". + + A "library" means a collection of software functions and/or data +prepared so as to be conveniently linked with application programs +(which use some of those functions and data) to form executables. + + The "Library", below, refers to any such software library or work +which has been distributed under these terms. A "work based on the +Library" means either the Library or any derivative work under +copyright law: that is to say, a work containing the Library or a +portion of it, either verbatim or with modifications and/or translated +straightforwardly into another language. (Hereinafter, translation is +included without limitation in the term "modification".) + + "Source code" for a work means the preferred form of the work for +making modifications to it. For a library, complete source code means +all the source code for all modules it contains, plus any associated +interface definition files, plus the scripts used to control compilation +and installation of the library. + + Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running a program using the Library is not restricted, and output from +such a program is covered only if its contents constitute a work based +on the Library (independent of the use of the Library in a tool for +writing it). Whether that is true depends on what the Library does +and what the program that uses the Library does. + + 1. You may copy and distribute verbatim copies of the Library's +complete source code as you receive it, in any medium, provided that +you conspicuously and appropriately publish on each copy an +appropriate copyright notice and disclaimer of warranty; keep intact +all the notices that refer to this License and to the absence of any +warranty; and distribute a copy of this License along with the +Library. + + You may charge a fee for the physical act of transferring a copy, +and you may at your option offer warranty protection in exchange for a +fee. + + 2. You may modify your copy or copies of the Library or any portion +of it, thus forming a work based on the Library, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. + + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. 
+ + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. + + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. + + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Library, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Library, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote +it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Library. + +In addition, mere aggregation of another work not based on the Library +with the Library (or with a work based on the Library) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may opt to apply the terms of the ordinary GNU General Public +License instead of this License to a given copy of the Library. To do +this, you must alter all the notices that refer to this License, so +that they refer to the ordinary GNU General Public License, version 2, +instead of to this License. (If a newer version than version 2 of the +ordinary GNU General Public License has appeared, then you can specify +that version instead if you wish.) Do not make any other change in +these notices. + + Once this change is made in a given copy, it is irreversible for +that copy, so the ordinary GNU General Public License applies to all +subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of +the Library into a program that is not a library. + + 4. You may copy and distribute the Library (or a portion or +derivative of it, under Section 2) in object code or executable form +under the terms of Sections 1 and 2 above provided that you accompany +it with the complete corresponding machine-readable source code, which +must be distributed under the terms of Sections 1 and 2 above on a +medium customarily used for software interchange. 
+ + If distribution of object code is made by offering access to copy +from a designated place, then offering equivalent access to copy the +source code from the same place satisfies the requirement to +distribute the source code, even though third parties are not +compelled to copy the source along with the object code. + + 5. A program that contains no derivative of any portion of the +Library, but is designed to work with the Library by being compiled or +linked with it, is called a "work that uses the Library". Such a +work, in isolation, is not a derivative work of the Library, and +therefore falls outside the scope of this License. + + However, linking a "work that uses the Library" with the Library +creates an executable that is a derivative of the Library (because it +contains portions of the Library), rather than a "work that uses the +library". The executable is therefore covered by this License. +Section 6 states terms for distribution of such executables. + + When a "work that uses the Library" uses material from a header file +that is part of the Library, the object code for the work may be a +derivative work of the Library even though the source code is not. +Whether this is true is especially significant if the work can be +linked without the Library, or if the work is itself a library. The +threshold for this to be true is not precisely defined by law. + + If such an object file uses only numerical parameters, data +structure layouts and accessors, and small macros and small inline +functions (ten lines or less in length), then the use of the object +file is unrestricted, regardless of whether it is legally a derivative +work. (Executables containing this object code plus portions of the +Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may +distribute the object code for the work under the terms of Section 6. +Any executables containing that work also fall under Section 6, +whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also combine or +link a "work that uses the Library" with the Library to produce a +work containing portions of the Library, and distribute that work +under terms of your choice, provided that the terms permit +modification of the work for the customer's own use and reverse +engineering for debugging such modifications. + + You must give prominent notice with each copy of the work that the +Library is used in it and that the Library and its use are covered by +this License. You must supply a copy of this License. If the work +during execution displays copyright notices, you must include the +copyright notice for the Library among them, as well as a reference +directing the user to the copy of this License. Also, you must do one +of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. (It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) 
+ + b) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (1) uses at run time a + copy of the library already present on the user's computer system, + rather than copying library functions into the executable, and (2) + will operate properly with a modified version of the library, if + the user installs one, as long as the modified version is + interface-compatible with the version that the work was made with. + + c) Accompany the work with a written offer, valid for at + least three years, to give the same user the materials + specified in Subsection 6a, above, for a charge no more + than the cost of performing this distribution. + + d) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. + + e) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. + + For an executable, the required form of the "work that uses the +Library" must include any data and utility programs needed for +reproducing the executable from it. However, as a special exception, +the materials to be distributed need not include anything that is +normally distributed (in either source or binary form) with the major +components (compiler, kernel, and so on) of the operating system on +which the executable runs, unless that component itself accompanies +the executable. + + It may happen that this requirement contradicts the license +restrictions of other proprietary libraries that do not normally +accompany the operating system. Such a contradiction means you cannot +use both them and the Library together in an executable that you +distribute. + + 7. You may place library facilities that are a work based on the +Library side-by-side in a single library together with other library +facilities not covered by this License, and distribute such a combined +library, provided that the separate distribution of the work based on +the Library and of the other library facilities is otherwise +permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. You may not copy, modify, sublicense, link with, or distribute +the Library except as expressly provided under this License. Any +attempt otherwise to copy, modify, sublicense, link with, or +distribute the Library is void, and will automatically terminate your +rights under this License. However, parties who have received copies, +or rights, from you under this License will not have their licenses +terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Library or its derivative works. These actions are +prohibited by law if you do not accept this License. 
Therefore, by +modifying or distributing the Library (or any work based on the +Library), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Library or works based on it. + + 10. Each time you redistribute the Library (or any work based on the +Library), the recipient automatically receives a license from the +original licensor to copy, distribute, link with or modify the Library +subject to these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties with +this License. + + 11. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Library at all. For example, if a patent +license would not permit royalty-free redistribution of the Library by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Library. + +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply, +and the section as a whole is intended to apply in other circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 12. If the distribution and/or use of the Library is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Library under this License may add +an explicit geographical distribution limitation excluding those countries, +so that distribution is permitted only in or among countries not thus +excluded. In such case, this License incorporates the limitation as if +written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new +versions of the Lesser General Public License from time to time. +Such new versions will be similar in spirit to the present version, +but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Library +specifies a version number of this License which applies to it and +"any later version", you have the option of following the terms and +conditions either of that version or of any later version published by +the Free Software Foundation. 
If the Library does not specify a +license version number, you may choose any version ever published by +the Free Software Foundation. + + 14. If you wish to incorporate parts of the Library into other free +programs whose distribution conditions are incompatible with these, +write to the author to ask for permission. For software which is +copyrighted by the Free Software Foundation, write to the Free +Software Foundation; we sometimes make exceptions for this. Our +decision will be guided by the two goals of preserving the free status +of all derivatives of our free software and of promoting the sharing +and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO +WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR +OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY +KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE +LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME +THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY +AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU +FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR +CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE +LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING +RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A +FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF +SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Libraries + + If you develop a new library, and you want it to be of the greatest +possible use to the public, we recommend making it free software that +everyone can redistribute and change. You can do so by permitting +redistribution under these terms (or, alternatively, under the terms of the +ordinary General Public License). + + To apply these terms, attach the following notices to the library. It is +safest to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least the +"copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +Also add information on how to contact you by electronic and paper mail. 
+ +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the library, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the + library `Frob' (a library for tweaking knobs) written by James Random Hacker. + + , 1 April 1990 + Ty Coon, President of Vice + +That's all there is to it! + +------------------------------------------------------------------------------- + +PRIVACY WARNING: This software will by default exchange your download +history with others. This feature can be disabled by disabling the +recommender in the Preference menu. See also the disclaimer on +http://www.tribler.org/ diff --git a/tribler-mod/Tribler/Player/Build/Ubuntu/files b/tribler-mod/Tribler/Player/Build/Ubuntu/files new file mode 100644 index 0000000..06b52fd --- /dev/null +++ b/tribler-mod/Tribler/Player/Build/Ubuntu/files @@ -0,0 +1 @@ +swarmplayer_1.0.0-1ubuntu3_all.deb net optional diff --git a/tribler-mod/Tribler/Player/Build/Ubuntu/prerm b/tribler-mod/Tribler/Player/Build/Ubuntu/prerm new file mode 100644 index 0000000..082c7fe --- /dev/null +++ b/tribler-mod/Tribler/Player/Build/Ubuntu/prerm @@ -0,0 +1,47 @@ +#! /bin/sh +# prerm script for #PACKAGE# +# +# see: dh_installdeb(1) + +set -e + +# summary of how this script can be called: +# * `remove' +# * `upgrade' +# * `failed-upgrade' +# * `remove' `in-favour' +# * `deconfigure' `in-favour' +# `removing' +# +# for details, see http://www.debian.org/doc/debian-policy/ or +# the debian-policy package + +PACKAGE="swarmplayer" + +dpkg --listfiles $PACKAGE | + awk '$0~/\.py$/ {print $0"c\n" $0"o"}' | + xargs rm -f >&2 + +killall $PACKAGE || : + + +case "$1" in + remove|upgrade|deconfigure) +# install-info --quiet --remove /usr/info/#PACKAGE#.info.gz + ;; + failed-upgrade) + ;; + *) + echo "prerm called with unknown argument \`$1'" >&2 + exit 1 + ;; +esac + +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. + +#DEBHELPER# + +exit 0 + + diff --git a/tribler-mod/Tribler/Player/Build/Ubuntu/pycompat b/tribler-mod/Tribler/Player/Build/Ubuntu/pycompat new file mode 100644 index 0000000..0cfbf08 --- /dev/null +++ b/tribler-mod/Tribler/Player/Build/Ubuntu/pycompat @@ -0,0 +1 @@ +2 diff --git a/tribler-mod/Tribler/Player/Build/Ubuntu/rules b/tribler-mod/Tribler/Player/Build/Ubuntu/rules new file mode 100755 index 0000000..86e8f9c --- /dev/null +++ b/tribler-mod/Tribler/Player/Build/Ubuntu/rules @@ -0,0 +1,85 @@ +#!/usr/bin/make -f +# Sample debian/rules that uses debhelper. +# GNU copyright 1997 to 1999 by Joey Hess. + +# Uncomment this to turn on verbose mode. +#export DH_VERBOSE=1 +LIBRARYNAME=Tribler + +configure: configure-stamp +configure-stamp: + dh_testdir + # Add here commands to configure the package. + + touch configure-stamp + + +build: build-stamp + +build-stamp: configure-stamp + dh_testdir + + # Add here commands to compile the package. + #$(MAKE) + #/usr/bin/docbook-to-man debian/bittorrent.sgml > bittorrent.1 + + touch build-stamp + +clean: + dh_testdir + dh_testroot + rm -f build-stamp configure-stamp + + # Add here commands to clean up after the build process. + #-$(MAKE) clean + find . -name '*.pyc' |xargs rm || : + + dh_clean + +install: build + dh_testdir + dh_testroot + dh_clean -k + dh_installdirs + +# Build architecture-independent files here. +binary-arch: build install +# We have nothing to do by default. + + +# Build architecture-dependent files here. 
+binary-indep: build install + dh_testdir + dh_testroot + dh_installdocs + dh_installexamples + dh_installmenu + dh_installmime + dh_installman + + mkdir -p debian/swarmplayer/usr/share/swarmplayer/ + cp -rf `ls -1d ${LIBRARYNAME} khashmir` debian/swarmplayer/usr/share/swarmplayer/ + rm -rf debian/swarmplayer/usr/share/swarmplayer/${LIBRARYNAME}/Test + # add other files + mkdir -p debian/swarmplayer/usr/bin + cp -f debian/swarmplayer.sh debian/swarmplayer/usr/bin/swarmplayer + cp -f ${LIBRARYNAME}/LICENSE.txt debian/copyright + # for the menu + mkdir -p debian/swarmplayer/usr/share/pixmaps + cp -f debian/swarmplayer.xpm debian/swarmplayer/usr/share/pixmaps/ + + dh_installchangelogs + dh_installinit -r --no-start -- stop 20 0 6 . + dh_install --sourcedir=debian/tmp + dh_install debian/swarmplayer.desktop usr/share/applications + dh_link + dh_compress + dh_fixperms + dh_installdeb + dh_python + dh_gencontrol + dh_md5sums + dh_builddeb + +binary: binary-indep binary-arch +.PHONY: build clean binary-indep binary-arch binary install configure diff --git a/tribler-mod/Tribler/Player/Build/Ubuntu/swarmplayer.1 b/tribler-mod/Tribler/Player/Build/Ubuntu/swarmplayer.1 new file mode 100644 index 0000000..f909b4e --- /dev/null +++ b/tribler-mod/Tribler/Player/Build/Ubuntu/swarmplayer.1 @@ -0,0 +1,22 @@ +.\" SwarmPlayer: Python based Bittorrent/Internet TV viewer +.TH man 1 "12 June 2007" "1.0" "SwarmPlayer man page" +.SH NAME +swarmplayer \- Python based Bittorrent/Internet TV viewer +.SH SYNOPSIS +.B swarmplayer +.SH DESCRIPTION +.B SwarmPlayer +is a python-based Bittorrent/Internet TV viewer. +It allows you to watch BitTorrent-hosted videos on demand and +plays live Tribler streams. It is based on the same core as the +Tribler TV application. + +Homepage: http://www.tribler.org +.SH FILES +.P +.I /usr/bin/swarmplayer +.I /usr/share/swarmplayer +.SH AUTHOR +.nf +Arno Bakker (arno@cs.vu.nl) +.fi diff --git a/tribler-mod/Tribler/Player/Build/Ubuntu/swarmplayer.desktop b/tribler-mod/Tribler/Player/Build/Ubuntu/swarmplayer.desktop new file mode 100644 index 0000000..c3b1833 --- /dev/null +++ b/tribler-mod/Tribler/Player/Build/Ubuntu/swarmplayer.desktop @@ -0,0 +1,8 @@ +[Desktop Entry] +Name=SwarmPlayer +GenericName=Bittorrent Video-On-Demand / Live streaming client +Exec=swarmplayer +Icon=swarmplayer +Terminal=false +Type=Application +Categories=Application;Network;P2P diff --git a/tribler-mod/Tribler/Player/Build/Ubuntu/swarmplayer.manpages b/tribler-mod/Tribler/Player/Build/Ubuntu/swarmplayer.manpages new file mode 100644 index 0000000..acc1bea --- /dev/null +++ b/tribler-mod/Tribler/Player/Build/Ubuntu/swarmplayer.manpages @@ -0,0 +1 @@ +debian/swarmplayer.1 diff --git a/tribler-mod/Tribler/Player/Build/Ubuntu/swarmplayer.menu b/tribler-mod/Tribler/Player/Build/Ubuntu/swarmplayer.menu new file mode 100644 index 0000000..1de95ce --- /dev/null +++ b/tribler-mod/Tribler/Player/Build/Ubuntu/swarmplayer.menu @@ -0,0 +1,4 @@ +?package(swarmplayer):needs="x11" section="Apps/Net" \ + title="SwarmPlayer" \ + icon="/usr/share/pixmaps/swarmplayer.xpm" \ + command="swarmplayer" diff --git a/tribler-mod/Tribler/Player/Build/Ubuntu/swarmplayer.postinst.debhelper b/tribler-mod/Tribler/Player/Build/Ubuntu/swarmplayer.postinst.debhelper new file mode 100644 index 0000000..8637a4e --- /dev/null +++ b/tribler-mod/Tribler/Player/Build/Ubuntu/swarmplayer.postinst.debhelper @@ -0,0 +1,5 @@ +# Automatically added by dh_installmenu +if [ "$1" = "configure" ] && [ -x "`which update-menus 2>/dev/null`" ]; then + update-menus 
+fi +# End automatically added section diff --git a/tribler-mod/Tribler/Player/Build/Ubuntu/swarmplayer.postrm.debhelper b/tribler-mod/Tribler/Player/Build/Ubuntu/swarmplayer.postrm.debhelper new file mode 100644 index 0000000..2b4be4f --- /dev/null +++ b/tribler-mod/Tribler/Player/Build/Ubuntu/swarmplayer.postrm.debhelper @@ -0,0 +1,3 @@ +# Automatically added by dh_installmenu +if [ -x "`which update-menus 2>/dev/null`" ]; then update-menus ; fi +# End automatically added section diff --git a/tribler-mod/Tribler/Player/Build/Ubuntu/swarmplayer.sh b/tribler-mod/Tribler/Player/Build/Ubuntu/swarmplayer.sh new file mode 100755 index 0000000..b5be675 --- /dev/null +++ b/tribler-mod/Tribler/Player/Build/Ubuntu/swarmplayer.sh @@ -0,0 +1,31 @@ +#!/bin/sh +# Startup script for Ubuntu Linux + +# don't care about gtk/x11/whatever. Currently (>= 3.4.0) must be unicode +WXPYTHONVER24=`ls -1d /usr/lib/python2.4/site-packages/wx-2.8* 2>/dev/null | grep -v ansi | sed -e 's/.*wx-//g' -e 's/-.*//g' | sort -nr | head -1` +WXPYTHONVER25=`ls -1d /usr/lib/python2.5/site-packages/wx-2.8* 2>/dev/null | grep -v ansi | sed -e 's/.*wx-//g' -e 's/-.*//g' | sort -nr | head -1` + +if [ "$WXPYTHONVER24" = "" ] && [ "$WXPYTHONVER25" = "" ]; +then + echo "Hmmm... No wxPython unicode package found for python2.4 or 2.5, cannot run Tribler, sorry" + exit -1 +fi + +if [ "$WXPYTHONVER25" = "" ]; +then + PYTHON="python2.4" + WXPYTHONVER=$WXPYTHONVER24 + echo "Using python2.4" +else + PYTHON="python2.5" + WXPYTHONVER=$WXPYTHONVER25 + echo "Using python2.5" +fi + +WXPYTHON=`ls -1d /usr/lib/$PYTHON/site-packages/wx-$WXPYTHONVER* | grep -v ansi | head -1` + +PYTHONPATH=/usr/share/swarmplayer/:$WXPYTHON +export PYTHONPATH + +cd /usr/share/swarmplayer +exec $PYTHON /usr/share/swarmplayer/Tribler/Player/swarmplayer.py "$@" > /tmp/$USER-swarmplayer.log 2>&1 diff --git a/tribler-mod/Tribler/Player/Build/Ubuntu/swarmplayer.xpm b/tribler-mod/Tribler/Player/Build/Ubuntu/swarmplayer.xpm new file mode 100644 index 0000000..579b53f --- /dev/null +++ b/tribler-mod/Tribler/Player/Build/Ubuntu/swarmplayer.xpm @@ -0,0 +1,257 @@ +/* XPM */ +static char * swarmplayer_xpm[] = { +"32 32 222 2", +" c None", +". c #8F8F90", +"+ c #909191", +"@ c #8F9090", +"# c #8D8D8E", +"$ c #8B8B8C", +"% c #8A8A8B", +"& c #8A8A8A", +"* c #898A8A", +"= c #89898A", +"- c #898989", +"; c #888989", +"> c #888889", +", c #888888", +"' c #878888", +") c #878788", +"! 
c #878787", +"~ c #868787", +"{ c #868687", +"] c #858686", +"^ c #868686", +"/ c #8B8B8B", +"( c #C0C0C0", +"_ c #EBECEC", +": c #EEEFEF", +"< c #EEEEEE", +"[ c #EDEEEE", +"} c #ECEDED", +"| c #ECECEC", +"1 c #EAEBEB", +"2 c #C7C7C8", +"3 c #616161", +"4 c #C2E1C2", +"5 c #7BCA7A", +"6 c #E0EAE0", +"7 c #AEDCAE", +"8 c #CEE5CE", +"9 c #E6ECE7", +"0 c #EBEEEC", +"a c #D0E5D0", +"b c #3EB93B", +"c c #83CD82", +"d c #5AC058", +"e c #5EC45C", +"f c #37B234", +"g c #34B631", +"h c #99D298", +"i c #AEDAAE", +"j c #73C571", +"k c #19AA15", +"l c #1BB317", +"m c #A7D8A7", +"n c #46B543", +"o c #51BF4F", +"p c #A7D5A7", +"q c #42B93F", +"r c #5DBE5C", +"s c #B9DDBA", +"t c #DCE8DD", +"u c #DAE9DA", +"v c #32B530", +"w c #61C45F", +"x c #57C255", +"y c #11B00D", +"z c #1CAE19", +"A c #2AB627", +"B c #57BE54", +"C c #7AC879", +"D c #4BBD49", +"E c #36B934", +"F c #23B120", +"G c #33B630", +"H c #9CD49C", +"I c #CCE4CC", +"J c #56C254", +"K c #52C450", +"L c #3FBB3C", +"M c #38B736", +"N c #0AAD06", +"O c #15AD11", +"P c #0FA90B", +"Q c #1CAF19", +"R c #27B825", +"S c #55BC53", +"T c #1EAF1B", +"U c #25B421", +"V c #38BA35", +"W c #2EB42A", +"X c #40BC3E", +"Y c #5EC15C", +"Z c #84CE83", +"` c #9BD59B", +" . c #EDEFEE", +".. c #616262", +"+. c #9AD499", +"@. c #35B831", +"#. c #1EB11B", +"$. c #25B322", +"%. c #27B424", +"&. c #92D192", +"*. c #84CB83", +"=. c #ABDAAB", +"-. c #D7E7D8", +";. c #C5E1C5", +">. c #BCDDBC", +",. c #DDE9DE", +"'. c #56C255", +"). c #7DCB7C", +"!. c #83CE82", +"~. c #95D595", +"{. c #C7E3C7", +"]. c #626262", +"^. c #75CC74", +"/. c #15AF12", +"(. c #33B72F", +"_. c #1BB118", +":. c #36B532", +"<. c #6EC16D", +"[. c #DAE8DA", +"}. c #42BA3F", +"|. c #72CA70", +"1. c #0EAF0A", +"2. c #2BB428", +"3. c #B6DBB6", +"4. c #DEE8DF", +"5. c #C7E2C7", +"6. c #1FB51C", +"7. c #7AC97A", +"8. c #1AB116", +"9. c #B7DCB7", +"0. c #E0E9E1", +"a. c #31B82E", +"b. c #4ABD48", +"c. c #4EBE4C", +"d. c #6EC86C", +"e. c #E3E4E4", +"f. c #D6D7D7", +"g. c #DBDCDC", +"h. c #D7D8D8", +"i. c #DDDEDE", +"j. c #E7ECE8", +"k. c #87CD86", +"l. c #34B732", +"m. c #1AAC17", +"n. c #ADDAAD", +"o. c #9E9F9F", +"p. c #343434", +"q. c #252525", +"r. c #2A2A2A", +"s. c #525253", +"t. c #2B2B2B", +"u. c #6B6B6B", +"v. c #E9EAEA", +"w. c #87D086", +"x. c #5BC259", +"y. c #5ABF59", +"z. c #AAD8A9", +"A. c #EAEAEA", +"B. c #393939", +"C. c #515151", +"D. c #CFD0D0", +"E. c #C7C8C8", +"F. c #949494", +"G. c #DEDEDE", +"H. c #808080", +"I. c #D9DADA", +"J. c #8D8D8D", +"K. c #B4B5B5", +"L. c #61C460", +"M. c #46BC44", +"N. c #B4DDB4", +"O. c #3F4040", +"P. c #3B3B3B", +"Q. c #969697", +"R. c #BCBCBD", +"S. c #DEDFDF", +"T. c #ECEDEE", +"U. c #9C9C9C", +"V. c #B1B1B1", +"W. c #A5DAA4", +"X. c #4ABD47", +"Y. c #6AC768", +"Z. c #E9EDEA", +"`. c #BCBCBC", +" + c #565656", +".+ c #313131", +"++ c #262626", +"@+ c #484848", +"#+ c #DFE0E0", +"$+ c #262727", +"%+ c #282828", +"&+ c #575858", +"*+ c #E5E6E6", +"=+ c #E6EDE7", +"-+ c #82CE81", +";+ c #2EB52B", +">+ c #D4E7D5", +",+ c #E6E8E8", +"'+ c #919292", +")+ c #A9AAAA", +"!+ c #787878", +"~+ c #C5C6C6", +"{+ c #CECFCF", +"]+ c #4EC04C", +"^+ c #9ED79D", +"/+ c #6A6B6B", +"(+ c #505050", +"_+ c #CBCCCC", +":+ c #8D8E8E", +"<+ c #C5E3C6", +"[+ c #AAAAAA", +"}+ c #787979", +"|+ c #646464", +"1+ c #6F7070", +"2+ c #B7B7B7", +"3+ c #AAABAB", +"4+ c #C8C8C8", +"5+ c #D3D4D4", +"6+ c #848484", +"7+ c #838484", +"8+ c #838383", +"9+ c #828282", +" ", +" . + @ # $ % & * = - ; > , ' ' ) ! 
~ { ] ^ { , * / = ", +" ( _ : : : : : : : : : : : : : < [ [ [ } } | _ _ _ _ 1 2 ", +" } : : : : : : : : : : : : : : : : : : : : : : : : : : [ 3 ", +" : : : : : : : : : : : : : : : : : : : : : : : : : : : : 3 ", +" : : : : : : : : : : : : : : : : : : : : : : : : : : : : 3 ", +" : : : : : : : : 4 5 6 7 8 9 : : : : : : : : : : : : : : 3 ", +" : : : : : : 0 a b c d e f g h : : : : : : : : : : : : : 3 ", +" : : : : : i j k l m n o p q r s t : : : : : : : : : : : 3 ", +" : : : : u v w x y z A B C D E F G H I 0 : : : : : : : : 3 ", +" : : : : J K L M N O P Q R S T U V W X Y Z ` .: : : : : .. ", +" [ : : : +.@.#.$.%.&.*.=.-.;.>.,.'.e ).!.~.{. .: : : : : ]. ", +" [ : : : ^./.(._.:.<.: : : : : : : : : : : : : : : : : : ]. ", +" } : : [.}.|.1.2.3.4.: : : : : : : : : : : : : : : : : : ]. ", +" _ : : 5.6.7.8.9.: : : : : : : : : : : : : : : : : : : : ]. ", +" _ : : 0.a.b.c.d.: : : : : e.f.g.1 : } h.h.h.i.[ : : : : ]. ", +" _ : : j.k.l.m.n.: : : : o.p.q.r.s.1 i.q.q.q.t.u.v.: : : ]. ", +" | : : : w.x.y.z.: : : A.B.C.D.E.F.v.G.q.H.I.J.q.K.: : : ]. ", +" } : : : -.L.M.N.: : : 1 O.P.Q.R.v.: S.q.* T.U.q.V.: : : ]. ", +" [ : : : : W.X.Y.Z.: : : `. +.+++@+#+S.q.++$+%+&+*+: : : ]. ", +" [ : : : : =+-+;+>+: : [ ,+: e.'+q.)+S.q.!+~+{+} : : : : ]. ", +" [ : : : : : =+]+^+: : 1 @+/+H.(+%+_+S.q.:+: : : : : : : ]. ", +" : : : : : : : <+e 0 : : [+}+|+1+2+: e.|+3+: : : : : : : ]. ", +" : : : : : : : : : : : : : : : : : : : : : : : : : : : : ]. ", +" : : : : : : : : : : : : : : : : : : : : : : : : : : : : ]. ", +" : : : : : : : : : : : : : : : : : : : : : : : : : : : : .. ", +" : : : : : : : : : : : : : : : : : : : : : : : : : : : : .. ", +" : : : : : : : : : : : : : : : : : : : : : : : : : : : : ]. ", +" [ : : : : : : : : : : : : : : : : : : : : : : : : : : : ", +" 4+: : : : : : : : : : : : : : : : : : : : : : : : : : 5+ ", +" / ^ 6+7+7+7+8+9+9+9+9+9+9+9+9+9+6+, ", +" "}; diff --git a/tribler-mod/Tribler/Player/Build/Ubuntu/swarmplayer_big.xpm b/tribler-mod/Tribler/Player/Build/Ubuntu/swarmplayer_big.xpm new file mode 100644 index 0000000..80af874 --- /dev/null +++ b/tribler-mod/Tribler/Player/Build/Ubuntu/swarmplayer_big.xpm @@ -0,0 +1,563 @@ +/* XPM */ +static char * swarmplayer_big_xpm[] = { +"48 48 512 2", +" c None", +". c #8F8F8F", +"+ c #A6A6A6", +"@ c #A4A5A5", +"# c #A4A4A4", +"$ c #A3A4A4", +"% c #A2A3A4", +"& c #A2A2A2", +"* c #A1A1A2", +"= c #A0A1A1", +"- c #A0A0A1", +"; c #A0A0A0", +"> c #9FA0A0", +", c #9F9FA0", +"' c #9E9F9F", +") c #9E9E9F", +"! 
c #9E9E9E", +"~ c #9D9E9E", +"{ c #9D9D9E", +"] c #9C9D9D", +"^ c #9C9C9D", +"/ c #9B9C9C", +"( c #9B9B9C", +"_ c #9B9B9B", +": c #9C9D9E", +"< c #8E8E8F", +"[ c #DDDEDE", +"} c #F0F0F0", +"| c #F1F2F2", +"1 c #F0F1F1", +"2 c #EFF0F0", +"3 c #EEEFEF", +"4 c #EDEEEE", +"5 c #ECEDEE", +"6 c #ECEDED", +"7 c #EBECEC", +"8 c #EAEBEB", +"9 c #EAEAEB", +"0 c #E9EAEA", +"a c #E8E9EA", +"b c #E8E9E9", +"c c #E7E8E9", +"d c #E6E7E8", +"e c #E6E6E7", +"f c #E5E6E7", +"g c #E4E5E6", +"h c #E3E4E5", +"i c #E3E3E4", +"j c #E2E3E4", +"k c #E2E3E3", +"l c #D9DADA", +"m c #A9AAAA", +"n c #F5F6F6", +"o c #F4F5F5", +"p c #F3F4F4", +"q c #F2F3F3", +"r c #F1F2F3", +"s c #F0F1F2", +"t c #EDEEEF", +"u c #EBECED", +"v c #EAEBEC", +"w c #E9EAEB", +"x c #DEDFDF", +"y c #616161", +"z c #5A5A5A", +"A c #EFF0F1", +"B c #EEEFF0", +"C c #757575", +"D c #5B5B5B", +"E c #F1F1F2", +"F c #F3F4F5", +"G c #F2F3F4", +"H c #E3ECE4", +"I c #C1E1C2", +"J c #97D397", +"K c #EAECEC", +"L c #DFE9E1", +"M c #D1E5D2", +"N c #E1E9E2", +"O c #E7E8E8", +"P c #E6E7E7", +"Q c #747474", +"R c #77CA75", +"S c #35B532", +"T c #49BA47", +"U c #C0E0C1", +"V c #57C156", +"W c #38BC35", +"X c #5CC15A", +"Y c #6FC66E", +"Z c #ACD8AC", +"` c #DCE6DD", +" . c #E5E7E7", +".. c #737373", +"+. c #ECEEED", +"@. c #DEE9DF", +"#. c #B2DAB2", +"$. c #57C155", +"%. c #31B62E", +"&. c #C2E0C2", +"*. c #58BE56", +"=. c #48BB45", +"-. c #8CD18C", +";. c #2AAC27", +">. c #4FBB4D", +",. c #12AF0F", +"'. c #55BD52", +"). c #D0E0D1", +"!. c #737374", +"~. c #EAEEEB", +"{. c #C7E2C7", +"]. c #9BD39A", +"^. c #37B133", +"/. c #15A811", +"(. c #10B00B", +"_. c #44BD42", +":. c #CDE2CE", +"<. c #60BE5F", +"[. c #2FB12C", +"}. c #36B933", +"|. c #7EC97D", +"1. c #83CB82", +"2. c #57BF55", +"3. c #51BA4F", +"4. c #A7D4A8", +"5. c #DDE4DF", +"6. c #E3E5E5", +"7. c #737474", +"8. c #C7E1C7", +"9. c #31B02F", +"0. c #86CC86", +"a. c #5CBE5A", +"b. c #0EAF0A", +"c. c #13B20F", +"d. c #44BD41", +"e. c #72C871", +"f. c #22AE1F", +"g. c #7AC679", +"h. c #71C56F", +"i. c #C4DDC4", +"j. c #83CC82", +"k. c #2BB528", +"l. c #40B63E", +"m. c #4BBC49", +"n. c #61C05F", +"o. c #7ECA7E", +"p. c #BBDABB", +"q. c #E1EAE2", +"r. c #7ACF78", +"s. c #15AD13", +"t. c #2AB426", +"u. c #A0D69F", +"v. c #39BA36", +"w. c #0CAF08", +"x. c #13B00F", +"y. c #14A810", +"z. c #0FB00C", +"A. c #60C55E", +"B. c #32B32F", +"C. c #3CB73A", +"D. c #93D093", +"E. c #2DB62A", +"F. c #2DB72B", +"G. c #2CB529", +"H. c #18AF15", +"I. c #20B21D", +"J. c #3AB838", +"K. c #8ACD89", +"L. c #B4DBB4", +"M. c #CFE2D1", +"N. c #E6E8E8", +"O. c #67C666", +"P. c #14B510", +"Q. c #64C863", +"R. c #23B320", +"S. c #7DCA7D", +"T. c #1FB01C", +"U. c #09AD05", +"V. c #0BAB08", +"W. c #0CAB08", +"X. c #0DA809", +"Y. c #14AF10", +"Z. c #19AE16", +"`. 
c #12B20F", +" + c #6DC56D", +".+ c #58BB56", +"++ c #1FAE1B", +"@+ c #1DB11A", +"#+ c #1AB117", +"$+ c #3CB839", +"%+ c #4ABE47", +"&+ c #22B01E", +"*+ c #30B72D", +"=+ c #38B835", +"-+ c #77C875", +";+ c #99D398", +">+ c #C0DFC1", +",+ c #BADDBA", +"'+ c #41BF3E", +")+ c #79CD78", +"!+ c #3CBA3A", +"~+ c #27B425", +"{+ c #1BAF17", +"]+ c #0EB10A", +"^+ c #36B833", +"/+ c #44B942", +"(+ c #1AAA16", +"_+ c #20B31D", +":+ c #5CBF5A", +"<+ c #6ECC6D", +"[+ c #70C76F", +"}+ c #46BC43", +"|+ c #3FB83D", +"1+ c #6DC36B", +"2+ c #7BCD7A", +"3+ c #1DB419", +"4+ c #34B731", +"5+ c #35B732", +"6+ c #4BBE49", +"7+ c #7BCC7A", +"8+ c #46BB44", +"9+ c #41BD3F", +"0+ c #4CBE4A", +"a+ c #88CF88", +"b+ c #E7EBE9", +"c+ c #9BD39B", +"d+ c #34BA31", +"e+ c #25B121", +"f+ c #1FB11C", +"g+ c #23B21F", +"h+ c #25B421", +"i+ c #39B836", +"j+ c #44BC41", +"k+ c #BDDBBE", +"l+ c #9FD29F", +"m+ c #BADBBB", +"n+ c #D3E2D5", +"o+ c #E3E6E5", +"p+ c #E0E5E2", +"q+ c #DAE4DC", +"r+ c #E1E6E3", +"s+ c #E2E7E4", +"t+ c #84CD83", +"u+ c #53C351", +"v+ c #76CB75", +"w+ c #90CF8F", +"x+ c #99D399", +"y+ c #92D291", +"z+ c #BBDFBC", +"A+ c #CEE4CF", +"B+ c #E5EAE6", +"C+ c #A7D7A7", +"D+ c #19B216", +"E+ c #1CAE19", +"F+ c #34B831", +"G+ c #27B323", +"H+ c #15AF11", +"I+ c #42BB40", +"J+ c #34B032", +"K+ c #8ECA8D", +"L+ c #81CD80", +"M+ c #18BA15", +"N+ c #1DB319", +"O+ c #1EB31A", +"P+ c #38BA35", +"Q+ c #10B20C", +"R+ c #4EBD4C", +"S+ c #57BA54", +"T+ c #B0D5B1", +"U+ c #BDDCBE", +"V+ c #48B945", +"W+ c #65C464", +"X+ c #8BCE8A", +"Y+ c #08AE04", +"Z+ c #16AD13", +"`+ c #3BB739", +" @ c #C9DECB", +".@ c #DEE4E0", +"+@ c #A2D4A1", +"@@ c #25B522", +"#@ c #37BD36", +"$@ c #A6D5A6", +"%@ c #16B113", +"&@ c #5AC258", +"*@ c #DBE3DC", +"=@ c #D1E1D3", +"-@ c #2BB929", +";@ c #2BB429", +">@ c #56BA54", +",@ c #27B424", +"'@ c #37BB34", +")@ c #91D291", +"!@ c #E9EBEB", +"~@ c #EEEEEF", +"{@ c #747575", +"]@ c #D2E0D3", +"^@ c #34B832", +"/@ c #29B626", +"(@ c #61C75F", +"_@ c #6BC56A", +":@ c #2DB429", +"<@ c #A3D6A3", +"[@ c #D4D5D6", +"}@ c #C9C9CA", +"|@ c #C9CACA", +"1@ c #E4E5E5", +"2@ c #D8D9D9", +"3@ c #CCCCCD", +"4@ c #CCCDCD", +"5@ c #CDCDCE", +"6@ c #CFD0D0", +"7@ c #D9E3DB", +"8@ c #71C770", +"9@ c #33B630", +"0@ c #33B830", +"a@ c #14A911", +"b@ c #36B733", +"c@ c #CFE1D0", +"d@ c #D3D4D5", +"e@ c #676767", +"f@ c #333333", +"g@ c #252525", +"h@ c #303030", +"i@ c #474747", +"j@ c #B6B7B7", +"k@ c #707070", +"l@ c #272727", +"m@ c #A3A3A3", +"n@ c #E4E6E6", +"o@ c #C0DEC1", +"p@ c #80CC80", +"q@ c #58BF56", +"r@ c #51BE4E", +"s@ c #47BB44", +"t@ c #D1E3D2", +"u@ c #EBEBEC", +"v@ c #686969", +"w@ c #252626", +"x@ c #2E2E2E", +"y@ c #646464", +"z@ c #4F4F4F", +"A@ c #2D2D2D", +"B@ c #717171", +"C@ c #363636", +"D@ c #656666", +"E@ c #666666", +"F@ c #454545", +"G@ c #262626", +"H@ c #C4C5C5", +"I@ c #E5E6E6", +"J@ c #ADD9AD", +"K@ c #47BC44", +"L@ c #4DBA4B", +"M@ c #77C876", +"N@ c #BADCBB", +"O@ c #E1E2E3", +"P@ c #3E3E3E", +"Q@ c #7E7E7F", +"R@ c #C0C1C1", +"S@ c #C7C8C8", +"T@ c #717172", +"U@ c #5B5B5C", +"V@ c #D7D7D8", +"W@ c #3C3C3C", +"X@ c #8E8F8F", +"Y@ c #DFE6E0", +"Z@ c #85CE84", +"`@ c #6AC669", +" # c #52BC50", +".# c #4AC048", +"+# c #DAE7DB", +"@# c #404041", +"## c #565656", +"$# c #B9BABA", +"%# c #DCDDDE", +"&# c #717272", +"*# c #E0E1E1", +"=# c #424242", +"-# c #8C8D8D", +";# c #C2E0C3", +"># c #5DC65B", +",# c #3EBC3B", +"'# c #4EBE4C", +")# c #A8D8A8", +"!# c #878788", +"~# c #282828", +"{# c #2C2C2C", +"]# c #535353", +"^# c #949494", +"/# c #727272", +"(# c #838484", +"_# c #848585", +":# c 
#575757", +"<# c #292929", +"[# c #E8EAEA", +"}# c #8AD289", +"|# c #3EB93B", +"1# c #8CD08C", +"2# c #4DBF4B", +"3# c #E0EAE1", +"4# c #A5A5A5", +"5# c #5D5D5D", +"6# c #3A3A3A", +"7# c #2B2B2B", +"8# c #787878", +"9# c #888989", +"0# c #D9E7DA", +"a# c #65C563", +"b# c #3BB838", +"c# c #35B832", +"d# c #C9E4CA", +"e# c #606060", +"f# c #727373", +"g# c #4A4A4A", +"h# c #B3B4B4", +"i# c #B4B5B5", +"j# c #B8B9B9", +"k# c #EBEDED", +"l# c #E7EBE8", +"m# c #7CCC7A", +"n# c #28B324", +"o# c #ADDBAD", +"p# c #959696", +"q# c #E1E2E2", +"r# c #E9E9EA", +"s# c #838383", +"t# c #DFE0E0", +"u# c #AFDCAF", +"v# c #45BE43", +"w# c #67C865", +"x# c #E8EEE9", +"y# c #414242", +"z# c #4E4E4E", +"A# c #494949", +"B# c #2A2A2A", +"C# c #E7E7E8", +"D# c #98D597", +"E# c #3CBA39", +"F# c #C2E2C2", +"G# c #9A9B9B", +"H# c #414141", +"I# c #313232", +"J# c #E3E4E4", +"K# c #7A7A7A", +"L# c #616262", +"M# c #ECEFEE", +"N# c #A7DBA7", +"O# c #C9E5C9", +"P# c #EEF0F0", +"Q# c #F2F4F4", +"R# c #D1D1D1", +"S# c #D0D1D1", +"T# c #EEEEEE", +"U# c #DCDDDD", +"V# c #DBDBDB", +"W# c #DBDCDC", +"X# c #DADBDB", +"Y# c #CECFCF", +"Z# c #929292", +"`# c #B3B3B3", +" $ c #888888", +".$ c #A5A6A6", +"+$ c #9C9C9C", +"@$ c #989999", +"#$ c #646565", +"$$ c #818181", +"%$ c #686868", +"&$ c #ACACAC", +"*$ c #B5B6B6", +"=$ c #8B8B8B", +"-$ c #C9C9C9", +";$ c #A7A7A7", +">$ c #AEAFAF", +",$ c #8B8C8C", +"'$ c #939393", +")$ c #B0B1B1", +"!$ c #868686", +"~$ c #707171", +"{$ c #949595", +"]$ c #7C7D7D", +"^$ c #9F9F9F", +"/$ c #999A9A", +"($ c #CACBCB", +"_$ c #909191", +":$ c #B2B2B2", +"<$ c #6B6B6B", +"[$ c #CACACA", +"}$ c #858585", +"|$ c #979898", +"1$ c #919292", +"2$ c #BCBDBD", +"3$ c #939494", +"4$ c #C1C2C2", +"5$ c #B2B3B3", +"6$ c #BFC0C0", +"7$ c #C2C3C3", +"8$ c #A7A8A8", +"9$ c #C4C4C4", +"0$ c #ABACAC", +"a$ c #C3C4C4", +"b$ c #B1B2B2", +"c$ c #CDCECE", +"d$ c #BDBEBE", +"e$ c #585858", +"f$ c #797A7A", +"g$ c #979797", +"h$ c #AFAFAF", +"i$ c #9D9D9D", +"j$ c #989898", +"k$ c #969696", +"l$ c #959595", +"m$ c #9A9A9A", +" ", +" ", +" . + @ # $ % & * = - - ; ; > , , ' ' ) ! ~ ~ { { ] ] ^ ^ / / ( ( _ ( / ] : ~ ) < ", +" ! [ } | | | 1 1 2 2 3 3 4 4 5 6 7 7 8 9 0 0 a b c d d e f g g h h i j k j i h h l m ", +" [ n n n o o p p q r | s 1 2 2 3 t 4 5 6 u 7 v 8 w a a c c d d f f g g f f d d c c x y ", +" z 1 n o o p p q q | | 1 A 2 B 3 t 4 6 u 7 v 8 w 0 a c c d d f f g g f f d d c c b a 0 C ", +" D E o F p G q | | 1 1 2 B H I J K L M N v w w a b c O d f f g g f f P d O c c a a 0 w Q ", +" z 1 p p q r | s 1 2 2 3 t R S T U V W X Y Z ` c c d e f g g f f .d d c c a a 0 w 8 v .. ", +" z 2 q r | s 1 A 2 3 +.@.#.$.%.&.*.=.-.;.>.,.'.).d f f g g f f d d c c b a 0 w 8 v v 7 !. ", +" z 3 | | 1 A 2 B ~.{.].^./.(._.:.<.[.}.|.1.2.3.4.5.6.g f f d d O c b a a w w 8 v 7 u 6 7. ", +" z 4 1 1 2 B 3 t 8.9.0.a.b.c.d.e.f.g.h.i.j.k.l.m.n.o.p.f d d c c a a 0 w 8 v 7 u u 6 5 Q ", +" z 5 2 2 3 t 4 q.r.s.t.u.v.w.x.y.z.A.B.C.D.E.F.G.H.I.J.K.L.M.N.a 0 w 8 v 7 7 u 6 5 4 t Q ", +" z u B 3 t 4 6 O.P.Q.R.S.T.U.V.W.X.Y.Z.`. 
+.+++@+#+$+%+&+*+=+-+;+Z >+8 7 u 6 5 4 4 t 3 Q ", +" z v t 4 5 6 u ,+'+)+!+~+{+]+^+/+(+_+:+<+[+}+|+1+2+3+4+5+6+7+8+9+0+a+b+6 5 5 4 t 3 B 2 Q ", +" z 9 5 6 u 7 v c+d+e+f+g+h+i+j+k+l+m+n+o+o+p+q+r+s+t+u+v+w+x+y+z+A+B+6 5 4 t 3 B 2 2 1 Q ", +" z 0 u 7 v 8 w C+D+E+F+G+H+I+J+K+g f f d d c c b a 0 w 8 v v 7 u 6 5 4 t 3 3 B 2 A 1 s Q ", +" z b v 8 w 0 a L+M+N+O+P+Q+R+S+T+f d d c c b a a w w 8 v 7 u 6 5 4 4 t 3 B 2 A 1 1 | | Q ", +" z c w 0 a b U+V+W+X+Y+Z+`+ @.@d d O c c a a w w 8 v 7 u 6 5 5 4 t 3 B 2 A 1 1 | | r q Q ", +" z d a a c c +@@@#@$@%@&@*@f d d c c a a 0 w 8 v 7 u u 6 5 4 t 3 B 2 2 A 1 s | r q q p Q ", +" z f c c d d =@-@;@>@,@'@)@d c c b a 0 w !@v v 7 u 6 5 4 t ~@3 B 2 A 1 s | | q q p p F {@ ", +" z g d d f f ]@^@/@(@_@:@<@c b a a w w 8 a [@}@|@[@1@4 t 3 2@3@4@4@5@6@O q q G p p o o C ", +" z h f f g g 7@8@9@0@a@b@c@a a w w 8 v d@e@f@g@g@h@i@j@B 2 k@g@g@g@g@l@i@m@p p o o n n C ", +" z i n@g f f f o@p@q@r@s@t@0 w 8 v u@7 v@w@x@D y@z@A@$ 2 A B@g@C@D@E@F@G@A@H@o o n n n C ", +" z k I@f f d d J@K@v.L@M@N@w v v 7 u O@P@g@Q@8 3 g R@S@1 s T@g@U@s p V@W@g@X@n n n n o {@ ", +" z i f d d O c Y@Z@`@ #.#+#v 7 u 6 5 h @#g@##$#%#O A 1 | | &#g@D | p *#=#g@-#n n n o o {@ ", +" z h d d c c a a ;#>#,#'#)#u u 6 5 4 t !#~#g@{#C@]#^#I@r q /#g@P@(#_#:#l@<#$#n o o o o {@ ", +" z 1@c c b a 0 w [#}#|#1#2#3#5 4 t 3 B O 4#5#6#7#g@l@8#2 p /#g@g@g@g@G@C@9#q o o o o o {@ ", +" z I@b a a w w 8 v 0#a#b#c#d#4 3 3 B 2 A 1 s P S@e#g@6#[ p f#g@g#h#i#j#*#o o o o o o p Q ", +" z f a w w 8 v 7 u k#l#m#n#o#3 B 2 A a p#R@q#6 r#s#g@W@t#o f#g@D p n o o o o o o p p p Q ", +" z P w 8 v 7 u u 6 5 4 u#v#w#x#2 A 1 8 y#{#=#z#A#B#g@k@q n ..g@D q o o o o o o p p p p Q ", +" z C#v v 7 u 6 5 4 t 3 ~.D#E#F#1 s | A G#y H#I#I#F@s#J#n n K#I#L#q o o o o p p p p p p Q ", +" z c 7 u 6 5 4 4 ~@3 B 2 M#N#O#| | r q G 4 I@*#q#O o n n o b *#I@o o o p p p p p p p q Q ", +" z a 6 5 5 4 t 3 B 2 2 1 1 s | r q G p p o o n n n n n o o o o o o p p p p p p p q q q Q ", +" z 0 5 4 t 3 B 2 2 1 1 s | | q q p p o o o n n n n o o o o o o o p p p p p p p q q q q Q ", +" z 8 t 3 3 P#2 A 1 s | | q q Q#p F o o n n n n o o o o o o o p p p p p p p q q q q q q Q ", +" z v 3 B 2 A 1 1 | | r q G p p o o n n n n o o o o o o o p p p p p p p q q q q q q q | Q ", +" z 7 2 2 1 1 | | r q q p p o o n n n n n o o o o o o o p p p p p p q q q q q q q | | | Q ", +" z 6 1 1 s | | q q J#V@h p 8 0 R#0 S#b 1@T#U#S#b 0 p t#V#J#4 W#6@X#Y#J#q q q | | | | | Q ", +" z 5 s | | q q G 1 Z#$ -#|@`# $.$8#+$@$#$$$%$&$ $*$3 =$_#^#-$%$;$>$,$'$2 | | | | | | | Q ", +" z 4 | r q G p p p )$!$= ~$'$(#{$%$]$^$/$($%$_$; :$O /#<$[$}$|$1$2$Q 3$1 | | | | | 1 1 Q ", +" z 3 q q p p o o o 4$.$b 5$6$7$7 8$l 4$9$*#;$7 t#Z#0$4$a$8 4$a$|$b$c$d$1 | | | | 1 1 1 Q ", +" D 3 p p F o o n n n n o o o o o o o p p p p p p q q q q q q q | | | | | | | 1 1 1 1 1 C ", +" e$2 p o o n n n n o o o o o o o p p p p p p p q q q q q q q | | | | | | 1 1 1 1 1 1 1 f$ ", +" I@o n n n n n o o o o o o p p p p p p p q q q q q q q | | | | | | 1 1 1 1 1 1 1 2 8 ", +" g$6 n n n o o o o o o o p p p p p p q q q q q q q | | | | | | | 1 1 1 1 1 1 2 2 7 h$ ", +" i$; ^$^$! i$_ j$k$p#p#l$l$l$l$l$l$l$l$l$l$l${${${${${$l$l$l$g$m$+$i$! ~ ! 
i$ ", +" ", +" "}; diff --git a/tribler-mod/Tribler/Player/Build/Win32/heading.bmp b/tribler-mod/Tribler/Player/Build/Win32/heading.bmp new file mode 100644 index 0000000000000000000000000000000000000000..7bdbfcd14a0e257739666d56fb6b677cbd9e6f0b GIT binary patch literal 25818 zcmeI(Pi$0G6vy$Yf@nZdP$|aX#t=0zL<6W0(Ie*mj2^Up`?XqRT2%Rx-%iQA)!XU^p~96JjNaKdhUC7a^~jFo7Z{w zolnp0`!j9X{!?3J_R&E50?(s7*Yd2Z)YDcfCwYJALOo{}(EOVok!K(oMh?jElVLK9 zKLhdaL8`-K=rIG+|J+u199#{?RM0w1hBf$InX8Zd>&~sY+H`K)EodED{xB>&WBi6+ z|M|UQYG#@+J#~^{CLx|)UYok1$QX*T@U<6GL|cddXUNW#q4tVp819Yw$?)EsI9t*& z@tHMN)^4_ix1TnW49do#S0HG{!s<<#-!07`FZRDIvz=li$eD zO3`t0a~hg^Ljlsx?`>RkZaqO}o*mkXVNNM>zZv(2WN>kRGll}BT{E&@;Xb)J4bZ*e z$_u*`Bt(p6_+W3tqqAk1Q?9&jCLTj1U%YVl^nwSB0!)Tv!@c2xz9vF|7>aS>xpf49 z$WOO=AwU_1WxHl9atpQ@V zdSoB*KyQPNa>_|(d+pnhVWL-ecFkk`8;FPdnvi_{{LUoIRxd8 zA#?{O4if+pR3MjaRt&$>`okwgY4x<@z72IM_|(9gL}`8p(2}4cwD#%2%}?~VG+u5cY@O0};xS~}lp%o9v$nK)+Hv28y5q}8 zw~ifB?7sdnhog#Q=-yC(Y6Zv*;Ls3BF|NDvL77X@aUF(+5D5@NW8~zJ>!sY8!m!MY zjtz0Mxs<;2tkuCPErum$^lcc08Pgd4+w>1k(u8|sT}w41JeNMMMY86wGrQv$s(%Esu}&;aRh$yAI?n;63f|>hI5pG za4w~5Y@CbV#>icMzK*f#(rzv1Eh|D8nNE4hG0^hC zw^%`wa5BsnkgZiU#EJk1Jw;VR9o7IC8X~EkIQaADMD9Cu|2OD+F4g{qj5@;*Am3Kx z8#XFJ*5m}Avg8jS9gFnt~vGIQ!6 zhE{-TwZbcx_7Li447-2(N}+e-Y9ldJQZt5Orm05Um|e|~cSFE% + + + My Manifest Testing application + + + + + + + diff --git a/tribler-mod/Tribler/Player/Build/Win32/triblerplay.nsi b/tribler-mod/Tribler/Player/Build/Win32/triblerplay.nsi new file mode 100644 index 0000000..35074cf --- /dev/null +++ b/tribler-mod/Tribler/Player/Build/Win32/triblerplay.nsi @@ -0,0 +1,222 @@ +!define PRODUCT "SwarmPlayer" +!define VERSION "1.1.0" +!define LIBRARYNAME "Tribler" + + +!include "MUI.nsh" + +;-------------------------------- +;Configuration + +;General + Name "${PRODUCT} ${VERSION}" +OutFile "${PRODUCT}_${VERSION}.exe" + +;Folder selection page +InstallDir "$PROGRAMFILES\${PRODUCT}" + +;Remember install folder +InstallDirRegKey HKCU "Software\${PRODUCT}" "" + +; +; Uncomment for smaller file size +; +SetCompressor "lzma" +; +; Uncomment for quick built time +; +;SetCompress "off" + +CompletedText "Installation completed. 
Thank you for choosing ${PRODUCT}" + +BrandingText "${PRODUCT}" + +;-------------------------------- +;Modern UI Configuration + +!define MUI_ABORTWARNING +!define MUI_HEADERIMAGE +!define MUI_HEADERIMAGE_BITMAP "heading.bmp" + +;-------------------------------- +;Pages + +!define MUI_LICENSEPAGE_RADIOBUTTONS +!define MUI_LICENSEPAGE_RADIOBUTTONS_TEXT_ACCEPT "I accept" +!define MUI_LICENSEPAGE_RADIOBUTTONS_TEXT_DECLINE "I decline" +; !define MUI_FINISHPAGE_RUN "$INSTDIR\swarmplayer.exe" + +!insertmacro MUI_PAGE_LICENSE "binary-LICENSE.txt" +!insertmacro MUI_PAGE_COMPONENTS +!insertmacro MUI_PAGE_DIRECTORY +!insertmacro MUI_PAGE_INSTFILES +!insertmacro MUI_PAGE_FINISH + +!insertmacro MUI_UNPAGE_CONFIRM +!insertmacro MUI_UNPAGE_INSTFILES + +;!insertmacro MUI_DEFAULT UMUI_HEADERIMAGE_BMP heading.bmp" + +;-------------------------------- +;Languages + +!insertmacro MUI_LANGUAGE "English" + +;-------------------------------- +;Language Strings + +;Description +LangString DESC_SecMain ${LANG_ENGLISH} "Install ${PRODUCT}" +LangString DESC_SecDesk ${LANG_ENGLISH} "Create Desktop Shortcuts" +LangString DESC_SecStart ${LANG_ENGLISH} "Create Start Menu Shortcuts" +LangString DESC_SecDefaultTStream ${LANG_ENGLISH} "Associate .tstream files with ${PRODUCT}" +LangString DESC_SecDefaultTorrent ${LANG_ENGLISH} "Associate .torrent files with ${PRODUCT}" + +;-------------------------------- +;Installer Sections + +Section "!Main EXE" SecMain + SectionIn RO + SetOutPath "$INSTDIR" + File *.txt + File swarmplayer.exe.manifest + File swarmplayer.exe + File ffmpeg.exe + File /r vlc + File *.bat + Delete "$INSTDIR\*.pyd" + File *.pyd + Delete "$INSTDIR\python*.dll" + Delete "$INSTDIR\wx*.dll" + File *.dll + Delete "$INSTDIR\*.zip" + File *.zip + CreateDirectory "$INSTDIR\${LIBRARYNAME}" + CreateDirectory "$INSTDIR\${LIBRARYNAME}\Core" + SetOutPath "$INSTDIR\${LIBRARYNAME}\Core" + File ${LIBRARYNAME}\Core\*.txt + CreateDirectory "$INSTDIR\${LIBRARYNAME}\Core\Statistics" + SetOutPath "$INSTDIR\${LIBRARYNAME}\Core\Statistics" + File ${LIBRARYNAME}\Core\Statistics\*.txt + File ${LIBRARYNAME}\Core\Statistics\*.sql + CreateDirectory "$INSTDIR\${LIBRARYNAME}\Images" + SetOutPath "$INSTDIR\${LIBRARYNAME}\Images" + File ${LIBRARYNAME}\Images\*.* + CreateDirectory "$INSTDIR\${LIBRARYNAME}\Video" + CreateDirectory "$INSTDIR\${LIBRARYNAME}\Video\Images" + SetOutPath "$INSTDIR\${LIBRARYNAME}\Video\Images" + File ${LIBRARYNAME}\Video\Images\*.* + CreateDirectory "$INSTDIR\${LIBRARYNAME}\Lang" + SetOutPath "$INSTDIR\${LIBRARYNAME}\Lang" + IfFileExists user.lang userlang + File ${LIBRARYNAME}\Lang\*.* + userlang: + File /x user.lang ${LIBRARYNAME}\Lang\*.* + SetOutPath "$INSTDIR" + WriteRegStr HKEY_LOCAL_MACHINE "Software\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT}" "DisplayName" "${PRODUCT} (remove only)" + WriteRegStr HKEY_LOCAL_MACHINE "Software\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT}" "UninstallString" "$INSTDIR\Uninstall.exe" + +; Now writing to KHEY_LOCAL_MACHINE only -- remove references to uninstall from current user + DeleteRegKey HKEY_CURRENT_USER "SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT}" +; Remove old error log if present + Delete "$INSTDIR\swarmplayer.exe.log" + + WriteUninstaller "$INSTDIR\Uninstall.exe" + + ; Add an application to the firewall exception list - All Networks - All IP Version - Enabled + SimpleFC::AddApplication "Tribler" "$INSTDIR\${PRODUCT}.exe" 0 2 "" 1 + ; Pop $0 ; return error(1)/success(0) + +SectionEnd + + +Section "Desktop Icons" SecDesk + 
CreateShortCut "$DESKTOP\${PRODUCT}.lnk" "$INSTDIR\${PRODUCT}.exe" "" +SectionEnd + + +Section "Startmenu Icons" SecStart + CreateDirectory "$SMPROGRAMS\${PRODUCT}" + CreateShortCut "$SMPROGRAMS\${PRODUCT}\Uninstall.lnk" "$INSTDIR\Uninstall.exe" "" "$INSTDIR\Uninstall.exe" 0 + CreateShortCut "$SMPROGRAMS\${PRODUCT}\${PRODUCT}.lnk" "$INSTDIR\${PRODUCT}.exe" "" "$INSTDIR\${PRODUCT}.exe" 0 +SectionEnd + + +Section "Make Default For .tstream" SecDefaultTStream + WriteRegStr HKCR .tstream "" tstream + WriteRegStr HKCR .tstream "Content Type" application/x-tribler-stream + WriteRegStr HKCR "MIME\Database\Content Type\application/x-tribler-stream" Extension .tstream + WriteRegStr HKCR tstream "" "TSTREAM File" + WriteRegBin HKCR tstream EditFlags 00000100 + WriteRegStr HKCR "tstream\shell" "" open + WriteRegStr HKCR "tstream\shell\open\command" "" '"$INSTDIR\${PRODUCT}.exe" "%1"' + WriteRegStr HKCR "tstream\DefaultIcon" "" "$INSTDIR\${LIBRARYNAME}\Images\SwarmPlayerIcon.ico" +SectionEnd + + +Section /o "Make Default For .torrent" SecDefaultTorrent + ; Delete ddeexec key if it exists + DeleteRegKey HKCR "bittorrent\shell\open\ddeexec" + WriteRegStr HKCR .torrent "" bittorrent + WriteRegStr HKCR .torrent "Content Type" application/x-bittorrent + WriteRegStr HKCR "MIME\Database\Content Type\application/x-bittorrent" Extension .torrent + WriteRegStr HKCR bittorrent "" "TORRENT File" + WriteRegBin HKCR bittorrent EditFlags 00000100 + WriteRegStr HKCR "bittorrent\shell" "" open + WriteRegStr HKCR "bittorrent\shell\open\command" "" '"$INSTDIR\${PRODUCT}.exe" "%1"' + WriteRegStr HKCR "bittorrent\DefaultIcon" "" "$INSTDIR\${LIBRARYNAME}\Images\torrenticon.ico" +SectionEnd + + + +;-------------------------------- +;Descriptions + +!insertmacro MUI_FUNCTION_DESCRIPTION_BEGIN +!insertmacro MUI_DESCRIPTION_TEXT ${SecMain} $(DESC_SecMain) +!insertmacro MUI_DESCRIPTION_TEXT ${SecDesk} $(DESC_SecDesk) +!insertmacro MUI_DESCRIPTION_TEXT ${SecStart} $(DESC_SecStart) +;!insertmacro MUI_DESCRIPTION_TEXT ${SecLang} $(DESC_SecLang) +!insertmacro MUI_DESCRIPTION_TEXT ${SecDefaultTStream} $(DESC_SecDefaultTStream) +!insertmacro MUI_DESCRIPTION_TEXT ${SecDefaultTorrent} $(DESC_SecDefaultTorrent) +!insertmacro MUI_FUNCTION_DESCRIPTION_END + +;-------------------------------- +;Uninstaller Section + +Section "Uninstall" + + Delete "$INSTDIR\${LIBRARYNAME}\*.*" + RMDir "$INSTDIR\${LIBRARYNAME}" + + Delete "$INSTDIR\*.*" + RMDir "$INSTDIR" + + Delete "$DESKTOP\${PRODUCT}.lnk" + Delete "$SMPROGRAMS\${PRODUCT}\*.*" + RmDir "$SMPROGRAMS\${PRODUCT}" + + DeleteRegKey HKEY_LOCAL_MACHINE "SOFTWARE\${PRODUCT}" + DeleteRegKey HKEY_LOCAL_MACHINE "SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT}" + + ; Remove an application from the firewall exception list + SimpleFC::RemoveApplication "$INSTDIR\${PRODUCT}.exe" + ; Pop $0 ; return error(1)/success(0) + +SectionEnd + + +;-------------------------------- +;Functions Section + +Function .onInit + System::Call 'kernel32::CreateMutexA(i 0, i 0, t "SwarmPlayer") i .r1 ?e' + + Pop $R0 + + StrCmp $R0 0 +3 + + MessageBox MB_OK "The installer is already running." 
+ + Abort +FunctionEnd diff --git a/tribler-mod/Tribler/Player/Reporter.py b/tribler-mod/Tribler/Player/Reporter.py new file mode 100644 index 0000000..6f4ef76 --- /dev/null +++ b/tribler-mod/Tribler/Player/Reporter.py @@ -0,0 +1,164 @@ +from time import localtime, strftime +# Written by Jan David Mol +# see LICENSE.txt for license information + +# Collects statistics about a download/VOD session, and sends it +# home on a regular interval. + +import sys,urllib,zlib,pickle +from time import time +from traceback import print_exc + +PHONEHOME = False +DEBUG = True + +class Reporter: + def __init__( self, sconfig ): + self.sconfig = sconfig + + # time of initialisation + self.epoch = time() + + # mapping from peer ids to (shorter) numbers + self.peernr = {} + + # remember static peer information, such as IP + # self.peerinfo[id] = info string + self.peerinfo = {} + + # remember which peers we were connected to in the last report + # self.connected[id] = timestamp when last seen + self.connected = {} + + # collected reports + self.buffered_reports = [] + + # whether to phone home to send collected data + self.do_reporting = True + + # send data at this interval (seconds) + self.report_interval = 30 + + # send first report immediately + self.last_report_ts = 0 + + # record when we started (used as a session id) + self.epoch = time() + + def phone_home( self, report ): + """ Report status to a centralised server. """ + + #if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","\nreport: ".join(reports) + + # do not actually send if reporting is disabled + if not self.do_reporting or not PHONEHOME: + return + + # add reports to buffer + self.buffered_reports.append( report ) + + # only process at regular intervals + now = time() + if now - self.last_report_ts < self.report_interval: + return + self.last_report_ts = now + + # send complete buffer + s = pickle.dumps( self.buffered_reports ) + self.buffered_reports = [] + + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","\nreport: phoning home." + try: + data = zlib.compress( s, 9 ).encode("base64") + sock = urllib.urlopen("http://swpreporter.tribler.org/reporting/report.cgi",data) + result = sock.read() + sock.close() + + result = int(result) + + if result == 0: + # remote server is not recording, so don't bother sending info + self.do_reporting = False + else: + self.report_interval = result + except IOError, e: + # error contacting server + print_exc(file=sys.stderr) + self.do_reporting = False + except ValueError, e: + # page did not obtain an integer + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","report: got %s" % (result,) + print_exc(file=sys.stderr) + self.do_reporting = False + except: + # any other error + print_exc(file=sys.stderr) + self.do_reporting = False + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","\nreport: succes. 
reported %s bytes, will report again (%s) in %s seconds" % (len(data),self.do_reporting,self.report_interval) + + def report_stat( self, ds ): + chokestr = lambda b: ["c","C"][int(bool(b))] + intereststr = lambda b: ["i","I"][int(bool(b))] + optstr = lambda b: ["o","O"][int(bool(b))] + protstr = lambda b: ["bt","g2g"][int(bool(b))] + + now = time() + v = ds.get_vod_stats() or { "played": 0, "stall": 0, "late": 0, "dropped": 0, "prebuf": -1, "pieces": {} } + vi = ds.get_videoinfo() or { "live": False, "inpath": "(none)", "status": None } + vs = vi["status"] + + scfg = self.sconfig + + down_total, down_rate, up_total, up_rate = 0, 0.0, 0, 0.0 + peerinfo = {} + + for p in ds.get_peerlist(): + down_total += p["dtotal"]/1024 + down_rate += p["downrate"]/1024.0 + up_total += p["utotal"]/1024 + up_rate += p["uprate"]/1024.0 + + id = p["id"] + peerinfo[id] = { + "g2g": protstr(p["g2g"]), + "addr": "%s:%s:%s" % (p["ip"],p["port"],p["direction"]), + "id": id, + "g2g_score": "%s,%s" % (p["g2g_score"][0],p["g2g_score"][1]), + "down_str": "%s%s" % (chokestr(p["dchoked"]),intereststr(p["dinterested"])), + "down_total": p["dtotal"]/1024, + "down_rate": p["downrate"]/1024.0, + "up_str": "%s%s%s" % (chokestr(p["uchoked"]),intereststr(p["uinterested"]),optstr(p["optimistic"])), + "up_total": p["utotal"]/1024, + "up_rate": p["uprate"]/1024.0, + } + + if vs: + valid_range = vs.download_range() + else: + valid_range = "" + + stats = { + "timestamp": time(), + "epoch": self.epoch, + "listenport": scfg.get_listen_port(), + "infohash": `ds.get_download().get_def().get_infohash()`, + "filename": vi["inpath"], + "peerid": `ds.get_peerid()`, + "live": vi["live"], + "progress": 100.00*ds.get_progress(), + "down_total": down_total, + "down_rate": down_rate, + "up_total": up_total, + "up_rate": up_rate, + "p_played": v["played"], + "t_stall": v["stall"], + "p_late": v["late"], + "p_dropped": v["dropped"], + "t_prebuf": v["prebuf"], + "peers": peerinfo.values(), + "pieces": v["pieces"], + "validrange": valid_range, + } + + self.phone_home( stats ) + diff --git a/tribler-mod/Tribler/Player/Reporter.py.bak b/tribler-mod/Tribler/Player/Reporter.py.bak new file mode 100644 index 0000000..c631bc4 --- /dev/null +++ b/tribler-mod/Tribler/Player/Reporter.py.bak @@ -0,0 +1,163 @@ +# Written by Jan David Mol +# see LICENSE.txt for license information + +# Collects statistics about a download/VOD session, and sends it +# home on a regular interval. + +import sys,urllib,zlib,pickle +from time import time +from traceback import print_exc + +PHONEHOME = False +DEBUG = True + +class Reporter: + def __init__( self, sconfig ): + self.sconfig = sconfig + + # time of initialisation + self.epoch = time() + + # mapping from peer ids to (shorter) numbers + self.peernr = {} + + # remember static peer information, such as IP + # self.peerinfo[id] = info string + self.peerinfo = {} + + # remember which peers we were connected to in the last report + # self.connected[id] = timestamp when last seen + self.connected = {} + + # collected reports + self.buffered_reports = [] + + # whether to phone home to send collected data + self.do_reporting = True + + # send data at this interval (seconds) + self.report_interval = 30 + + # send first report immediately + self.last_report_ts = 0 + + # record when we started (used as a session id) + self.epoch = time() + + def phone_home( self, report ): + """ Report status to a centralised server. 
""" + + #if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","\nreport: ".join(reports) + + # do not actually send if reporting is disabled + if not self.do_reporting or not PHONEHOME: + return + + # add reports to buffer + self.buffered_reports.append( report ) + + # only process at regular intervals + now = time() + if now - self.last_report_ts < self.report_interval: + return + self.last_report_ts = now + + # send complete buffer + s = pickle.dumps( self.buffered_reports ) + self.buffered_reports = [] + + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","\nreport: phoning home." + try: + data = zlib.compress( s, 9 ).encode("base64") + sock = urllib.urlopen("http://swpreporter.tribler.org/reporting/report.cgi",data) + result = sock.read() + sock.close() + + result = int(result) + + if result == 0: + # remote server is not recording, so don't bother sending info + self.do_reporting = False + else: + self.report_interval = result + except IOError, e: + # error contacting server + print_exc(file=sys.stderr) + self.do_reporting = False + except ValueError, e: + # page did not obtain an integer + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","report: got %s" % (result,) + print_exc(file=sys.stderr) + self.do_reporting = False + except: + # any other error + print_exc(file=sys.stderr) + self.do_reporting = False + if DEBUG: print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","\nreport: succes. reported %s bytes, will report again (%s) in %s seconds" % (len(data),self.do_reporting,self.report_interval) + + def report_stat( self, ds ): + chokestr = lambda b: ["c","C"][int(bool(b))] + intereststr = lambda b: ["i","I"][int(bool(b))] + optstr = lambda b: ["o","O"][int(bool(b))] + protstr = lambda b: ["bt","g2g"][int(bool(b))] + + now = time() + v = ds.get_vod_stats() or { "played": 0, "stall": 0, "late": 0, "dropped": 0, "prebuf": -1, "pieces": {} } + vi = ds.get_videoinfo() or { "live": False, "inpath": "(none)", "status": None } + vs = vi["status"] + + scfg = self.sconfig + + down_total, down_rate, up_total, up_rate = 0, 0.0, 0, 0.0 + peerinfo = {} + + for p in ds.get_peerlist(): + down_total += p["dtotal"]/1024 + down_rate += p["downrate"]/1024.0 + up_total += p["utotal"]/1024 + up_rate += p["uprate"]/1024.0 + + id = p["id"] + peerinfo[id] = { + "g2g": protstr(p["g2g"]), + "addr": "%s:%s:%s" % (p["ip"],p["port"],p["direction"]), + "id": id, + "g2g_score": "%s,%s" % (p["g2g_score"][0],p["g2g_score"][1]), + "down_str": "%s%s" % (chokestr(p["dchoked"]),intereststr(p["dinterested"])), + "down_total": p["dtotal"]/1024, + "down_rate": p["downrate"]/1024.0, + "up_str": "%s%s%s" % (chokestr(p["uchoked"]),intereststr(p["uinterested"]),optstr(p["optimistic"])), + "up_total": p["utotal"]/1024, + "up_rate": p["uprate"]/1024.0, + } + + if vs: + valid_range = vs.download_range() + else: + valid_range = "" + + stats = { + "timestamp": time(), + "epoch": self.epoch, + "listenport": scfg.get_listen_port(), + "infohash": `ds.get_download().get_def().get_infohash()`, + "filename": vi["inpath"], + "peerid": `ds.get_peerid()`, + "live": vi["live"], + "progress": 100.00*ds.get_progress(), + "down_total": down_total, + "down_rate": down_rate, + "up_total": up_total, + "up_rate": up_rate, + "p_played": v["played"], + "t_stall": v["stall"], + "p_late": v["late"], + "p_dropped": v["dropped"], + "t_prebuf": v["prebuf"], + "peers": peerinfo.values(), + "pieces": v["pieces"], + "validrange": valid_range, + } + + self.phone_home( stats ) 
+ diff --git a/tribler-mod/Tribler/Player/UtilityStub.py b/tribler-mod/Tribler/Player/UtilityStub.py new file mode 100644 index 0000000..99483f5 --- /dev/null +++ b/tribler-mod/Tribler/Player/UtilityStub.py @@ -0,0 +1,38 @@ +from time import localtime, strftime +# Written by ABC authors and Arno Bakker +# see LICENSE.txt for license information +import sys +import os + +from Tribler.__init__ import LIBRARYNAME +from Tribler.Lang.lang import Lang + +################################################################ +# +# Class: UtilityStub +# +################################################################ +class UtilityStub: + def __init__(self,installdir,statedir): + self.installdir = installdir + self.statedir = statedir + + self.config = self + + # Setup language files + self.lang = Lang(self) + + + + def getConfigPath(self): + return self.statedir + + def getPath(self): + return self.installdir.decode(sys.getfilesystemencoding()) + + def Read(self,key): + if key == 'language_file': + return os.path.join(self.installdir,LIBRARYNAME,'Lang','english.lang') + elif key == 'videoplayerpath': + return 'vlc' + return None diff --git a/tribler-mod/Tribler/Player/UtilityStub.py.bak b/tribler-mod/Tribler/Player/UtilityStub.py.bak new file mode 100644 index 0000000..949df85 --- /dev/null +++ b/tribler-mod/Tribler/Player/UtilityStub.py.bak @@ -0,0 +1,37 @@ +# Written by ABC authors and Arno Bakker +# see LICENSE.txt for license information +import sys +import os + +from Tribler.__init__ import LIBRARYNAME +from Tribler.Lang.lang import Lang + +################################################################ +# +# Class: UtilityStub +# +################################################################ +class UtilityStub: + def __init__(self,installdir,statedir): + self.installdir = installdir + self.statedir = statedir + + self.config = self + + # Setup language files + self.lang = Lang(self) + + + + def getConfigPath(self): + return self.statedir + + def getPath(self): + return self.installdir.decode(sys.getfilesystemencoding()) + + def Read(self,key): + if key == 'language_file': + return os.path.join(self.installdir,LIBRARYNAME,'Lang','english.lang') + elif key == 'videoplayerpath': + return 'vlc' + return None diff --git a/tribler-mod/Tribler/Player/__init__.py b/tribler-mod/Tribler/Player/__init__.py new file mode 100644 index 0000000..b7ef832 --- /dev/null +++ b/tribler-mod/Tribler/Player/__init__.py @@ -0,0 +1,3 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information diff --git a/tribler-mod/Tribler/Player/__init__.py.bak b/tribler-mod/Tribler/Player/__init__.py.bak new file mode 100644 index 0000000..395f8fb --- /dev/null +++ b/tribler-mod/Tribler/Player/__init__.py.bak @@ -0,0 +1,2 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information diff --git a/tribler-mod/Tribler/Player/swarmplayer.py b/tribler-mod/Tribler/Player/swarmplayer.py new file mode 100644 index 0000000..393b7f4 --- /dev/null +++ b/tribler-mod/Tribler/Player/swarmplayer.py @@ -0,0 +1,620 @@ +from time import localtime, strftime +# Written by Arno Bakker, Choopan RATTANAPOKA, Jie Yang +# see LICENSE.txt for license information +# +# TODO: +# * set 'download_slice_size' to 32K, such that pieces are no longer +# downloaded in 2 chunks. This particularly avoids a bad case where you +# kick the source: you download chunk 1 of piece X +# from lagging peer and download chunk 2 of piece X from source. With the piece +# now complete you check the sig. 
As the first part of the piece is old, this +# fails and we kick the peer that gave us the completing chunk, which is the +# source. +# +# Note that the BT spec says: +# "All current implementations use 2 15 , and close connections which request +# an amount greater than 2 17." http://www.bittorrent.org/beps/bep_0003.html +# +# So it should be 32KB already. However, the BitTorrent (3.4.1, 5.0.9), +# BitTornado and Azureus all use 2 ** 14 = 16KB chunks. +# +# - See if we can use stream.seek() to optimize SwarmPlayer as well (see SwarmPlugin) + +import os +import sys +import time +import tempfile +import shutil +from traceback import print_exc +from cStringIO import StringIO +from threading import Thread + +if sys.platform == "darwin": + # on Mac, we can only load VLC/OpenSSL libraries + # relative to the location of tribler.py + os.chdir(os.path.abspath(os.path.dirname(sys.argv[0]))) +try: + import wxversion + wxversion.select('2.8') +except: + pass +import wx + +from Tribler.__init__ import LIBRARYNAME +from Tribler.Core.API import * +from Tribler.Core.Utilities.unicode import bin2unicode +from Tribler.Core.Utilities.timeouturlopen import urlOpenTimeout + +from Tribler.Video.defs import * +from Tribler.Video.VideoPlayer import VideoPlayer, VideoChooser +from Tribler.Video.VideoFrame import VideoFrame +from Tribler.Video.utils import videoextdefaults +from Tribler.Utilities.LinuxSingleInstanceChecker import * +from Tribler.Utilities.Instance2Instance import Instance2InstanceClient + +from Tribler.Player.BaseApp import BaseApp + +DEBUG = True +ONSCREENDEBUG = False +ALLOW_MULTIPLE = False + +PLAYER_VERSION = '1.1.0' + +I2I_LISTENPORT = 57894 +PLAYER_LISTENPORT = 8620 +VIDEOHTTP_LISTENPORT = 6879 + +class PlayerApp(BaseApp): + def __init__(self, redirectstderrout, appname, params, single_instance_checker, installdir, i2iport, sport): + + BaseApp.__init__(self, redirectstderrout, appname, params, single_instance_checker, installdir, i2iport, sport) + + self.said_start_playback = False + self.decodeprogress = 0 + + + def OnInit(self): + try: + # If already running, and user starts a new instance without a URL + # on the cmd line + if not ALLOW_MULTIPLE and self.single_instance_checker.IsAnotherRunning(): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Another instance running, no URL on CMD, asking user" + torrentfilename = self.select_torrent_from_disk() + if torrentfilename is not None: + i2ic = Instance2InstanceClient(I2I_LISTENPORT,'START',torrentfilename) + return False + + # Do common initialization + BaseApp.OnInitBase(self) + + # Fire up the VideoPlayer, it abstracts away whether we're using + # an internal or external video player. 
+ self.videoplayer = VideoPlayer.getInstance(httpport=VIDEOHTTP_LISTENPORT) + playbackmode = PLAYBACKMODE_INTERNAL + self.videoplayer.register(self.utility,preferredplaybackmode=playbackmode) + + # Open video window + self.start_video_frame() + + # Load torrent + if self.params[0] != "": + torrentfilename = self.params[0] + + # TEST: just play video file + #self.videoplayer.play_url(torrentfilename) + #return True + + else: + torrentfilename = self.select_torrent_from_disk() + if torrentfilename is None: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: User selected no file" + self.OnExit() + return False + + + # Start download + if not self.select_file_start_download(torrentfilename): + + self.OnExit() + return False + + return True + + except Exception,e: + print_exc() + self.show_error(str(e)) + self.OnExit() + return False + + + def start_video_frame(self): + self.videoFrame = PlayerFrame(self,self.appname) + self.Bind(wx.EVT_CLOSE, self.videoFrame.OnCloseWindow) + self.Bind(wx.EVT_QUERY_END_SESSION, self.videoFrame.OnCloseWindow) + self.Bind(wx.EVT_END_SESSION, self.videoFrame.OnCloseWindow) + self.videoFrame.show_videoframe() + + if self.videoplayer is not None: + self.videoplayer.set_videoframe(self.videoFrame) + self.said_start_playback = False + + + def select_torrent_from_disk(self): + dlg = wx.FileDialog(None, + self.appname+': Select torrent to play', + '', # default dir + '', # default file + 'TSTREAM and TORRENT files (*.tstream;*.torrent)|*.tstream;*.torrent', + wx.OPEN|wx.FD_FILE_MUST_EXIST) + if dlg.ShowModal() == wx.ID_OK: + filename = dlg.GetPath() + else: + filename = None + dlg.Destroy() + return filename + + + def select_file_start_download(self,torrentfilename): + tdef = TorrentDef.load(torrentfilename) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Starting download, infohash is",`tdef.get_infohash()` + + # Select which video to play (if multiple) + videofiles = tdef.get_files(exts=videoextdefaults) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Found video files",videofiles + + if len(videofiles) == 0: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: No video files found! 
Let user select" + # Let user choose any file + videofiles = tdef.get_files(exts=None) + + if len(videofiles) > 1: + selectedvideofile = self.ask_user_which_video_from_torrent(videofiles) + if selectedvideofile is None: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: User selected no video" + return False + dlfile = selectedvideofile + else: + dlfile = videofiles[0] + + + # Start video window if not open + if self.videoFrame is None: + self.start_video_frame() + else: + # Stop playing, reset stream progress info + sliders + self.videoplayer.stop_playback(reset=True) + self.said_start_playback = False + self.decodeprogress = 0 + + # Display name and thumbnail + cname = tdef.get_name_as_unicode() + if len(videofiles) > 1: + cname += u' - '+bin2unicode(dlfile) + self.videoplayer.set_content_name(u'Loading: '+cname) + + try: + [mime,imgdata] = tdef.get_thumbnail() + if mime is not None: + f = StringIO(imgdata) + img = wx.EmptyImage(-1,-1) + img.LoadMimeStream(f,mime,-1) + self.videoplayer.set_content_image(img) + else: + self.videoplayer.set_content_image(None) + except: + print_exc() + + + # Start actual download + self.start_download(tdef,dlfile) + return True + + + + def ask_user_which_video_from_torrent(self,videofiles): + dlg = VideoChooser(self.videoFrame,self.utility,videofiles,title=self.appname,expl='Select which file to play') + result = dlg.ShowModal() + if result == wx.ID_OK: + index = dlg.getChosenIndex() + filename = videofiles[index] + else: + filename = None + dlg.Destroy() + return filename + + + # ARNOTODO: see how VideoPlayer manages stopping downloads + + def sesscb_vod_event_callback(self,d,event,params): + self.videoplayer.sesscb_vod_event_callback(d,event,params) + + + def get_supported_vod_events(self): + return self.videoplayer.get_supported_vod_events() + + + # + # Remote start of new torrents + # + def i2ithread_readlinecallback(self,ic,cmd): + """ Called by Instance2Instance thread """ + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Another instance called us with cmd",cmd + ic.close() + + if cmd.startswith('START '): + param = cmd[len('START '):] + torrentfilename = None + if param.startswith('http:'): + # Retrieve from web + f = tempfile.NamedTemporaryFile() + n = urlOpenTimeout(param) + data = n.read() + f.write(data) + f.close() + n.close() + torrentfilename = f.name + else: + torrentfilename = param + + # Switch to GUI thread + wx.CallAfter(self.remote_start_download,torrentfilename) + + def remote_start_download(self,torrentfilename): + """ Called by GUI thread """ + self.videoplayer.stop_playback(reset=True) + + self.remove_downloads_in_vodmode_if_not_complete() + self.select_file_start_download(torrentfilename) + + + # + # Display stats in videoframe + # + def gui_states_callback(self,dslist,haspeerlist): + """ Override BaseApp """ + (playing_dslist,totalhelping,totalspeed) = BaseApp.gui_states_callback(self,dslist,haspeerlist) + + # Don't display stats if there is no video frame to show them on. 
+ if self.videoFrame is None: + return + elif len(playing_dslist) > 0: + ds = playing_dslist[0] # only single playing Download at the moment in swarmplayer + self.display_stats_in_videoframe(ds,totalhelping,totalspeed) + + + def display_stats_in_videoframe(self,ds,totalhelping,totalspeed): + # Display stats for currently playing Download + + videoplayer_mediastate = self.videoplayer.get_state() + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Stats: VideoPlayer state",videoplayer_mediastate + + [topmsg,msg,self.said_start_playback,self.decodeprogress] = get_status_msgs(ds,videoplayer_mediastate,self.appname,self.said_start_playback,self.decodeprogress,totalhelping,totalspeed) + # Display helping info on "content name" line. + self.videoplayer.set_content_name(topmsg) + + # Update status msg and progress bar + self.videoplayer.set_player_status_and_progress(msg,ds.get_pieces_complete()) + + # Toggle save button + self.videoplayer.set_save_button(ds.get_status() == DLSTATUS_SEEDING, self.save_video_copy) + + if False: # Only works if the sesscb_states_callback() method returns (x,True) + peerlist = ds.get_peerlist() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Connected to",len(peerlist),"peers" + for peer in peerlist: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Connected to",peer['ip'],peer['uprate'],peer['downrate'] + + + def videoserver_set_status_guicallback(self,status): + """ Override BaseApp """ + if self.videoFrame is not None: + self.videoFrame.set_player_status(status) + + # + # Save button logic + # + def save_video_copy(self): + # Save a copy of playing download to other location + + for d2 in self.downloads_in_vodmode: + # only single playing Download at the moment in swarmplayer + d = d2 + dest_files = d.get_dest_files() + dest_file = dest_files[0] # only single file at the moment in swarmplayer + savethread_callback_lambda = lambda:self.savethread_callback(dest_file) + + t = Thread(target = savethread_callback_lambda) + t.setName( self.appname+"Save"+t.getName() ) + t.setDaemon(True) + t.start() + + def savethread_callback(self,dest_file): + + # Save a copy of playing download to other location + # called by new thread from self.save_video_copy + try: + if sys.platform == 'win32': + # Jelle also goes win32, find location of "My Documents" + # see http://www.mvps.org/access/api/api0054.htm + from win32com.shell import shell + pidl = shell.SHGetSpecialFolderLocation(0,0x05) + defaultpath = shell.SHGetPathFromIDList(pidl) + else: + defaultpath = os.path.expandvars('$HOME') + except Exception, msg: + defaultpath = '' + print_exc() + + dest_file_only = os.path.split(dest_file[1])[1] + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'Defaultpath:', defaultpath, 'Dest:', dest_file + dlg = wx.FileDialog(self.videoFrame, + message = self.utility.lang.get('savemedia'), + defaultDir = defaultpath, + defaultFile = dest_file_only, + wildcard = self.utility.lang.get('allfileswildcard') + ' (*.*)|*.*', + style = wx.SAVE) + dlg.Raise() + result = dlg.ShowModal() + dlg.Destroy() + + if result == wx.ID_OK: + path = dlg.GetPath() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'Path:', path + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'Copy: %s to %s' % (dest_file[1], path) + if sys.platform == 'win32': + try: + import win32file + win32file.CopyFile(dest_file[1], path, 0) # do succeed on collision + except: + shutil.copyfile(dest_file[1], 
path) + else: + shutil.copyfile(dest_file[1], path) + + # On Exit + + def clear_session_state(self): + """ Try to fix apps by doing hard reset. Called from systray menu """ + try: + self.videoplayer.stop_playback() + except: + print_exc() + BaseApp.clear_session_state(self) + + + +def get_status_msgs(ds,videoplayer_mediastate,appname,said_start_playback,decodeprogress,totalhelping,totalspeed): + + intime = "Not playing for quite some time." + ETA = ((60 * 15, "Playing in less than 15 minutes."), + (60 * 10, "Playing in less than 10 minutes."), + (60 * 5, "Playing in less than 5 minutes."), + (60, "Playing in less than a minute.")) + + topmsg = '' + msg = '' + + logmsgs = ds.get_log_messages() + logmsg = None + if len(logmsgs) > 0: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Log",logmsgs[0] + logmsg = logmsgs[-1][1] + + preprogress = ds.get_vod_prebuffering_progress() + playable = ds.get_vod_playable() + t = ds.get_vod_playable_after() + + intime = ETA[0][1] + for eta_time, eta_msg in ETA: + if t > eta_time: + break + intime = eta_msg + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: playble",playable,"preprog",preprogress + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: ETA is",t,"secs" + # if t > float(2 ** 30): + # intime = "inf" + # elif t == 0.0: + # intime = "now" + # else: + # h, t = divmod(t, 60.0*60.0) + # m, s = divmod(t, 60.0) + # if h == 0.0: + # if m == 0.0: + # intime = "%ds" % (s) + # else: + # intime = "%dm:%02ds" % (m,s) + # else: + # intime = "%dh:%02dm:%02ds" % (h,m,s) + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: VODStats",preprogress,playable,"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%" + + if ds.get_status() == DLSTATUS_HASHCHECKING: + genprogress = ds.get_progress() + pstr = str(int(genprogress*100)) + msg = "Checking already downloaded parts "+pstr+"% done" + elif ds.get_status() == DLSTATUS_STOPPED_ON_ERROR: + msg = 'Error playing: '+str(ds.get_error()) + elif ds.get_progress() == 1.0: + msg = '' + elif playable: + if not said_start_playback: + msg = "Starting playback..." + + if videoplayer_mediastate == MEDIASTATE_STOPPED and said_start_playback: + if totalhelping == 0: + topmsg = u"Please leave the "+appname+" running, this will help other "+appname+" users to download faster." + else: + topmsg = u"Helping "+str(totalhelping)+" "+appname+" users to download. Please leave it running in the background." + + # Display this on status line + # TODO: Show balloon in systray when closing window to indicate things continue there + msg = '' + + elif videoplayer_mediastate == MEDIASTATE_PLAYING: + said_start_playback = True + # It may take a while for VLC to actually start displaying + # video, as it is trying to tune in to the stream (finding + # I-Frame). Display some info to show that: + # + cname = ds.get_download().get_def().get_name_as_unicode() + topmsg = u'Decoding: '+cname+' '+str(decodeprogress)+' s' + decodeprogress += 1 + msg = '' + elif videoplayer_mediastate == MEDIASTATE_PAUSED: + # msg = "Buffering... " + str(int(100.0*preprogress))+"%" + msg = "Buffering... " + str(int(100.0*preprogress))+"%. " + intime + else: + msg = '' + + elif preprogress != 1.0: + pstr = str(int(preprogress*100)) + npeers = ds.get_num_peers() + npeerstr = str(npeers) + if npeers == 0 and logmsg is not None: + msg = logmsg + elif npeers == 1: + msg = "Prebuffering "+pstr+"% done (connected to 1 person). 
" + intime + else: + msg = "Prebuffering "+pstr+"% done (connected to "+npeerstr+" people). " + intime + + try: + d = ds.get_download() + tdef = d.get_def() + videofiles = d.get_selected_files() + if len(videofiles) >= 1: + videofile = videofiles[0] + else: + videofile = None + if tdef.get_bitrate(videofile) is None: + msg += ' This video may not play properly because its bitrate is unknown' + except: + print_exc() + else: + # msg = "Waiting for sufficient download speed... "+intime + msg = 'Waiting for sufficient download speed... ' + intime + + global ONSCREENDEBUG + if msg == '' and ONSCREENDEBUG: + uptxt = "up %.1f" % (totalspeed[UPLOAD]) + downtxt = " down %.1f" % (totalspeed[DOWNLOAD]) + peertxt = " peer %d" % (totalhelping) + msg = uptxt + downtxt + peertxt + + return [topmsg,msg,said_start_playback,decodeprogress] + + + +class PlayerFrame(VideoFrame): + def __init__(self,parent,appname): + VideoFrame.__init__(self,parent,parent.utility,appname+' '+PLAYER_VERSION,parent.iconpath,parent.videoplayer.get_vlcwrap(),parent.logopath) + self.parent = parent + self.closed = False + + dragdroplist = FileDropTarget(self.parent) + self.SetDropTarget(dragdroplist) + + self.Bind(wx.EVT_CLOSE, self.OnCloseWindow) + + def OnCloseWindow(self, event = None): + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: ON CLOSE WINDOW" + + # TODO: first event.Skip does not close window, second apparently does + # Check how event differs + + if event is not None: + nr = event.GetEventType() + lookup = { wx.EVT_CLOSE.evtType[0]: "EVT_CLOSE", wx.EVT_QUERY_END_SESSION.evtType[0]: "EVT_QUERY_END_SESSION", wx.EVT_END_SESSION.evtType[0]: "EVT_END_SESSION" } + if nr in lookup: + nr = lookup[nr] + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Closing due to event ",nr + event.Skip() + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Closing untriggered by event" + + # This gets called multiple times somehow + if not self.closed: + self.closed = True + self.parent.videoFrame = None + + self.parent.videoplayer.stop_playback() + self.parent.remove_downloads_in_vodmode_if_not_complete() + self.parent.restart_other_downloads() + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Closing done" + # TODO: Show balloon in systray when closing window to indicate things continue there + + +class FileDropTarget(wx.FileDropTarget): + """ To enable drag and drop of .tstream to window """ + + def __init__(self,app): + wx.FileDropTarget.__init__(self) + self.app = app + + def OnDropFiles(self, x, y, filenames): + for filename in filenames: + self.app.remote_start_download(filename) + return True + + + + +############################################################## +# +# Main Program Start Here +# +############################################################## +def run_playerapp(appname,params = None): + if params is None: + params = [""] + + if len(sys.argv) > 1: + params = sys.argv[1:] + + if 'debug' in params: + global ONSCREENDEBUG + ONSCREENDEBUG=True + if 'raw' in params: + Tribler.Video.VideoPlayer.USE_VLC_RAW_INTERFACE = True + + # Create single instance semaphore + # Arno: On Linux and wxPython-2.8.1.1 the SingleInstanceChecker appears + # to mess up stderr, i.e., I get IOErrors when writing to it via print_exc() + # + siappname = appname.lower() # For backwards compatibility + if sys.platform != 'linux2': + single_instance_checker = wx.SingleInstanceChecker(siappname+"-"+ wx.GetUserId()) + else: + 
single_instance_checker = LinuxSingleInstanceChecker(siappname) + + #print "[StartUpDebug]---------------- 1", time()-start_time + if not ALLOW_MULTIPLE and single_instance_checker.IsAnotherRunning(): + if params[0] != "": + torrentfilename = params[0] + i2ic = Instance2InstanceClient(I2I_LISTENPORT,'START',torrentfilename) + time.sleep(1) + return + + arg0 = sys.argv[0].lower() + if arg0.endswith('.exe'): + installdir = os.path.abspath(os.path.dirname(sys.argv[0])) + else: + installdir = os.getcwd() + + # Launch first single instance + app = PlayerApp(0, appname, params, single_instance_checker, installdir, I2I_LISTENPORT, PLAYER_LISTENPORT) + app.MainLoop() + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Sleeping seconds to let other threads finish" + time.sleep(2) + + if not ALLOW_MULTIPLE: + del single_instance_checker + + +if __name__ == '__main__': + run_playerapp("SwarmPlayer") + diff --git a/tribler-mod/Tribler/Player/swarmplayer.py.bak b/tribler-mod/Tribler/Player/swarmplayer.py.bak new file mode 100644 index 0000000..61764a0 --- /dev/null +++ b/tribler-mod/Tribler/Player/swarmplayer.py.bak @@ -0,0 +1,619 @@ +# Written by Arno Bakker, Choopan RATTANAPOKA, Jie Yang +# see LICENSE.txt for license information +# +# TODO: +# * set 'download_slice_size' to 32K, such that pieces are no longer +# downloaded in 2 chunks. This particularly avoids a bad case where you +# kick the source: you download chunk 1 of piece X +# from lagging peer and download chunk 2 of piece X from source. With the piece +# now complete you check the sig. As the first part of the piece is old, this +# fails and we kick the peer that gave us the completing chunk, which is the +# source. +# +# Note that the BT spec says: +# "All current implementations use 2 15 , and close connections which request +# an amount greater than 2 17." http://www.bittorrent.org/beps/bep_0003.html +# +# So it should be 32KB already. However, the BitTorrent (3.4.1, 5.0.9), +# BitTornado and Azureus all use 2 ** 14 = 16KB chunks. 
+# +# - See if we can use stream.seek() to optimize SwarmPlayer as well (see SwarmPlugin) + +import os +import sys +import time +import tempfile +import shutil +from traceback import print_exc +from cStringIO import StringIO +from threading import Thread + +if sys.platform == "darwin": + # on Mac, we can only load VLC/OpenSSL libraries + # relative to the location of tribler.py + os.chdir(os.path.abspath(os.path.dirname(sys.argv[0]))) +try: + import wxversion + wxversion.select('2.8') +except: + pass +import wx + +from Tribler.__init__ import LIBRARYNAME +from Tribler.Core.API import * +from Tribler.Core.Utilities.unicode import bin2unicode +from Tribler.Core.Utilities.timeouturlopen import urlOpenTimeout + +from Tribler.Video.defs import * +from Tribler.Video.VideoPlayer import VideoPlayer, VideoChooser +from Tribler.Video.VideoFrame import VideoFrame +from Tribler.Video.utils import videoextdefaults +from Tribler.Utilities.LinuxSingleInstanceChecker import * +from Tribler.Utilities.Instance2Instance import Instance2InstanceClient + +from Tribler.Player.BaseApp import BaseApp + +DEBUG = True +ONSCREENDEBUG = False +ALLOW_MULTIPLE = False + +PLAYER_VERSION = '1.1.0' + +I2I_LISTENPORT = 57894 +PLAYER_LISTENPORT = 8620 +VIDEOHTTP_LISTENPORT = 6879 + +class PlayerApp(BaseApp): + def __init__(self, redirectstderrout, appname, params, single_instance_checker, installdir, i2iport, sport): + + BaseApp.__init__(self, redirectstderrout, appname, params, single_instance_checker, installdir, i2iport, sport) + + self.said_start_playback = False + self.decodeprogress = 0 + + + def OnInit(self): + try: + # If already running, and user starts a new instance without a URL + # on the cmd line + if not ALLOW_MULTIPLE and self.single_instance_checker.IsAnotherRunning(): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Another instance running, no URL on CMD, asking user" + torrentfilename = self.select_torrent_from_disk() + if torrentfilename is not None: + i2ic = Instance2InstanceClient(I2I_LISTENPORT,'START',torrentfilename) + return False + + # Do common initialization + BaseApp.OnInitBase(self) + + # Fire up the VideoPlayer, it abstracts away whether we're using + # an internal or external video player. 
+ self.videoplayer = VideoPlayer.getInstance(httpport=VIDEOHTTP_LISTENPORT) + playbackmode = PLAYBACKMODE_INTERNAL + self.videoplayer.register(self.utility,preferredplaybackmode=playbackmode) + + # Open video window + self.start_video_frame() + + # Load torrent + if self.params[0] != "": + torrentfilename = self.params[0] + + # TEST: just play video file + #self.videoplayer.play_url(torrentfilename) + #return True + + else: + torrentfilename = self.select_torrent_from_disk() + if torrentfilename is None: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: User selected no file" + self.OnExit() + return False + + + # Start download + if not self.select_file_start_download(torrentfilename): + + self.OnExit() + return False + + return True + + except Exception,e: + print_exc() + self.show_error(str(e)) + self.OnExit() + return False + + + def start_video_frame(self): + self.videoFrame = PlayerFrame(self,self.appname) + self.Bind(wx.EVT_CLOSE, self.videoFrame.OnCloseWindow) + self.Bind(wx.EVT_QUERY_END_SESSION, self.videoFrame.OnCloseWindow) + self.Bind(wx.EVT_END_SESSION, self.videoFrame.OnCloseWindow) + self.videoFrame.show_videoframe() + + if self.videoplayer is not None: + self.videoplayer.set_videoframe(self.videoFrame) + self.said_start_playback = False + + + def select_torrent_from_disk(self): + dlg = wx.FileDialog(None, + self.appname+': Select torrent to play', + '', # default dir + '', # default file + 'TSTREAM and TORRENT files (*.tstream;*.torrent)|*.tstream;*.torrent', + wx.OPEN|wx.FD_FILE_MUST_EXIST) + if dlg.ShowModal() == wx.ID_OK: + filename = dlg.GetPath() + else: + filename = None + dlg.Destroy() + return filename + + + def select_file_start_download(self,torrentfilename): + tdef = TorrentDef.load(torrentfilename) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Starting download, infohash is",`tdef.get_infohash()` + + # Select which video to play (if multiple) + videofiles = tdef.get_files(exts=videoextdefaults) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Found video files",videofiles + + if len(videofiles) == 0: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: No video files found! 
Let user select" + # Let user choose any file + videofiles = tdef.get_files(exts=None) + + if len(videofiles) > 1: + selectedvideofile = self.ask_user_which_video_from_torrent(videofiles) + if selectedvideofile is None: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: User selected no video" + return False + dlfile = selectedvideofile + else: + dlfile = videofiles[0] + + + # Start video window if not open + if self.videoFrame is None: + self.start_video_frame() + else: + # Stop playing, reset stream progress info + sliders + self.videoplayer.stop_playback(reset=True) + self.said_start_playback = False + self.decodeprogress = 0 + + # Display name and thumbnail + cname = tdef.get_name_as_unicode() + if len(videofiles) > 1: + cname += u' - '+bin2unicode(dlfile) + self.videoplayer.set_content_name(u'Loading: '+cname) + + try: + [mime,imgdata] = tdef.get_thumbnail() + if mime is not None: + f = StringIO(imgdata) + img = wx.EmptyImage(-1,-1) + img.LoadMimeStream(f,mime,-1) + self.videoplayer.set_content_image(img) + else: + self.videoplayer.set_content_image(None) + except: + print_exc() + + + # Start actual download + self.start_download(tdef,dlfile) + return True + + + + def ask_user_which_video_from_torrent(self,videofiles): + dlg = VideoChooser(self.videoFrame,self.utility,videofiles,title=self.appname,expl='Select which file to play') + result = dlg.ShowModal() + if result == wx.ID_OK: + index = dlg.getChosenIndex() + filename = videofiles[index] + else: + filename = None + dlg.Destroy() + return filename + + + # ARNOTODO: see how VideoPlayer manages stopping downloads + + def sesscb_vod_event_callback(self,d,event,params): + self.videoplayer.sesscb_vod_event_callback(d,event,params) + + + def get_supported_vod_events(self): + return self.videoplayer.get_supported_vod_events() + + + # + # Remote start of new torrents + # + def i2ithread_readlinecallback(self,ic,cmd): + """ Called by Instance2Instance thread """ + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Another instance called us with cmd",cmd + ic.close() + + if cmd.startswith('START '): + param = cmd[len('START '):] + torrentfilename = None + if param.startswith('http:'): + # Retrieve from web + f = tempfile.NamedTemporaryFile() + n = urlOpenTimeout(param) + data = n.read() + f.write(data) + f.close() + n.close() + torrentfilename = f.name + else: + torrentfilename = param + + # Switch to GUI thread + wx.CallAfter(self.remote_start_download,torrentfilename) + + def remote_start_download(self,torrentfilename): + """ Called by GUI thread """ + self.videoplayer.stop_playback(reset=True) + + self.remove_downloads_in_vodmode_if_not_complete() + self.select_file_start_download(torrentfilename) + + + # + # Display stats in videoframe + # + def gui_states_callback(self,dslist,haspeerlist): + """ Override BaseApp """ + (playing_dslist,totalhelping,totalspeed) = BaseApp.gui_states_callback(self,dslist,haspeerlist) + + # Don't display stats if there is no video frame to show them on. 
+ if self.videoFrame is None: + return + elif len(playing_dslist) > 0: + ds = playing_dslist[0] # only single playing Download at the moment in swarmplayer + self.display_stats_in_videoframe(ds,totalhelping,totalspeed) + + + def display_stats_in_videoframe(self,ds,totalhelping,totalspeed): + # Display stats for currently playing Download + + videoplayer_mediastate = self.videoplayer.get_state() + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Stats: VideoPlayer state",videoplayer_mediastate + + [topmsg,msg,self.said_start_playback,self.decodeprogress] = get_status_msgs(ds,videoplayer_mediastate,self.appname,self.said_start_playback,self.decodeprogress,totalhelping,totalspeed) + # Display helping info on "content name" line. + self.videoplayer.set_content_name(topmsg) + + # Update status msg and progress bar + self.videoplayer.set_player_status_and_progress(msg,ds.get_pieces_complete()) + + # Toggle save button + self.videoplayer.set_save_button(ds.get_status() == DLSTATUS_SEEDING, self.save_video_copy) + + if False: # Only works if the sesscb_states_callback() method returns (x,True) + peerlist = ds.get_peerlist() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Connected to",len(peerlist),"peers" + for peer in peerlist: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Connected to",peer['ip'],peer['uprate'],peer['downrate'] + + + def videoserver_set_status_guicallback(self,status): + """ Override BaseApp """ + if self.videoFrame is not None: + self.videoFrame.set_player_status(status) + + # + # Save button logic + # + def save_video_copy(self): + # Save a copy of playing download to other location + + for d2 in self.downloads_in_vodmode: + # only single playing Download at the moment in swarmplayer + d = d2 + dest_files = d.get_dest_files() + dest_file = dest_files[0] # only single file at the moment in swarmplayer + savethread_callback_lambda = lambda:self.savethread_callback(dest_file) + + t = Thread(target = savethread_callback_lambda) + t.setName( self.appname+"Save"+t.getName() ) + t.setDaemon(True) + t.start() + + def savethread_callback(self,dest_file): + + # Save a copy of playing download to other location + # called by new thread from self.save_video_copy + try: + if sys.platform == 'win32': + # Jelle also goes win32, find location of "My Documents" + # see http://www.mvps.org/access/api/api0054.htm + from win32com.shell import shell + pidl = shell.SHGetSpecialFolderLocation(0,0x05) + defaultpath = shell.SHGetPathFromIDList(pidl) + else: + defaultpath = os.path.expandvars('$HOME') + except Exception, msg: + defaultpath = '' + print_exc() + + dest_file_only = os.path.split(dest_file[1])[1] + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'Defaultpath:', defaultpath, 'Dest:', dest_file + dlg = wx.FileDialog(self.videoFrame, + message = self.utility.lang.get('savemedia'), + defaultDir = defaultpath, + defaultFile = dest_file_only, + wildcard = self.utility.lang.get('allfileswildcard') + ' (*.*)|*.*', + style = wx.SAVE) + dlg.Raise() + result = dlg.ShowModal() + dlg.Destroy() + + if result == wx.ID_OK: + path = dlg.GetPath() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'Path:', path + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'Copy: %s to %s' % (dest_file[1], path) + if sys.platform == 'win32': + try: + import win32file + win32file.CopyFile(dest_file[1], path, 0) # do succeed on collision + except: + shutil.copyfile(dest_file[1], 
path) + else: + shutil.copyfile(dest_file[1], path) + + # On Exit + + def clear_session_state(self): + """ Try to fix apps by doing hard reset. Called from systray menu """ + try: + self.videoplayer.stop_playback() + except: + print_exc() + BaseApp.clear_session_state(self) + + + +def get_status_msgs(ds,videoplayer_mediastate,appname,said_start_playback,decodeprogress,totalhelping,totalspeed): + + intime = "Not playing for quite some time." + ETA = ((60 * 15, "Playing in less than 15 minutes."), + (60 * 10, "Playing in less than 10 minutes."), + (60 * 5, "Playing in less than 5 minutes."), + (60, "Playing in less than a minute.")) + + topmsg = '' + msg = '' + + logmsgs = ds.get_log_messages() + logmsg = None + if len(logmsgs) > 0: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Log",logmsgs[0] + logmsg = logmsgs[-1][1] + + preprogress = ds.get_vod_prebuffering_progress() + playable = ds.get_vod_playable() + t = ds.get_vod_playable_after() + + intime = ETA[0][1] + for eta_time, eta_msg in ETA: + if t > eta_time: + break + intime = eta_msg + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: playble",playable,"preprog",preprogress + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: ETA is",t,"secs" + # if t > float(2 ** 30): + # intime = "inf" + # elif t == 0.0: + # intime = "now" + # else: + # h, t = divmod(t, 60.0*60.0) + # m, s = divmod(t, 60.0) + # if h == 0.0: + # if m == 0.0: + # intime = "%ds" % (s) + # else: + # intime = "%dm:%02ds" % (m,s) + # else: + # intime = "%dh:%02dm:%02ds" % (h,m,s) + + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: VODStats",preprogress,playable,"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%" + + if ds.get_status() == DLSTATUS_HASHCHECKING: + genprogress = ds.get_progress() + pstr = str(int(genprogress*100)) + msg = "Checking already downloaded parts "+pstr+"% done" + elif ds.get_status() == DLSTATUS_STOPPED_ON_ERROR: + msg = 'Error playing: '+str(ds.get_error()) + elif ds.get_progress() == 1.0: + msg = '' + elif playable: + if not said_start_playback: + msg = "Starting playback..." + + if videoplayer_mediastate == MEDIASTATE_STOPPED and said_start_playback: + if totalhelping == 0: + topmsg = u"Please leave the "+appname+" running, this will help other "+appname+" users to download faster." + else: + topmsg = u"Helping "+str(totalhelping)+" "+appname+" users to download. Please leave it running in the background." + + # Display this on status line + # TODO: Show balloon in systray when closing window to indicate things continue there + msg = '' + + elif videoplayer_mediastate == MEDIASTATE_PLAYING: + said_start_playback = True + # It may take a while for VLC to actually start displaying + # video, as it is trying to tune in to the stream (finding + # I-Frame). Display some info to show that: + # + cname = ds.get_download().get_def().get_name_as_unicode() + topmsg = u'Decoding: '+cname+' '+str(decodeprogress)+' s' + decodeprogress += 1 + msg = '' + elif videoplayer_mediastate == MEDIASTATE_PAUSED: + # msg = "Buffering... " + str(int(100.0*preprogress))+"%" + msg = "Buffering... " + str(int(100.0*preprogress))+"%. " + intime + else: + msg = '' + + elif preprogress != 1.0: + pstr = str(int(preprogress*100)) + npeers = ds.get_num_peers() + npeerstr = str(npeers) + if npeers == 0 and logmsg is not None: + msg = logmsg + elif npeers == 1: + msg = "Prebuffering "+pstr+"% done (connected to 1 person). 
" + intime + else: + msg = "Prebuffering "+pstr+"% done (connected to "+npeerstr+" people). " + intime + + try: + d = ds.get_download() + tdef = d.get_def() + videofiles = d.get_selected_files() + if len(videofiles) >= 1: + videofile = videofiles[0] + else: + videofile = None + if tdef.get_bitrate(videofile) is None: + msg += ' This video may not play properly because its bitrate is unknown' + except: + print_exc() + else: + # msg = "Waiting for sufficient download speed... "+intime + msg = 'Waiting for sufficient download speed... ' + intime + + global ONSCREENDEBUG + if msg == '' and ONSCREENDEBUG: + uptxt = "up %.1f" % (totalspeed[UPLOAD]) + downtxt = " down %.1f" % (totalspeed[DOWNLOAD]) + peertxt = " peer %d" % (totalhelping) + msg = uptxt + downtxt + peertxt + + return [topmsg,msg,said_start_playback,decodeprogress] + + + +class PlayerFrame(VideoFrame): + def __init__(self,parent,appname): + VideoFrame.__init__(self,parent,parent.utility,appname+' '+PLAYER_VERSION,parent.iconpath,parent.videoplayer.get_vlcwrap(),parent.logopath) + self.parent = parent + self.closed = False + + dragdroplist = FileDropTarget(self.parent) + self.SetDropTarget(dragdroplist) + + self.Bind(wx.EVT_CLOSE, self.OnCloseWindow) + + def OnCloseWindow(self, event = None): + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: ON CLOSE WINDOW" + + # TODO: first event.Skip does not close window, second apparently does + # Check how event differs + + if event is not None: + nr = event.GetEventType() + lookup = { wx.EVT_CLOSE.evtType[0]: "EVT_CLOSE", wx.EVT_QUERY_END_SESSION.evtType[0]: "EVT_QUERY_END_SESSION", wx.EVT_END_SESSION.evtType[0]: "EVT_END_SESSION" } + if nr in lookup: + nr = lookup[nr] + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Closing due to event ",nr + event.Skip() + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Closing untriggered by event" + + # This gets called multiple times somehow + if not self.closed: + self.closed = True + self.parent.videoFrame = None + + self.parent.videoplayer.stop_playback() + self.parent.remove_downloads_in_vodmode_if_not_complete() + self.parent.restart_other_downloads() + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","main: Closing done" + # TODO: Show balloon in systray when closing window to indicate things continue there + + +class FileDropTarget(wx.FileDropTarget): + """ To enable drag and drop of .tstream to window """ + + def __init__(self,app): + wx.FileDropTarget.__init__(self) + self.app = app + + def OnDropFiles(self, x, y, filenames): + for filename in filenames: + self.app.remote_start_download(filename) + return True + + + + +############################################################## +# +# Main Program Start Here +# +############################################################## +def run_playerapp(appname,params = None): + if params is None: + params = [""] + + if len(sys.argv) > 1: + params = sys.argv[1:] + + if 'debug' in params: + global ONSCREENDEBUG + ONSCREENDEBUG=True + if 'raw' in params: + Tribler.Video.VideoPlayer.USE_VLC_RAW_INTERFACE = True + + # Create single instance semaphore + # Arno: On Linux and wxPython-2.8.1.1 the SingleInstanceChecker appears + # to mess up stderr, i.e., I get IOErrors when writing to it via print_exc() + # + siappname = appname.lower() # For backwards compatibility + if sys.platform != 'linux2': + single_instance_checker = wx.SingleInstanceChecker(siappname+"-"+ wx.GetUserId()) + else: + 
single_instance_checker = LinuxSingleInstanceChecker(siappname) + + #print "[StartUpDebug]---------------- 1", time()-start_time + if not ALLOW_MULTIPLE and single_instance_checker.IsAnotherRunning(): + if params[0] != "": + torrentfilename = params[0] + i2ic = Instance2InstanceClient(I2I_LISTENPORT,'START',torrentfilename) + time.sleep(1) + return + + arg0 = sys.argv[0].lower() + if arg0.endswith('.exe'): + installdir = os.path.abspath(os.path.dirname(sys.argv[0])) + else: + installdir = os.getcwd() + + # Launch first single instance + app = PlayerApp(0, appname, params, single_instance_checker, installdir, I2I_LISTENPORT, PLAYER_LISTENPORT) + app.MainLoop() + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Sleeping seconds to let other threads finish" + time.sleep(2) + + if not ALLOW_MULTIPLE: + del single_instance_checker + + +if __name__ == '__main__': + run_playerapp("SwarmPlayer") + diff --git a/tribler-mod/Tribler/Player/systray.py b/tribler-mod/Tribler/Player/systray.py new file mode 100644 index 0000000..402e698 --- /dev/null +++ b/tribler-mod/Tribler/Player/systray.py @@ -0,0 +1,189 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information + +import sys +import os +from traceback import print_exc +import wx + +from Tribler.Core.API import * + +class PlayerTaskBarIcon(wx.TaskBarIcon): + + def __init__(self,wxapp,iconfilename): + wx.TaskBarIcon.__init__(self) + self.wxapp = wxapp + + self.icons = wx.IconBundle() + self.icons.AddIconFromFile(iconfilename,wx.BITMAP_TYPE_ICO) + self.icon = self.icons.GetIcon(wx.Size(-1,-1)) + + if sys.platform != "darwin": + # Mac already has the right icon set at startup + self.SetIcon(self.icon,'SwarmPlayer') + + def CreatePopupMenu(self): + menu = wx.Menu() + + mi = menu.Append(-1,"Options...") + self.Bind(wx.EVT_MENU, self.OnOptions, id=mi.GetId()) + menu.AppendSeparator() + mi = menu.Append(-1,"Exit") + self.Bind(wx.EVT_MENU, self.OnExitClient, id=mi.GetId()) + return menu + + def OnOptions(self,event=None): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PlayerTaskBarIcon: OnOptions" + dlg = PlayerOptionsDialog(self.wxapp,self.icons) + ret = dlg.ShowModal() + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PlayerTaskBarIcon: Dialog returned",ret + dlg.Destroy() + + def OnExitClient(self,event=None): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PlayerTaskBarIcon: OnExitClient" + self.wxapp.ExitMainLoop() + + + def set_icon_tooltip(self,txt): + if sys.platform == "darwin": + # no taskbar tooltip on OS/X + return + + self.SetIcon(self.icon,txt) + + + +class PlayerOptionsDialog(wx.Dialog): + + def __init__(self,wxapp,icons): + style = wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER + wx.Dialog.__init__(self, None, -1, 'SwarmPlayer Options', size=(400,200), style=style) + self.wxapp = wxapp + + self.port = None + + self.icons = icons + self.SetIcons(self.icons) + + mainbox = wx.BoxSizer(wx.VERTICAL) + + aboutbox = wx.BoxSizer(wx.VERTICAL) + aboutlabel1 = wx.StaticText(self, -1, 'SwarmPlayer is a product of the Tribler team.') + aboutlabel2 = wx.StaticText(self, -1, 'Visit us at www.tribler.org!') + aboutbox.Add(aboutlabel1, 1, wx.EXPAND|wx.LEFT|wx.RIGHT, 5) + aboutbox.Add(aboutlabel2, 1, wx.EXPAND|wx.LEFT|wx.RIGHT, 5) + + uploadrate = self.wxapp.get_playerconfig('total_max_upload_rate') + + uploadratebox = wx.BoxSizer(wx.HORIZONTAL) + label = wx.StaticText(self, -1, 'Max upload to others (KB/s)') + 
self.uploadratectrl = wx.TextCtrl(self, -1, str(uploadrate)) + uploadratebox.Add(label, 1, wx.ALIGN_CENTER_VERTICAL) + uploadratebox.Add(self.uploadratectrl) + + + buttonbox2 = wx.BoxSizer(wx.HORIZONTAL) + advbtn = wx.Button(self, -1, 'Advanced...') + buttonbox2.Add(advbtn, 0, wx.ALL, 5) + + + buttonbox = wx.BoxSizer(wx.HORIZONTAL) + okbtn = wx.Button(self, wx.ID_OK, 'OK') + buttonbox.Add(okbtn, 0, wx.ALL, 5) + cancelbtn = wx.Button(self, wx.ID_CANCEL, 'Cancel') + buttonbox.Add(cancelbtn, 0, wx.ALL, 5) + applybtn = wx.Button(self, -1, 'Apply') + buttonbox.Add(applybtn, 0, wx.ALL, 5) + + mainbox.Add(aboutbox, 1, wx.ALL, 5) + mainbox.Add(uploadratebox, 1, wx.EXPAND|wx.ALL, 5) + mainbox.Add(buttonbox2, 1, wx.EXPAND, 1) + mainbox.Add(buttonbox, 1, wx.EXPAND, 1) + self.SetSizerAndFit(mainbox) + + self.Bind(wx.EVT_BUTTON, self.OnAdvanced, advbtn) + self.Bind(wx.EVT_BUTTON, self.OnOK, okbtn) + #self.Bind(wx.EVT_BUTTON, self.OnCancel, cancelbtn) + self.Bind(wx.EVT_BUTTON, self.OnApply, applybtn) + #self.Bind(wx.EVT_CLOSE, self.OnCloseWindow) + + def OnOK(self,event = None): + self.OnApply(event) + self.EndModal(wx.ID_OK) + + #def OnCancel(self,event = None): + # self.EndModal(wx.ID_CANCEL) + + def OnApply(self,event = None): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PlayerOptionsDialog: OnApply",self.port + + if self.port is not None: + session = self.wxapp.s + state_dir = session.get_state_dir() + cfgfilename = Session.get_default_config_filename(state_dir) + scfg = SessionStartupConfig.load(cfgfilename) + + scfg.set_listen_port(self.port) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PlayerOptionsDialog: OnApply: Saving SessionStartupConfig to",cfgfilename + scfg.save(cfgfilename) + + uploadrate = int(self.uploadratectrl.GetValue()) + self.wxapp.set_playerconfig('total_max_upload_rate',uploadrate) + self.wxapp.save_playerconfig() + + # TODO: For max upload, etc. we also have to modify the runtime Session. 
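OnApply() above persists settings by rewriting the on-disk SessionStartupConfig rather than the running Session, so a changed listen port only takes effect on the next start. A minimal sketch of that pattern, assuming a live Session object and using only the calls shown above (the helper name is illustrative, not from the patch):

    from Tribler.Core.API import Session, SessionStartupConfig

    def persist_listen_port(session, port):
        # Locate the startup config in this session's state dir,
        # change the listen port and write it back for the next run.
        cfgfilename = Session.get_default_config_filename(session.get_state_dir())
        scfg = SessionStartupConfig.load(cfgfilename)
        scfg.set_listen_port(port)
        scfg.save(cfgfilename)
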
+ + def OnAdvanced(self,event = None): + + if self.port is None: + self.port = self.wxapp.s.get_listen_port() + #destdir = self.wxapp.s.get_dest_dir() + + dlg = PlayerAdvancedOptionsDialog(self.icons,self.port,self.wxapp) + ret = dlg.ShowModal() + if ret == wx.ID_OK: + self.port = dlg.get_port() + dlg.Destroy() + + +class PlayerAdvancedOptionsDialog(wx.Dialog): + + def __init__(self,icons,port,wxapp): + style = wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER # TODO: Add OK+Cancel + wx.Dialog.__init__(self, None, -1, 'SwarmPlayer Advanced Options', size=(400,200), style=style) + self.wxapp = wxapp + + self.SetIcons(icons) + + mainbox = wx.BoxSizer(wx.VERTICAL) + + portbox = wx.BoxSizer(wx.HORIZONTAL) + label = wx.StaticText(self, -1, 'Port') + self.portctrl = wx.TextCtrl(self, -1, str(port)) + portbox.Add(label, 1, wx.ALIGN_CENTER_VERTICAL) + portbox.Add(self.portctrl) + + button2box = wx.BoxSizer(wx.HORIZONTAL) + clearbtn = wx.Button(self, -1, 'Clear disk cache and exit') + button2box.Add(clearbtn, 0, wx.ALL, 5) + self.Bind(wx.EVT_BUTTON, self.OnClear, clearbtn) + + buttonbox = wx.BoxSizer(wx.HORIZONTAL) + okbtn = wx.Button(self, wx.ID_OK, 'OK') + buttonbox.Add(okbtn, 0, wx.ALL, 5) + cancelbtn = wx.Button(self, wx.ID_CANCEL, 'Cancel') + buttonbox.Add(cancelbtn, 0, wx.ALL, 5) + + mainbox.Add(portbox, 1, wx.EXPAND|wx.ALL, 5) + mainbox.Add(button2box, 1, wx.EXPAND, 1) + mainbox.Add(buttonbox, 1, wx.EXPAND, 1) + self.SetSizerAndFit(mainbox) + + def get_port(self): + return int(self.portctrl.GetValue()) + + def OnClear(self,event=None): + self.wxapp.clear_session_state() + + diff --git a/tribler-mod/Tribler/Player/systray.py.bak b/tribler-mod/Tribler/Player/systray.py.bak new file mode 100644 index 0000000..6c1937e --- /dev/null +++ b/tribler-mod/Tribler/Player/systray.py.bak @@ -0,0 +1,188 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information + +import sys +import os +from traceback import print_exc +import wx + +from Tribler.Core.API import * + +class PlayerTaskBarIcon(wx.TaskBarIcon): + + def __init__(self,wxapp,iconfilename): + wx.TaskBarIcon.__init__(self) + self.wxapp = wxapp + + self.icons = wx.IconBundle() + self.icons.AddIconFromFile(iconfilename,wx.BITMAP_TYPE_ICO) + self.icon = self.icons.GetIcon(wx.Size(-1,-1)) + + if sys.platform != "darwin": + # Mac already has the right icon set at startup + self.SetIcon(self.icon,'SwarmPlayer') + + def CreatePopupMenu(self): + menu = wx.Menu() + + mi = menu.Append(-1,"Options...") + self.Bind(wx.EVT_MENU, self.OnOptions, id=mi.GetId()) + menu.AppendSeparator() + mi = menu.Append(-1,"Exit") + self.Bind(wx.EVT_MENU, self.OnExitClient, id=mi.GetId()) + return menu + + def OnOptions(self,event=None): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PlayerTaskBarIcon: OnOptions" + dlg = PlayerOptionsDialog(self.wxapp,self.icons) + ret = dlg.ShowModal() + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PlayerTaskBarIcon: Dialog returned",ret + dlg.Destroy() + + def OnExitClient(self,event=None): + #print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PlayerTaskBarIcon: OnExitClient" + self.wxapp.ExitMainLoop() + + + def set_icon_tooltip(self,txt): + if sys.platform == "darwin": + # no taskbar tooltip on OS/X + return + + self.SetIcon(self.icon,txt) + + + +class PlayerOptionsDialog(wx.Dialog): + + def __init__(self,wxapp,icons): + style = wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER + wx.Dialog.__init__(self, None, -1, 'SwarmPlayer Options', size=(400,200), style=style) + self.wxapp = 
wxapp + + self.port = None + + self.icons = icons + self.SetIcons(self.icons) + + mainbox = wx.BoxSizer(wx.VERTICAL) + + aboutbox = wx.BoxSizer(wx.VERTICAL) + aboutlabel1 = wx.StaticText(self, -1, 'SwarmPlayer is a product of the Tribler team.') + aboutlabel2 = wx.StaticText(self, -1, 'Visit us at www.tribler.org!') + aboutbox.Add(aboutlabel1, 1, wx.EXPAND|wx.LEFT|wx.RIGHT, 5) + aboutbox.Add(aboutlabel2, 1, wx.EXPAND|wx.LEFT|wx.RIGHT, 5) + + uploadrate = self.wxapp.get_playerconfig('total_max_upload_rate') + + uploadratebox = wx.BoxSizer(wx.HORIZONTAL) + label = wx.StaticText(self, -1, 'Max upload to others (KB/s)') + self.uploadratectrl = wx.TextCtrl(self, -1, str(uploadrate)) + uploadratebox.Add(label, 1, wx.ALIGN_CENTER_VERTICAL) + uploadratebox.Add(self.uploadratectrl) + + + buttonbox2 = wx.BoxSizer(wx.HORIZONTAL) + advbtn = wx.Button(self, -1, 'Advanced...') + buttonbox2.Add(advbtn, 0, wx.ALL, 5) + + + buttonbox = wx.BoxSizer(wx.HORIZONTAL) + okbtn = wx.Button(self, wx.ID_OK, 'OK') + buttonbox.Add(okbtn, 0, wx.ALL, 5) + cancelbtn = wx.Button(self, wx.ID_CANCEL, 'Cancel') + buttonbox.Add(cancelbtn, 0, wx.ALL, 5) + applybtn = wx.Button(self, -1, 'Apply') + buttonbox.Add(applybtn, 0, wx.ALL, 5) + + mainbox.Add(aboutbox, 1, wx.ALL, 5) + mainbox.Add(uploadratebox, 1, wx.EXPAND|wx.ALL, 5) + mainbox.Add(buttonbox2, 1, wx.EXPAND, 1) + mainbox.Add(buttonbox, 1, wx.EXPAND, 1) + self.SetSizerAndFit(mainbox) + + self.Bind(wx.EVT_BUTTON, self.OnAdvanced, advbtn) + self.Bind(wx.EVT_BUTTON, self.OnOK, okbtn) + #self.Bind(wx.EVT_BUTTON, self.OnCancel, cancelbtn) + self.Bind(wx.EVT_BUTTON, self.OnApply, applybtn) + #self.Bind(wx.EVT_CLOSE, self.OnCloseWindow) + + def OnOK(self,event = None): + self.OnApply(event) + self.EndModal(wx.ID_OK) + + #def OnCancel(self,event = None): + # self.EndModal(wx.ID_CANCEL) + + def OnApply(self,event = None): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PlayerOptionsDialog: OnApply",self.port + + if self.port is not None: + session = self.wxapp.s + state_dir = session.get_state_dir() + cfgfilename = Session.get_default_config_filename(state_dir) + scfg = SessionStartupConfig.load(cfgfilename) + + scfg.set_listen_port(self.port) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","PlayerOptionsDialog: OnApply: Saving SessionStartupConfig to",cfgfilename + scfg.save(cfgfilename) + + uploadrate = int(self.uploadratectrl.GetValue()) + self.wxapp.set_playerconfig('total_max_upload_rate',uploadrate) + self.wxapp.save_playerconfig() + + # TODO: For max upload, etc. we also have to modify the runtime Session. 
+ + def OnAdvanced(self,event = None): + + if self.port is None: + self.port = self.wxapp.s.get_listen_port() + #destdir = self.wxapp.s.get_dest_dir() + + dlg = PlayerAdvancedOptionsDialog(self.icons,self.port,self.wxapp) + ret = dlg.ShowModal() + if ret == wx.ID_OK: + self.port = dlg.get_port() + dlg.Destroy() + + +class PlayerAdvancedOptionsDialog(wx.Dialog): + + def __init__(self,icons,port,wxapp): + style = wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER # TODO: Add OK+Cancel + wx.Dialog.__init__(self, None, -1, 'SwarmPlayer Advanced Options', size=(400,200), style=style) + self.wxapp = wxapp + + self.SetIcons(icons) + + mainbox = wx.BoxSizer(wx.VERTICAL) + + portbox = wx.BoxSizer(wx.HORIZONTAL) + label = wx.StaticText(self, -1, 'Port') + self.portctrl = wx.TextCtrl(self, -1, str(port)) + portbox.Add(label, 1, wx.ALIGN_CENTER_VERTICAL) + portbox.Add(self.portctrl) + + button2box = wx.BoxSizer(wx.HORIZONTAL) + clearbtn = wx.Button(self, -1, 'Clear disk cache and exit') + button2box.Add(clearbtn, 0, wx.ALL, 5) + self.Bind(wx.EVT_BUTTON, self.OnClear, clearbtn) + + buttonbox = wx.BoxSizer(wx.HORIZONTAL) + okbtn = wx.Button(self, wx.ID_OK, 'OK') + buttonbox.Add(okbtn, 0, wx.ALL, 5) + cancelbtn = wx.Button(self, wx.ID_CANCEL, 'Cancel') + buttonbox.Add(cancelbtn, 0, wx.ALL, 5) + + mainbox.Add(portbox, 1, wx.EXPAND|wx.ALL, 5) + mainbox.Add(button2box, 1, wx.EXPAND, 1) + mainbox.Add(buttonbox, 1, wx.EXPAND, 1) + self.SetSizerAndFit(mainbox) + + def get_port(self): + return int(self.portctrl.GetValue()) + + def OnClear(self,event=None): + self.wxapp.clear_session_state() + + diff --git a/tribler-mod/Tribler/Plugin/BackgroundProcess.py b/tribler-mod/Tribler/Plugin/BackgroundProcess.py new file mode 100644 index 0000000..b22d29b --- /dev/null +++ b/tribler-mod/Tribler/Plugin/BackgroundProcess.py @@ -0,0 +1,441 @@ +from time import localtime, strftime +# Written by Arno Bakker, Diego Rabioli +# see LICENSE.txt for license information +# +# Notes: +# - Implement play while hashcheck? +# Not needed when proper shutdown & restart was done. +# - load_checkpoint with DLSTATUS_DOWNLOADING for Plugin? 
+# Nah, if we start BG when plugin started we have a video to play soon, +# so start others in STOPPED state (rather than switching them all +# to off and restart one in VOD mode just after) +# +# + +import os +import sys +import time +import random +import binascii +from traceback import print_exc + +import win32event +import win32api + +try: + import wxversion + wxversion.select('2.8') +except: + pass +import wx + +from Tribler.Core.API import * +from Tribler.Utilities.LinuxSingleInstanceChecker import * +from Tribler.Utilities.Instance2Instance import InstanceConnectionHandler,InstanceConnection + +from Tribler.Player.BaseApp import BaseApp +from Tribler.Video.utils import videoextdefaults +from Tribler.Video.VideoServer import VideoHTTPServer + +DEBUG = True +ALLOW_MULTIPLE = False + +I2I_LISTENPORT = 62062 +BG_LISTENPORT = 8621 +VIDEOHTTP_LISTENPORT = 6878 + +class BackgroundApp(BaseApp): + + def __init__(self, redirectstderrout, appname, params, single_instance_checker, installdir, i2iport, sport): + + BaseApp.__init__(self, redirectstderrout, appname, params, single_instance_checker, installdir, i2iport, sport) + + self.videoHTTPServer = VideoHTTPServer(VIDEOHTTP_LISTENPORT) + self.videoHTTPServer.background_serve() + self.videoHTTPServer.register(self.videoservthread_error_callback,self.videoservthread_set_status_callback) + + # Maps Downloads to a using InstanceConnection and streaminfo when it + # plays. So it contains the Downloads in VOD mode for which there is + # active interest from a plugin. + # + # At the moment each Download is used/owned by a single IC and a new + # request for the same torrent will stop playback to the original IC + # and resume it to the new user. + # + self.dusers = {} + + # If the BG Process is started by the plug-in notify it with an event + startupEvent = win32event.CreateEvent( None, 0, 0, 'startupEvent' ) + win32event.SetEvent( startupEvent ) + win32api.CloseHandle( startupEvent ) # TODO : is it possible to avoid importing win32api just to close an handler? + + def OnInit(self): + try: + # Do common initialization + BaseApp.OnInitBase(self) + return True + + except Exception,e: + print_exc() + self.show_error(str(e)) + self.OnExit() + return False + + + # + # InstanceConnectionHandler interface. Called by Instance2InstanceThread + # + def external_connection_made(self,s): + ic = BGInstanceConnection(s,self,self.readlinecallback,self.videoHTTPServer) + self.singsock2ic[s] = ic + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bg: Plugin connection_made",len(self.singsock2ic),"++++++++++++++++++++++++++++++++++++++++++++++++" + + def connection_lost(self,s): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bg: Plugin: connection_lost ------------------------------------------------" + + ic = self.singsock2ic[s] + InstanceConnectionHandler.connection_lost(self,s) + wx.CallAfter(self.gui_connection_lost,ic) + + def gui_connection_lost(self,ic): + # IC may or may not have been shutdown: + # Not: sudden browser crashes + # Yes: controlled stop via ic.shutdown() + ic.shutdown() # idempotent + + # Now apply cleanup policy to the Download, but only after X seconds + # so if the plugin comes back with a new request for the same stuff + # we can give it to him pronto. This is expected to happen a lot due + # to page reloads / history navigation. 
+ # + ic_delayed_remove_if_lambda = lambda:self.i2ithread_delayed_remove_if_not_complete(ic) + # h4x0r, abuse Istance2Instance server task queue for the delay + self.i2is.add_task(ic_delayed_remove_if_lambda,20.0) + + def i2ithread_delayed_remove_if_not_complete(self,ic): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bg: i2ithread_delayed_remove_if_not_complete" + wx.CallAfter(self.gui_delayed_remove_if_not_complete,ic) + + def gui_delayed_remove_if_not_complete(self,ic): + for d,duser in self.dusers.iteritems(): + if duser['uic'] == ic: + # should not remove download if in the meantime a + # new request for this content has been made. + # In this case the Download is still used by the old IC. + d.set_state_callback(self.sesscb_remove_playing_callback) + break + + def remove_playing_download(self,d2remove): + """ Called when sesscb_remove_playing_callback has determined that + we should remove this Download, because it would take too much + bandwidth to download it and the user is apparently no longer + interested. + """ + BaseApp.remove_playing_download(self,d2remove) + if d2remove in self.dusers: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bg: remove_playing_download" + if 'streaminfo' in self.dusers[d2remove]: + stream = self.dusers[d2remove]['streaminfo']['stream'] + stream.close() # Close original stream. + del self.dusers[d2remove] + + + def i2ithread_readlinecallback(self,ic,cmd): + """ Called by Instance2Instance thread """ + wx.CallAfter(self.gui_readlinecallback,ic,cmd) + + def gui_readlinecallback(self,ic,cmd): + """ Receive command from Plugin """ + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bg: Got command:",cmd + try: + # START command + if cmd.startswith( 'START' ): + torrenturl = cmd.partition( ' ' )[2] + if torrenturl is None: + raise ValueError('bg: Unformatted START command') + else: + self.get_torrent_start_download(ic,torrenturl) + + # SHUTDOWN command + elif cmd.startswith( 'SHUTDOWN' ): + ic.shutdown() + else: + raise ValueError('bg: Unknown command: '+cmd) + except: + print_exc() + ic.shutdown() + + + def get_torrent_start_download(self,ic,url): + """ Retrieve torrent file from url and start it in VOD mode, if not already """ + tdef = TorrentDef.load_from_url(url) + + # Select which video to play (if multiple) + videofiles = tdef.get_files(exts=videoextdefaults) + if len(videofiles) == 1: + dlfile = videofiles[0] + elif len(videofiles) == 0: + raise ValueError("bg: get_torrent_start_download: No video files found! Giving up") + elif len(videofiles) > 1: + raise ValueError("bg: get_torrent_start_download: Too many files found! Giving up") + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bg: get_torrent_start_download: Found video file",dlfile + + infohash = tdef.get_infohash() + oldd = None + for d in self.s.get_downloads(): + if d.get_def().get_infohash() == infohash: + oldd = d + break + + # + # Start a new Download, or if it already exists, start playback from + # beginning. This means that we don't currently support two ICs + # playing the same video. That is, two browser windows cannot play the + # same video. 
+ # + if oldd is None or (oldd not in self.downloads_in_vodmode): + # New Download, or Download exists, but not in VOD mode, restart + + if DEBUG: + if oldd is None: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bg: get_torrent_start_download: Starting new Download" + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bg: get_torrent_start_download: Restarting old Download in VOD mode" + + d = self.start_download(tdef,dlfile) + duser = {'uic':ic} + self.dusers[d] = duser + else: + # oldd is already running in VOD mode. If it's a VOD torrent we + # don't need to restart, we can just seek(0) on the stream. + # If it's a live torrent, we should tell EOF to any old IC and + # continue playback to the new IC where it left off. + # + duser = self.dusers[d] + olduic = duser['uic'] + olduic.shutdown() + duser['uic'] = ic + if 'streaminfo' not in duser: + # Hasn't started playing yet, ignore. + pass + else: + # Already playing. Tell previous owner IC to quit, let new IC + # start either from start (VOD) or where previous left off + # (live). + if not tdef.get_live(): + duser['streaminfo']['stream'].seek(0) + ic.set_streaminfo(duser['streaminfo']) + + ic.start_playback(infohash) + + def sesscb_vod_event_callback( self, d, event, params ): + """ Registered by BaseApp. Called by SessionCallbackThread """ + wx.CallAfter(self.gui_vod_event_callback,d,event,params) + + def gui_vod_event_callback( self, d, event, params ): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bg: gui_vod_event_callback: Event: ", event + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bg: gui_vod_event_callback: Params: ", params + + if event == VODEVENT_START: + if params['filename']: + stream = open( params['filename'], "rb" ) + else: + stream = params['stream'] + + streaminfo = { 'mimetype': params['mimetype'], 'stream': stream, 'length': params['length'] } + + duser = self.dusers[d] + duser['streaminfo'] = streaminfo + duser['uic'].set_streaminfo(duser['streaminfo']) + duser['uic'].start_playback(d.get_def().get_infohash()) + + elif event == VODEVENT_PAUSE: + duser = self.dusers[d] + duser['uic'].pause() + + elif event == VODEVENT_RESUME: + duser = self.dusers[d] + duser['uic'].resume() + + def get_supported_vod_events(self): + return [ VODEVENT_START, VODEVENT_PAUSE, VODEVENT_RESUME ] + + # + # VideoServer status/error reporting + # + def videoservthread_error_callback(self,e,url): + """ Called by HTTP serving thread """ + wx.CallAfter(self.videoserver_error_guicallback,e,url) + + def videoserver_error_guicallback(self,e,url): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bg: Video server reported error",str(e) + # self.show_error(str(e)) + pass + # ARNOTODO: schedule current Download for removal? 
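+    # Threading note: callbacks arrive in this class on three non-GUI threads --
+    # the Instance2Instance thread (external_connection_made, connection_lost,
+    # i2ithread_*), the session callback thread (sesscb_vod_event_callback) and
+    # the HTTP serving thread (videoservthread_*). Each of them only schedules
+    # its gui_* / *_guicallback counterpart via wx.CallAfter, so all
+    # self.dusers / Download bookkeeping runs on the wx main thread.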
+ + def videoservthread_set_status_callback(self,status): + """ Called by HTTP serving thread """ + wx.CallAfter(self.videoserver_set_status_guicallback,status) + + def videoserver_set_status_guicallback(self,status): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bg: Video server sets status callback",status + # ARNOTODO: Report status to plugin + + + + +class BGInstanceConnection(InstanceConnection): + + def __init__(self,singsock,connhandler,readlinecallback,videoHTTPServer): + InstanceConnection.__init__(self, singsock, connhandler, readlinecallback) + + self.videoHTTPServer = videoHTTPServer + self.urlpath = None + self.cstreaminfo = {} + self.shutteddown = False + + + def set_streaminfo(self,streaminfo): + """ Copy streaminfo contents and replace stream with a ControlledStream """ + """ + For each IC we create separate stream object and a unique path in the + HTTP server. This avoids nasty thread synchronization with the server + when a new IC wants to play the same content. The Tribler Core stream + does not allow multiple readers. This means we would have to stop + the HTTP server from writing the stream to the old IC, before we + can allow the new IC to read. + + We solved this as follows. The original Tribler Core stream is + wrapped in a ControlledStream, one for each IC. When a new IC + wants to play we tell the old IC's ControlledStream to generate + an EOF to the HTTP server, and tell the old IC to SHUTDOWN. We + then either rewind the Tribler Core stream (VOD) or leave it (live) + and tell the new IC to PLAY. The new ControlledStream will then + be read by the HTTP server again. + """ + self.cstreaminfo.update(streaminfo) + stream = streaminfo['stream'] + cstream = ControlledStream(stream) + self.cstreaminfo['stream'] = cstream + + def start_playback(self,infohash): + """ Register cstream with HTTP server and tell IC to start reading """ + + self.urlpath = '/'+binascii.hexlify(infohash)+'/'+str(random.random()) + self.videoHTTPServer.set_inputstream(self.cstreaminfo,self.urlpath) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bg: Telling plugin to start playback of",self.urlpath + + self.write( 'PLAY '+self.get_video_url()+'\r\n' ) + + + def get_video_url(self): + return 'http://127.0.0.1:'+str(self.videoHTTPServer.get_port())+self.urlpath + + def pause(self): + self.write( 'PAUSE\r\n' ) + + def resume(self): + self.write( 'RESUME\r\n' ) + + def shutdown(self): + # SHUTDOWN Service + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'bg: Shutting down connection to Plugin' + if not self.shutteddown: + self.shutteddown = True + # Cause HTTP server thread to receive EOF on inputstream + if len(self.cstreaminfo) != 0: + self.cstreaminfo['stream'].close() + self.videoHTTPServer.del_inputstream(self.urlpath) + + self.write( 'SHUTDOWN\r\n' ) + # Will cause BaseApp.connection_lost() to be called, where we'll + # handle what to do about the Download that was started for this + # IC. + self.close() + + +class ControlledStream: + """ A file-like object that throws EOF when closed, without actually closing + the underlying inputstream. See BGInstanceConnection.set_streaminfo() for + an explanation on how this is used. 
+ """ + def __init__(self,stream): + self.stream = stream + self.done = False # Event() + + def read(self,nbytes=None): + if not self.done: + return self.stream.read(nbytes) + else: + return '' # EOF + + def seek(self,pos,whence=os.SEEK_SET): + self.stream.seek(pos,whence) + + def close(self): + self.done = True + # DO NOT close original stream + + +############################################################## +# +# Main Program Start Here +# +############################################################## +def run_bgapp(appname,params = None): + if params is None: + params = [""] + + if len(sys.argv) > 1: + params = sys.argv[1:] + + # Create single instance semaphore + # Arno: On Linux and wxPython-2.8.1.1 the SingleInstanceChecker appears + # to mess up stderr, i.e., I get IOErrors when writing to it via print_exc() + # + if sys.platform != 'linux2': + single_instance_checker = wx.SingleInstanceChecker(appname+"-"+ wx.GetUserId()) + else: + single_instance_checker = LinuxSingleInstanceChecker(appname) + + #print "[StartUpDebug]---------------- 1", time()-start_time + if not ALLOW_MULTIPLE and single_instance_checker.IsAnotherRunning(): + if params[0] != "": + torrentfilename = params[0] + i2ic = Instance2InstanceClient(I2I_LISTENPORT,'START',torrentfilename) + time.sleep(1) + return + + arg0 = sys.argv[0].lower() + if arg0.endswith('.exe'): + installdir = os.path.abspath(os.path.dirname(sys.argv[0])) + else: + installdir = os.getcwd() + + # Launch first single instance + app = BackgroundApp(0, appname, params, single_instance_checker, installdir, I2I_LISTENPORT, BG_LISTENPORT) + app.MainLoop() + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Sleeping seconds to let other threads finish" + time.sleep(2) + + if not ALLOW_MULTIPLE: + del single_instance_checker + + +if __name__ == '__main__': + run_bgapp("SwarmPlugin") + + diff --git a/tribler-mod/Tribler/Plugin/BackgroundProcess.py.bak b/tribler-mod/Tribler/Plugin/BackgroundProcess.py.bak new file mode 100644 index 0000000..e9379d9 --- /dev/null +++ b/tribler-mod/Tribler/Plugin/BackgroundProcess.py.bak @@ -0,0 +1,440 @@ +# Written by Arno Bakker, Diego Rabioli +# see LICENSE.txt for license information +# +# Notes: +# - Implement play while hashcheck? +# Not needed when proper shutdown & restart was done. +# - load_checkpoint with DLSTATUS_DOWNLOADING for Plugin? 
+# Nah, if we start BG when plugin started we have a video to play soon, +# so start others in STOPPED state (rather than switching them all +# to off and restart one in VOD mode just after) +# +# + +import os +import sys +import time +import random +import binascii +from traceback import print_exc + +import win32event +import win32api + +try: + import wxversion + wxversion.select('2.8') +except: + pass +import wx + +from Tribler.Core.API import * +from Tribler.Utilities.LinuxSingleInstanceChecker import * +from Tribler.Utilities.Instance2Instance import InstanceConnectionHandler,InstanceConnection + +from Tribler.Player.BaseApp import BaseApp +from Tribler.Video.utils import videoextdefaults +from Tribler.Video.VideoServer import VideoHTTPServer + +DEBUG = True +ALLOW_MULTIPLE = False + +I2I_LISTENPORT = 62062 +BG_LISTENPORT = 8621 +VIDEOHTTP_LISTENPORT = 6878 + +class BackgroundApp(BaseApp): + + def __init__(self, redirectstderrout, appname, params, single_instance_checker, installdir, i2iport, sport): + + BaseApp.__init__(self, redirectstderrout, appname, params, single_instance_checker, installdir, i2iport, sport) + + self.videoHTTPServer = VideoHTTPServer(VIDEOHTTP_LISTENPORT) + self.videoHTTPServer.background_serve() + self.videoHTTPServer.register(self.videoservthread_error_callback,self.videoservthread_set_status_callback) + + # Maps Downloads to a using InstanceConnection and streaminfo when it + # plays. So it contains the Downloads in VOD mode for which there is + # active interest from a plugin. + # + # At the moment each Download is used/owned by a single IC and a new + # request for the same torrent will stop playback to the original IC + # and resume it to the new user. + # + self.dusers = {} + + # If the BG Process is started by the plug-in notify it with an event + startupEvent = win32event.CreateEvent( None, 0, 0, 'startupEvent' ) + win32event.SetEvent( startupEvent ) + win32api.CloseHandle( startupEvent ) # TODO : is it possible to avoid importing win32api just to close an handler? + + def OnInit(self): + try: + # Do common initialization + BaseApp.OnInitBase(self) + return True + + except Exception,e: + print_exc() + self.show_error(str(e)) + self.OnExit() + return False + + + # + # InstanceConnectionHandler interface. Called by Instance2InstanceThread + # + def external_connection_made(self,s): + ic = BGInstanceConnection(s,self,self.readlinecallback,self.videoHTTPServer) + self.singsock2ic[s] = ic + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bg: Plugin connection_made",len(self.singsock2ic),"++++++++++++++++++++++++++++++++++++++++++++++++" + + def connection_lost(self,s): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bg: Plugin: connection_lost ------------------------------------------------" + + ic = self.singsock2ic[s] + InstanceConnectionHandler.connection_lost(self,s) + wx.CallAfter(self.gui_connection_lost,ic) + + def gui_connection_lost(self,ic): + # IC may or may not have been shutdown: + # Not: sudden browser crashes + # Yes: controlled stop via ic.shutdown() + ic.shutdown() # idempotent + + # Now apply cleanup policy to the Download, but only after X seconds + # so if the plugin comes back with a new request for the same stuff + # we can give it to him pronto. This is expected to happen a lot due + # to page reloads / history navigation. 
+ # + ic_delayed_remove_if_lambda = lambda:self.i2ithread_delayed_remove_if_not_complete(ic) + # h4x0r, abuse Istance2Instance server task queue for the delay + self.i2is.add_task(ic_delayed_remove_if_lambda,20.0) + + def i2ithread_delayed_remove_if_not_complete(self,ic): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bg: i2ithread_delayed_remove_if_not_complete" + wx.CallAfter(self.gui_delayed_remove_if_not_complete,ic) + + def gui_delayed_remove_if_not_complete(self,ic): + for d,duser in self.dusers.iteritems(): + if duser['uic'] == ic: + # should not remove download if in the meantime a + # new request for this content has been made. + # In this case the Download is still used by the old IC. + d.set_state_callback(self.sesscb_remove_playing_callback) + break + + def remove_playing_download(self,d2remove): + """ Called when sesscb_remove_playing_callback has determined that + we should remove this Download, because it would take too much + bandwidth to download it and the user is apparently no longer + interested. + """ + BaseApp.remove_playing_download(self,d2remove) + if d2remove in self.dusers: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bg: remove_playing_download" + if 'streaminfo' in self.dusers[d2remove]: + stream = self.dusers[d2remove]['streaminfo']['stream'] + stream.close() # Close original stream. + del self.dusers[d2remove] + + + def i2ithread_readlinecallback(self,ic,cmd): + """ Called by Instance2Instance thread """ + wx.CallAfter(self.gui_readlinecallback,ic,cmd) + + def gui_readlinecallback(self,ic,cmd): + """ Receive command from Plugin """ + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bg: Got command:",cmd + try: + # START command + if cmd.startswith( 'START' ): + torrenturl = cmd.partition( ' ' )[2] + if torrenturl is None: + raise ValueError('bg: Unformatted START command') + else: + self.get_torrent_start_download(ic,torrenturl) + + # SHUTDOWN command + elif cmd.startswith( 'SHUTDOWN' ): + ic.shutdown() + else: + raise ValueError('bg: Unknown command: '+cmd) + except: + print_exc() + ic.shutdown() + + + def get_torrent_start_download(self,ic,url): + """ Retrieve torrent file from url and start it in VOD mode, if not already """ + tdef = TorrentDef.load_from_url(url) + + # Select which video to play (if multiple) + videofiles = tdef.get_files(exts=videoextdefaults) + if len(videofiles) == 1: + dlfile = videofiles[0] + elif len(videofiles) == 0: + raise ValueError("bg: get_torrent_start_download: No video files found! Giving up") + elif len(videofiles) > 1: + raise ValueError("bg: get_torrent_start_download: Too many files found! Giving up") + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bg: get_torrent_start_download: Found video file",dlfile + + infohash = tdef.get_infohash() + oldd = None + for d in self.s.get_downloads(): + if d.get_def().get_infohash() == infohash: + oldd = d + break + + # + # Start a new Download, or if it already exists, start playback from + # beginning. This means that we don't currently support two ICs + # playing the same video. That is, two browser windows cannot play the + # same video. 
+ # + if oldd is None or (oldd not in self.downloads_in_vodmode): + # New Download, or Download exists, but not in VOD mode, restart + + if DEBUG: + if oldd is None: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bg: get_torrent_start_download: Starting new Download" + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bg: get_torrent_start_download: Restarting old Download in VOD mode" + + d = self.start_download(tdef,dlfile) + duser = {'uic':ic} + self.dusers[d] = duser + else: + # oldd is already running in VOD mode. If it's a VOD torrent we + # don't need to restart, we can just seek(0) on the stream. + # If it's a live torrent, we should tell EOF to any old IC and + # continue playback to the new IC where it left off. + # + duser = self.dusers[d] + olduic = duser['uic'] + olduic.shutdown() + duser['uic'] = ic + if 'streaminfo' not in duser: + # Hasn't started playing yet, ignore. + pass + else: + # Already playing. Tell previous owner IC to quit, let new IC + # start either from start (VOD) or where previous left off + # (live). + if not tdef.get_live(): + duser['streaminfo']['stream'].seek(0) + ic.set_streaminfo(duser['streaminfo']) + + ic.start_playback(infohash) + + def sesscb_vod_event_callback( self, d, event, params ): + """ Registered by BaseApp. Called by SessionCallbackThread """ + wx.CallAfter(self.gui_vod_event_callback,d,event,params) + + def gui_vod_event_callback( self, d, event, params ): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bg: gui_vod_event_callback: Event: ", event + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bg: gui_vod_event_callback: Params: ", params + + if event == VODEVENT_START: + if params['filename']: + stream = open( params['filename'], "rb" ) + else: + stream = params['stream'] + + streaminfo = { 'mimetype': params['mimetype'], 'stream': stream, 'length': params['length'] } + + duser = self.dusers[d] + duser['streaminfo'] = streaminfo + duser['uic'].set_streaminfo(duser['streaminfo']) + duser['uic'].start_playback(d.get_def().get_infohash()) + + elif event == VODEVENT_PAUSE: + duser = self.dusers[d] + duser['uic'].pause() + + elif event == VODEVENT_RESUME: + duser = self.dusers[d] + duser['uic'].resume() + + def get_supported_vod_events(self): + return [ VODEVENT_START, VODEVENT_PAUSE, VODEVENT_RESUME ] + + # + # VideoServer status/error reporting + # + def videoservthread_error_callback(self,e,url): + """ Called by HTTP serving thread """ + wx.CallAfter(self.videoserver_error_guicallback,e,url) + + def videoserver_error_guicallback(self,e,url): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bg: Video server reported error",str(e) + # self.show_error(str(e)) + pass + # ARNOTODO: schedule current Download for removal? 
+ + def videoservthread_set_status_callback(self,status): + """ Called by HTTP serving thread """ + wx.CallAfter(self.videoserver_set_status_guicallback,status) + + def videoserver_set_status_guicallback(self,status): + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","bg: Video server sets status callback",status + # ARNOTODO: Report status to plugin + + + + +class BGInstanceConnection(InstanceConnection): + + def __init__(self,singsock,connhandler,readlinecallback,videoHTTPServer): + InstanceConnection.__init__(self, singsock, connhandler, readlinecallback) + + self.videoHTTPServer = videoHTTPServer + self.urlpath = None + self.cstreaminfo = {} + self.shutteddown = False + + + def set_streaminfo(self,streaminfo): + """ Copy streaminfo contents and replace stream with a ControlledStream """ + """ + For each IC we create separate stream object and a unique path in the + HTTP server. This avoids nasty thread synchronization with the server + when a new IC wants to play the same content. The Tribler Core stream + does not allow multiple readers. This means we would have to stop + the HTTP server from writing the stream to the old IC, before we + can allow the new IC to read. + + We solved this as follows. The original Tribler Core stream is + wrapped in a ControlledStream, one for each IC. When a new IC + wants to play we tell the old IC's ControlledStream to generate + an EOF to the HTTP server, and tell the old IC to SHUTDOWN. We + then either rewind the Tribler Core stream (VOD) or leave it (live) + and tell the new IC to PLAY. The new ControlledStream will then + be read by the HTTP server again. + """ + self.cstreaminfo.update(streaminfo) + stream = streaminfo['stream'] + cstream = ControlledStream(stream) + self.cstreaminfo['stream'] = cstream + + def start_playback(self,infohash): + """ Register cstream with HTTP server and tell IC to start reading """ + + self.urlpath = '/'+binascii.hexlify(infohash)+'/'+str(random.random()) + self.videoHTTPServer.set_inputstream(self.cstreaminfo,self.urlpath) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "bg: Telling plugin to start playback of",self.urlpath + + self.write( 'PLAY '+self.get_video_url()+'\r\n' ) + + + def get_video_url(self): + return 'http://127.0.0.1:'+str(self.videoHTTPServer.get_port())+self.urlpath + + def pause(self): + self.write( 'PAUSE\r\n' ) + + def resume(self): + self.write( 'RESUME\r\n' ) + + def shutdown(self): + # SHUTDOWN Service + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ",'bg: Shutting down connection to Plugin' + if not self.shutteddown: + self.shutteddown = True + # Cause HTTP server thread to receive EOF on inputstream + if len(self.cstreaminfo) != 0: + self.cstreaminfo['stream'].close() + self.videoHTTPServer.del_inputstream(self.urlpath) + + self.write( 'SHUTDOWN\r\n' ) + # Will cause BaseApp.connection_lost() to be called, where we'll + # handle what to do about the Download that was started for this + # IC. + self.close() + + +class ControlledStream: + """ A file-like object that throws EOF when closed, without actually closing + the underlying inputstream. See BGInstanceConnection.set_streaminfo() for + an explanation on how this is used. 
+ """ + def __init__(self,stream): + self.stream = stream + self.done = False # Event() + + def read(self,nbytes=None): + if not self.done: + return self.stream.read(nbytes) + else: + return '' # EOF + + def seek(self,pos,whence=os.SEEK_SET): + self.stream.seek(pos,whence) + + def close(self): + self.done = True + # DO NOT close original stream + + +############################################################## +# +# Main Program Start Here +# +############################################################## +def run_bgapp(appname,params = None): + if params is None: + params = [""] + + if len(sys.argv) > 1: + params = sys.argv[1:] + + # Create single instance semaphore + # Arno: On Linux and wxPython-2.8.1.1 the SingleInstanceChecker appears + # to mess up stderr, i.e., I get IOErrors when writing to it via print_exc() + # + if sys.platform != 'linux2': + single_instance_checker = wx.SingleInstanceChecker(appname+"-"+ wx.GetUserId()) + else: + single_instance_checker = LinuxSingleInstanceChecker(appname) + + #print "[StartUpDebug]---------------- 1", time()-start_time + if not ALLOW_MULTIPLE and single_instance_checker.IsAnotherRunning(): + if params[0] != "": + torrentfilename = params[0] + i2ic = Instance2InstanceClient(I2I_LISTENPORT,'START',torrentfilename) + time.sleep(1) + return + + arg0 = sys.argv[0].lower() + if arg0.endswith('.exe'): + installdir = os.path.abspath(os.path.dirname(sys.argv[0])) + else: + installdir = os.getcwd() + + # Launch first single instance + app = BackgroundApp(0, appname, params, single_instance_checker, installdir, I2I_LISTENPORT, BG_LISTENPORT) + app.MainLoop() + + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Sleeping seconds to let other threads finish" + time.sleep(2) + + if not ALLOW_MULTIPLE: + del single_instance_checker + + +if __name__ == '__main__': + run_bgapp("SwarmPlugin") + + diff --git a/tribler-mod/Tribler/Plugin/Build/Win32/setupBGexe.py b/tribler-mod/Tribler/Plugin/Build/Win32/setupBGexe.py new file mode 100644 index 0000000..8b1e686 --- /dev/null +++ b/tribler-mod/Tribler/Plugin/Build/Win32/setupBGexe.py @@ -0,0 +1,14 @@ +from time import localtime, strftime +# Written by Diego Rabioli, Arno Bakker +# see LICENSE.txt for license information +# +# Run from console: "python createBGexe.py py2exe" +import os + +from distutils.core import setup + +from Tribler.__init__ import LIBRARYNAME +mainfile = os.path.join(LIBRARYNAME,'Plugin','BackgroundProcess.py') + +setup(console=[mainfile]) + diff --git a/tribler-mod/Tribler/Plugin/Build/Win32/setupBGexe.py.bak b/tribler-mod/Tribler/Plugin/Build/Win32/setupBGexe.py.bak new file mode 100644 index 0000000..fa8b836 --- /dev/null +++ b/tribler-mod/Tribler/Plugin/Build/Win32/setupBGexe.py.bak @@ -0,0 +1,13 @@ +# Written by Diego Rabioli, Arno Bakker +# see LICENSE.txt for license information +# +# Run from console: "python createBGexe.py py2exe" +import os + +from distutils.core import setup + +from Tribler.__init__ import LIBRARYNAME +mainfile = os.path.join(LIBRARYNAME,'Plugin','BackgroundProcess.py') + +setup(console=[mainfile]) + diff --git a/tribler-mod/Tribler/Plugin/pluginemulator-http.py b/tribler-mod/Tribler/Plugin/pluginemulator-http.py new file mode 100644 index 0000000..1c44e05 --- /dev/null +++ b/tribler-mod/Tribler/Plugin/pluginemulator-http.py @@ -0,0 +1,46 @@ +from time import localtime, strftime + +import sys +import socket +import urlparse +import time + +class PluginEmulator: + + def __init__(self,port,cmd,param): + s = socket.socket(socket.AF_INET, 
socket.SOCK_STREAM) + s.connect(('127.0.0.1',port)) + msg = cmd+' '+param+'\r\n' + s.send(msg) + + time.sleep(1) + s.close() + return + + while True: + data = s.recv(1024) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","pe: Got BG command",data + if len(data) == 0: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","pe: BG closes IC" + return + elif data.startswith("PLAY"): + break + + #url = data[len("PLAY "):-2] + url = data[len("PLAY "):] + p = urlparse.urlparse(url) + path = p.path + + s2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s2.connect(('127.0.0.1',6878)) + cmd = "GET "+path+" HTTP/1.1\r\nHost: localhost:6878\r\n\r\n" + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SENDING CMD",cmd + s2.send(cmd) + data = s2.recv(256) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","pe: Got HTTP command",data + + +#pe = PluginEmulator(62062,"START","http://www.cs.vu.nl/~arno/vod/route2.tstream") +pe = PluginEmulator(62062,"START","http://www.vuze.com/download/XUGIN6PEJJCQ5777C3WUMMBRFI6HYIHJ.torrent?referal=torrentfilelinkcdp&title=Gopher") + + \ No newline at end of file diff --git a/tribler-mod/Tribler/Plugin/pluginemulator-http.py.bak b/tribler-mod/Tribler/Plugin/pluginemulator-http.py.bak new file mode 100644 index 0000000..ee58e5e --- /dev/null +++ b/tribler-mod/Tribler/Plugin/pluginemulator-http.py.bak @@ -0,0 +1,45 @@ + +import sys +import socket +import urlparse +import time + +class PluginEmulator: + + def __init__(self,port,cmd,param): + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s.connect(('127.0.0.1',port)) + msg = cmd+' '+param+'\r\n' + s.send(msg) + + time.sleep(1) + s.close() + return + + while True: + data = s.recv(1024) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","pe: Got BG command",data + if len(data) == 0: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","pe: BG closes IC" + return + elif data.startswith("PLAY"): + break + + #url = data[len("PLAY "):-2] + url = data[len("PLAY "):] + p = urlparse.urlparse(url) + path = p.path + + s2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s2.connect(('127.0.0.1',6878)) + cmd = "GET "+path+" HTTP/1.1\r\nHost: localhost:6878\r\n\r\n" + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","SENDING CMD",cmd + s2.send(cmd) + data = s2.recv(256) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","pe: Got HTTP command",data + + +#pe = PluginEmulator(62062,"START","http://www.cs.vu.nl/~arno/vod/route2.tstream") +pe = PluginEmulator(62062,"START","http://www.vuze.com/download/XUGIN6PEJJCQ5777C3WUMMBRFI6HYIHJ.torrent?referal=torrentfilelinkcdp&title=Gopher") + + \ No newline at end of file diff --git a/tribler-mod/Tribler/Plugin/pluginemulator.py b/tribler-mod/Tribler/Plugin/pluginemulator.py new file mode 100644 index 0000000..4d55938 --- /dev/null +++ b/tribler-mod/Tribler/Plugin/pluginemulator.py @@ -0,0 +1,45 @@ +from time import localtime, strftime +# BAD CLIENT: SENDS +# GET /path\r\n +# HTTP/1.1\r\n +# Host: localhost:6878\r\n +# \r\n +# +# Then Python HTTP server doesn't correctly send headers. 
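+#
+# Sketch of the exchange this emulator drives (ports and framing are those
+# defined in BackgroundProcess.py elsewhere in this patch):
+#
+#   emulator -> BG   (TCP 62062, I2I_LISTENPORT):  "START <torrent-url>\r\n"
+#   BG -> emulator:                                "PLAY http://127.0.0.1:6878/<infohash-hex>/<random>\r\n"
+#   emulator -> video server (TCP 6878):           "GET <path> HTTP/1.1\r\n..."
+#
+# The BG side may also write "PAUSE\r\n", "RESUME\r\n" or "SHUTDOWN\r\n" on the
+# same control connection.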
+ +import sys +import socket +import urlparse + +class PluginEmulator: + + def __init__(self,port,cmd,param): + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s.connect(('127.0.0.1',port)) + msg = cmd+' '+param+'\r\n' + s.send(msg) + #s.close() + while True: + data = s.recv(1024) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","pe: Got BG command",data + if len(data) == 0: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","pe: BG closes IC" + return + elif data.startswith("PLAY"): + break + + url = data[len("PLAY "):] + p = urlparse.urlparse(url) + path = p.path + + s2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s2.connect(('127.0.0.1',6878)) + s2.send("GET "+path+"HTTP/1.1\r\nHost: localhost:6878\r\n\r\n") + data = s2.recv(100) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","pe: Got HTTP command",data + + +#pe = PluginEmulator(62062,"START","http://www.cs.vu.nl/~arno/vod/route2.tstream") +pe = PluginEmulator(62062,"START","http://www.vuze.com/download/XUGIN6PEJJCQ5777C3WUMMBRFI6HYIHJ.torrent?referal=torrentfilelinkcdp&title=Gopher") + + \ No newline at end of file diff --git a/tribler-mod/Tribler/Plugin/pluginemulator.py.bak b/tribler-mod/Tribler/Plugin/pluginemulator.py.bak new file mode 100644 index 0000000..dc71298 --- /dev/null +++ b/tribler-mod/Tribler/Plugin/pluginemulator.py.bak @@ -0,0 +1,44 @@ +# BAD CLIENT: SENDS +# GET /path\r\n +# HTTP/1.1\r\n +# Host: localhost:6878\r\n +# \r\n +# +# Then Python HTTP server doesn't correctly send headers. + +import sys +import socket +import urlparse + +class PluginEmulator: + + def __init__(self,port,cmd,param): + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s.connect(('127.0.0.1',port)) + msg = cmd+' '+param+'\r\n' + s.send(msg) + #s.close() + while True: + data = s.recv(1024) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","pe: Got BG command",data + if len(data) == 0: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","pe: BG closes IC" + return + elif data.startswith("PLAY"): + break + + url = data[len("PLAY "):] + p = urlparse.urlparse(url) + path = p.path + + s2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s2.connect(('127.0.0.1',6878)) + s2.send("GET "+path+"HTTP/1.1\r\nHost: localhost:6878\r\n\r\n") + data = s2.recv(100) + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","pe: Got HTTP command",data + + +#pe = PluginEmulator(62062,"START","http://www.cs.vu.nl/~arno/vod/route2.tstream") +pe = PluginEmulator(62062,"START","http://www.vuze.com/download/XUGIN6PEJJCQ5777C3WUMMBRFI6HYIHJ.torrent?referal=torrentfilelinkcdp&title=Gopher") + + \ No newline at end of file diff --git a/tribler-mod/Tribler/Policies/RateManager.py b/tribler-mod/Tribler/Policies/RateManager.py new file mode 100644 index 0000000..8822b79 --- /dev/null +++ b/tribler-mod/Tribler/Policies/RateManager.py @@ -0,0 +1,301 @@ +from time import localtime, strftime +# Written by Arno Bakker and ABC authors +# see LICENSE.txt for license information + +import sys +from sets import Set +from threading import RLock +from traceback import print_exc + + +from Tribler.Core.simpledefs import * + +DEBUG = False + + +class RateManager: + def __init__(self): + self.lock = RLock() + self.statusmap = {} + self.currenttotal = {} + self.dset = Set() + self.clear_downloadstates() + + def add_downloadstate(self,ds): + """ Returns the number of unique states currently stored """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y 
%H:%M:%S", localtime())," ","RateManager: add_downloadstate",`ds.get_download().get_def().get_infohash()` + + self.lock.acquire() + try: + d = ds.get_download() + if d not in self.dset: + self.statusmap[ds.get_status()].append(ds) + for dir in [UPLOAD,DOWNLOAD]: + self.currenttotal[dir] += ds.get_current_speed(dir) + self.dset.add(d) + return len(self.dset) + finally: + self.lock.release() + + def add_downloadstatelist(self, dslist): + for ds in dslist: + self.add_downloadstate(ds) + + def adjust_speeds(self): + """ Adjust speeds for the specified set of downloads and clears the set """ + self.lock.acquire() + try: + self.calc_and_set_speed_limits(DOWNLOAD) + self.calc_and_set_speed_limits(UPLOAD) + self.clear_downloadstates() + finally: + self.lock.release() + + + def clear_downloadstates(self): + self.statusmap[DLSTATUS_ALLOCATING_DISKSPACE] = [] + self.statusmap[DLSTATUS_WAITING4HASHCHECK] = [] + self.statusmap[DLSTATUS_HASHCHECKING] = [] + self.statusmap[DLSTATUS_DOWNLOADING] = [] + self.statusmap[DLSTATUS_SEEDING] = [] + self.statusmap[DLSTATUS_STOPPED] = [] + self.statusmap[DLSTATUS_STOPPED_ON_ERROR] = [] + for dir in [UPLOAD,DOWNLOAD]: + self.currenttotal[dir] = 0 + self.dset.clear() + + # + # Internal methods + # + # + # The following methods are all called with the lock held + # + + def calc_and_set_speed_limits(self,direct): + """ Override this method to write you own speed management policy. """ + pass + + +class UserDefinedMaxAlwaysOtherwiseEquallyDividedRateManager(RateManager): + """ This class implements a simple rate management policy that: + 1. If the API user set a desired speed for a particular download, + the speed limit for this download is set to the desired value. + 2. For all torrents for which no desired speeds have been set, + the global limit is equally divided amongst all downloads. + (however small the piece of the pie may be). + 3. There are separate global limits for download speed, upload speed + and upload speed when all torrents are seeding. 
+ """ + def __init__(self): + RateManager.__init__(self) + self.global_max_speed = {} + self.global_max_speed[UPLOAD] = 0.0 + self.global_max_speed[DOWNLOAD] = 0.0 + self.global_max_seedupload_speed = 0.0 + + def set_global_max_speed(self,direct,speed): + self.lock.acquire() + self.global_max_speed[direct] = speed + self.lock.release() + + def set_global_max_seedupload_speed(self,speed): + self.lock.acquire() + self.global_max_seedupload_speed = speed + self.lock.release() + + def calc_and_set_speed_limits(self, dir = UPLOAD): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","RateManager: calc_and_set_speed_limits",dir + + if dir == UPLOAD: + workingset = self.statusmap[DLSTATUS_DOWNLOADING]+self.statusmap[DLSTATUS_SEEDING] + else: + workingset = self.statusmap[DLSTATUS_DOWNLOADING] + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","RateManager: calc_and_set_speed_limits: len workingset",len(workingset) + + # Limit working set to active torrents with connections: + newws = [] + for ds in workingset: + if ds.get_num_peers() > 0: + newws.append(ds) + workingset = newws + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","RateManager: calc_and_set_speed_limits: len active workingset",len(workingset) + + # No active file, not need to calculate + if not workingset: + return + + globalmaxspeed = self.get_global_max_speed(dir) + # See if global speed settings are set to unlimited + if globalmaxspeed == 0: + # Unlimited speed + for ds in workingset: + d = ds.get_download() + d.set_max_speed(dir,d.get_max_desired_speed(dir)) + return + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","RateManager: calc_and_set_speed_limits: globalmaxspeed is",globalmaxspeed,dir + + # User set priority is always granted, ignoring global limit + todoset = [] + for ds in workingset: + d = ds.get_download() + maxdesiredspeed = d.get_max_desired_speed(dir) + if maxdesiredspeed > 0.0: + d.set_max_speed(dir,maxdesiredspeed) + else: + todoset.append(ds) + + if len(todoset) > 0: + # Rest divides globalmaxspeed equally + localmaxspeed = globalmaxspeed / float(len(todoset)) + # if too small than user's problem + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","RateManager: calc_and_set_speed_limits: localmaxspeed is",localmaxspeed,dir + + for ds in todoset: + d = ds.get_download() + d.set_max_speed(dir,localmaxspeed) + + + def get_global_max_speed(self, dir = UPLOAD): + if dir == UPLOAD and len(self.statusmap[DLSTATUS_DOWNLOADING]) == 0 and len(self.statusmap[DLSTATUS_SEEDING]) > 0: + # Static overall maximum up speed when seeding + return self.global_max_seedupload_speed + else: + return self.global_max_speed[dir] + + +class UserDefinedMaxAlwaysOtherwiseDividedOnDemandRateManager(UserDefinedMaxAlwaysOtherwiseEquallyDividedRateManager): + """ This class implements a simple rate management policy that: + 1. If the API user set a desired speed for a particular download, + the speed limit for this download is set to the desired value. + 2. For all torrents for which no desired speeds have been set, + the global limit is divided on demand amongst all downloads. + 3. There are separate global limits for download speed, upload speed + and upload speed when all torrents are seeding. + + TODO: if vod: give all of global limit? 
Do this at higher level: stop + all dls when going to VOD + """ + def __init__(self): + UserDefinedMaxAlwaysOtherwiseEquallyDividedRateManager.__init__(self) + + self.ROOM = 5.0 # the amount of room in speed underutilizing downloads get + + def calc_and_set_speed_limits(self, dir = UPLOAD): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","RateManager: calc_and_set_speed_limits",dir + + if dir == UPLOAD: + workingset = self.statusmap[DLSTATUS_DOWNLOADING]+self.statusmap[DLSTATUS_SEEDING] + else: + workingset = self.statusmap[DLSTATUS_DOWNLOADING] + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","RateManager: calc_and_set_speed_limits: len workingset",len(workingset) + + # Limit working set to active torrents with connections: + newws = [] + for ds in workingset: + if ds.get_num_peers() > 0: + newws.append(ds) + workingset = newws + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","RateManager: calc_and_set_speed_limits: len new workingset",len(workingset) + for ds in workingset: + d = ds.get_download() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","RateManager: calc_and_set_speed_limits: working is",d.get_def().get_name() + + # No active file, not need to calculate + if not workingset: + return + + globalmaxspeed = self.get_global_max_speed(dir) + # See if global speed settings are set to unlimited + if globalmaxspeed == 0: + # Unlimited speed + for ds in workingset: + d = ds.get_download() + d.set_max_speed(dir,d.get_max_desired_speed(dir)) + return + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","RateManager: calc_and_set_speed_limits: globalmaxspeed is",globalmaxspeed,dir + + # User set priority is always granted, ignoring global limit + todoset = [] + for ds in workingset: + d = ds.get_download() + maxdesiredspeed = d.get_max_desired_speed(dir) + if maxdesiredspeed > 0.0: + d.set_max_speed(dir,maxdesiredspeed) + else: + todoset.append(ds) + + if len(todoset) > 0: + # Rest divides globalmaxspeed based on their demand + localmaxspeed = globalmaxspeed / float(len(todoset)) + # if too small than user's problem + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","RateManager: calc_and_set_speed_limits: localmaxspeed is",localmaxspeed,dir + + # See if underutilizers and overutilizers. 
If not, just divide equally + downloadsatmax = False + downloadsunderutil = False + for ds in todoset: + d = ds.get_download() + currspeed = ds.get_current_speed(dir) + currmaxspeed = d.get_max_speed(dir) + + newmaxspeed = currspeed+self.ROOM + if currspeed >= (currmaxspeed-3.0): # dl needs more + downloadsatmax = True + elif newmaxspeed < localmaxspeed: # dl got quota to spare + downloadsunderutil = True + + if downloadsatmax and downloadsunderutil: + totalunused = 0.0 + todoset2 = [] + for ds in todoset: + d = ds.get_download() + currspeed = ds.get_current_speed(dir) + + newmaxspeed = currspeed+self.ROOM + if newmaxspeed < localmaxspeed: + # If unterutilizing: + totalunused += (localmaxspeed-newmaxspeed) + # Give current speed + 5.0 KB/s extra so it can grow + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","RateManager: calc_and_set_speed_limits: Underutil set to",newmaxspeed + d.set_max_speed(dir,newmaxspeed) + else: + todoset2.append(ds) + + # Divide the unused bandwidth equally amongst others + if len(todoset2) > 0: + pie = float(len(todoset2)) * localmaxspeed + totalunused + piece = pie / float(len(todoset2)) + for ds in todoset: + d = ds.get_download() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","RateManager: calc_and_set_speed_limits: Overutil set to",piece + d.set_max_speed(dir,piece) + else: + # what the f? No overutilizers now? + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","UserDefinedMaxAlwaysOtherwiseDividedOnDemandRateManager: Internal error: No overutilizers anymore?" + else: + # No over and under utilizers, just divide equally + for ds in todoset: + d = ds.get_download() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","RateManager: calc_and_set_speed_limits: Normal set to",piece + d.set_max_speed(dir,localmaxspeed) diff --git a/tribler-mod/Tribler/Policies/RateManager.py.bak b/tribler-mod/Tribler/Policies/RateManager.py.bak new file mode 100644 index 0000000..3d358b7 --- /dev/null +++ b/tribler-mod/Tribler/Policies/RateManager.py.bak @@ -0,0 +1,300 @@ +# Written by Arno Bakker and ABC authors +# see LICENSE.txt for license information + +import sys +from sets import Set +from threading import RLock +from traceback import print_exc + + +from Tribler.Core.simpledefs import * + +DEBUG = False + + +class RateManager: + def __init__(self): + self.lock = RLock() + self.statusmap = {} + self.currenttotal = {} + self.dset = Set() + self.clear_downloadstates() + + def add_downloadstate(self,ds): + """ Returns the number of unique states currently stored """ + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","RateManager: add_downloadstate",`ds.get_download().get_def().get_infohash()` + + self.lock.acquire() + try: + d = ds.get_download() + if d not in self.dset: + self.statusmap[ds.get_status()].append(ds) + for dir in [UPLOAD,DOWNLOAD]: + self.currenttotal[dir] += ds.get_current_speed(dir) + self.dset.add(d) + return len(self.dset) + finally: + self.lock.release() + + def add_downloadstatelist(self, dslist): + for ds in dslist: + self.add_downloadstate(ds) + + def adjust_speeds(self): + """ Adjust speeds for the specified set of downloads and clears the set """ + self.lock.acquire() + try: + self.calc_and_set_speed_limits(DOWNLOAD) + self.calc_and_set_speed_limits(UPLOAD) + self.clear_downloadstates() + finally: + self.lock.release() + + + def clear_downloadstates(self): + self.statusmap[DLSTATUS_ALLOCATING_DISKSPACE] = [] + 
self.statusmap[DLSTATUS_WAITING4HASHCHECK] = [] + self.statusmap[DLSTATUS_HASHCHECKING] = [] + self.statusmap[DLSTATUS_DOWNLOADING] = [] + self.statusmap[DLSTATUS_SEEDING] = [] + self.statusmap[DLSTATUS_STOPPED] = [] + self.statusmap[DLSTATUS_STOPPED_ON_ERROR] = [] + for dir in [UPLOAD,DOWNLOAD]: + self.currenttotal[dir] = 0 + self.dset.clear() + + # + # Internal methods + # + # + # The following methods are all called with the lock held + # + + def calc_and_set_speed_limits(self,direct): + """ Override this method to write you own speed management policy. """ + pass + + +class UserDefinedMaxAlwaysOtherwiseEquallyDividedRateManager(RateManager): + """ This class implements a simple rate management policy that: + 1. If the API user set a desired speed for a particular download, + the speed limit for this download is set to the desired value. + 2. For all torrents for which no desired speeds have been set, + the global limit is equally divided amongst all downloads. + (however small the piece of the pie may be). + 3. There are separate global limits for download speed, upload speed + and upload speed when all torrents are seeding. + """ + def __init__(self): + RateManager.__init__(self) + self.global_max_speed = {} + self.global_max_speed[UPLOAD] = 0.0 + self.global_max_speed[DOWNLOAD] = 0.0 + self.global_max_seedupload_speed = 0.0 + + def set_global_max_speed(self,direct,speed): + self.lock.acquire() + self.global_max_speed[direct] = speed + self.lock.release() + + def set_global_max_seedupload_speed(self,speed): + self.lock.acquire() + self.global_max_seedupload_speed = speed + self.lock.release() + + def calc_and_set_speed_limits(self, dir = UPLOAD): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","RateManager: calc_and_set_speed_limits",dir + + if dir == UPLOAD: + workingset = self.statusmap[DLSTATUS_DOWNLOADING]+self.statusmap[DLSTATUS_SEEDING] + else: + workingset = self.statusmap[DLSTATUS_DOWNLOADING] + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","RateManager: calc_and_set_speed_limits: len workingset",len(workingset) + + # Limit working set to active torrents with connections: + newws = [] + for ds in workingset: + if ds.get_num_peers() > 0: + newws.append(ds) + workingset = newws + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","RateManager: calc_and_set_speed_limits: len active workingset",len(workingset) + + # No active file, not need to calculate + if not workingset: + return + + globalmaxspeed = self.get_global_max_speed(dir) + # See if global speed settings are set to unlimited + if globalmaxspeed == 0: + # Unlimited speed + for ds in workingset: + d = ds.get_download() + d.set_max_speed(dir,d.get_max_desired_speed(dir)) + return + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","RateManager: calc_and_set_speed_limits: globalmaxspeed is",globalmaxspeed,dir + + # User set priority is always granted, ignoring global limit + todoset = [] + for ds in workingset: + d = ds.get_download() + maxdesiredspeed = d.get_max_desired_speed(dir) + if maxdesiredspeed > 0.0: + d.set_max_speed(dir,maxdesiredspeed) + else: + todoset.append(ds) + + if len(todoset) > 0: + # Rest divides globalmaxspeed equally + localmaxspeed = globalmaxspeed / float(len(todoset)) + # if too small than user's problem + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","RateManager: calc_and_set_speed_limits: localmaxspeed is",localmaxspeed,dir + + 
for ds in todoset: + d = ds.get_download() + d.set_max_speed(dir,localmaxspeed) + + + def get_global_max_speed(self, dir = UPLOAD): + if dir == UPLOAD and len(self.statusmap[DLSTATUS_DOWNLOADING]) == 0 and len(self.statusmap[DLSTATUS_SEEDING]) > 0: + # Static overall maximum up speed when seeding + return self.global_max_seedupload_speed + else: + return self.global_max_speed[dir] + + +class UserDefinedMaxAlwaysOtherwiseDividedOnDemandRateManager(UserDefinedMaxAlwaysOtherwiseEquallyDividedRateManager): + """ This class implements a simple rate management policy that: + 1. If the API user set a desired speed for a particular download, + the speed limit for this download is set to the desired value. + 2. For all torrents for which no desired speeds have been set, + the global limit is divided on demand amongst all downloads. + 3. There are separate global limits for download speed, upload speed + and upload speed when all torrents are seeding. + + TODO: if vod: give all of global limit? Do this at higher level: stop + all dls when going to VOD + """ + def __init__(self): + UserDefinedMaxAlwaysOtherwiseEquallyDividedRateManager.__init__(self) + + self.ROOM = 5.0 # the amount of room in speed underutilizing downloads get + + def calc_and_set_speed_limits(self, dir = UPLOAD): + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","RateManager: calc_and_set_speed_limits",dir + + if dir == UPLOAD: + workingset = self.statusmap[DLSTATUS_DOWNLOADING]+self.statusmap[DLSTATUS_SEEDING] + else: + workingset = self.statusmap[DLSTATUS_DOWNLOADING] + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","RateManager: calc_and_set_speed_limits: len workingset",len(workingset) + + # Limit working set to active torrents with connections: + newws = [] + for ds in workingset: + if ds.get_num_peers() > 0: + newws.append(ds) + workingset = newws + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","RateManager: calc_and_set_speed_limits: len new workingset",len(workingset) + for ds in workingset: + d = ds.get_download() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","RateManager: calc_and_set_speed_limits: working is",d.get_def().get_name() + + # No active file, not need to calculate + if not workingset: + return + + globalmaxspeed = self.get_global_max_speed(dir) + # See if global speed settings are set to unlimited + if globalmaxspeed == 0: + # Unlimited speed + for ds in workingset: + d = ds.get_download() + d.set_max_speed(dir,d.get_max_desired_speed(dir)) + return + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","RateManager: calc_and_set_speed_limits: globalmaxspeed is",globalmaxspeed,dir + + # User set priority is always granted, ignoring global limit + todoset = [] + for ds in workingset: + d = ds.get_download() + maxdesiredspeed = d.get_max_desired_speed(dir) + if maxdesiredspeed > 0.0: + d.set_max_speed(dir,maxdesiredspeed) + else: + todoset.append(ds) + + if len(todoset) > 0: + # Rest divides globalmaxspeed based on their demand + localmaxspeed = globalmaxspeed / float(len(todoset)) + # if too small than user's problem + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","RateManager: calc_and_set_speed_limits: localmaxspeed is",localmaxspeed,dir + + # See if underutilizers and overutilizers. 
If not, just divide equally + downloadsatmax = False + downloadsunderutil = False + for ds in todoset: + d = ds.get_download() + currspeed = ds.get_current_speed(dir) + currmaxspeed = d.get_max_speed(dir) + + newmaxspeed = currspeed+self.ROOM + if currspeed >= (currmaxspeed-3.0): # dl needs more + downloadsatmax = True + elif newmaxspeed < localmaxspeed: # dl got quota to spare + downloadsunderutil = True + + if downloadsatmax and downloadsunderutil: + totalunused = 0.0 + todoset2 = [] + for ds in todoset: + d = ds.get_download() + currspeed = ds.get_current_speed(dir) + + newmaxspeed = currspeed+self.ROOM + if newmaxspeed < localmaxspeed: + # If underutilizing: + totalunused += (localmaxspeed-newmaxspeed) + # Give current speed + 5.0 KB/s extra so it can grow + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","RateManager: calc_and_set_speed_limits: Underutil set to",newmaxspeed + d.set_max_speed(dir,newmaxspeed) + else: + todoset2.append(ds) + + # Divide the unused bandwidth equally amongst the others + if len(todoset2) > 0: + pie = float(len(todoset2)) * localmaxspeed + totalunused + piece = pie / float(len(todoset2)) + for ds in todoset2: + d = ds.get_download() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","RateManager: calc_and_set_speed_limits: Overutil set to",piece + d.set_max_speed(dir,piece) + else: + # Unexpected: no overutilizers left? + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","UserDefinedMaxAlwaysOtherwiseDividedOnDemandRateManager: Internal error: No overutilizers anymore?" + else: + # No over- and underutilizers, just divide equally + for ds in todoset: + d = ds.get_download() + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","RateManager: calc_and_set_speed_limits: Normal set to",localmaxspeed + d.set_max_speed(dir,localmaxspeed) diff --git a/tribler-mod/Tribler/Policies/SeedingManager.py b/tribler-mod/Tribler/Policies/SeedingManager.py new file mode 100644 index 0000000..ab518d8 --- /dev/null +++ b/tribler-mod/Tribler/Policies/SeedingManager.py @@ -0,0 +1,210 @@ +from time import localtime, strftime +# Written by Boxun Zhang +# see LICENSE.txt for license information + +import sys +import time +from Tribler.Core.simpledefs import * + +DEBUG = False + +class GlobalSeedingManager: + def __init__(self, Read): + self.curr_seedings = [] + self.info_hashes = [] + self.Read = Read + + def apply_seeding_policy(self, dslist): + # Remove stopped seeds + for curr in self.curr_seedings[:]: # iterate over a copy, we remove while looping + if not curr.get_status() == DLSTATUS_SEEDING: + self.info_hashes.remove(curr.get_def().get_infohash()) + self.curr_seedings.remove(curr) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","GlobalSeedingManager: current seedings: ",len(self.curr_seedings) + + for ds in dslist: + if ds.get_status() == DLSTATUS_SEEDING and ds.get_download().get_def().get_infohash() not in self.info_hashes: + # apply new seeding manager + seeding_manager = SeedingManager(ds) +# t4t option_ + t4t_option = self.Read('t4t_option', "int") + + if t4t_option == 0: + # No BitTorrent leeching, seed until sharing ratio = 1.0 + seeding_manager.set_t4t_policy(TitForTatRatioBasedSeeding(ds)) + elif t4t_option == 1: + # Unlimited seeding + seeding_manager.set_t4t_policy(UnlimitedSeeding()) + elif t4t_option == 2: + # Time based seeding + seeding_manager.set_t4t_policy(TitForTatTimeBasedSeeding(self.Read)) + else: + # t4t_option == 3, no seeding + seeding_manager.set_t4t_policy(NoSeeding()) +# _t4t option + +# g2g option_ + 
g2g_option = self.Read('g2g_option', "int") + + if g2g_option == 0: + # Seeding to peers with large sharing ratio + seeding_manager.set_g2g_policy(GiveToGetRatioBasedSeeding(self.Read, ds)) + elif g2g_option == 1: + # Boost your reputation + seeding_manager.set_g2g_policy(UnlimitedSeeding()) + elif g2g_option == 2: + # Seeding for sometime + seeding_manager.set_g2g_policy(GiveToGetTimeBasedSeeding(self.Read)) + else: + # g2g_option == 3, no seeding + seeding_manager.set_g2g_policy(NoSeeding()) +# _g2g option + + # Apply seeding manager + ds.get_download().set_seeding_policy(seeding_manager) + + self.curr_seedings.append(ds) + self.info_hashes.append(ds.get_download().get_def().get_infohash()) + + +class SeedingManager: + def __init__(self, ds): + self.ds = ds + self.t4t_policy = None + self.g2g_policy = None + + self.t4t_stop = False + self.g2g_stop = False + + def is_conn_eligible(self, conn): + if conn.use_g2g: + g2g_r = self.g2g_policy.apply(conn) + self.g2g_stop = g2g_r + + # If seeding stop both to g2g and t4t + # then stop seeding + if self.t4t_stop and self.g2g_stop: + self.ds.get_download().stop() + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Stop seedings: ",self.ds.get_download().get_dest_files() + + return g2g_r + + else: + t4t_r = self.t4t_policy.apply(conn) + self.t4t_stop = t4t_r + + if self.t4t_stop and self.g2g_stop: + self.ds.get_download().stop() + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Stop seedings: ",self.ds.get_download().get_dest_files() + + + return t4t_r + + + def set_t4t_policy(self, policy): + self.t4t_policy = policy + + def set_g2g_policy(self, policy): + self.g2g_policy = policy + +class SeedingPolicy: + def __init__(self): + pass + + def apply(self, conn): + pass + +class UnlimitedSeeding(SeedingPolicy): + def __init__(self): + SeedingPolicy.__init__(self) + + def apply(self, conn): + return True + + +class NoSeeding(SeedingPolicy): + def __init__(self): + SeedingPolicy.__init__(self) + + def apply(self, conn): + return False + +class TitForTatTimeBasedSeeding(SeedingPolicy): + def __init__(self, Read): + SeedingPolicy.__init__(self) + self.Read = Read + self.begin = time.time() + + def apply(self, conn): + seeding_secs = 0 + seeding_secs = long(self.Read('t4t_hours', "int"))*3600 + long(self.Read('t4t_mins', "int"))*60 + + if time.time() - self.begin <= seeding_secs: + return True + else: + return False + +class GiveToGetTimeBasedSeeding(SeedingPolicy): + def __init__(self, Read): + SeedingPolicy.__init__(self) + self.Read = Read + self.begin = time.time() + + def apply(self, conn): + seeding_secs = 0 + seeding_secs = long(self.Read('g2g_hours', "int"))*3600 + long(self.Read('g2g_mins', "int"))*60 + + if time.time() - self.begin <= seeding_secs: + return True + else: + return False + + +class TitForTatRatioBasedSeeding(SeedingPolicy): + def __init__(self, ds): + SeedingPolicy.__init__(self) + self.ds = ds + + def apply(self, conn): + # No Bittorrent leeching +# ratio = self.ds.stats['utotal']/self.ds.stats['dtotal'] + ratio = 0.0 + stats = self.ds.stats['stats'] + dl = stats.downTotal + ul = stats.upTotal + + if not dl == 0: + ratio = ul/dl + + if ratio <= 1.0: + return True + else: + return False + +class GiveToGetRatioBasedSeeding(SeedingPolicy): + def __init__(self, Read, ds): + SeedingPolicy.__init__(self) + self.Read = Read + self.ds = ds + + def apply(self, conn): + # Seeding to peers with large sharing ratio + ratio = 0.0 + + dl = conn.download.measure.get_total() + ul = 
conn.upload.measure.get_total() + + if not dl == 0: + ratio = ul/dl + + if ratio <= Read('g2g_ratio', "int")/100.0: + return False + else: + return True + diff --git a/tribler-mod/Tribler/Policies/SeedingManager.py.bak b/tribler-mod/Tribler/Policies/SeedingManager.py.bak new file mode 100644 index 0000000..316360d --- /dev/null +++ b/tribler-mod/Tribler/Policies/SeedingManager.py.bak @@ -0,0 +1,209 @@ +# Written by Boxun Zhang +# see LICENSE.txt for license information + +import sys +import time +from Tribler.Core.simpledefs import * + +DEBUG = False + +class GlobalSeedingManager: + def __init__(self, Read): + self.curr_seedings = [] + self.info_hashes = [] + self.Read = Read + + def apply_seeding_policy(self, dslist): + # Remove stoped seeds + for curr in self.curr_seedings: + if not curr.get_status() == DLSTATUS_SEEDING: + self.info_hashes.remove(curr.get_def().get_infohash()) + self.curr_seedings.remove(curr) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","GlobalSeedingManager: current seedings: ",len(self.curr_seedings) + + for ds in dslist: + if ds.get_status() == DLSTATUS_SEEDING and ds.get_download().get_def().get_infohash() not in self.info_hashes: + # apply new seeding manager + seeding_manager = SeedingManager(ds) +# t4t option_ + t4t_option = self.Read('t4t_option', "int") + + if t4t_option == 0: + # No Bittorrent bleeching, seeding until sharing ratio = 1.0 + seeding_manager.set_t4t_policy(TitForTatRatioBasedSeeding(ds)) + elif t4t_option == 1: + # Unlimited seeding + seeding_manager.set_t4t_policy(UnlimitedSeeding()) + elif t4t_option == 2: + # Time based seeding + seeding_manager.set_t4t_policy(TitForTatTimeBasedSeeding(self.Read)) + else: + # t4t_option == 3, no seeding + seeding_manager.set_t4t_policy(NoSeeding()) +# _t4t option + +# g2g option_ + g2g_option = self.Read('g2g_option', "int") + + if g2g_option == 0: + # Seeding to peers with large sharing ratio + seeding_manager.set_g2g_policy(GiveToGetRatioBasedSeeding(self.Read, ds)) + elif g2g_option == 1: + # Boost your reputation + seeding_manager.set_g2g_policy(UnlimitedSeeding()) + elif g2g_option == 2: + # Seeding for sometime + seeding_manager.set_g2g_policy(GiveToGetTimeBasedSeeding(self.Read)) + else: + # g2g_option == 3, no seeding + seeding_manager.set_g2g_policy(NoSeeding()) +# _g2g option + + # Apply seeding manager + ds.get_download().set_seeding_policy(seeding_manager) + + self.curr_seedings.append(ds) + self.info_hashes.append(ds.get_download().get_def().get_infohash()) + + +class SeedingManager: + def __init__(self, ds): + self.ds = ds + self.t4t_policy = None + self.g2g_policy = None + + self.t4t_stop = False + self.g2g_stop = False + + def is_conn_eligible(self, conn): + if conn.use_g2g: + g2g_r = self.g2g_policy.apply(conn) + self.g2g_stop = g2g_r + + # If seeding stop both to g2g and t4t + # then stop seeding + if self.t4t_stop and self.g2g_stop: + self.ds.get_download().stop() + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Stop seedings: ",self.ds.get_download().get_dest_files() + + return g2g_r + + else: + t4t_r = self.t4t_policy.apply(conn) + self.t4t_stop = t4t_r + + if self.t4t_stop and self.g2g_stop: + self.ds.get_download().stop() + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","Stop seedings: ",self.ds.get_download().get_dest_files() + + + return t4t_r + + + def set_t4t_policy(self, policy): + self.t4t_policy = policy + + def set_g2g_policy(self, policy): + self.g2g_policy = policy + 
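The seeding policy classes that follow all share one small interface: apply(conn) returns True to keep serving a connection and False to stop. A minimal sketch of a custom policy in the same style (the class name and byte cap are made up for illustration; conn.upload.measure.get_total() is the same per-connection counter the ratio-based policies below rely on):

class UploadCapSeeding(SeedingPolicy):
    """ Illustrative policy, not part of the patch: keep seeding a
        connection until a fixed number of bytes has been uploaded to it. """
    def __init__(self, max_bytes):
        SeedingPolicy.__init__(self)
        self.max_bytes = max_bytes

    def apply(self, conn):
        # stop once this connection has received max_bytes from us
        return conn.upload.measure.get_total() < self.max_bytes

It would be installed like the built-in policies, e.g. seeding_manager.set_t4t_policy(UploadCapSeeding(50*1024*1024)).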
+class SeedingPolicy: + def __init__(self): + pass + + def apply(self, conn): + pass + +class UnlimitedSeeding(SeedingPolicy): + def __init__(self): + SeedingPolicy.__init__(self) + + def apply(self, conn): + return True + + +class NoSeeding(SeedingPolicy): + def __init__(self): + SeedingPolicy.__init__(self) + + def apply(self, conn): + return False + +class TitForTatTimeBasedSeeding(SeedingPolicy): + def __init__(self, Read): + SeedingPolicy.__init__(self) + self.Read = Read + self.begin = time.time() + + def apply(self, conn): + seeding_secs = 0 + seeding_secs = long(self.Read('t4t_hours', "int"))*3600 + long(self.Read('t4t_mins', "int"))*60 + + if time.time() - self.begin <= seeding_secs: + return True + else: + return False + +class GiveToGetTimeBasedSeeding(SeedingPolicy): + def __init__(self, Read): + SeedingPolicy.__init__(self) + self.Read = Read + self.begin = time.time() + + def apply(self, conn): + seeding_secs = 0 + seeding_secs = long(self.Read('g2g_hours', "int"))*3600 + long(self.Read('g2g_mins', "int"))*60 + + if time.time() - self.begin <= seeding_secs: + return True + else: + return False + + +class TitForTatRatioBasedSeeding(SeedingPolicy): + def __init__(self, ds): + SeedingPolicy.__init__(self) + self.ds = ds + + def apply(self, conn): + # No Bittorrent leeching +# ratio = self.ds.stats['utotal']/self.ds.stats['dtotal'] + ratio = 0.0 + stats = self.ds.stats['stats'] + dl = stats.downTotal + ul = stats.upTotal + + if not dl == 0: + ratio = ul/dl + + if ratio <= 1.0: + return True + else: + return False + +class GiveToGetRatioBasedSeeding(SeedingPolicy): + def __init__(self, Read, ds): + SeedingPolicy.__init__(self) + self.Read = Read + self.ds = ds + + def apply(self, conn): + # Seeding to peers with large sharing ratio + ratio = 0.0 + + dl = conn.download.measure.get_total() + ul = conn.upload.measure.get_total() + + if not dl == 0: + ratio = ul/dl + + if ratio <= Read('g2g_ratio', "int")/100.0: + return False + else: + return True + diff --git a/tribler-mod/Tribler/Policies/UploadLimitation.py b/tribler-mod/Tribler/Policies/UploadLimitation.py new file mode 100644 index 0000000..de66c4f --- /dev/null +++ b/tribler-mod/Tribler/Policies/UploadLimitation.py @@ -0,0 +1,256 @@ +from time import localtime, strftime +# Written by Jelle Roozenburg +# see LICENSE.txt for license information + +import sys, commands, re +from Tribler.Core.simpledefs import UPLOAD +from Tribler.Core.exceptions import NotYetImplementedException + +DEBUG = False +DUMMY = True +class UploadLimitation: + + def __init__(self, session, ratemanager): + self.session = session + self.logFilename = 'uploadLimitation.log' + self.ratemanager = ratemanager + self.measure_interval = 5 # measure upload speed every 5 seconds + self.dslist = None + self.register_get_download_states() + + + def register_get_download_states(self): + self.session.set_download_states_callback(self.download_states_callback) + + def download_states_callback(self, dslist): + self.dslist = dslist + total_upload = 0.0 + for downloadstate in dslist: + upload = downloadstate.get_current_speed(UPLOAD) + total_upload += upload + self.log('Total ulspeed: %f' % total_upload) + self.upload_speed_callback(total_upload) + return (self.measure_interval, False) + + def upload_speed_callback(self, speed): + raise NotYetImplementedException() + + def set_max_upload_speed(self, speed): + "Set the max_upload_speed in kb/s in the ratemanager and force speed adjust" + if DUMMY: + self.log('Not setting max ulspeed to: %f (DUMMY MODE)' % speed) + return + 
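# When DUMMY is False, the code below pushes the new limit into the rate
# manager: it sets both the normal global upload limit and the seed-only
# upload limit to the same value, re-registers the most recent download
# states, and then forces a speed recalculation.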
+ self.log('Setting max ulspeed to: %f' % speed) + self.ratemanager.set_global_max_speed(UPLOAD, speed) + # also set upload limit for seeding + self.ratemanager.set_global_max_seedupload_speed(speed) + + self.ratemanager.add_downloadstatelist(self.dslist) + self.ratemanager.adjust_speeds() + + def log(self, s): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'UploadLimitation: ', s + + def logSpeeds(self, speed, limit, mode): + f = file(self.logFilename, 'a') + f.write('%f\t%f\t%s\n' % (speed, limit, mode)) + f.close() + +class TestUploadLimitation(UploadLimitation): + """ + Test implementation of UploadLimitation. + Decreases upload and sets + + limit = current upload speed - 1 + + When no uploadspeed anymore, set upload limit to 50 kb/s + """ + def __init__(self, session, ratemanager): + UploadLimitation.__init__(self, session, ratemanager) + + def upload_speed_callback(self, speed): + newspeed = max(0.0, speed-1.0) + if newspeed == 0.0: + newspeed = 50.0 + + self.set_max_upload_speed(newspeed) + +class MeasureUploadLimitation(UploadLimitation): + """ + """ + def __init__(self, session, ratemanager): + + self.minLimitTime = 3 + self.maxLimitTime = 80 + self.limitTime = 10 + self.measureTime = 4 + self.measureToLimitFactor = 0.95 + self.shortTermToLimitFactor = 0.7 + self.measureToGlobalMaxFactor = 0.9 + self.shortTermCorrectionFactor = 0.9 + self.shortTermLength = 4 # average this amount of measurements to get short term upload in limited mode + self.step = 0 + self.uploadLimit = 0.0 + + self.maxUpload = 0.0 # Max measured upload speed is stored here + self.measureMode = True # start in measureMode + self.freeMeasurements = [] + self.limitedMeasurements = [] + self.measureTimer = self.measureTime - 1 + self.limitTimer = self.limitTime - 1 + + UploadLimitation.__init__(self, session, ratemanager) + + def upload_speed_callback(self, speed): + self.logSpeeds(speed, self.uploadLimit, int(self.measureMode)) + self.maxUpload = max(self.maxUpload, speed) + if self.measureMode: + self.log('measure step %d/%d' % (self.measureTime - self.measureTimer,self.measureTime)) + self.measureModeUpdate(speed) + else: + self.log('limit step %d/%d' % (self.limitTime - self.limitTimer,self.limitTime)) + self.limitModeUpdate(speed) + + self.log('freeM: %s' % self.freeMeasurements) + self.log('limitM: %s' % self.limitedMeasurements) + + def measureModeUpdate(self, speed): + self.freeMeasurements.append(speed) + if self.measureTimer == 0: + # Switch to limit mode + assert len(self.freeMeasurements) == self.measureTime + measureMax = max(self.freeMeasurements) + self.freeMeasurements = [] + # If measurement is similar to historic measurements, increase limit time + if measureMax > self.measureToGlobalMaxFactor * self.maxUpload: + self.limitTime = min(self.limitTime*2, self.maxLimitTime) + else: + self.limitTime = self.minLimitTime + self.log('Changed limit time to: %d' % self.limitTime) + + self.measureMode = False + self.limitTimer = self.limitTime - 1 + self.uploadLimit = self.measureToLimitFactor * measureMax + self.log('Switching to limit mode with limit: %f' % self.uploadLimit) + self.set_max_upload_speed(self.uploadLimit) + + self.measureTimer -= 1 + + def limitModeUpdate(self, speed): + if self.limitTimer == 0: + self.limitedMeasurements = [] + self.measureMode = True + self.log('Switching to measure mode for %d steps' % self.measureTime) + self.measureTimer = self.measureTime - 1 + self.uploadLimit = 0.0 + self.set_max_upload_speed(self.uploadLimit) + else: + 
self.limitedMeasurements.append(speed) + shortTerm = self.getShortTermUpload() + if (shortTerm is not None and + shortTerm < self.uploadLimit * self.shortTermToLimitFactor): + self.log('Low shortTerm bw: %f to a limit of %f' % (shortTerm, self.uploadLimit)) + self.uploadLimit *= self.shortTermCorrectionFactor + self.limitTime = self.minLimitTime + self.limitTimer = min(self.limitTimer, self.limitTime-1) + self.set_max_upload_speed(self.uploadLimit) + self.log('Setting upload limit to %f and limittime to %d' % (self.uploadLimit, self.limitTime)) + self.limitTimer-= 1 + def getShortTermUpload(self): + assert not self.measureMode + if len(self.limitedMeasurements) >= self.shortTermLength: + return sum(self.limitedMeasurements[-self.shortTermLength:]) / float(self.shortTermLength) + else: + return None + + +from Tribler.Tools.BandwidthCounter import get_bandwidth_speed + +class PingUploadLimitation(UploadLimitation): + """ + This upload limitation uses ping times to some hosts to see if the upload capacity is consumed + """ + + unix_ping_regexp = re.compile(r'= ?[\d.]+/([\d.]+)/[\d.]+/[\d.]+') + hosts = ['www.google.com', 'www.yahoo.com', 'www.bbc.co.uk'] + + + def __init__(self, session, ratemanager): + UploadLimitation.__init__(self, session, ratemanager) + self.ping_hosts = {} + self.low_upload = 5 # Less than 5 kb/s upload is considered low upload with no delaying in pings + self.upload_limit = None + self.idletime = {} + + def upload_speed_callback(self, speed): + total_down, total_up = get_bandwidth_speed() + self.log('Tribler upload speed: %f, total upload speed: %f, total down speed %f ' % (speed, total_up, total_down)) + if total_up < low_upload: + self.addIdlePing() + else: + pass # Todo + + def addIdlePing(self): + ping_times = self.ping_hosts() + for host, time in ping_times.iteritems(): + oldtimes = self.idletime.get(host) + if oldtimes: + self.idletime[host] = ((oldtimes[0]*oldtimes[1]+ping_times[host])/oldtimes[1]+1, oldtimes[1]+1) + else: + self.idletime[host] = (ping_times[host], 1) + + + def ping_hosts(self): + data = {} + for host in self.hosts: + data[host] = self.ping(host) + return data + + def ping(self, host): + if sys.platform == 'win32': + return self.ping_win(host) + elif sys.platform.find('linux') != -1: + return self.ping_unix(host, 'linux') + elif sys.platform == 'darwin': + return self.ping_unix(host, 'darwin') + + def ping_win(self, host): + raise Exception('not yet implemented') + + def ping_unix(self, host, os): + if os == 'linux': + com = 'ping -c 3 -i0.2 -n %s' % host + else: + raise Exception('Not yet implemented') + pass + status, output = commands.getstatusoutput(com) + if status != 0: + self.log('Error: could not call ping') + else: + avg = self.unix_ping_regexp.findall(output) + try: + assert len(avg) == 1 + return avg[0] + except: + self.log('Incorrect ping output: %s' % output) + + + +class TotalUploadLimitation(UploadLimitation): + """ + This upload limitation uses the Tribler.Tools.BandwidthCounter to measure the system wide upload- + and download usage. 
If the upload usage encloses the total + """ + def __init__(self, session, ratemanager): + UploadLimitation.__init__(self, session, ratemanager) + + def upload_speed_callback(self, speed): + total_down, total_up = get_bandwidth_speed() + self.log('Tribler upload speed: %f, total upload speed: %f, total down speed %f ' % (speed, total_up, total_down)) + + #self.set_max_upload_speed(newspeed) + + + diff --git a/tribler-mod/Tribler/Policies/UploadLimitation.py.bak b/tribler-mod/Tribler/Policies/UploadLimitation.py.bak new file mode 100644 index 0000000..e2b7a29 --- /dev/null +++ b/tribler-mod/Tribler/Policies/UploadLimitation.py.bak @@ -0,0 +1,255 @@ +# Written by Jelle Roozenburg +# see LICENSE.txt for license information + +import sys, commands, re +from Tribler.Core.simpledefs import UPLOAD +from Tribler.Core.exceptions import NotYetImplementedException + +DEBUG = False +DUMMY = True +class UploadLimitation: + + def __init__(self, session, ratemanager): + self.session = session + self.logFilename = 'uploadLimitation.log' + self.ratemanager = ratemanager + self.measure_interval = 5 # measure upload speed every 5 seconds + self.dslist = None + self.register_get_download_states() + + + def register_get_download_states(self): + self.session.set_download_states_callback(self.download_states_callback) + + def download_states_callback(self, dslist): + self.dslist = dslist + total_upload = 0.0 + for downloadstate in dslist: + upload = downloadstate.get_current_speed(UPLOAD) + total_upload += upload + self.log('Total ulspeed: %f' % total_upload) + self.upload_speed_callback(total_upload) + return (self.measure_interval, False) + + def upload_speed_callback(self, speed): + raise NotYetImplementedException() + + def set_max_upload_speed(self, speed): + "Set the max_upload_speed in kb/s in the ratemanager and force speed adjust" + if DUMMY: + self.log('Not setting max ulspeed to: %f (DUMMY MODE)' % speed) + return + + self.log('Setting max ulspeed to: %f' % speed) + self.ratemanager.set_global_max_speed(UPLOAD, speed) + # also set upload limit for seeding + self.ratemanager.set_global_max_seedupload_speed(speed) + + self.ratemanager.add_downloadstatelist(self.dslist) + self.ratemanager.adjust_speeds() + + def log(self, s): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", 'UploadLimitation: ', s + + def logSpeeds(self, speed, limit, mode): + f = file(self.logFilename, 'a') + f.write('%f\t%f\t%s\n' % (speed, limit, mode)) + f.close() + +class TestUploadLimitation(UploadLimitation): + """ + Test implementation of UploadLimitation. 
+ Decreases upload and sets + + limit = current upload speed - 1 + + When no uploadspeed anymore, set upload limit to 50 kb/s + """ + def __init__(self, session, ratemanager): + UploadLimitation.__init__(self, session, ratemanager) + + def upload_speed_callback(self, speed): + newspeed = max(0.0, speed-1.0) + if newspeed == 0.0: + newspeed = 50.0 + + self.set_max_upload_speed(newspeed) + +class MeasureUploadLimitation(UploadLimitation): + """ + """ + def __init__(self, session, ratemanager): + + self.minLimitTime = 3 + self.maxLimitTime = 80 + self.limitTime = 10 + self.measureTime = 4 + self.measureToLimitFactor = 0.95 + self.shortTermToLimitFactor = 0.7 + self.measureToGlobalMaxFactor = 0.9 + self.shortTermCorrectionFactor = 0.9 + self.shortTermLength = 4 # average this amount of measurements to get short term upload in limited mode + self.step = 0 + self.uploadLimit = 0.0 + + self.maxUpload = 0.0 # Max measured upload speed is stored here + self.measureMode = True # start in measureMode + self.freeMeasurements = [] + self.limitedMeasurements = [] + self.measureTimer = self.measureTime - 1 + self.limitTimer = self.limitTime - 1 + + UploadLimitation.__init__(self, session, ratemanager) + + def upload_speed_callback(self, speed): + self.logSpeeds(speed, self.uploadLimit, int(self.measureMode)) + self.maxUpload = max(self.maxUpload, speed) + if self.measureMode: + self.log('measure step %d/%d' % (self.measureTime - self.measureTimer,self.measureTime)) + self.measureModeUpdate(speed) + else: + self.log('limit step %d/%d' % (self.limitTime - self.limitTimer,self.limitTime)) + self.limitModeUpdate(speed) + + self.log('freeM: %s' % self.freeMeasurements) + self.log('limitM: %s' % self.limitedMeasurements) + + def measureModeUpdate(self, speed): + self.freeMeasurements.append(speed) + if self.measureTimer == 0: + # Switch to limit mode + assert len(self.freeMeasurements) == self.measureTime + measureMax = max(self.freeMeasurements) + self.freeMeasurements = [] + # If measurement is similar to historic measurements, increase limit time + if measureMax > self.measureToGlobalMaxFactor * self.maxUpload: + self.limitTime = min(self.limitTime*2, self.maxLimitTime) + else: + self.limitTime = self.minLimitTime + self.log('Changed limit time to: %d' % self.limitTime) + + self.measureMode = False + self.limitTimer = self.limitTime - 1 + self.uploadLimit = self.measureToLimitFactor * measureMax + self.log('Switching to limit mode with limit: %f' % self.uploadLimit) + self.set_max_upload_speed(self.uploadLimit) + + self.measureTimer -= 1 + + def limitModeUpdate(self, speed): + if self.limitTimer == 0: + self.limitedMeasurements = [] + self.measureMode = True + self.log('Switching to measure mode for %d steps' % self.measureTime) + self.measureTimer = self.measureTime - 1 + self.uploadLimit = 0.0 + self.set_max_upload_speed(self.uploadLimit) + else: + self.limitedMeasurements.append(speed) + shortTerm = self.getShortTermUpload() + if (shortTerm is not None and + shortTerm < self.uploadLimit * self.shortTermToLimitFactor): + self.log('Low shortTerm bw: %f to a limit of %f' % (shortTerm, self.uploadLimit)) + self.uploadLimit *= self.shortTermCorrectionFactor + self.limitTime = self.minLimitTime + self.limitTimer = min(self.limitTimer, self.limitTime-1) + self.set_max_upload_speed(self.uploadLimit) + self.log('Setting upload limit to %f and limittime to %d' % (self.uploadLimit, self.limitTime)) + self.limitTimer-= 1 + def getShortTermUpload(self): + assert not self.measureMode + if len(self.limitedMeasurements) 
>= self.shortTermLength: + return sum(self.limitedMeasurements[-self.shortTermLength:]) / float(self.shortTermLength) + else: + return None + + +from Tribler.Tools.BandwidthCounter import get_bandwidth_speed + +class PingUploadLimitation(UploadLimitation): + """ + This upload limitation uses ping times to some hosts to see if the upload capacity is consumed + """ + + unix_ping_regexp = re.compile(r'= ?[\d.]+/([\d.]+)/[\d.]+/[\d.]+') + hosts = ['www.google.com', 'www.yahoo.com', 'www.bbc.co.uk'] + + + def __init__(self, session, ratemanager): + UploadLimitation.__init__(self, session, ratemanager) + self.ping_hosts = {} + self.low_upload = 5 # Less than 5 kb/s upload is considered low upload with no delaying in pings + self.upload_limit = None + self.idletime = {} + + def upload_speed_callback(self, speed): + total_down, total_up = get_bandwidth_speed() + self.log('Tribler upload speed: %f, total upload speed: %f, total down speed %f ' % (speed, total_up, total_down)) + if total_up < low_upload: + self.addIdlePing() + else: + pass # Todo + + def addIdlePing(self): + ping_times = self.ping_hosts() + for host, time in ping_times.iteritems(): + oldtimes = self.idletime.get(host) + if oldtimes: + self.idletime[host] = ((oldtimes[0]*oldtimes[1]+ping_times[host])/oldtimes[1]+1, oldtimes[1]+1) + else: + self.idletime[host] = (ping_times[host], 1) + + + def ping_hosts(self): + data = {} + for host in self.hosts: + data[host] = self.ping(host) + return data + + def ping(self, host): + if sys.platform == 'win32': + return self.ping_win(host) + elif sys.platform.find('linux') != -1: + return self.ping_unix(host, 'linux') + elif sys.platform == 'darwin': + return self.ping_unix(host, 'darwin') + + def ping_win(self, host): + raise Exception('not yet implemented') + + def ping_unix(self, host, os): + if os == 'linux': + com = 'ping -c 3 -i0.2 -n %s' % host + else: + raise Exception('Not yet implemented') + pass + status, output = commands.getstatusoutput(com) + if status != 0: + self.log('Error: could not call ping') + else: + avg = self.unix_ping_regexp.findall(output) + try: + assert len(avg) == 1 + return avg[0] + except: + self.log('Incorrect ping output: %s' % output) + + + +class TotalUploadLimitation(UploadLimitation): + """ + This upload limitation uses the Tribler.Tools.BandwidthCounter to measure the system wide upload- + and download usage. 
If the upload usage encloses the total + """ + def __init__(self, session, ratemanager): + UploadLimitation.__init__(self, session, ratemanager) + + def upload_speed_callback(self, speed): + total_down, total_up = get_bandwidth_speed() + self.log('Tribler upload speed: %f, total upload speed: %f, total down speed %f ' % (speed, total_up, total_down)) + + #self.set_max_upload_speed(newspeed) + + + diff --git a/tribler-mod/Tribler/Policies/__init__.py b/tribler-mod/Tribler/Policies/__init__.py new file mode 100644 index 0000000..b7ef832 --- /dev/null +++ b/tribler-mod/Tribler/Policies/__init__.py @@ -0,0 +1,3 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information diff --git a/tribler-mod/Tribler/Policies/__init__.py.bak b/tribler-mod/Tribler/Policies/__init__.py.bak new file mode 100644 index 0000000..395f8fb --- /dev/null +++ b/tribler-mod/Tribler/Policies/__init__.py.bak @@ -0,0 +1,2 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information diff --git a/tribler-mod/Tribler/Subscriptions/__init__.py b/tribler-mod/Tribler/Subscriptions/__init__.py new file mode 100644 index 0000000..7c348ca --- /dev/null +++ b/tribler-mod/Tribler/Subscriptions/__init__.py @@ -0,0 +1,4 @@ +from time import localtime, strftime +# Written by Arno Bakker +# see LICENSE.txt for license information + diff --git a/tribler-mod/Tribler/Subscriptions/__init__.py.bak b/tribler-mod/Tribler/Subscriptions/__init__.py.bak new file mode 100644 index 0000000..86ac17b --- /dev/null +++ b/tribler-mod/Tribler/Subscriptions/__init__.py.bak @@ -0,0 +1,3 @@ +# Written by Arno Bakker +# see LICENSE.txt for license information + diff --git a/tribler-mod/Tribler/Subscriptions/rss_client.py b/tribler-mod/Tribler/Subscriptions/rss_client.py new file mode 100644 index 0000000..07d066e --- /dev/null +++ b/tribler-mod/Tribler/Subscriptions/rss_client.py @@ -0,0 +1,551 @@ +from time import localtime, strftime +# Written by Freek Zindel, Arno Bakker +# see LICENSE.txt for license information +# +#this is a very limited torrent rss reader. +#works on some sites, but not on others due to captchas or username/password requirements for downloads. + +#usage: make a torrentfeedreader instance and call refresh whenevey you would like to check that feed for new torrents. e.g. every 15 minutes. +# +# Arno, 2007-05-7: We now store the urls visited on disk and don't recontact them for a certain period +# I've added special support for vuze torrents that have the links to the .torrent in the RSS XML +# but not as an tag. +# +# In addition, I've set the reader to be conservative for now, it only looks at .torrent files +# directly mentioned in the RSS XML, no recursive parsing, that, in case of vuze, visits a lot +# of sites unnecessarily and uses Java session IDs (";jsessionid") in the URLs, which renders +# our do-not-visit-if-recently-visited useless. +# +# 2007-05-08: vuze appears to have added a ;jsessionid to the tag. I now strip that for +# the URLHistory, but use it in requests. So don't be alarmed by the ;jsessionid in the debug messages. +# +# 2008-04-04: vuze appears to have changed format altogether: It no longer +# adheres to RSS. 
is called and is called +# + +import os +import sys +import traceback +from Tribler.Core.Utilities.timeouturlopen import urlOpenTimeout +#from BitTornado.zurllib import urlopen +import re +import urlparse +from xml.dom.minidom import parseString +from xml.parsers.expat import ExpatError +from threading import Thread,RLock,Event +import time +import sha + +from Tribler.Core.API import * +from Tribler.Core.BitTornado.bencode import bdecode,bencode + +URLHIST_TIMEOUT = 7*24*3600.0 # Don't revisit links for this time + +DEBUG = True #False + +class TorrentFeedThread(Thread): + + __single = None + + def __init__(self): + if TorrentFeedThread.__single: + raise RuntimeError, "TorrentFeedThread is singleton" + TorrentFeedThread.__single = self + Thread.__init__(self) + self.setName( "TorrentFeed"+self.getName() ) + self.setDaemon(True) + + self.urls = {} + self.feeds = [] + self.lock = RLock() + self.done = Event() + + # when rss feeds change, we have to restart the checking + self.feeds_changed = False + + def getInstance(*args, **kw): + if TorrentFeedThread.__single is None: + TorrentFeedThread(*args, **kw) + return TorrentFeedThread.__single + getInstance = staticmethod(getInstance) + """ + def register(self,utility): + self.utility = utility + self.intertorrentinterval = self.utility.config.Read("torrentcollectsleep","int") + + self.torrent_dir = self.utility.session.get_torrent_collecting_dir() + self.torrent_db = self.utility.session.open_dbhandler(NTFY_TORRENTS) + + filename = self.getfilename() + try: + f = open(filename,"rb") + for line in f.readlines(): + for key in ['active','inactive']: + if line.startswith(key): + url = line[len(key)+1:-2] # remove \r\n + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subscrip: Add from file URL",url,"EOU" + self.addURL(url,dowrite=False,status=key) + f.close() + except: + pass + #traceback.print_exc() + + #self.addURL('http://www.vuze.com/syndication/browse/AZHOT/ALL/X/X/26/X/_/_/X/X/feed.xml') + + + def addURL(self,url,dowrite=True,status="active"): + self.lock.acquire() + if url not in self.urls: + self.urls[url] = status + if status == "active": + feed = TorrentFeedReader(url,self.gethistfilename(url)) + self.feeds.append(feed) + self.feeds_changed = True + if dowrite: + self.writefile() + self.lock.release() + """ + + def register(self,session): + self.session = session + self.torrent_dir = self.session.get_torrent_collecting_dir() + self.torrent_db = self.session.open_dbhandler(NTFY_TORRENTS) + + filename = self.getfilename() + dirname = os.path.dirname(filename) + if not os.path.exists(dirname): + os.makedirs(dirname) + + try: + f = open(filename,"rb") + for line in f.readlines(): + for key in ['active','inactive']: + if line.startswith(key): + url = line[len(key)+1:-2] # remove \r\n + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subscrip: Add from file URL",url,"EOU" + self.addURL(url,dowrite=False,status=key) + f.close() + except: + pass + #traceback.print_exc() + + #self.addURL('http://www.vuze.com/syndication/browse/AZHOT/ALL/X/X/26/X/_/_/X/X/feed.xml') + + def addURL(self, url, dowrite=True, status="active", on_torrent_callback=None): + self.lock.acquire() + if url not in self.urls: + self.urls[url] = status + if status == "active": + feed = TorrentFeedReader(url,self.gethistfilename(url)) + self.feeds.append((feed, on_torrent_callback)) + self.feeds_changed = True + if dowrite: + self.writefile() + self.lock.release() + + def writefile(self): + filename = 
self.getfilename() + f = open(filename,"wb") + for url in self.urls: + val = self.urls[url] + f.write(val+' '+url+'\r\n') + f.close() + + def getfilename(self): + return os.path.join(self.getdir(),"subscriptions.txt") + + def gethistfilename(self,url): + # TODO: url2pathname or something that gives a readable filename + h = sha.sha(url).hexdigest() + return os.path.join(self.getdir(),h+'.txt') + + """ + def getdir(self): + return os.path.join(self.utility.getConfigPath(),"subscriptions") + """ + + def getdir(self): + return os.path.join(self.session.get_state_dir(),"subscriptions") + + def getURLs(self): + return self.urls # doesn't need to be locked + + def setURLStatus(self,url,newstatus): + self.lock.acquire() + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subscrip: setURLStatus",url,newstatus + newtxt = "active" + if newstatus == False: + newtxt = "inactive" + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subscrip: setURLStatus: newstatus set to",url,newtxt + if url in self.urls: + self.urls[url] = newtxt + self.writefile() + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subscrip: setURLStatus: unknown URL?",url + self.lock.release() + + def deleteURL(self,url): + self.lock.acquire() + if url in self.urls: + del self.urls[url] + for i in range(len(self.feeds)): + feed = self.feeds[i] + if feed.feed_url == url: + del self.feeds[i] + self.feeds_changed = True + break + self.writefile() + self.lock.release() + + def run(self): + time.sleep(10) # Let other Tribler components, in particular, Session startup + while not self.done.isSet(): + self.lock.acquire() + cfeeds = self.feeds[:] + self.feeds_changed = False + self.lock.release() + + # feeds contains (rss_url, generator) pairs + feeds = {} + for feed, on_torrent_callback in cfeeds: + try: + sugestion_generator = feed.refresh() + except: + pass + else: + feeds[feed.feed_url] = sugestion_generator + + # loop through the feeds and try one from each feed at a time + while feeds: + for (rss_url, generator) in feeds.items(): + + # are there items left in this generator + try: + title, urlopenobj = generator.next() + if not urlopenobj: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "urlopenobj NONE: torrent not found", title + continue + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "urlopenobj : torrent found", title + + bdata = urlopenobj.read() + urlopenobj.close() + + data = bdecode(bdata) + if 'info' in data: + infohash = sha.sha(bencode(data['info'])).digest() + if not self.torrent_db.hasTorrent(infohash): + if DEBUG: + if "name" in data["info"]: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Injecting", data["info"]["name"] + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Injecting", title + self.save_torrent(infohash, bdata, source=rss_url) + if on_torrent_callback: + on_torrent_callback(rss_url, infohash, data) + + + except StopIteration: + # there are no more items in generator + del(feeds[rss_url]) + + except ValueError: + # the bdecode failed + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Bdecode failed: ", rss_url + + except ExpatError: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Invalid RSS: ", rss_url + + # sleep in between torrent retrievals + #time.sleep(self.intertorrentinterval) + time.sleep(self.session.get_rss_check_frequency()) + + self.lock.acquire() + try: + if 
self.feeds_changed: + feeds = None + break + finally: + self.lock.release() + + # sleep for a relatively long time before downloading the + # rss feeds again + for count in range(int(self.session.get_rss_reload_frequency() / 10)): + self.lock.acquire() + try: + if self.feeds_changed: + break + finally: + self.lock.release() + + time.sleep(30) + + + def save_torrent(self,infohash,bdata,source=''): + hexinfohash = binascii.hexlify(infohash) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subscript: Writing",hexinfohash + + filename = os.path.join(self.torrent_dir, hexinfohash+'.torrent' ) + f = open(filename,"wb") + f.write(bdata) + f.close() + + # Arno: hack, make sure these torrents are always good so they show up + # in Torrent DBHandler.getTorrents() + extra_info = {'status':'good'} + self.torrent_db.addExternalTorrent(filename,source=source,extra_info=extra_info) + + # ARNOCOMMENT: remove later + #self.torrent_db.commit() + + + def shutdown(self): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subscrip: Shutting down subscriptions module" + self.done.set() + self.lock.acquire() + cfeeds = self.feeds[:] + self.lock.release() + for feed in cfeeds: + feed.shutdown() + + self.utility.session.close_dbhandler(self.torrent_db) + +""" + def process_statscopy(self,statscopy): + today = [] + yesterday = [] + now = int(time()) + sotoday = math.floor(now / (24*3600.0))*24*3600.0 + soyester = sotday - (24*3600.0) + for rss in statscopy: + for url,t in statscopy[rss]: + if t > sotoday: + today.append(url) +""" + +class TorrentFeedReader: + def __init__(self,feed_url,histfilename): + self.feed_url = feed_url + self.urls_already_seen = URLHistory(histfilename) + # todo: the self.href_re expression does not take into account that single quotes, escaped quotes, etz. can be used + self.href_re = re.compile('href="(.*?)"', re.IGNORECASE) + # the following filter is applied on the xml data because other characters crash the parser + self.filter_xml_expression = re.compile("(&\w+;)|([^\w\d\s~`!@#$%^&*()-_=+{}[\]\\|:;\"'<,>.?/])", re.IGNORECASE) + + self.torrent_types = ['application/x-bittorrent','application/x-download'] + + def isTorrentType(self,type): + return type in self.torrent_types + + def refresh(self): + """Returns a generator for a list of (title,urllib2openedurl_to_torrent) + pairs for this feed. TorrentFeedReader instances keep a list of + torrent urls in memory and will yield a torrent only once. + If the feed points to a torrent url with webserver problems, + that url will not be retried. + urllib2openedurl_to_torrent may be None if there is a webserver problem. + """ + + # Load history from disk + if not self.urls_already_seen.readed: + self.urls_already_seen.read() + self.urls_already_seen.readed = True + + feed_socket = urlOpenTimeout(self.feed_url,timeout=20) + feed_xml = feed_socket.read() + feed_socket.close() + + # 14/07/08 boudewijn: some special characters and html code is + # raises a parser exception. We filter out these character + # sequenses using a regular expression in the filter_xml + # function + dom = parseString(self._filter_xml(feed_xml)) + entries = [] + + # The following XML will result in three links with the same title. 
+ # + # + # The title + # http:/frayja.com/torrent/1 + # Unused title + # Unused title + # + for item in dom.getElementsByTagName("item"): #+ dom.getElementsByTagName("entry"): + title = None + links = [] + child = item.firstChild + while child: + if child.nodeType == 1: # ELEMENT_NODE (according to the DOM standard) + if child.nodeName == "title" and child.firstChild: + title = child.firstChild.data + + if child.nodeName == "link" and child.firstChild: + links.append(child.firstChild.data) + + if child.hasAttribute("src"): + links.append(child.getAttribute("src")) + + if child.hasAttribute("url"): + links.append(child.getAttribute("url")) + + child = child.nextSibling + + if title and links: + entries.extend([(title, link) for link in links]) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subscrip: Parse of RSS returned",len(entries),"previously unseen torrents" + + for title,link in entries: + # print title,link + try: + self.urls_already_seen.add(link) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subscrip: Opening",title,link + html_or_tor = urlOpenTimeout(link,timeout=20) + found_torrent = False + tor_type = html_or_tor.headers.gettype() + if self.isTorrentType(tor_type): + torrent = html_or_tor + found_torrent = True + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subscrip: torrent1: Yielding",link + yield title,torrent + elif False: # 'html' in tor_type: + html = html_or_tor.read() + hrefs = [match.group(1) for match in self.href_re.finditer(html)] + + urls = [] + for url in hrefs: + if not self.urls_already_seen.contains(url): + self.urls_already_seen.add(url) + urls.append(urlparse.urljoin(link,url)) + for url in urls: + #print url + try: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subscrip: torrent2: Opening",url + torrent = urlOpenTimeout(url) + url_type = torrent.headers.gettype() + #print url_type + if self.isTorrentType(url_type): + #print "torrent found:",url + found_torrent = True + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subscrip: torrent2: Yielding",url + yield title,torrent + break + else: + #its not a torrent after all, but just some html link + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "%s not a torrent" % url + except: + #url didn't open + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "%s did not open" % url + if not found_torrent: + yield title,None + except GeneratorExit: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "GENERATOREXIT" + # the generator is destroyed. we accept this by returning + return + except: + traceback.print_exc() + yield title,None + + def shutdown(self): + self.urls_already_seen.write() + + def _filter_xml_helper(self, match): + """helper function to filter invalid xml""" + one = match.group(1) + if one in (">", "<", """, "&"): + return one + return "?" 
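For reference, a minimal sketch of how the feed thread defined above is typically driven (the feed URL is a placeholder and session is assumed to be an already-started Tribler Core Session; this is an illustration, not part of the patch):

feedthread = TorrentFeedThread.getInstance()
feedthread.register(session)                        # needs the Session for the state dir and torrent db
feedthread.addURL('http://example.com/feed.xml')    # stored as "active" in subscriptions.txt
feedthread.start()                                  # daemon thread; polls the feeds periodically

Each .torrent found in a feed is saved to the torrent collecting directory and registered in the torrent database via save_torrent().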
+ + def _filter_xml(self, xml): + """filters out characters and tags that crash xml.dom.minidom.parseString""" + return self.filter_xml_expression.sub(self._filter_xml_helper, xml) + +class URLHistory: + + read_history_expression = re.compile("(\d+(?:[.]\d+)?)\s+(\w+)", re.IGNORECASE) + + def __init__(self,filename): + self.urls = {} + self.filename = filename + self.readed = False + + def add(self,dirtyurl): + url = self.clean_link(dirtyurl) + self.urls[url] = time.time() + + def contains(self,dirtyurl): + url = self.clean_link(dirtyurl) + + # Poor man's filter + if url.endswith(".jpg") or url.endswith(".JPG"): + return True + + t = self.urls.get(url,None) + if t is None: + return False + else: + now = time.time() + return not self.timedout(t,now) # no need to delete + + def timedout(self,t,now): + return (t+URLHIST_TIMEOUT) < now + + def read(self): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subscrip: Reading cached",self.filename + try: + file_handle = open(self.filename, "rb") + except IOError: + # file not found... + # there is no cache available + pass + else: + data = file_handle.read() + file_handle.close() + + now = time.time() + for timestamp, url in self.read_history_expression.findall(data): + timestamp = float(timestamp) + if not self.timedout(timestamp, now): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subscrip: Cached url is",url + self.urls[url] = timestamp + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subscrip: Timed out cached url is %s" % url + + def write(self): + try: + file_handle = open(self.filename, "wb") + except IOError: + # can't write file + traceback.print_exc() + else: + for url, timestamp in self.urls.iteritems(): + file_handle.write("%f %s\r\n" % (timestamp, url)) + file_handle.close() + + def copy(self): + return self.urls.copy() + + def clean_link(self,link): + """ Special vuze case """ + idx = link.find(';jsessionid') + if idx == -1: + return link + else: + return link[:idx] + diff --git a/tribler-mod/Tribler/Subscriptions/rss_client.py.bak b/tribler-mod/Tribler/Subscriptions/rss_client.py.bak new file mode 100644 index 0000000..7f357ef --- /dev/null +++ b/tribler-mod/Tribler/Subscriptions/rss_client.py.bak @@ -0,0 +1,550 @@ +# Written by Freek Zindel, Arno Bakker +# see LICENSE.txt for license information +# +#this is a very limited torrent rss reader. +#works on some sites, but not on others due to captchas or username/password requirements for downloads. + +#usage: make a torrentfeedreader instance and call refresh whenevey you would like to check that feed for new torrents. e.g. every 15 minutes. +# +# Arno, 2007-05-7: We now store the urls visited on disk and don't recontact them for a certain period +# I've added special support for vuze torrents that have the links to the .torrent in the RSS XML +# but not as an tag. +# +# In addition, I've set the reader to be conservative for now, it only looks at .torrent files +# directly mentioned in the RSS XML, no recursive parsing, that, in case of vuze, visits a lot +# of sites unnecessarily and uses Java session IDs (";jsessionid") in the URLs, which renders +# our do-not-visit-if-recently-visited useless. +# +# 2007-05-08: vuze appears to have added a ;jsessionid to the tag. I now strip that for +# the URLHistory, but use it in requests. So don't be alarmed by the ;jsessionid in the debug messages. 
+# +# 2008-04-04: vuze appears to have changed format altogether: It no longer +# adheres to RSS. is called and is called +# + +import os +import sys +import traceback +from Tribler.Core.Utilities.timeouturlopen import urlOpenTimeout +#from BitTornado.zurllib import urlopen +import re +import urlparse +from xml.dom.minidom import parseString +from xml.parsers.expat import ExpatError +from threading import Thread,RLock,Event +import time +import sha + +from Tribler.Core.API import * +from Tribler.Core.BitTornado.bencode import bdecode,bencode + +URLHIST_TIMEOUT = 7*24*3600.0 # Don't revisit links for this time + +DEBUG = True #False + +class TorrentFeedThread(Thread): + + __single = None + + def __init__(self): + if TorrentFeedThread.__single: + raise RuntimeError, "TorrentFeedThread is singleton" + TorrentFeedThread.__single = self + Thread.__init__(self) + self.setName( "TorrentFeed"+self.getName() ) + self.setDaemon(True) + + self.urls = {} + self.feeds = [] + self.lock = RLock() + self.done = Event() + + # when rss feeds change, we have to restart the checking + self.feeds_changed = False + + def getInstance(*args, **kw): + if TorrentFeedThread.__single is None: + TorrentFeedThread(*args, **kw) + return TorrentFeedThread.__single + getInstance = staticmethod(getInstance) + """ + def register(self,utility): + self.utility = utility + self.intertorrentinterval = self.utility.config.Read("torrentcollectsleep","int") + + self.torrent_dir = self.utility.session.get_torrent_collecting_dir() + self.torrent_db = self.utility.session.open_dbhandler(NTFY_TORRENTS) + + filename = self.getfilename() + try: + f = open(filename,"rb") + for line in f.readlines(): + for key in ['active','inactive']: + if line.startswith(key): + url = line[len(key)+1:-2] # remove \r\n + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subscrip: Add from file URL",url,"EOU" + self.addURL(url,dowrite=False,status=key) + f.close() + except: + pass + #traceback.print_exc() + + #self.addURL('http://www.vuze.com/syndication/browse/AZHOT/ALL/X/X/26/X/_/_/X/X/feed.xml') + + + def addURL(self,url,dowrite=True,status="active"): + self.lock.acquire() + if url not in self.urls: + self.urls[url] = status + if status == "active": + feed = TorrentFeedReader(url,self.gethistfilename(url)) + self.feeds.append(feed) + self.feeds_changed = True + if dowrite: + self.writefile() + self.lock.release() + """ + + def register(self,session): + self.session = session + self.torrent_dir = self.session.get_torrent_collecting_dir() + self.torrent_db = self.session.open_dbhandler(NTFY_TORRENTS) + + filename = self.getfilename() + dirname = os.path.dirname(filename) + if not os.path.exists(dirname): + os.makedirs(dirname) + + try: + f = open(filename,"rb") + for line in f.readlines(): + for key in ['active','inactive']: + if line.startswith(key): + url = line[len(key)+1:-2] # remove \r\n + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subscrip: Add from file URL",url,"EOU" + self.addURL(url,dowrite=False,status=key) + f.close() + except: + pass + #traceback.print_exc() + + #self.addURL('http://www.vuze.com/syndication/browse/AZHOT/ALL/X/X/26/X/_/_/X/X/feed.xml') + + def addURL(self, url, dowrite=True, status="active", on_torrent_callback=None): + self.lock.acquire() + if url not in self.urls: + self.urls[url] = status + if status == "active": + feed = TorrentFeedReader(url,self.gethistfilename(url)) + self.feeds.append((feed, on_torrent_callback)) + self.feeds_changed = True + if 
dowrite: + self.writefile() + self.lock.release() + + def writefile(self): + filename = self.getfilename() + f = open(filename,"wb") + for url in self.urls: + val = self.urls[url] + f.write(val+' '+url+'\r\n') + f.close() + + def getfilename(self): + return os.path.join(self.getdir(),"subscriptions.txt") + + def gethistfilename(self,url): + # TODO: url2pathname or something that gives a readable filename + h = sha.sha(url).hexdigest() + return os.path.join(self.getdir(),h+'.txt') + + """ + def getdir(self): + return os.path.join(self.utility.getConfigPath(),"subscriptions") + """ + + def getdir(self): + return os.path.join(self.session.get_state_dir(),"subscriptions") + + def getURLs(self): + return self.urls # doesn't need to be locked + + def setURLStatus(self,url,newstatus): + self.lock.acquire() + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subscrip: setURLStatus",url,newstatus + newtxt = "active" + if newstatus == False: + newtxt = "inactive" + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subscrip: setURLStatus: newstatus set to",url,newtxt + if url in self.urls: + self.urls[url] = newtxt + self.writefile() + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subscrip: setURLStatus: unknown URL?",url + self.lock.release() + + def deleteURL(self,url): + self.lock.acquire() + if url in self.urls: + del self.urls[url] + for i in range(len(self.feeds)): + feed = self.feeds[i] + if feed.feed_url == url: + del self.feeds[i] + self.feeds_changed = True + break + self.writefile() + self.lock.release() + + def run(self): + time.sleep(10) # Let other Tribler components, in particular, Session startup + while not self.done.isSet(): + self.lock.acquire() + cfeeds = self.feeds[:] + self.feeds_changed = False + self.lock.release() + + # feeds contains (rss_url, generator) pairs + feeds = {} + for feed, on_torrent_callback in cfeeds: + try: + sugestion_generator = feed.refresh() + except: + pass + else: + feeds[feed.feed_url] = sugestion_generator + + # loop through the feeds and try one from each feed at a time + while feeds: + for (rss_url, generator) in feeds.items(): + + # are there items left in this generator + try: + title, urlopenobj = generator.next() + if not urlopenobj: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "urlopenobj NONE: torrent not found", title + continue + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "urlopenobj : torrent found", title + + bdata = urlopenobj.read() + urlopenobj.close() + + data = bdecode(bdata) + if 'info' in data: + infohash = sha.sha(bencode(data['info'])).digest() + if not self.torrent_db.hasTorrent(infohash): + if DEBUG: + if "name" in data["info"]: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Injecting", data["info"]["name"] + else: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Injecting", title + self.save_torrent(infohash, bdata, source=rss_url) + if on_torrent_callback: + on_torrent_callback(rss_url, infohash, data) + + + except StopIteration: + # there are no more items in generator + del(feeds[rss_url]) + + except ValueError: + # the bdecode failed + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Bdecode failed: ", rss_url + + except ExpatError: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "Invalid RSS: ", rss_url + + # sleep in between torrent retrievals + #time.sleep(self.intertorrentinterval) + 
time.sleep(self.session.get_rss_check_frequency()) + + self.lock.acquire() + try: + if self.feeds_changed: + feeds = None + break + finally: + self.lock.release() + + # sleep for a relatively long time before downloading the + # rss feeds again + for count in range(int(self.session.get_rss_reload_frequency() / 10)): + self.lock.acquire() + try: + if self.feeds_changed: + break + finally: + self.lock.release() + + time.sleep(30) + + + def save_torrent(self,infohash,bdata,source=''): + hexinfohash = binascii.hexlify(infohash) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subscript: Writing",hexinfohash + + filename = os.path.join(self.torrent_dir, hexinfohash+'.torrent' ) + f = open(filename,"wb") + f.write(bdata) + f.close() + + # Arno: hack, make sure these torrents are always good so they show up + # in Torrent DBHandler.getTorrents() + extra_info = {'status':'good'} + self.torrent_db.addExternalTorrent(filename,source=source,extra_info=extra_info) + + # ARNOCOMMENT: remove later + #self.torrent_db.commit() + + + def shutdown(self): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subscrip: Shutting down subscriptions module" + self.done.set() + self.lock.acquire() + cfeeds = self.feeds[:] + self.lock.release() + for feed in cfeeds: + feed.shutdown() + + self.utility.session.close_dbhandler(self.torrent_db) + +""" + def process_statscopy(self,statscopy): + today = [] + yesterday = [] + now = int(time()) + sotoday = math.floor(now / (24*3600.0))*24*3600.0 + soyester = sotday - (24*3600.0) + for rss in statscopy: + for url,t in statscopy[rss]: + if t > sotoday: + today.append(url) +""" + +class TorrentFeedReader: + def __init__(self,feed_url,histfilename): + self.feed_url = feed_url + self.urls_already_seen = URLHistory(histfilename) + # todo: the self.href_re expression does not take into account that single quotes, escaped quotes, etz. can be used + self.href_re = re.compile('href="(.*?)"', re.IGNORECASE) + # the following filter is applied on the xml data because other characters crash the parser + self.filter_xml_expression = re.compile("(&\w+;)|([^\w\d\s~`!@#$%^&*()-_=+{}[\]\\|:;\"'<,>.?/])", re.IGNORECASE) + + self.torrent_types = ['application/x-bittorrent','application/x-download'] + + def isTorrentType(self,type): + return type in self.torrent_types + + def refresh(self): + """Returns a generator for a list of (title,urllib2openedurl_to_torrent) + pairs for this feed. TorrentFeedReader instances keep a list of + torrent urls in memory and will yield a torrent only once. + If the feed points to a torrent url with webserver problems, + that url will not be retried. + urllib2openedurl_to_torrent may be None if there is a webserver problem. + """ + + # Load history from disk + if not self.urls_already_seen.readed: + self.urls_already_seen.read() + self.urls_already_seen.readed = True + + feed_socket = urlOpenTimeout(self.feed_url,timeout=20) + feed_xml = feed_socket.read() + feed_socket.close() + + # 14/07/08 boudewijn: some special characters and html code is + # raises a parser exception. We filter out these character + # sequenses using a regular expression in the filter_xml + # function + dom = parseString(self._filter_xml(feed_xml)) + entries = [] + + # The following XML will result in three links with the same title. 
+ # + # + # The title + # http:/frayja.com/torrent/1 + # Unused title + # Unused title + # + for item in dom.getElementsByTagName("item"): #+ dom.getElementsByTagName("entry"): + title = None + links = [] + child = item.firstChild + while child: + if child.nodeType == 1: # ELEMENT_NODE (according to the DOM standard) + if child.nodeName == "title" and child.firstChild: + title = child.firstChild.data + + if child.nodeName == "link" and child.firstChild: + links.append(child.firstChild.data) + + if child.hasAttribute("src"): + links.append(child.getAttribute("src")) + + if child.hasAttribute("url"): + links.append(child.getAttribute("url")) + + child = child.nextSibling + + if title and links: + entries.extend([(title, link) for link in links]) + + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subscrip: Parse of RSS returned",len(entries),"previously unseen torrents" + + for title,link in entries: + # print title,link + try: + self.urls_already_seen.add(link) + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subscrip: Opening",title,link + html_or_tor = urlOpenTimeout(link,timeout=20) + found_torrent = False + tor_type = html_or_tor.headers.gettype() + if self.isTorrentType(tor_type): + torrent = html_or_tor + found_torrent = True + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subscrip: torrent1: Yielding",link + yield title,torrent + elif False: # 'html' in tor_type: + html = html_or_tor.read() + hrefs = [match.group(1) for match in self.href_re.finditer(html)] + + urls = [] + for url in hrefs: + if not self.urls_already_seen.contains(url): + self.urls_already_seen.add(url) + urls.append(urlparse.urljoin(link,url)) + for url in urls: + #print url + try: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subscrip: torrent2: Opening",url + torrent = urlOpenTimeout(url) + url_type = torrent.headers.gettype() + #print url_type + if self.isTorrentType(url_type): + #print "torrent found:",url + found_torrent = True + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subscrip: torrent2: Yielding",url + yield title,torrent + break + else: + #its not a torrent after all, but just some html link + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "%s not a torrent" % url + except: + #url didn't open + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "%s did not open" % url + if not found_torrent: + yield title,None + except GeneratorExit: + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ", "GENERATOREXIT" + # the generator is destroyed. we accept this by returning + return + except: + traceback.print_exc() + yield title,None + + def shutdown(self): + self.urls_already_seen.write() + + def _filter_xml_helper(self, match): + """helper function to filter invalid xml""" + one = match.group(1) + if one in (">", "<", """, "&"): + return one + return "?" 
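# _filter_xml()/_filter_xml_helper() above sanitise the raw feed before it reaches
# xml.dom.minidom's parseString(). A self-contained sketch of that idea; the entity
# whitelist is an assumption (&gt;, &lt;, &quot; and &amp; are the obvious
# candidates for the mangled tuple above), and the second regex branch is a
# simplified stand-in for the original character class.
import re

FILTER_XML_RE = re.compile(r"(&\w+;)|([^\x20-\x7e\s])")

def filter_xml(xml):
    def keep_or_replace(match):
        entity = match.group(1)                    # None when the stray-character branch matched
        if entity in ("&gt;", "&lt;", "&quot;", "&amp;"):
            return entity                          # leave well-known entities alone
        return "?"                                 # unknown entity or odd byte: neutralise it
    return FILTER_XML_RE.sub(keep_or_replace, xml)

# filter_xml("<title>caf\xe9 &copy; 2008</title>") == "<title>caf? ? 2008</title>"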
+ + def _filter_xml(self, xml): + """filters out characters and tags that crash xml.dom.minidom.parseString""" + return self.filter_xml_expression.sub(self._filter_xml_helper, xml) + +class URLHistory: + + read_history_expression = re.compile("(\d+(?:[.]\d+)?)\s+(\w+)", re.IGNORECASE) + + def __init__(self,filename): + self.urls = {} + self.filename = filename + self.readed = False + + def add(self,dirtyurl): + url = self.clean_link(dirtyurl) + self.urls[url] = time.time() + + def contains(self,dirtyurl): + url = self.clean_link(dirtyurl) + + # Poor man's filter + if url.endswith(".jpg") or url.endswith(".JPG"): + return True + + t = self.urls.get(url,None) + if t is None: + return False + else: + now = time.time() + return not self.timedout(t,now) # no need to delete + + def timedout(self,t,now): + return (t+URLHIST_TIMEOUT) < now + + def read(self): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subscrip: Reading cached",self.filename + try: + file_handle = open(self.filename, "rb") + except IOError: + # file not found... + # there is no cache available + pass + else: + data = file_handle.read() + file_handle.close() + + now = time.time() + for timestamp, url in self.read_history_expression.findall(data): + timestamp = float(timestamp) + if not self.timedout(timestamp, now): + if DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subscrip: Cached url is",url + self.urls[url] = timestamp + elif DEBUG: + print >>sys.stderr, strftime("%d-%m-%Y %H:%M:%S", localtime())," ","subscrip: Timed out cached url is %s" % url + + def write(self): + try: + file_handle = open(self.filename, "wb") + except IOError: + # can't write file + traceback.print_exc() + else: + for url, timestamp in self.urls.iteritems(): + file_handle.write("%f %s\r\n" % (timestamp, url)) + file_handle.close() + + def copy(self): + return self.urls.copy() + + def clean_link(self,link): + """ Special vuze case """ + idx = link.find(';jsessionid') + if idx == -1: + return link + else: + return link[:idx] + diff --git a/tribler-mod/Tribler/Test/API/contentdir/file.avi b/tribler-mod/Tribler/Test/API/contentdir/file.avi new file mode 100644 index 0000000..08a5020 --- /dev/null +++ b/tribler-mod/Tribler/Test/API/contentdir/file.avi @@ -0,0 +1,1371 @@ +#!/usr/bin/python + +######################################################################### +# +# Author : Choopan RATTANAPOKA, Jie Yang, Arno Bakker +# +# Description : Main ABC [Yet Another Bittorrent Client] python script. +# you can run from source code by using +# >python abc.py +# need Python, WxPython in order to run from source code. +######################################################################### + +# Arno: M2Crypto overrides the method for https:// in the +# standard Python libraries. This causes msnlib to fail and makes Tribler +# freakout when "http://www.tribler.org/version" is redirected to +# "https://www.tribler.org/version/" (which happened during our website +# changeover) Until M2Crypto 0.16 is patched I'll restore the method to the +# original, as follows. +# +# This must be done in the first python file that is started. 
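# The URLHistory class above persists one "<timestamp> <url>" pair per line and,
# on read(), drops entries older than URLHIST_TIMEOUT. A minimal round-trip sketch
# of that format; it splits each line on whitespace instead of reusing
# read_history_expression, whose \w+ group stops at the first non-word character
# of the URL (e.g. the ':' after 'http'), and it assumes a 30-day timeout because
# the real constant is defined outside this hunk.
import time

URLHIST_TIMEOUT = 30 * 24 * 3600.0       # assumed value for the sketch

def load_history(path, now=None):
    """Return {url: timestamp} for the entries that have not timed out yet."""
    now = time.time() if now is None else now
    urls = {}
    with open(path, "rb") as handle:
        lines = handle.read().decode("utf-8", "replace").splitlines()
    for line in lines:
        parts = line.split()
        if len(parts) != 2:
            continue
        try:
            stamp = float(parts[0])
        except ValueError:
            continue
        if stamp + URLHIST_TIMEOUT >= now:   # same cut-off as timedout() above
            urls[parts[1]] = stamp
    return urls

def save_history(path, urls):
    with open(path, "wb") as handle:
        for url, stamp in urls.items():
            handle.write(("%f %s\r\n" % (stamp, url)).encode("utf-8"))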
+# + +import urllib +original_open_https = urllib.URLopener.open_https +import M2Crypto +urllib.URLopener.open_https = original_open_https + +import sys, locale +import os +import wx, commands +from wx import xrc +#import hotshot + +if sys.platform == "darwin": + # on Mac, we can only load VLC libraries + # relative to the location of tribler.py + os.chdir(os.path.abspath(os.path.dirname(sys.argv[0]))) + +from threading import Thread, Timer, Event,currentThread,enumerate +from time import time, ctime, sleep +from traceback import print_exc, print_stack +from cStringIO import StringIO +import urllib + +from interconn import ServerListener, ClientPassParam +from launchmanycore import ABCLaunchMany + +from ABC.Toolbars.toolbars import ABCBottomBar2, ABCStatusBar, ABCMenuBar, ABCToolBar +from ABC.GUI.menu import ABCMenu +from ABC.Scheduler.scheduler import ABCScheduler + +from webservice import WebListener + +if (sys.platform == 'win32'): + from Dialogs.regdialog import RegCheckDialog + +from ABC.GUI.list import ManagedList +from Utility.utility import Utility +from Utility.constants import * #IGNORE:W0611 + +from Tribler.__init__ import tribler_init, tribler_done +from BitTornado.__init__ import product_name +from safeguiupdate import DelayedInvocation,FlaglessDelayedInvocation +import webbrowser +from Tribler.Dialogs.MugshotManager import MugshotManager +from Tribler.vwxGUI.GuiUtility import GUIUtility +import Tribler.vwxGUI.updateXRC as updateXRC +from Tribler.Video.VideoPlayer import VideoPlayer,return_feasible_playback_modes,PLAYBACKMODE_INTERNAL +from Tribler.Video.VideoServer import VideoHTTPServer +from Tribler.Dialogs.GUIServer import GUIServer +from Tribler.vwxGUI.TasteHeart import set_tasteheart_bitmaps +from Tribler.vwxGUI.perfBar import set_perfBar_bitmaps +from Tribler.Dialogs.BandwidthSelector import BandwidthSelector +from Tribler.Subscriptions.rss_client import TorrentFeedThread +from Tribler.Dialogs.activities import * +from Tribler.DecentralizedTracking import mainlineDHT +from Tribler.DecentralizedTracking.rsconvert import RawServerConverter +from Tribler.DecentralizedTracking.mainlineDHTChecker import mainlineDHTChecker + +from Tribler.notification import init as notification_init +from Tribler.vwxGUI.font import * +from Tribler.Web2.util.update import Web2Updater + +from Tribler.CacheDB.CacheDBHandler import BarterCastDBHandler +from Tribler.Overlay.permid import permid_for_user +from BitTornado.download_bt1 import EVIL + +DEBUG = False +ALLOW_MULTIPLE = False +start_time = 0 +start_time2 = 0 + + +################################################################ +# +# Class: FileDropTarget +# +# To enable drag and drop for ABC list in main menu +# +################################################################ +class FileDropTarget(wx.FileDropTarget): + def __init__(self, utility): + # Initialize the wsFileDropTarget Object + wx.FileDropTarget.__init__(self) + # Store the Object Reference for dropped files + self.utility = utility + + def OnDropFiles(self, x, y, filenames): + for filename in filenames: + self.utility.queue.addtorrents.AddTorrentFromFile(filename) + return True + + +############################################################## +# +# Class : ABCList +# +# ABC List class that contains the torrent list +# +############################################################## +class ABCList(ManagedList): + def __init__(self, parent): + style = wx.LC_REPORT|wx.LC_VRULES|wx.CLIP_CHILDREN + + prefix = 'column' + minid = 4 + maxid = 26 + exclude = [] + rightalign = 
[COL_PROGRESS, + COL_SIZE, + COL_DLSPEED, + COL_ULSPEED, + COL_RATIO, + COL_PEERPROGRESS, + COL_DLSIZE, + COL_ULSIZE, + COL_TOTALSPEED] + + ManagedList.__init__(self, parent, style, prefix, minid, maxid, exclude, rightalign) + + dragdroplist = FileDropTarget(self.utility) + self.SetDropTarget(dragdroplist) + + self.lastcolumnsorted = -1 + self.reversesort = 0 + + self.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown) + self.Bind(wx.EVT_LIST_COL_CLICK, self.OnColLeftClick) + + self.Bind(wx.EVT_LIST_ITEM_RIGHT_CLICK, self.OnItemSelected) + + # Bring up advanced details on left double click + self.Bind(wx.EVT_LEFT_DCLICK, self.OnLeftDClick) + + # Bring up local settings on middle double click + self.Bind(wx.EVT_MIDDLE_DCLICK, self.utility.actions[ACTION_LOCALUPLOAD].action) + + # Do thing when keys are pressed down + def OnKeyDown(self, event): + keycode = event.GetKeyCode() + if event.CmdDown(): + if keycode == ord('a') or keycode == ord('A'): + # Select all files (CTRL-A) + self.selectAll() + elif keycode == ord('x') or keycode == ord('X'): + # Invert file selection (CTRL-X) + self.invertSelection() + elif keycode == wx.WXK_RETURN or keycode == wx.WXK_NUMPAD_ENTER: + # Open advanced details (Enter) + self.utility.actions[ACTION_DETAILS].action() + elif keycode == wx.WXK_SPACE: + # Open local settings (Space) + self.utility.actions[ACTION_LOCALUPLOAD].action() + elif keycode == 399: + # Open right-click menu (windows menu key) + self.OnItemSelected() + + event.Skip() + + def OnColLeftClick(self, event): + rank = event.GetColumn() + colid = self.columns.getIDfromRank(rank) + if colid == self.lastcolumnsorted: + self.reversesort = 1 - self.reversesort + else: + self.reversesort = 0 + self.lastcolumnsorted = colid + self.utility.queue.sortList(colid, self.reversesort) + + def selectAll(self): + self.updateSelected(select = range(0, self.GetItemCount())) + + def updateSelected(self, unselect = None, select = None): + if unselect is not None: + for index in unselect: + self.SetItemState(index, 0, wx.LIST_STATE_SELECTED) + if select is not None: + for index in select: + self.Select(index) + self.SetFocus() + + def getTorrentSelected(self, firstitemonly = False, reverse = False): + queue = self.utility.queue + + torrentselected = [] + for index in self.getSelected(firstitemonly, reverse): + ABCTorrentTemp = queue.getABCTorrent(index = index) + if ABCTorrentTemp is not None: + torrentselected.append(ABCTorrentTemp) + return torrentselected + + def OnItemSelected(self, event = None): + selected = self.getTorrentSelected() + if not selected: + return + + popupmenu = ABCMenu(self.utility, 'menu_listrightclick') + + # Popup the menu. If an item is selected then its handler + # will be called before PopupMenu returns. + if event is None: + # use the position of the first selected item (key event) + ABCTorrentTemp = selected[0] + position = self.GetItemPosition(ABCTorrentTemp.listindex) + else: + # use the cursor position (mouse event) + position = event.GetPosition() + + self.PopupMenu(popupmenu, position) + + def OnLeftDClick(self, event): + event.Skip() + try: + self.utility.actions[ACTION_DETAILS].action() + except: + print_exc() + + +############################################################## +# +# Class : ABCPanel +# +# Main ABC Panel class +# +############################################################## +class ABCPanel(wx.Panel): + def __init__(self, parent): + style = wx.CLIP_CHILDREN + wx.Panel.__init__(self, parent, -1, style = style) + + #Debug Output. 
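# ABCList above installs a FileDropTarget so that .torrent files dragged onto the
# list are handed to utility.queue.addtorrents.AddTorrentFromFile(). A minimal
# wxPython sketch of that wiring, decoupled from the Tribler utility/queue
# objects; the callback and window title are illustrative only.
import wx

class TorrentDropTarget(wx.FileDropTarget):
    def __init__(self, on_file):
        wx.FileDropTarget.__init__(self)
        self.on_file = on_file              # callable invoked once per dropped path

    def OnDropFiles(self, x, y, filenames):
        for name in filenames:
            self.on_file(name)
        return True                         # accept the drop

if __name__ == "__main__":
    app = wx.App(False)
    frame = wx.Frame(None, -1, "Drop a .torrent here")
    frame.SetDropTarget(TorrentDropTarget(lambda path: wx.LogMessage(path)))
    frame.Show()
    app.MainLoop()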
+ sys.stdout.write('Preparing GUI.\n'); + + self.utility = parent.utility + self.utility.window = self + self.queue = self.utility.queue + + # List of deleting torrents events that occur when the RateManager is active + # Such events are processed after the RateManager finishes + # postponedevents is a list of tupples : each tupple contains the method of ABCPanel to be called to + # deal with the event and the event. + self.postponedevents = [] + + #Manual Bittorrent Adding UI + ############################## + colSizer = wx.BoxSizer(wx.VERTICAL) + + self.list = ABCList(self) + self.utility.list = self.list + colSizer.Add(self.list, 1, wx.ALL|wx.EXPAND, 3) + + """ + # Add status bar + statbarbox = wx.BoxSizer(wx.HORIZONTAL) + self.sb_buttons = ABCStatusButtons(self,self.utility) + statbarbox.Add(self.sb_buttons, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 0) + self.abc_sb = ABCStatusBar(self,self.utility) + statbarbox.Add(self.abc_sb, 1, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 0) + colSizer.Add(statbarbox, 0, wx.ALL|wx.EXPAND, 0) + """ + + #colSizer.Add(self.contentPanel, 1, wx.ALL|wx.EXPAND, 3) + self.SetSizer(colSizer) + self.SetAutoLayout(True) + + self.list.SetFocus() + + + def getSelectedList(self, event = None): + return self.list + + ###################################### + # Update ABC on-the-fly + ###################################### + def updateColumns(self, force = False): + # Update display in column for inactive torrent + for ABCTorrentTemp in self.utility.torrents["all"]: + ABCTorrentTemp.updateColumns(force = force) + + +############################################################## +# +# Class : ABCTaskBarIcon +# +# Task Bar Icon +# +############################################################## +class ABCTaskBarIcon(wx.TaskBarIcon): + def __init__(self, parent): + wx.TaskBarIcon.__init__(self) + + self.utility = parent.utility + + self.TBMENU_RESTORE = wx.NewId() + + # setup a taskbar icon, and catch some events from it + self.Bind(wx.EVT_TASKBAR_LEFT_DCLICK, parent.onTaskBarActivate) + self.Bind(wx.EVT_MENU, parent.onTaskBarActivate, id = self.TBMENU_RESTORE) + + self.updateIcon(False) + + def updateIcon(self,iconifying = False): + remove = True + + mintray = self.utility.config.Read('mintray', "int") + if (mintray >= 2) or ((mintray >= 1) and iconifying): + remove = False + + if remove and self.IsIconInstalled(): + self.RemoveIcon() + elif not remove and not self.IsIconInstalled(): + self.SetIcon(self.utility.icon, product_name) + + def CreatePopupMenu(self): + menu = wx.Menu() + + self.utility.actions[ACTION_STOPALL].addToMenu(menu, bindto = self) + self.utility.actions[ACTION_UNSTOPALL].addToMenu(menu, bindto = self) + menu.AppendSeparator() + menu.Append(self.TBMENU_RESTORE, self.utility.lang.get('showabcwindow')) + self.utility.actions[ACTION_EXIT].addToMenu(menu, bindto = self) + return menu + + +############################################################## +# +# Class : ABColdFrame +# +# Main ABC Frame class that contains menu and menu bar management +# and contains ABCPanel +# +############################################################## +class ABCOldFrame(wx.Frame,FlaglessDelayedInvocation): + def __init__(self, ID, params, utility): + self.utility = utility + #self.utility.frame = self + + title = "Old Interface" + # Get window size and position from config file + size = (400,400) + style = wx.DEFAULT_FRAME_STYLE | wx.CLIP_CHILDREN + + wx.Frame.__init__(self, None, ID, title, size = size, style = style) + + FlaglessDelayedInvocation.__init__(self) + + self.GUIupdate = 
True + + self.window = ABCPanel(self) + self.Bind(wx.EVT_SET_FOCUS, self.onFocus) + self.Bind(wx.EVT_CLOSE, self.OnCloseWindow) + + self.tb = ABCToolBar(self) # new Tribler gui has no toolbar + self.SetToolBar(self.tb) + + + def onFocus(self, event = None): + if event is not None: + event.Skip() + self.window.getSelectedList(event).SetFocus() + + def OnCloseWindow(self, event = None): + self.Hide() + +# Custom class loaded by XRC +class ABCFrame(wx.Frame, DelayedInvocation): + def __init__(self, *args): + if len(args) == 0: + pre = wx.PreFrame() + # the Create step is done by XRC. + self.PostCreate(pre) + self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate) + else: + wx.Frame.__init__(self, args[0], args[1], args[2], args[3]) + self._PostInit() + + def OnCreate(self, event): + self.Unbind(wx.EVT_WINDOW_CREATE) + wx.CallAfter(self._PostInit) + event.Skip() + return True + + def _PostInit(self): + # Do all init here + self.guiUtility = GUIUtility.getInstance() + self.utility = self.guiUtility.utility + self.params = self.guiUtility.params + self.utility.frame = self + + title = self.utility.lang.get('title') + \ + " " + \ + self.utility.lang.get('version') + + # Get window size and position from config file + size, position = self.getWindowSettings() + style = wx.DEFAULT_FRAME_STYLE | wx.CLIP_CHILDREN + + self.SetSize(size) + self.SetPosition(position) + self.SetTitle(title) + tt = self.GetToolTip() + if tt is not None: + tt.SetTip('') + + #wx.Frame.__init__(self, None, ID, title, position, size, style = style) + + self.doneflag = Event() + DelayedInvocation.__init__(self) + + dragdroplist = FileDropTarget(self.utility) + self.SetDropTarget(dragdroplist) + + self.tbicon = None + + # Arno: see ABCPanel + #self.abc_sb = ABCStatusBar(self,self.utility) + #self.SetStatusBar(self.abc_sb) + + """ + # Add status bar + statbarbox = wx.BoxSizer(wx.HORIZONTAL) + self.sb_buttons = ABCStatusButtons(self,self.utility) + statbarbox.Add(self.sb_buttons, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 0) + self.abc_sb = ABCStatusBar(self,self.utility) + statbarbox.Add(self.abc_sb, 1, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 0) + #colSizer.Add(statbarbox, 0, wx.ALL|wx.EXPAND, 0) + self.SetStatusBar(statbarbox) + """ + + + try: + self.SetIcon(self.utility.icon) + except: + pass + + # Don't update GUI as often when iconized + self.GUIupdate = True + + # Start the scheduler before creating the ListCtrl + self.utility.queue = ABCScheduler(self.utility) + #self.window = ABCPanel(self) + #self.abc_sb = self.window.abc_sb + + + self.oldframe = ABCOldFrame(-1, self.params, self.utility) + self.oldframe.Refresh() + self.oldframe.Layout() + #self.oldframe.Show(True) + + self.window = self.GetChildren()[0] + self.window.utility = self.utility + + """ + self.list = ABCList(self.window) + self.list.Show(False) + self.utility.list = self.list + print self.window.GetName() + self.window.list = self.list + self.utility.window = self.window + """ + #self.window.sb_buttons = ABCStatusButtons(self,self.utility) + + self.utility.window.postponedevents = [] + + # Menu Options + ############################ + menuBar = ABCMenuBar(self) + if sys.platform == "darwin": + wx.App.SetMacExitMenuItemId(wx.ID_CLOSE) + self.SetMenuBar(menuBar) + + #self.tb = ABCToolBar(self) # new Tribler gui has no toolbar + #self.SetToolBar(self.tb) + + self.buddyFrame = None + self.fileFrame = None + self.buddyFrame_page = 0 + self.buddyFrame_size = (800, 500) + self.buddyFrame_pos = None + self.fileFrame_size = (800, 500) + self.fileFrame_pos = None + + # Menu Events + 
############################ + + self.Bind(wx.EVT_CLOSE, self.OnCloseWindow) +# self.Bind(wx.EVT_MENU, self.OnMenuExit, id = wx.ID_CLOSE) + + # leaving here for the time being: + # wxMSW apparently sends the event to the App object rather than + # the top-level Frame, but there seemed to be some possibility of + # change + self.Bind(wx.EVT_QUERY_END_SESSION, self.OnCloseWindow) + self.Bind(wx.EVT_END_SESSION, self.OnCloseWindow) + + try: + self.tbicon = ABCTaskBarIcon(self) + except: + pass + self.Bind(wx.EVT_ICONIZE, self.onIconify) + self.Bind(wx.EVT_SET_FOCUS, self.onFocus) + self.Bind(wx.EVT_SIZE, self.onSize) + self.Bind(wx.EVT_MAXIMIZE, self.onSize) + #self.Bind(wx.EVT_IDLE, self.onIdle) + + # Start up the controller + self.utility.controller = ABCLaunchMany(self.utility) + self.utility.controller.start() + + # Start up mainline DHT + # Arno: do this in a try block, as khashmir gives a very funky + # error when started from a .dmg (not from cmd line) on Mac. In particular + # it complains that it cannot find the 'hex' encoding method when + # hstr.encode('hex') is called, and hstr is a string?! + # + try: + rsconvert = RawServerConverter(self.utility.controller.get_rawserver()) + mainlineDHT.init('', self.utility.listen_port, self.utility.getConfigPath(),rawserver=rsconvert) + # Create torrent-liveliness checker based on DHT + c = mainlineDHTChecker.getInstance() + c.register(mainlineDHT.dht) + except: + print_exc() + + # Give GUI time to set up stuff + wx.Yield() + + #if server start with params run it + ##################################### + + if DEBUG: + print >>sys.stderr,"abc: wxFrame: params is",self.params + + if self.params[0] != "": + success, msg, ABCTorrentTemp = self.utility.queue.addtorrents.AddTorrentFromFile(self.params[0],caller=CALLER_ARGV) + + self.utility.queue.postInitTasks(self.params) + + if self.params[0] != "": + # Update torrent.list, but after having read the old list of torrents, otherwise we get interference + ABCTorrentTemp.torrentconfig.writeSrc(False) + self.utility.torrentconfig.Flush() + + self.videoFrame = None + feasible = return_feasible_playback_modes(self.utility.getPath()) + if PLAYBACKMODE_INTERNAL in feasible: + # This means vlc is available + from Tribler.Video.EmbeddedPlayer import VideoFrame + self.videoFrame = VideoFrame(self) + + #self.videores = xrc.XmlResource("Tribler/vwxGUI/MyPlayer.xrc") + #self.videoframe = self.videores.LoadFrame(None, "MyPlayer") + #self.videoframe.Show() + + videoplayer = VideoPlayer.getInstance() + videoplayer.set_parentwindow(self.videoFrame) + else: + videoplayer = VideoPlayer.getInstance() + videoplayer.set_parentwindow(self) + + sys.stdout.write('GUI Complete.\n') + + self.Show(True) + + + # Just for debugging: add test permids and display top 5 peers from which the most is downloaded in bartercastdb + bartercastdb = BarterCastDBHandler() + mypermid = bartercastdb.my_permid + + if DEBUG: + bartercastdb.incrementItem((mypermid, "testpermid_1"), 'uploaded', 1024) + bartercastdb.incrementItem((mypermid, "testpermid_1"), 'downloaded', 20000) + + bartercastdb.incrementItem((mypermid, "testpermid_2"), 'uploaded', 40000) + bartercastdb.incrementItem((mypermid, "testpermid_2"), 'downloaded', 60000) + + top = bartercastdb.getTopNPeers(5)['top'] + + print 'My Permid: ', permid_for_user(mypermid) + + print 'Top 5 BarterCast peers:' + print '=======================' + + i = 1 + for (permid, up, down) in top: + print '%2d: %15s - %10d up %10d down' % (i, bartercastdb.getName(permid), up, down) + i += 1 + + + # Check to 
see if ABC is associated with torrents + ####################################################### + if (sys.platform == 'win32'): + if self.utility.config.Read('associate', "boolean"): + if self.utility.regchecker and not self.utility.regchecker.testRegistry(): + dialog = RegCheckDialog(self) + dialog.ShowModal() + dialog.Destroy() + + self.checkVersion() + + + def checkVersion(self): + t = Timer(2.0, self._checkVersion) + t.start() + + def _checkVersion(self): + my_version = self.utility.getVersion() + try: + curr_status = urllib.urlopen('http://tribler.org/version').readlines() + line1 = curr_status[0] + if len(curr_status) > 1: + self.update_url = curr_status[1].strip() + else: + self.update_url = 'http://tribler.org' + _curr_status = line1.split() + self.curr_version = _curr_status[0] + if self.newversion(self.curr_version, my_version): + # Arno: we are a separate thread, delegate GUI updates to MainThread + self.upgradeCallback() + + # Also check new version of web2definitions for youtube etc. search + Web2Updater(self.utility).checkUpdate() + except Exception,e: + print >> sys.stderr, "Tribler: Version check failed", ctime(time()), str(e) + #print_exc() + + def newversion(self, curr_version, my_version): + curr = curr_version.split('.') + my = my_version.split('.') + if len(my) >= len(curr): + nversion = len(my) + else: + nversion = len(curr) + for i in range(nversion): + if i < len(my): + my_v = int(my[i]) + else: + my_v = 0 + if i < len(curr): + curr_v = int(curr[i]) + else: + curr_v = 0 + if curr_v > my_v: + return True + elif curr_v < my_v: + return False + return False + + def upgradeCallback(self): + self.invokeLater(self.OnUpgrade) + # TODO: warn multiple times? + + def OnUpgrade(self, event=None): + self.setActivity(ACT_NEW_VERSION) + + def onFocus(self, event = None): + if event is not None: + event.Skip() + #self.window.getSelectedList(event).SetFocus() + + def setGUIupdate(self, update): + oldval = self.GUIupdate + self.GUIupdate = update + + if self.GUIupdate and not oldval: + # Force an update of all torrents + for torrent in self.utility.torrents["all"]: + torrent.updateColumns() + torrent.updateColor() + + + def taskbarCallback(self): + self.invokeLater(self.onTaskBarActivate,[]) + + + ####################################### + # minimize to tray bar control + ####################################### + def onTaskBarActivate(self, event = None): + self.Iconize(False) + self.Show(True) + self.Raise() + + if self.tbicon is not None: + self.tbicon.updateIcon(False) + + #self.window.list.SetFocus() + + # Resume updating GUI + self.setGUIupdate(True) + + def onIconify(self, event = None): + # This event handler is called both when being minimalized + # and when being restored. + if DEBUG: + if event is not None: + print >> sys.stderr,"abc: onIconify(",event.Iconized() + else: + print >> sys.stderr,"abc: onIconify event None" + if event.Iconized(): + if (self.utility.config.Read('mintray', "int") > 0 + and self.tbicon is not None): + self.tbicon.updateIcon(True) + self.Show(False) + + # Don't update GUI while minimized + self.setGUIupdate(False) + else: + self.setGUIupdate(True) + if event is not None: + event.Skip() + + def onSize(self, event = None): + # Arno: On Windows when I enable the tray icon and then change + # virtual desktop (see MS DeskmanPowerToySetup.exe) + # I get a onIconify(event.Iconized()==True) event, but when + # I switch back, I don't get an event. As a result the GUIupdate + # remains turned off. 
The wxWidgets wiki on the TaskBarIcon suggests + # catching the onSize event. + + if DEBUG: + if event is not None: + print >> sys.stderr,"abc: onSize:",self.GetSize() + else: + print >> sys.stderr,"abc: onSize: None" + self.setGUIupdate(True) + if event is not None: + if event.GetEventType() == wx.EVT_MAXIMIZE: + self.window.SetClientSize(self.GetClientSize()) + event.Skip() + + + # Refresh subscreens + self.refreshNeeded = True + self.guiUtility.refreshOnResize() + + def onIdle(self, event = None): + """ + Only refresh screens (especially detailsPanel) when resizes are finished + This gives less flickering, but doesnt look pretty, so i commented it out + """ + if self.refreshNeeded: + self.guiUtility.refreshOnResize() + self.refreshNeeded = False + + def getWindowSettings(self): + width = self.utility.config.Read("window_width") + height = self.utility.config.Read("window_height") + try: + size = wx.Size(int(width), int(height)) + except: + size = wx.Size(710, 400) + + x = self.utility.config.Read("window_x") + y = self.utility.config.Read("window_y") + if (x == "" or y == ""): + position = wx.DefaultPosition + else: + position = wx.Point(int(x), int(y)) + + return size, position + + def saveWindowSettings(self): + width, height = self.GetSizeTuple() + x, y = self.GetPositionTuple() + self.utility.config.Write("window_width", width) + self.utility.config.Write("window_height", height) + self.utility.config.Write("window_x", x) + self.utility.config.Write("window_y", y) + + self.utility.config.Flush() + + ################################## + # Close Program + ################################## + + def OnCloseWindow(self, event = None): + if event != None: + nr = event.GetEventType() + lookup = { wx.EVT_CLOSE.evtType[0]: "EVT_CLOSE", wx.EVT_QUERY_END_SESSION.evtType[0]: "EVT_QUERY_END_SESSION", wx.EVT_END_SESSION.evtType[0]: "EVT_END_SESSION" } + if nr in lookup: nr = lookup[nr] + print "Closing due to event ",nr + print >>sys.stderr,"Closing due to event ",nr + else: + print "Closing untriggered by event" + + # Don't do anything if the event gets called twice for some reason + if self.utility.abcquitting: + return + + # Check to see if we can veto the shutdown + # (might not be able to in case of shutting down windows) + if event is not None: + try: + if event.CanVeto() and self.utility.config.Read('confirmonclose', "boolean") and not event.GetEventType() == wx.EVT_QUERY_END_SESSION.evtType[0]: + dialog = wx.MessageDialog(None, self.utility.lang.get('confirmmsg'), self.utility.lang.get('confirm'), wx.OK|wx.CANCEL) + result = dialog.ShowModal() + dialog.Destroy() + if result != wx.ID_OK: + event.Veto() + return + except: + data = StringIO() + print_exc(file = data) + sys.stderr.write(data.getvalue()) + pass + + self.utility.abcquitting = True + self.GUIupdate = False + + self.guiUtility.guiOpen.clear() + + # Close the Torrent Maker + self.utility.actions[ACTION_MAKETORRENT].closeWin() + + try: + self.utility.webserver.stop() + except: + data = StringIO() + print_exc(file = data) + sys.stderr.write(data.getvalue()) + pass + + try: + # tell scheduler to close all active thread + self.utility.queue.clearScheduler() + except: + data = StringIO() + print_exc(file = data) + sys.stderr.write(data.getvalue()) + pass + + try: + # Restore the window before saving size and position + # (Otherwise we'll get the size of the taskbar button and a negative position) + self.onTaskBarActivate() + self.saveWindowSettings() + except: + #print_exc(file=sys.stderr) + print_exc() + + try: + if self.buddyFrame is 
not None: + self.buddyFrame.Destroy() + if self.fileFrame is not None: + self.fileFrame.Destroy() + if self.videoFrame is not None: + self.videoFrame.Destroy() + except: + pass + + self.oldframe.Destroy() + + try: + if self.tbicon is not None: + self.tbicon.RemoveIcon() + self.tbicon.Destroy() + self.Destroy() + except: + data = StringIO() + print_exc(file = data) + sys.stderr.write(data.getvalue()) + pass + + # Arno: at the moment, Tribler gets a segmentation fault when the + # tray icon is always enabled. This SEGV occurs in the wx mainloop + # which is entered as soon as we leave this method. Hence I placed + # tribler_done() here, so the database are closed properly + # before the crash. + # + # Arno, 2007-02-28: Preferably this should be moved to the main + # run() method below, that waits a while to allow threads to finish. + # Ideally, the database should still be open while they finish up. + # Because of the crash problem with the icontray this is the safer + # place. + # + # Arno, 2007-08-10: When a torrentfile is passed on the command line, + # the client will crash just after this point due to unknown reasons + # (it even does it when we don't look at the cmd line args at all!) + # Hence, for safety, I close the DB here already. + #if sys.platform == 'linux2': + # + + #tribler_done(self.utility.getConfigPath()) + + if DEBUG: + print >>sys.stderr,"abc: OnCloseWindow END" + + if DEBUG: + ts = enumerate() + for t in ts: + print >>sys.stderr,"abc: Thread still running",t.getName(),"daemon",t.isDaemon() + + + + def onWarning(self,exc): + msg = self.utility.lang.get('tribler_startup_nonfatalerror') + msg += str(exc.__class__)+':'+str(exc) + dlg = wx.MessageDialog(None, msg, self.utility.lang.get('tribler_warning'), wx.OK|wx.ICON_WARNING) + result = dlg.ShowModal() + dlg.Destroy() + + def onUPnPError(self,upnp_type,listenport,error_type,exc=None,listenproto='TCP'): + + if error_type == 0: + errormsg = unicode(' UPnP mode '+str(upnp_type)+' ')+self.utility.lang.get('tribler_upnp_error1') + elif error_type == 1: + errormsg = unicode(' UPnP mode '+str(upnp_type)+' ')+self.utility.lang.get('tribler_upnp_error2')+unicode(str(exc))+self.utility.lang.get('tribler_upnp_error2_postfix') + elif error_type == 2: + errormsg = unicode(' UPnP mode '+str(upnp_type)+' ')+self.utility.lang.get('tribler_upnp_error3') + else: + errormsg = unicode(' UPnP mode '+str(upnp_type)+' Unknown error') + + msg = self.utility.lang.get('tribler_upnp_error_intro') + msg += listenproto+' ' + msg += str(listenport) + msg += self.utility.lang.get('tribler_upnp_error_intro_postfix') + msg += errormsg + msg += self.utility.lang.get('tribler_upnp_error_extro') + + dlg = wx.MessageDialog(None, msg, self.utility.lang.get('tribler_warning'), wx.OK|wx.ICON_WARNING) + result = dlg.ShowModal() + dlg.Destroy() + + def onReachable(self,event=None): + """ Called by GUI thread """ + if self.firewallStatus is not None: + self.firewallStatus.setToggled(True) + tt = self.firewallStatus.GetToolTip() + if tt is not None: + tt.SetTip(self.utility.lang.get('reachable_tooltip')) + + + def setActivity(self,type,msg=u''): + + if currentThread().getName() != "MainThread": + print >> sys.stderr,"abc: setActivity thread",currentThread().getName(),"is NOT MAIN THREAD" + print_stack() + + if type == ACT_NONE: + prefix = u'' + msg = u'' + elif type == ACT_UPNP: + prefix = self.utility.lang.get('act_upnp') + elif type == ACT_REACHABLE: + prefix = self.utility.lang.get('act_reachable') + elif type == ACT_GET_EXT_IP_FROM_PEERS: + prefix = 
self.utility.lang.get('act_get_ext_ip_from_peers') + elif type == ACT_MEET: + prefix = self.utility.lang.get('act_meet') + elif type == ACT_GOT_METADATA: + prefix = self.utility.lang.get('act_got_metadata') + elif type == ACT_RECOMMEND: + prefix = self.utility.lang.get('act_recommend') + elif type == ACT_DISK_FULL: + prefix = self.utility.lang.get('act_disk_full') + elif type == ACT_NEW_VERSION: + prefix = self.utility.lang.get('act_new_version') + if msg == u'': + text = prefix + else: + text = unicode( prefix+u' '+msg) + + if DEBUG: + print >> sys.stderr,"abc: Setting activity",`text`,"EOT" + self.messageField.SetLabel(text) + + +class TorThread(Thread): + + def __init__(self): + Thread.__init__(self) + self.setDaemon(True) + self.setName("TorThread"+self.getName()) + self.child_out = None + self.child_in = None + + def run(self): + try: + if DEBUG: + print >>sys.stderr,"TorThread starting",currentThread().getName() + if sys.platform == "win32": + # Not "Nul:" but "nul" is /dev/null on Win32 + cmd = 'tor.exe' + sink = 'nul' + elif sys.platform == "darwin": + cmd = 'tor.mac' + sink = '/dev/null' + else: + cmd = 'tor' + sink = '/dev/null' + + (self.child_out,self.child_in) = os.popen2( "%s --log err-err > %s 2>&1" % (cmd,sink), 'b' ) + while True: + if DEBUG: + print >>sys.stderr,"TorThread reading",currentThread().getName() + + msg = self.child_in.read() + if DEBUG: + print >>sys.stderr,"TorThread: tor said",msg + if len(msg) == 0: + break + sleep(1) + except: + print_exc() + + def shutdown(self): + if self.child_out is not None: + self.child_out.close() + if self.child_in is not None: + self.child_in.close() + + +############################################################## +# +# Class : ABCApp +# +# Main ABC application class that contains ABCFrame Object +# +############################################################## +class ABCApp(wx.App,FlaglessDelayedInvocation): + def __init__(self, x, params, single_instance_checker, abcpath): + global start_time, start_time2 + start_time2 = time() + #print "[StartUpDebug]----------- from ABCApp.__init__ ----------Tribler starts up at", ctime(start_time2), "after", start_time2 - start_time + self.params = params + self.single_instance_checker = single_instance_checker + self.abcpath = abcpath + self.error = None + self.torthread = None + wx.App.__init__(self, x) + + def OnInit(self): + try: + self.utility = Utility(self.abcpath) + # Set locale to determine localisation + locale.setlocale(locale.LC_ALL, '') + + sys.stdout.write('Client Starting Up.\n') + sys.stdout.write('Build: ' + self.utility.lang.get('build') + '\n') + + bm = wx.Bitmap(os.path.join(self.utility.getPath(),'icons','splash.jpg'),wx.BITMAP_TYPE_JPEG) + #s = wx.MINIMIZE_BOX | wx.MAXIMIZE_BOX | wx.RESIZE_BORDER | wx.SYSTEM_MENU | wx.CAPTION | wx.CLOSE_BOX | wx.CLIP_CHILDREN + #s = wx.SIMPLE_BORDER|wx.FRAME_NO_TASKBAR|wx.FRAME_FLOAT_ON_PARENT + self.splash = wx.SplashScreen(bm, wx.SPLASH_CENTRE_ON_SCREEN|wx.SPLASH_TIMEOUT, 1000, None, -1) + + wx.CallAfter(self.PostInit) + return True + + except Exception,e: + print_exc() + self.error = e + self.onError() + return False + + + def PostInit(self): + try: + tribler_init(self.utility.getConfigPath(),self.utility.getPath(),self.db_exception_handler) + self.utility.setTriblerVariables() + self.utility.postAppInit() + + # Singleton for executing tasks that are too long for GUI thread and + # network thread + self.guiserver = GUIServer.getInstance() + self.guiserver.register() + + # Singleton for management of user's mugshots (i.e. 
icons/display pictures) + self.mm = MugshotManager.getInstance() + self.mm.register(self.utility.getConfigPath(),self.utility.getPath()) + + # H4x0r a bit + set_tasteheart_bitmaps(self.utility.getPath()) + set_perfBar_bitmaps(self.utility.getPath()) + + # Put it here so an error is shown in the startup-error popup + self.serverlistener = ServerListener(self.utility) + + # Check webservice for autostart webservice + ####################################################### + WebListener(self.utility) + if self.utility.webconfig.Read("webautostart", "boolean"): + self.utility.webserver.start() + + # Start single instance server listenner + ############################################ + self.serverthread = Thread(target = self.serverlistener.start) + self.serverthread.setDaemon(True) + self.serverthread.setName("SingleInstanceServer"+self.serverthread.getName()) + self.serverthread.start() + + self.videoplayer = VideoPlayer.getInstance() + self.videoplayer.register(self.utility) + self.videoserver = VideoHTTPServer.getInstance() + self.videoserver.background_serve() + + notification_init( self.utility ) + + # Change config when experiment ends, before ABCLaunchMany is created + global EVIL + if EVIL and time() > 1190099288.0: + EVIL = False + end = self.utility.config.Read('lure_ended', "boolean") + if not end: + self.utility.config.Write('lure_ended', 1, "boolean") + self.utility.config.Write('tor_enabled', 0, "boolean") + self.utility.config.Write('ut_pex_max_addrs_from_peer', 16) + + msg = "The Tribler download accelerator using the TOR network has been turned off. For more information visit http://TV.seas.Harvard.edu/" + dlg = wx.MessageDialog(None, msg, "Tribler Warning", wx.OK|wx.ICON_INFORMATION) + result = dlg.ShowModal() + dlg.Destroy() + + enabled = self.utility.config.Read('tor_enabled', "boolean") + if enabled: + self.torthread = TorThread() + self.torthread.start() + + # + # Read and create GUI from .xrc files + # + #self.frame = ABCFrame(-1, self.params, self.utility) + self.guiUtility = GUIUtility.getInstance(self.utility, self.params) + updateXRC.main([os.path.join(self.utility.getPath(),'Tribler','vwxGUI')]) + self.res = xrc.XmlResource(os.path.join(self.utility.getPath(),'Tribler','vwxGUI','MyFrame.xrc')) + self.guiUtility.xrcResource = self.res + self.frame = self.res.LoadFrame(None, "MyFrame") + self.guiUtility.frame = self.frame + self.guiUtility.scrollWindow = xrc.XRCCTRL(self.frame, "level0") + self.guiUtility.mainSizer = self.guiUtility.scrollWindow.GetSizer() + self.frame.topBackgroundRight = xrc.XRCCTRL(self.frame, "topBG3") + self.guiUtility.scrollWindow.SetScrollbars(1,1,1024,768) + self.guiUtility.scrollWindow.SetScrollRate(15,15) + self.frame.mainButtonPersons = xrc.XRCCTRL(self.frame, "mainButtonPersons") + + + self.frame.numberPersons = xrc.XRCCTRL(self.frame, "numberPersons") + numperslabel = xrc.XRCCTRL(self.frame, "persons") + self.frame.numberFiles = xrc.XRCCTRL(self.frame, "numberFiles") + numfileslabel = xrc.XRCCTRL(self.frame, "files") + self.frame.messageField = xrc.XRCCTRL(self.frame, "messageField") + self.frame.firewallStatus = xrc.XRCCTRL(self.frame, "firewallStatus") + tt = self.frame.firewallStatus.GetToolTip() + if tt is not None: + tt.SetTip(self.utility.lang.get('unknownreac_tooltip')) + + if sys.platform == "linux2": + self.frame.numberPersons.SetFont(wx.Font(9,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + self.frame.numberFiles.SetFont(wx.Font(9,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + 
self.frame.messageField.SetFont(wx.Font(9,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + numperslabel.SetFont(wx.Font(9,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + numfileslabel.SetFont(wx.Font(9,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE)) + """ + searchfilebut = xrc.XRCCTRL(self.frame, "bt257cC") + searchfilebut.Bind(wx.EVT_LEFT_UP, self.guiUtility.buttonClicked) + searchpersbut = xrc.XRCCTRL(self.frame, "bt258cC") + searchpersbut.Bind(wx.EVT_LEFT_UP, self.guiUtility.buttonClicked) + + self.frame.searchtxtctrl = xrc.XRCCTRL(self.frame, "tx220cCCC") + """ + + #self.frame.Refresh() + #self.frame.Layout() + self.frame.Show(True) +#=============================================================================== +# global start_time2 +# current_time = time() +# print "\n\n[StartUpDebug]-----------------------------------------" +# print "[StartUpDebug]" +# print "[StartUpDebug]----------- from ABCApp.OnInit ----------Tribler frame is shown after", current_time-start_time2 +# print "[StartUpDebug]" +# print "[StartUpDebug]-----------------------------------------\n\n" +#=============================================================================== + + # GUI start + # - load myFrame + # - load standardGrid + # - gui utility > button mainButtonFiles = clicked + + + self.Bind(wx.EVT_QUERY_END_SESSION, self.frame.OnCloseWindow) + self.Bind(wx.EVT_END_SESSION, self.frame.OnCloseWindow) + + + #asked = self.utility.config.Read('askeduploadbw', 'boolean') + asked = True + if not asked: + dlg = BandwidthSelector(self.frame,self.utility) + result = dlg.ShowModal() + if result == wx.ID_OK: + ulbw = dlg.getUploadBandwidth() + self.utility.config.Write('maxuploadrate',ulbw) + self.utility.config.Write('maxseeduploadrate',ulbw) + self.utility.config.Write('askeduploadbw','1') + dlg.Destroy() + + # Arno, 2007-05-03: wxWidgets 2.8.3.0 and earlier have the MIME-type for .bmp + # files set to 'image/x-bmp' whereas 'image/bmp' is the official one. + try: + bmphand = None + hands = wx.Image.GetHandlers() + for hand in hands: + #print "Handler",hand.GetExtension(),hand.GetType(),hand.GetMimeType() + if hand.GetMimeType() == 'image/x-bmp': + bmphand = hand + break + #wx.Image.AddHandler() + if bmphand is not None: + bmphand.SetMimeType('image/bmp') + except: + # wx < 2.7 don't like wx.Image.GetHandlers() + print_exc() + + # Must be after ABCLaunchMany is created + self.torrentfeed = TorrentFeedThread.getInstance() + self.torrentfeed.register(self.utility) + self.torrentfeed.start() + + #print "DIM",wx.GetDisplaySize() + #print "MM",wx.GetDisplaySizeMM() + + wx.CallAfter(self.startWithRightView) + + except Exception,e: + print_exc() + self.error = e + self.onError() + return False + + return True + + def onError(self,source=None): + # Don't use language independence stuff, self.utility may not be + # valid. + msg = "Unfortunately, Tribler ran into an internal error:\n\n" + if source is not None: + msg += source + msg += str(self.error.__class__)+':'+str(self.error) + msg += '\n' + msg += 'Please see the FAQ on www.tribler.org on how to act.' 
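# The update check in ABCFrame above (_checkVersion() fetches
# http://tribler.org/version and hands the result to newversion()) boils down to a
# field-by-field comparison of dotted version strings, zero-padding the shorter
# one. A standalone sketch of that comparison, under an illustrative name, that
# can be exercised without the GUI:
def is_newer(remote, local):
    """True if the remote version string is strictly newer than the local one."""
    remote_parts = [int(part) for part in remote.split('.')]
    local_parts = [int(part) for part in local.split('.')]
    width = max(len(remote_parts), len(local_parts))
    remote_parts += [0] * (width - len(remote_parts))
    local_parts += [0] * (width - len(local_parts))
    return remote_parts > local_parts        # list comparison is field-by-field, left to right

assert is_newer("4.1.7", "4.1.2")
assert not is_newer("4.1", "4.1.0")          # equal after zero-padding
assert not is_newer("3.9.9", "4.0")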
+ dlg = wx.MessageDialog(None, msg, "Tribler Fatal Error", wx.OK|wx.ICON_ERROR) + result = dlg.ShowModal() + print_exc() + dlg.Destroy() + + def MacOpenFile(self,filename): + self.utility.queue.addtorrents.AddTorrentFromFile(filename) + + def OnExit(self): + + self.torrentfeed.shutdown() + if self.torthread is not None: + self.torthread.shutdown() + mainlineDHT.deinit() + + if not ALLOW_MULTIPLE: + del self.single_instance_checker + ClientPassParam("Close Connection") + return 0 + + def db_exception_handler(self,e): + if DEBUG: + print >> sys.stderr,"abc: Database Exception handler called",e,"value",e.args,"#" + try: + if e.args[1] == "DB object has been closed": + return # We caused this non-fatal error, don't show. + if self.error is not None and self.error.args[1] == e.args[1]: + return # don't repeat same error + except: + print >> sys.stderr, "abc: db_exception_handler error", e, type(e) + print_exc() + #print_stack() + self.error = e + self.invokeLater(self.onError,[],{'source':"The database layer reported: "}) + + def getConfigPath(self): + return self.utility.getConfigPath() + + def startWithRightView(self): + if self.params[0] != "": + self.guiUtility.standardLibraryOverview() + + +class DummySingleInstanceChecker: + + def __init__(self,basename): + pass + + def IsAnotherRunning(self): + "Uses pgrep to find other tribler.py processes" + # If no pgrep available, it will always start tribler + progressInfo = commands.getoutput('pgrep -fl tribler.py | grep -v pgrep') + numProcesses = len(progressInfo.split('\n')) + if DEBUG: + print 'ProgressInfo: %s, num: %d' % (progressInfo, numProcesses) + return numProcesses > 1 + + +############################################################## +# +# Main Program Start Here +# +############################################################## +def run(params = None): + global start_time + start_time = time() + if params is None: + params = [""] + + if len(sys.argv) > 1: + params = sys.argv[1:] + + # Create single instance semaphore + # Arno: On Linux and wxPython-2.8.1.1 the SingleInstanceChecker appears + # to mess up stderr, i.e., I get IOErrors when writing to it via print_exc() + # + # TEMPORARILY DISABLED on Linux + if sys.platform != 'linux2': + single_instance_checker = wx.SingleInstanceChecker("tribler-" + wx.GetUserId()) + else: + single_instance_checker = DummySingleInstanceChecker("tribler-") + + #print "[StartUpDebug]---------------- 1", time()-start_time + if not ALLOW_MULTIPLE and single_instance_checker.IsAnotherRunning(): + #Send torrent info to abc single instance + ClientPassParam(params[0]) + #print "[StartUpDebug]---------------- 2", time()-start_time + else: + abcpath = os.path.abspath(os.path.dirname(sys.argv[0])) + # Arno: don't chdir to allow testing as other user from other dir. + #os.chdir(abcpath) + + # Launch first abc single instance + app = ABCApp(0, params, single_instance_checker, abcpath) + configpath = app.getConfigPath() +# print "[StartUpDebug]---------------- 3", time()-start_time + app.MainLoop() + + print "Client shutting down. 
Sleeping for a few seconds to allow other threads to finish" + sleep(4) + + # This is the right place to close the database, unfortunately Linux has + # a problem, see ABCFrame.OnCloseWindow + # + #if sys.platform != 'linux2': + # tribler_done(configpath) + os._exit(0) + +if __name__ == '__main__': + run() + diff --git a/tribler-mod/Tribler/Test/API/contentdir/file.txt b/tribler-mod/Tribler/Test/API/contentdir/file.txt new file mode 100644 index 0000000..646d5c3 --- /dev/null +++ b/tribler-mod/Tribler/Test/API/contentdir/file.txt @@ -0,0 +1,5 @@ + + +Slightly Bigger +Test file + diff --git a/tribler-mod/Tribler/Test/API/ec.pem b/tribler-mod/Tribler/Test/API/ec.pem new file mode 100644 index 0000000..08094ad --- /dev/null +++ b/tribler-mod/Tribler/Test/API/ec.pem @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MG0CAQEEHVrPzIQwJStZ6MU/RO6dqen9HQo1IekEfp7YGGtdoAcGBSuBBAAaoUAD +PgAEACFQOtu9k8A6l5+jHWpeao9AOc/mGxUXOMom4yoHANFgI1vQoKwUZCdLBo24 +QpToY7CS3EblruEri5gk +-----END EC PRIVATE KEY----- diff --git a/tribler-mod/Tribler/Test/API/ecpub.pem b/tribler-mod/Tribler/Test/API/ecpub.pem new file mode 100644 index 0000000..4a0d05f --- /dev/null +++ b/tribler-mod/Tribler/Test/API/ecpub.pem @@ -0,0 +1,4 @@ +-----BEGIN PUBLIC KEY----- +MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACFQOtu9k8A6l5+jHWpeao9AOc/mGxUX +OMom4yoHANFgI1vQoKwUZCdLBo24QpToY7CS3EblruEri5gk +-----END PUBLIC KEY----- diff --git a/tribler-mod/Tribler/Test/API/file.wmv b/tribler-mod/Tribler/Test/API/file.wmv new file mode 100644 index 0000000000000000000000000000000000000000..ff5f772b6aaec2fdd4986a21d7034a6bd5b9bf0e GIT binary patch literal 82948 zcmV(rK<>YWob!ii0ADf8B4cujiS|Vbjrd%{i*JtI&j+IC2%k+_sllr@XxZ9#MG@-t zwGH)2u8$1q^-z;bUYvTF>Pz2{6opQp8!IwBJ8tD*#k7tiDNLqU5SCdC#{?PfluCyS zg!2$MIc~Rt6PjonduKH=E`c-gIuJq{S(mt7Wx49?xO}UlnN4y-i3-0^!%mIbeL@ss zXUDN-uD{#hxJ>Rs>>tx7U-^0|*9p=X5IXsPKcs!uWJ?K%-TbvJc1fi1F`1f~uS;{d zJcYj32ktP}uw|hNb-5rQAPwxbWzy6{vBgLx57+b*ZV|}nuWVHD!oF~d_&a`MOFBPM z*>|tLnK*zx5ppta(N6`C7FDE1+-IJ)J9fCAAhqsoc+Pg?JgcW~C`WT-b1c`u&&o%) zXkV;~-aRn12YfdECe&5*MWQ1mJY$w?*K5$sBHP)COoM-s?%n^lk3^>EVRNW#+Qcne2zW`t5`{WL<#{<#$E= zDKS*7yk1o|M{?HO+SY(E7Pi? 
z)jp&r@e$D?h~s_QaOh!ag~QIzBTf0;v+F{fZCTqBpU;h*tJLNm`A&_CmYY>#?lA`Krne$tT>!ZDUw%&S8E(_swO7%o?@Nd>VIT1Dfb#$>osAN z74E7JV&F*BdOg`7a6LdwfFEya2jx6ScH1Na9BG@ym1kfj&^2bdCJ&tv2+JU+1tYI~ zPlQm#EwpR|BY)uZRiMRzFrulN{yty=3-Br&^I&XCHz9pT(SF?jWYfOIda#b6Ov56V zsz{6HALh9TvThQohaxmD!^=Il-Oj3m;pX@d+MIq1CBJ`K#yxm68=HT;)q}uH0-o`W z6Wt1KTwj8*Xhd+|%Pfm;ryOqDI(OzgJZFFJN zG)H~HZC23c9=W+RGOp~+PH*)cpaB#xjnv#|PP1%Ve1|z4*SadU6)-l^8JE@_n<5Tv zjtId*S#`zd;fcn&>;J>3-hv`3*pT49d8QH6kI@&2opg2*$uA!Wv{`|jG!UWRvj1q* zAKHgd8&YKyrXiN0cn)Gr7m>Dzd0!KjlcNphG+WI(*OEur6LzBdZ->Ij4480#Yd)7q z!=}l+G@ zDY5#uu0)=OkWLdCJ?=Sl5m8{HxT~DdEh_3q54u4$WToR^zd4G*s<%`! z^L(gPB*ea?gWU{Sd-+5p6CR0+YGRSU>^r>h(tCz7)t~949O4ht2)T$ec<^c!WH%YA zH^$55=;vxmjM{`Uv=ZwZWrgyxITD5_y5lMlOL!6FHt->frd+7|eoe$03reeED_)q{ zGjiAd=o5Rt?ijvI+$Fwp<|KsfT^o_w(jau-MM;4vvzMvLV}SBGw`TaJt|&OY<@F#G zeIn_zdFXX`bGB5$JSjxsv#t@Ae3L=xcYfN>>Wde>)6 zJE9@KvQ{1%RKEeM&^|W*`30mfOY<$5d&OZeg~XgkN65t1>mKEtafXHguW`}V-hW!J zX3lDl)+a5~nQG8|uuaHTZ|Fx^o5-b*p@O43eLv$dLaBixtd?ZkmV*gzF&~xl=i4J+ z?O2xlfs&R80E2+8V;ayDPb@W$O@Rh~KVyFa0Z6V!#sb$`D@FgJd=NuE+*OYdAtaRG zZC--iD1iyV=d+ckQgbsZ)r1^K?M*ju^B zGYwv=ziI(9jcn;%O2rx2iA*mnj4EOdjPQIw^qg)g0&A_rJM%AJmyq#%+A9`bVWAb( zI<#5RdvyXWmcpZKMXAR`mbZ30=rt5aXX& zEppDESf)UFBv=jf>8e;YZEhRr+FTM}I*J(8yW$A5w6Xt-7J13}nJPS$YC!#7F099C zSIMvK!2SZY?Q%`yQ7Oq^la~4kTh!Z^22s~$us^2m^c#>;xmNP|hkl$o`^at9HvpLkA-AfDFVm#VyO!<4{ht~I=E_ow~bsf(SpS-i>IJ1WBjLlB!L z{e|06yvL*(xqC*M2}>_aQN>RkRUzM3x4X2_jz!EC7{%ZYdk-LD{ZjoI7d4F@Da#uP z>&>E}3fL&Qp&sPO?kj3$&eIqQhK@wbBQQ+=R+XbXF%P=TL-J{A>r&|q`w5h$x;wQFz(&2wmJ$?pr1 zfENR3%O>*LOJm;}rEF;v5azgm0p|1h@n3umdE*X!&#dIqb)(sqP#6g8F%qRfkG!!FbMw7%qx;E_t zqVl$8Z{&GuDAXO7!|TZ>f%obha}l{3=a6I3VmIF(_CDyu{}0QWBTnD8guB5kh~f57 z9tioP=3IKC^DI?q6R*b~3uN$Hc<2e5cYYx{q#~O!%2VFQJ`s>wbY6O| zW-t-h?95*}d^4kN4sT8dQa-F74g})Jpw|01 zxE!%%G5ge`ZE1>o{|oO^clop>eQ$qd?Yf@14^ZajLO_j9cTt>tn(+&J_@8(a=++;N|>mim$ior zHeH4mh=>k9VR0L9i3T{>_H|P1twX3xHM0nm2G&Lj6C$v^Y>Z-PHa;tEVyAPHlr=9f z)svth8u!kl7^x1ppU6YNz2dY6M#aMuaju+&8~3-I*7OC-M!PQP)Z)%pvqr^?)rmyD zQg1AAXAaAF6}ATVg-!_H1+Y~Z5_x#zwzb=dOg6C6yMSyTI51=BYz?5=$g^CK4MkfY zZA-puk`O+zilrHU#2Ep#e8$*r(GTmk1J4Cj`$#T=lZ6oP4i~PoF)RE98#0>uEf&1dV$v=@PCbzM%{0IReqA~b)6;J!XW=1wNN3KZ_W!eP}W z)YH4Z5cDp)_xt1!MkuxuJAB;x7=9o{M$KmcL)%RsA^ zS|{{Ly&v)T6~S^(;!)b>5y^gH1Se(478y%B3$dT_kW4-s(4pq6ErgbVh1cx$MKN2d zs=QY@Pa>q%9^C*+TLhby82N)ePJ;77X7?L}cNSwN zuhPMVD-G-Y%B$XjWET#p#R9O?P~h0uLt4x-spcdePF-7jk0p3C1yI1^Qy`ELrSUJZ z8=3t92c9Zs-yIdlFnyMWL&(f|UIte8x;jRSQTudPJ*VlW)7X9YdQSQr?%xNcmMhr+ zJ3bBdMR?D5ah(@hqBRafFRU2ixz(T0MLNLP|KI2<4AwFDa=wsXtK5YVQb>Q4Xtw#h zjHs?cBZO3&KHEqDU$qG{ku(itIP7;Q3B%qZO^3`t?U{U%fHi@`lNpybjl-JE37lvV zKBm174mzx_;iTO@SiH}atlg1fFTfhIc6OY}@W?U1OY?7X`>Ho<%bqqBB|D{yEcFZF z*7hUXKb2Zh+3y*3AOw-G7}=GhFM)$*RLL4J;n>sZM0^+pG$kEL4KM;F z9|XlGw^4DWIX46JIWk1u3Q|k=D|@hH2^BRC>j;q?B#Ez$t)M#p`XGa8j_`Qtbhx#a zlt54M6qCyVoBe4#P<@X+dTE)mQ=6&5#lE7V_yuEM!d`7OFG-frgm%SuwFRhUyLd-? zYPTT;?D0LMt8*ElaaDO(fGzE5WF|cmj`4k#tZm0aYM>;r9_QJ-ynwg;*G{I97Q7m{ zK-&MC($7pn+ME`8tr?a>6tXCX31&6myHDo(tSW7b&S_9zT2;WpduCZ`2Gdolc+>6? 
zwh;{0JLg<*^E^vfW8l~bn|1dgUYl$p1t#XoSe4z^xhBOejfPF zuD=6_Fkk!-kln2hr>v7ZAe5};`CiC9iu5tEd(W99Y_#~Q3G5hr4-c}4g=fv_r?#ReHrd8==f^4lwwA& z{xhnC6W9r0`>s^s_*)AOsZgRyb~Ke8i~bNi3IvTy67`>#0YHZjrK@L)3W zpfNqoSu;)a{DUYr$pFNoJYxetcV2@4AkiVrOxGn=?j5R-z+@8k2NU3S1Pa4q{{|Y4 zvJGanE;qzL9+9dU5~mb&pbISqvjSBBL6~ZSYQ#@^!<^KE;39Z9k8-wTpTDj091R*<*$OA?+VXmDjVq9<_;-t|s_WQf?Mh?7u0at`j#(V2 zx!*H4cl7A@zG!b}?nq}0O;lDLA5S=U`J$0ua*a|%9Y5Dd&8xyqPjf^bbBEW}z5fGl zUuBZ!Y0aF7P)GYc)O-GPAQpf^yp$_oTZ+Ys$s`J4fRAl&nAIn`wOuUlCtAODlpy`m|rP;^OT@ zc#hYgr8XX2tDW$`Q=RY`T2SvPtoYKH04qCsu7jDGDhda}DlzIay?F5V>#yErZ*y0L znA=ZzJKe#*aO8&InB!+vrLq=u)OvqiV>M)fYq}*J`Iqru=fW5#HTB%`g3y<dxHhtv*09D;WM}FqxDJ*ZI0)68Wr)Wma{evpn7Z zsSVS~6dcBF3o7Cag7e=`U#2zk?#nLTpdFv5qmlhJz799-E!7tL%B@MvA4R{Y)_JzZ zV>8ag9`+UKwNo+mvWm(F;USV5-X@T6wv-3;j5Rya3#nr ze*i}CdXe;6^cV-lQ!M0}EE!khrpz$noVEK^e$ZLuOMOIlp<_ux?JK6ZvE?`*GYht~ zIP=fAhS0hkosi*~vDEJY+sya}&%M(JsSN~h@5o{+R2zgO_i+u)>0umnQX=|-QAyx@a5hs+QjG4PZ zi>S~;`6oB;80WB~NZ~0Jx+R%h=kCZQElVr1Tf3oB-+4=-qtVvVS;;}+;}3}}fH-6} z+9pc3eyEM=b+phlQpsmma96rmaZXRaYrL_heuw7?@8&Er3? zq(YUv+EFH{w5@$is>|=-IUm{?>1@8f(=QNkNX`IDl8YVHMyy~KDHa?#YRnF!W!Sq! zKrz%IL`mTT?Y1MXXnQb|33WVwr#!KlPimt==&BjFrEF4|*f3VBF>h^`mE`(qQo7Ss zmsin?pl6f0+K-pDX{eN*CRFYprpc^^Qoavtp+-NLM@)BRy*F`5Z5x_#(f1;N-XtT&`v){|ydS~wpXVH>uoooF(%MqpJ}60}=D zihlpYo%o#Lulml#O`~*Naf@*Jjw+j8ngi>F=e#s$d<;@IJ0s-K8OX_qO-bO)+kx~r z8&{zDH@#W5h|0?su{()u(lM_iP)jj}@PCVN`+6-X9gn_ZM}*q6&HM7${>*OP({!9Zq$IoiS3nPUUm-qv$>ElX93-;gh| zTfU~-Kwi%%LKe(6jUQvH-XW0FE2@qdd?CbOPcm zw(bI_hgK8cnvt|s!zkUQK<7P+hz5K|zyVx2-qFP1XYT)a#41}{{5_c>`|YyUT;